├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .style.yapf ├── LICENSE ├── Makefile ├── README.md ├── config.yml.example ├── lxdrunner ├── __init__.py ├── __main__.py ├── appconf.py ├── applog.py ├── dtypes.py ├── lxd.py ├── mngr.py ├── scripts │ ├── __init__.py │ └── setuprunner.sh ├── tls.py ├── util.py └── web.py ├── requirements.dev.in ├── requirements.dev.txt ├── requirements.in ├── requirements.txt ├── scripts ├── build-alpine-image.sh ├── build-debian-image.sh └── setup-lxd.sh ├── service ├── lxdrunner.openrc └── lxdrunner.service ├── setup.cfg ├── setup.py └── tests ├── __init__.py ├── config.yml ├── conftest.py ├── data.py ├── gvars.py ├── test_appconf.py ├── test_lxd.py ├── test_mngr.py ├── test_tls.py ├── test_util.py ├── test_web.py └── wf_job.json /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | name: CI 4 | 5 | # Controls when the action will run. 6 | on: 7 | # Triggers the workflow on push or pull request events but only for the main branch 8 | push: 9 | pull_request: 10 | branches: [master] 11 | 12 | # Allows you to run this workflow manually from the Actions tab 13 | workflow_dispatch: 14 | inputs: 15 | remote_debug: 16 | required: false 17 | default: false 18 | 19 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 20 | jobs: 21 | # This workflow contains a single job called "build" 22 | build: 23 | runs-on: 24 | - self-hosted 25 | - vm 26 | 27 | steps: 28 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 29 | - uses: actions/checkout@v2 30 | with: 31 | fetch-depth: 0 32 | 33 | - name: Setup base environment 34 | run: | 35 | sudo apt-get update -y 36 | sudo apt-get install -y python3-pip python3-venv python3-wheel 37 | 38 | - name: Setup virtualenv 39 | run: python3 -m venv venv 40 | 41 | - name: Development Install 42 | run: | 43 | . ./venv/bin/activate 44 | make upgrade-pip install-dev 45 | 46 | - name: Run pytest 47 | run: | 48 | . ./venv/bin/activate 49 | make tests 50 | 51 | - name: Build Packages 52 | run: | 53 | . ./venv/bin/activate 54 | make packages 55 | WHEEL=$(basename dist/*.whl) 56 | PKGVER=$(python3 ./setup.py --version) 57 | echo "WHEEL=$WHEEL" >> $GITHUB_ENV 58 | echo "PKGVER=$PKGVER" >> $GITHUB_ENV 59 | 60 | - name: Setup LXD 61 | run: | 62 | sudo ./scripts/setup-lxd.sh 63 | # Runner user can't acquire lxd group without exiting. 
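          # Group-own the LXD socket so the runner user can reach LXD in this same session.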
64 | sudo chown root.runner /var/snap/lxd/common/lxd/unix.socket 65 | 66 | - name: Build LXD Alpine image 67 | run: ./scripts/build-alpine-image.sh 68 | 69 | - name: Export LXD Image 70 | run: | 71 | lxc stop lxdrunner-build 72 | lxc publish lxdrunner-build --alias lxdrunner 73 | lxc image export lxdrunner dist/lxdrunner-alpine.img 74 | 75 | - uses: "marvinpinto/action-automatic-releases@latest" 76 | #if: ${{ github.event.release.tag_name }} 77 | if: startsWith(github.ref, 'refs/tags/') 78 | with: 79 | repo_token: "${{ secrets.GITHUB_TOKEN }}" 80 | prerelease: false 81 | files: | 82 | dist/lxdrunner-alpine.img.tar.gz 83 | dist/${{ env.WHEEL }} 84 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /config.yml 2 | /attic 3 | /build 4 | /dist 5 | /pkgcache 6 | __pycache__ 7 | *.egg-info 8 | /.eggs 9 | /venv 10 | 11 | -------------------------------------------------------------------------------- /.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | dedent_closing_brackets=True 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 
42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 
102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. 
However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 
214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | 294 | Copyright (C) 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | , 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. 
If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | 3 | .PHONY: install-piptools install-deps install-dev update-deps tests install-user-unit setup-install pip-install 4 | 5 | upgrade-pip: 6 | python3 -m pip install --upgrade pip wheel setuptools 7 | 8 | install-piptools: 9 | pip3 install pip-tools toml 10 | 11 | install-reqs: 12 | pip3 install -r requirements.txt 13 | 14 | install-reqs-dev: install-piptools 15 | pip3 install -r requirements.dev.txt 16 | 17 | update-requirements: 18 | pip-compile 19 | pip-compile requirements.in requirements.dev.in --output-file requirements.dev.txt 20 | 21 | install: 22 | pip3 install ./ 23 | 24 | install-dev: 25 | pip3 install -e ".[dev]" 26 | 27 | lint: 28 | flake8 29 | 30 | tests: 31 | pytest -vs --disable-warnings tests 32 | 33 | format: 34 | yapf -ir ./ 35 | 36 | install-user-unit: 37 | mkdir -p ~/.config/systemd/user/ 38 | cp service/lxdrunner.service ~/.config/systemd/user/ 39 | 40 | setup-install: 41 | python3 setup.py install 42 | 43 | packages: 44 | python3 setup.py sdist bdist_wheel 45 | 46 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LXDRunner 2 | 3 | Experimental daemon using [LXD](https://linuxcontainers.org/lxd/introduction/#LXD) to run ephemeral GitHub Actions [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners). 4 | 5 | Why use LXD and self-hosted runners ? 6 | 7 | - Trivial to switch between containers and VMs.. 8 | - High density and fast startup when running containers. 9 | - Create your own OS images and get a pristine environment every time. 10 | - Automatic download and provisioning of the latest GHA runner client. 11 | - Access custom hardware: Serial, USB, and PCI attached devices such as phones, GPUs, etc. 12 | - Access sensitive resources that must be handled locally. 13 | 14 | ## How it works 15 | 16 | LXDRunner runs an API endpoint waiting on webhook events from GitHub. No LXD instances are running until needed so resource usage is minimal. Every time Actions runs a workflow an event is sent for each job. LXDRunner reacts to each [workflow_job](https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job) event in queued status by: 17 | 18 | - Mapping workflow labels to a specific LXD config ( image, profile, container type, etc ) 19 | - Launching a pristine LXD instance based on matching config. 20 | - Provisioning instance with the latest GHA runner client to complete the job. 21 | - GHA runner automatically shuts down and deregisters when job is complete. 22 | - Destroying the LXD instance, just like GitHub hosted runners. 23 | 24 | ### Periodic Events 25 | 26 | - Every 24 hours: Checks for new version of actions runner 27 | - Every 12 hours: Cleanup any offline runner registrations 28 | 29 | ### Scaling 30 | KISS, based only on incoming webhooks from GitHub. For each event 1 runner is launched on the fly. 31 | 32 | Instances are ephemeral, automatically deregistered and shutdown on completion. 
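
The webhook endpoint that drives this lives in `lxdrunner/web.py` (its source is not included in this excerpt). As a rough sketch of the per-event flow — verify the shared webhook secret, then hand only `workflow_job` events in `queued` status to the dispatcher — a handler might look like the following; this is illustrative code, not the project's actual implementation:

```
import hashlib
import hmac
import json

def handle_webhook(raw_body: bytes, signature: str, secret: str, queue_evt):
    # GitHub signs the raw payload with HMAC-SHA256 using the shared webhook
    # secret and sends "sha256=<hexdigest>" in the X-Hub-Signature-256 header.
    expected = "sha256=" + hmac.new(secret.encode(), raw_body, hashlib.sha256).hexdigest()
    if not hmac.compare_digest(expected, signature or ""):
        return 403  # reject payloads that fail signature verification

    event = json.loads(raw_body)
    job = event.get("workflow_job")
    if job and event.get("action") == "queued":
        # One queued job -> one ephemeral runner; the dispatcher matches
        # job["labels"] against the runnermap and launches an LXD instance.
        queue_evt(
            dict(
                wf_job_id=job["id"],
                owner=event["repository"]["owner"]["login"],
                repo=event["repository"]["name"],
                org=event.get("organization", {}).get("login", ""),
                labels=job["labels"],
            )
        )
    return 200
```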
33 |
34 | More complex scaling could be achieved using the GitHub API, at the expense of job latency and higher API and resource usage.
35 |
36 | Since the release of the ephemeral runner feature, this is the recommended scaling strategy: https://docs.github.com/en/actions/hosting-your-own-runners/autoscaling-with-self-hosted-runners
37 |
38 | ### Limitations:
39 |
40 | - Workflow runs fail immediately if no runners with matching labels are registered. Remedy this by manually registering a runner with matching labels that is permanently left in the offline state. In this case, runs will be queued.
41 | - Runner provisioning is based on a bash script. It probably doesn't work on anything other than Ubuntu/Debian-based distros without modification.
42 |
43 |
44 | ## Setup:
45 |
46 | LXDRunner works with both repo and organization runners. The service must be accessible over the internet in order to receive webhooks from GitHub. The API endpoint is protected by a secret and TLS. If you want to restrict access by IP, you can retrieve a list of GitHub IPs using the [Meta API](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-githubs-ip-addresses).
47 |
48 |
49 | ### GitHub Setup
50 |
51 | - Set up a PAT with access to the repos and orgs you want serviced. Copy this down; you will need it.
52 |
53 | https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token
54 |
55 | Enable the following scopes:
56 | - repo
57 | - workflow
58 | - admin:org
59 | - admin:org_hook
60 | - admin:repo_hook
61 |
62 | Repeat these steps once for each organization and/or user repo you want serviced.
63 |
64 | - Set up a webhook pointing to the LXDRunner host. If servicing multiple orgs or user repos, make sure they all use the same webhook secret.
65 |
66 | https://docs.github.com/en/developers/webhooks-and-events/creating-webhooks#setting-up-a-webhook
67 |
68 | - **Payload URL**: https://your-hostname:your-port/hooks/lxdrunner
69 | - **Content type**: application/json
70 | - **Secret**: Create a strong secret and copy it down.
71 | - Select **Let me select individual events** and choose:
72 |     - Workflow runs
73 |     - Workflow jobs
74 | - **Active**: Ensure it's checked.
75 |
76 | - Manually register one GHA runner for each set of labels you will use and keep this runner offline. This prevents workflow runs from failing immediately when no matching runners are registered. This placeholder runner should NOT have the prefix "lxdrunner".
77 |
78 | https://docs.github.com/en/actions/hosting-your-own-runners/adding-self-hosted-runners
79 |
80 | ### LXDRunner Configuration
81 |
82 | - Copy config.yml.example to config.yml
83 | - Edit config.yml:
84 |     - Set your GitHub PAT
85 |     - Set your webhook secret
86 |     - Set up remotes
87 |     - Set up the runnermap. This section maps a set of Actions workflow labels to specific LXD settings
88 |       such as name, image source, profiles, container type, etc.
89 | - LXDRunner will search for config.yml in the current directory or $HOME/.config/lxdrunner/config.yml. Specify an
90 |   explicit location with ```lxdrunner -c path/to/config.yml```
91 | - Run some GitHub Actions workflows to test.
92 |
93 | ### LXDRunner Installation
94 |
95 | Requirements: Python 3.8 with pip
96 |
97 | The simplest methods are installing the whl package with pip or downloading an LXD image. Download the [latest release](https://github.com/selfbuilt/lxdr/releases/tag/latest).
98 |
99 | - Install with pip:
100 |
101 | ```pip install lxdrunner-0.5.0-py3-none-any.whl```
102 |
103 | Create config.yml. Start with ```lxdrunner -c config.yml```
104 |
105 | - Install the Alpine-based LXD image:
106 |
107 | ```
108 | # Import image into LXD and start container
109 | lxc image import --alias lxdrunner lxdrunner-alpine.img.tar.gz
110 | lxc launch lxdrunner lxdrunner
111 | ```
112 | Create the configuration file at /home/app/.config/lxdrunner/config.yml
113 |
114 |
115 | # Development
116 |
117 | ## LXDRunner Development Install
118 |
119 | Requirements: Python 3.8 with pip
120 |
121 | - Clone this repo. Set up a Python virtual env if needed.
122 | - Install with:
123 | ```pip install -e ".[dev]"```
124 | - Create the configuration as detailed above.
125 | - Run LXDRunner from the source directory: `python -m lxdrunner`
126 | - You can install `lxdrunner` to default locations with: `pip install ./`
127 |
128 | ## TODO:
129 |
130 | - Investigate race condition between cloud-init and setup script adduser
131 | - Fix TLS verification
132 | - Dedup queue
133 | - Not sure pyLXD is thread-safe; investigate.
134 | - Explore alt provisioning methods (prebaked images, disk mounts, etc.)
135 | - Auto-configuration of webhooks through the API
136 | - Auto-registration of offline placeholder runners
137 | - More logging
138 | - More tests
139 |
140 | ## DONE:
141 | - Limit workers per label-set
142 | - Remote LXD server and image support
143 | - Add support for multiple label maps
144 | - Make changes for the ephemeral fix (actions/runner issue 510)
145 | - During startup and periodically:
146 |     - Query GH for queued runs that might have been missed or lost.
147 |     - Clean up offline runner registrations and expired LXD workers
148 |
149 |
--------------------------------------------------------------------------------
/config.yml.example:
--------------------------------------------------------------------------------
1 | # GitHub Configuration
2 |
3 | pat: "fake_github_pat"
4 | hooksecret: "fake_webhook_secret"
5 |
6 | # LXD Runner Configuration
7 |
8 | # Prefix used for naming resources (GH runner names, LXD instances, etc.)
9 | prefix: lxdrunner
10 | # Maximum number of concurrent workers
11 | max_workers: 10
12 |
13 | web_host: 0.0.0.0
14 | web_port: 5000
15 | web_tls: True
16 |
17 | # Remotes for LXD servers.
18 | #
19 | # addr: should be https://<host>:<port> or a unix socket path.
20 | # main should be the primary LXD server to connect with. Default: local unix socket.
21 |
22 | remotes:
23 |   main:
24 |     addr:
25 |     protocol: lxd
26 |   images:
27 |     addr: https://images.linuxcontainers.org
28 |     protocol: simplestreams
29 |   ubuntu:
30 |     addr: https://cloud-images.ubuntu.com/releases
31 |     protocol: simplestreams
32 |
33 | # The config below maps a set of runner labels to specific LXD
34 | # settings (image, profile, and type).
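# Note: an incoming job is dispatched only when its requested labels match one
# entry's label set exactly (order does not matter). For example, a workflow job
# declaring:
#   runs-on: [ self-hosted, vm ]
# is served by the "Ubuntu - Virtual Machine Runner" entry below.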
35 | 36 | runnermap: 37 | 38 | # name: required 39 | # labels: required 40 | # image: required 41 | # type: required ( container, virtual-machine ) 42 | # runner_os: required ( linux, win, osx ) 43 | # runner_arch: required ( x64, arm, arm64 ) 44 | # profiles: default = default 45 | # setup_script: default = internal script in lxdrunner/scripts/setuprunner.sh 46 | 47 | - name: Ubuntu - Container Runner 48 | labels: [ self-hosted ] 49 | image: ubuntu:focal 50 | type: container 51 | runner_arch: x64 52 | runner_os: linux 53 | 54 | - name: Ubuntu - Virtual Machine Runner 55 | labels: [ self-hosted, vm ] 56 | image: ubuntu:focal 57 | type: virtual-machine 58 | runner_arch: x64 59 | runner_os: linux 60 | 61 | - name: Debian 11 - Container Runner 62 | labels: [ self-hosted, debian ] 63 | image: images:debian/11/cloud 64 | profiles: [ default ] 65 | type: container 66 | runner_arch: x64 67 | runner_os: linux 68 | 69 | -------------------------------------------------------------------------------- /lxdrunner/__init__.py: -------------------------------------------------------------------------------- 1 | from importlib.metadata import version 2 | 3 | try: 4 | __version__ = version('lxdrunner') 5 | except: 6 | __version__ = "0.0.0" 7 | -------------------------------------------------------------------------------- /lxdrunner/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import os 5 | import pathlib 6 | import sys 7 | import logging 8 | 9 | from lxdrunner import appconf 10 | 11 | from .appconf import config as cfg 12 | from .applog import log 13 | from .mngr import RunManager 14 | from . import __version__ 15 | 16 | 17 | def conf_list(): 18 | files = [str(fname) for fname in appconf.def_configs] 19 | return f"[ {' | '.join(files) } ]" 20 | 21 | 22 | def cliparse(): 23 | parser = argparse.ArgumentParser( 24 | description=f'LXDRunner version {__version__}' 25 | ) 26 | parser.prog = vars(sys.modules[__name__])['__package__'] 27 | helptext = f"Configuration file. 
Default: { conf_list() }" 28 | parser.add_argument( 29 | '-c', dest='cfgfile', type=pathlib.Path, help=helptext, default=None 30 | ) 31 | parser.add_argument( 32 | '-l', 33 | dest='loglevel', 34 | help='Log Level: %(default)s', 35 | choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'], 36 | default=logging.getLevelName(log.level) 37 | ) 38 | 39 | parser.add_argument( 40 | '-v', 41 | action='version', 42 | version='%(prog)s {version}'.format(version=__version__) 43 | ) 44 | return parser 45 | 46 | 47 | def main(): 48 | 49 | parser = cliparse() 50 | args = parser.parse_args() 51 | 52 | if args.loglevel: 53 | log.setLevel(args.loglevel) 54 | 55 | if args.cfgfile: 56 | appconf.def_configs.clear() 57 | appconf.def_configs.append(args.cfgfile) 58 | 59 | if not cfg.config_exists(): 60 | print(f"\nConfig file does not exist: { conf_list() }\n") 61 | parser.print_help() 62 | sys.exit(1) 63 | 64 | cfg.load() 65 | 66 | RunManager.configure() 67 | 68 | lxr = RunManager() 69 | lxr.startup_init() 70 | 71 | lxr.start_web_task() 72 | lxr.start_queue_task() 73 | 74 | if sys.flags.interactive: 75 | print("Backgrounding scheduler, going interactive.") 76 | lxr.start_schedule_task() 77 | else: 78 | lxr.run_scheduler() 79 | 80 | return lxr 81 | 82 | 83 | if __name__ == "__main__": 84 | lxr = main() 85 | -------------------------------------------------------------------------------- /lxdrunner/appconf.py: -------------------------------------------------------------------------------- 1 | import importlib.resources 2 | import ipaddress 3 | import os 4 | import pathlib 5 | import threading 6 | import typing 7 | import urllib.parse 8 | 9 | import xdg 10 | from goodconf import GoodConf 11 | from pydantic import ( 12 | BaseModel, IPvAnyAddress, constr, root_validator, validator 13 | ) 14 | 15 | with importlib.resources.path('lxdrunner.scripts', 'setuprunner.sh') as path: 16 | def_script = path 17 | 18 | appname = "lxdrunner" 19 | 20 | def_configs = [ 21 | pathlib.Path("config.yml"), 22 | xdg.xdg_config_home() / f"{appname}/config.yml" 23 | ] 24 | 25 | 26 | class RunnerConf(BaseModel): 27 | name: str 28 | labels: frozenset 29 | image: str 30 | profiles: typing.List[str] = ['default'] 31 | runner_os: typing.Literal['linux', 'win', 'osx'] 32 | runner_arch: typing.Literal['x64', 'arm', 'arm64'] 33 | type: typing.Literal['container', 'virtual-machine'] 34 | setup_script: pathlib.Path = def_script 35 | max_workers: int = 10 36 | worksem: threading.BoundedSemaphore = None 37 | 38 | @validator('worksem', always=True) 39 | def set_worksemaphore(cls, v, *, values): 40 | out = threading.BoundedSemaphore(values['max_workers']) 41 | return out 42 | 43 | class Config: 44 | extra = 'allow' 45 | arbitrary_types_allowed = True 46 | 47 | 48 | class Remote(BaseModel): 49 | protocol: typing.Literal['simplestreams', 'lxd'] 50 | # Not yet available in stable pydantic 51 | # addr: stricturl(allowed_schemes=['http','https','unix'], host_required=False) 52 | addr: typing.Union[None, constr(regex=r"^((https|http)://|/)?")] 53 | 54 | # Workaround pyLXD <= v2.3.0 . 
File paths not recognized as http+unix:// 55 | @validator('addr') 56 | def fix_unix_addr(cls, v): 57 | if v and v.startswith("/"): 58 | return "http+unix://{}".format(urllib.parse.quote(v), safe="") 59 | return v 60 | 61 | 62 | def makepaths(confdir, cachedir): 63 | class DirPaths(BaseModel): 64 | pkgdir: pathlib.Path = cachedir / "pkgcache" 65 | servcerts: pathlib.Path = confdir / "servercerts" 66 | 67 | return DirPaths() 68 | 69 | 70 | class AppConfig(GoodConf): 71 | "Configuration for My App" 72 | # GitHub 73 | pat: str 74 | hooksecret: str 75 | 76 | # Runner 77 | config_home: pathlib.Path = xdg.xdg_config_home() / appname 78 | cache_home: pathlib.Path = xdg.xdg_cache_home() / appname 79 | dirs: typing.Any = None 80 | 81 | @validator('dirs', pre=True, always=True) 82 | def default_dirs(cls, v, *, values, **kwargs): 83 | return makepaths(values['config_home'], values['cache_home']) 84 | 85 | prefix: str 86 | remotes: typing.Dict[str, Remote] 87 | runnermap: typing.List[RunnerConf] 88 | 89 | web_host: IPvAnyAddress = ipaddress.IPv4Address('0.0.0.0') 90 | web_port: int = 5000 91 | web_tls: bool = True 92 | 93 | cleanup: bool = True 94 | 95 | # For testing 96 | activecfg: typing.FrozenSet[str] = frozenset() 97 | max_workers: int 98 | def_repo_args: dict = {} 99 | def_org_args: dict = {} 100 | 101 | class Config: 102 | default_files = def_configs 103 | file_env_var = "LXDRCFG" 104 | 105 | @root_validator 106 | def check_image_sources(cls, values): 107 | error = "" 108 | for rc in values.get('runnermap'): 109 | if ":" in rc.image: 110 | rem = rc.image.split(":")[0] 111 | if rem not in values.get("remotes"): 112 | error += f"Remote '{rem}' is undefined\n" 113 | if error: 114 | raise ValueError(error) 115 | return values 116 | 117 | def key_pair_paths(self): 118 | return ( 119 | self.config_home / "client.crt", self.config_home / "client.key" 120 | ) 121 | 122 | def app_paths(self): 123 | return [self.config_home, self.cache_home 124 | ] + list(self.dirs.dict().values()) 125 | 126 | def config_exists(self): 127 | return [cfgfile for cfgfile in def_configs if cfgfile.exists()] 128 | 129 | 130 | config = AppConfig(load=False) 131 | -------------------------------------------------------------------------------- /lxdrunner/applog.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | logging.basicConfig() 4 | log = logging.getLogger("LXDrun") 5 | log.propagate = False 6 | log.setLevel("WARNING") 7 | 8 | logfmt = logging.Formatter('%(threadName)s: %(message)s') 9 | handler = logging.StreamHandler() 10 | handler.setFormatter(logfmt) 11 | 12 | log.addHandler(handler) 13 | -------------------------------------------------------------------------------- /lxdrunner/dtypes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import typing 4 | 5 | from pydantic import BaseModel, Field, validator 6 | 7 | from . 
import util 8 | from .appconf import RunnerConf 9 | 10 | 11 | def maketarget(ghargs): 12 | "Return target scope for GitHub depending if request is org or repo" 13 | return ghargs.get("org") or "/".join( 14 | (ghargs.get("owner"), ghargs.get("repo")) 15 | ) 16 | 17 | 18 | class RunnerPackage(BaseModel): 19 | os: str 20 | architecture: str 21 | download_url: str 22 | filename: str 23 | linkname: str 24 | 25 | 26 | class RunnerEvent(BaseModel): 27 | owner: str 28 | repo: str 29 | org: str 30 | target: str = '' 31 | target_url: str = 'https://github.com/' 32 | rc: RunnerConf 33 | pkg: typing.Any 34 | token: str = "" 35 | wf_job_id: str = "" 36 | instname: str = Field(default_factory=util.make_name) 37 | 38 | @validator('target', always=True) 39 | def compute_target(cls, v, values, field, **kwargs): 40 | return maketarget(values) 41 | 42 | @validator('target_url', always=True) 43 | def compute_target_url(v, values): 44 | return v + values['target'] 45 | -------------------------------------------------------------------------------- /lxdrunner/lxd.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import json 4 | import logging 5 | import os 6 | import os.path 7 | import pathlib 8 | import threading 9 | import time 10 | from collections import deque 11 | from concurrent.futures import ThreadPoolExecutor 12 | 13 | import pylxd 14 | import pylxd.exceptions 15 | import urllib3 16 | 17 | from . import util 18 | from .appconf import config as cfg 19 | from .applog import log 20 | 21 | urllib3.disable_warnings() 22 | 23 | 24 | def get_client(rname="main", verify=False): 25 | cert = None 26 | remote = cfg.remotes.get(rname) 27 | if remote.addr and remote.addr.startswith("https://"): 28 | cert = cfg.key_pair_paths() 29 | 30 | log.info(f"Connecting to LXD: {remote.addr or 'local unix-socket'}") 31 | return pylxd.Client(endpoint=remote.addr, cert=cert, verify=verify) 32 | 33 | 34 | class LXDRunner: 35 | def __init__(self, connect=True): 36 | self.client = None 37 | if connect: 38 | self.connect() 39 | 40 | self.workers = dict() 41 | self.pool = ThreadPoolExecutor(cfg.max_workers) 42 | 43 | def connect(self): 44 | self.client = get_client("main") 45 | 46 | def pushfile(self, src, instance, dst, **exargs): 47 | " Push file into instance " 48 | log.info("Pushing: %s to %s", src, dst) 49 | with open(src, "rb") as fp: 50 | instance.files.put(dst, fp.read(), **exargs) 51 | 52 | def get_workers(self): 53 | " Return LXD instances that are workers " 54 | return [ 55 | wrkr for wrkr in self.client.instances.all() 56 | if util.has_prefix(wrkr.name) 57 | ] 58 | 59 | def worker_count(self): 60 | return len(self.workers) 61 | 62 | def status(self): 63 | return f"{ len(self.workers) } workers" 64 | 65 | def script_env(self, evt, instname: str): 66 | "Setup environment variables for runner script" 67 | 68 | return dict( 69 | GHA_TOKEN=evt.token, 70 | GHA_URL=evt.target_url, 71 | GHA_NAME=instname, 72 | GHA_EXTRA_LABELS=",".join(evt.rc.labels), 73 | ) 74 | 75 | def launch_instance(self, inst_name: str, rc): 76 | " Launch container/vm instance with given name and config " 77 | 78 | instcfg = dict( 79 | name=inst_name, 80 | ephemeral=True, 81 | profiles=rc.profiles, 82 | source=util.image_to_source(rc.image), 83 | type=rc.type 84 | ) 85 | log.warning("Launching instance %s", inst_name) 86 | inst = self.client.instances.create(instcfg, wait=True) 87 | inst.start(wait=True) 88 | return inst 89 | 90 | def start_gha_runner(self, inst, evt): 91 | 92 | pkg = 
evt.pkg 93 | 94 | installdir = pathlib.Path("/opt/runner") 95 | pkg_src = os.path.join(str(cfg.dirs.pkgdir), pkg.linkname) 96 | pkg_dst = os.path.join(installdir, "actions-runner.tgz") 97 | script_dst = installdir.joinpath(evt.rc.setup_script.name) 98 | vars_dst = installdir.joinpath("setupvars.conf") 99 | # Setup env vars for script 100 | environment = self.script_env(evt, inst.name) 101 | 102 | # VMs are slow to become available. Retry until 103 | # agent responds. 104 | for num in range(15): 105 | try: 106 | inst.files.mk_dir(installdir, mode="0755") 107 | log.info("Make dir: %s", installdir) 108 | break 109 | except Exception: 110 | time.sleep(5) 111 | 112 | if num >= 14: 113 | log.warning("Runner start timeout, destroying %s", inst.name) 114 | inst.stop(force=True) 115 | return 116 | 117 | # Push runner setup script to instance 118 | self.pushfile(evt.rc.setup_script, inst, script_dst, mode="0755") 119 | inst.files.put(vars_dst, util.env_str(environment), mode="0755") 120 | self.pushfile(pkg_src, inst, pkg_dst, mode="0755") 121 | 122 | # Execute runner setup script 123 | log.info(f"Executing: {script_dst}") 124 | (exitcode, stdout, 125 | stderr) = inst.execute([str(script_dst)], environment=environment) 126 | if exitcode or log.level <= logging.DEBUG: 127 | log.error("===STDOUT====\n%s", stdout) 128 | log.error("===STDERR====\n%s", stderr) 129 | if exitcode: 130 | raise Exception(f"Provisioner exit code: {exitcode}") 131 | 132 | log.info("Provision sucesssful") 133 | 134 | def verify_launch(self, evt): 135 | errs = [] 136 | try: 137 | if ":" not in evt.rc.image: 138 | self.client.images.get_by_alias(evt.rc.image) 139 | except pylxd.exceptions.NotFound: 140 | errs.append(f"image does not exist: {evt.rc.image}") 141 | for prof in evt.rc.profiles: 142 | if not self.client.profiles.exists(prof): 143 | errs.append(f"profile does not exist: {prof}") 144 | if not evt.rc.setup_script.exists(): 145 | errs.append(f"script does not exist: {evt.rc.setup_script}") 146 | if errs: 147 | for err in errs: 148 | log.error("Error: %s", err) 149 | return False 150 | return True 151 | 152 | def _cleanup_instance(self, inst_name): 153 | if not cfg.cleanup: 154 | log.error("Runner start failed, CLEANUP DISABLED") 155 | return False 156 | log.error("Runner start failed, destroying %s", inst_name) 157 | 158 | try: 159 | inst = self.client.instances.get(inst_name) 160 | inst.stop() 161 | inst.delete() 162 | except pylxd.exceptions.LXDAPIException: 163 | return False 164 | return True 165 | 166 | def _launch(self, evt): 167 | " Launch GHA Runner, main method " 168 | 169 | if "ThreadPool" in threading.current_thread().name: 170 | threading.current_thread().setName(evt.instname) 171 | 172 | self.workers[evt.instname] = evt 173 | if not self.verify_launch(evt): 174 | return False 175 | # Any error here needs instance cleanup 176 | noerror = True 177 | try: 178 | inst = self.launch_instance(evt.instname, evt.rc) 179 | self.start_gha_runner(inst, evt) 180 | except Exception as exc: 181 | log.exception(exc) 182 | self._cleanup_instance(evt.instname) 183 | noerror = False 184 | 185 | return noerror 186 | 187 | def launch(self, evt, wait=False): 188 | " Launch GHA Runner " 189 | 190 | def handle_done(futr): 191 | err = futr.exception() 192 | if not err: 193 | return 194 | raise err 195 | 196 | if not wait: 197 | self.pool.submit(self._launch, evt).add_done_callback(handle_done) 198 | else: 199 | self._launch(evt) 200 | 201 | def start_tasks(self): 202 | util.threadit(self.watch_lxd_events, name='LXD-Events') 203 | 204 
| def watch_lxd_events(self): 205 | 206 | client = get_client('main') 207 | 208 | ## Workaround for bug in pylxd 2.3.0 : WSS not using certs 209 | ssl_options = {} 210 | if client.api.scheme == 'https': 211 | ssl_options.update( 212 | { 213 | "certfile": client.cert[0], 214 | "keyfile": client.cert[1] 215 | } 216 | ) 217 | 218 | class FixedWSClient(pylxd.client._WebsocketClient): 219 | def __init__(self, *args, **kwargs): 220 | super().__init__(*args, **kwargs, ssl_options=ssl_options) 221 | 222 | ## END workaround 223 | 224 | def process_message(message): 225 | if message.is_text: 226 | message = json.loads(message.data) 227 | if message["metadata"]["action"] == "instance-deleted": 228 | instname = message["metadata"]["source"].split("/")[-1] 229 | if not util.has_prefix(instname): 230 | return 231 | try: 232 | job = self.workers.pop(instname, None) 233 | log.info(f"Removing {instname} {self.status()}") 234 | if job: 235 | job.rc.worksem.release() 236 | except ValueError: 237 | log.error("Semaphore release fail", instname) 238 | pass 239 | 240 | evfilter = set([pylxd.EventType.Lifecycle]) 241 | ws_client = client.events( 242 | event_types=evfilter, websocket_client=FixedWSClient 243 | ) 244 | ws_client.received_message = process_message 245 | ws_client.connect() 246 | ws_client.run() 247 | -------------------------------------------------------------------------------- /lxdrunner/mngr.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import datetime 4 | import os 5 | import os.path 6 | import queue 7 | import tempfile 8 | import time 9 | import urllib.request 10 | 11 | import fastcore.net 12 | import schedule 13 | from ghapi.all import GhApi 14 | 15 | from . import dtypes, lxd, tls, util, web 16 | from .appconf import config as cfg 17 | from .applog import log 18 | 19 | 20 | class RunManager: 21 | " LXDRunner Management Class " 22 | 23 | def __init__(self): 24 | self.ghapi = GhApi(token=cfg.pat) 25 | self.lxd = lxd.LXDRunner(connect=False) 26 | self.runnermap = {item.labels: item for item in cfg.runnermap} 27 | self.queues = {item.labels: queue.Queue() for item in cfg.runnermap} 28 | # For testing 29 | # self.activecfg = cfg.activecfg 30 | 31 | # Cache for various github resources 32 | self.reg_tokens = {} 33 | self.orgs = [] 34 | self.repos = [] 35 | self.pkgs = [] 36 | 37 | @staticmethod 38 | def configure(): 39 | for path in cfg.app_paths(): 40 | path.mkdir(exist_ok=True, parents=True) 41 | tls.gen_key_pair() 42 | tls.confirm_certs() 43 | 44 | def get_repos(self): 45 | " Get and cache Github repos for user " 46 | self.repos = self.ghapi.repos.list_for_authenticated_user() 47 | return self.repos 48 | 49 | def get_orgs(self): 50 | " Get and cache Github orgs for user " 51 | self.orgs = self.ghapi.orgs.list_for_authenticated_user() 52 | return self.orgs 53 | 54 | def get_runners(self, ghargs): 55 | " Get registered runners for org or repo " 56 | 57 | if ghargs.get("org"): 58 | apifunc = self.ghapi.actions.list_self_hosted_runners_for_org 59 | else: 60 | apifunc = self.ghapi.actions.list_self_hosted_runners_for_repo 61 | 62 | return apifunc(**ghargs) 63 | 64 | def get_reg_token(self, ghargs): 65 | " Request or return registration token from cache " 66 | 67 | tkey = ghargs.get('target') 68 | reg_token = self.reg_tokens.get(tkey) 69 | 70 | def valid_mins(reg_token): 71 | td = ( 72 | datetime.datetime.fromisoformat(reg_token.expires_at) - 73 | datetime.datetime.now().astimezone() 74 | ) 75 | return td.total_seconds() / 60 
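        # Reuse the cached registration token unless it has less than 30 minutes
        # of validity left; otherwise request a fresh one from the GitHub API.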
76 | 77 | if not reg_token or valid_mins(reg_token) <= 30: 78 | if ghargs.get("org"): 79 | log.info("Getting GHA token org: %s", tkey) 80 | apifunc = self.ghapi.actions.create_registration_token_for_org 81 | else: 82 | log.info("Getting GHA token repo: %s", tkey) 83 | apifunc = self.ghapi.actions.create_registration_token_for_repo 84 | 85 | self.reg_tokens[tkey] = reg_token = apifunc(**ghargs) 86 | 87 | return reg_token 88 | 89 | def get_queued_runs_for_repo(self, owner, repo, **kwargs): 90 | """ Get queued workflow runs for given repo 91 | Return list(workflow_runs) with .jobs set to corresponding 92 | jobs 93 | """ 94 | # 95 | # actions.list_workflow_runs_for_repo( 96 | # owner, repo, actor, branch, event, status, per_page, page) 97 | # 98 | target = dict(owner=owner, repo=repo, status='queued') 99 | 100 | wf_runs = self.ghapi.actions.list_workflow_runs_for_repo( 101 | **target 102 | ).workflow_runs 103 | 104 | for run in wf_runs: 105 | run.jobs = self.ghapi.actions.list_jobs_for_workflow_run( 106 | **target, run_id=run.id 107 | ).jobs 108 | return wf_runs 109 | 110 | def get_queued_runs(self): 111 | " Get queued workflow jobs from all repos " 112 | self.get_repos() 113 | 114 | runs = [] 115 | for repo in self.repos: 116 | runs += self.get_queued_runs_for_repo(repo.owner.login, repo.name) 117 | return runs 118 | 119 | def submit_pending_runs(self): 120 | events = [] 121 | wfruns = self.get_queued_runs() 122 | for wfrun in wfruns: 123 | for job in wfrun.jobs: 124 | org = '' 125 | if wfrun.repository.owner.type == 'Organization': 126 | org = wfrun.repository.owner.login 127 | events.append( 128 | dict( 129 | wf_job_id=job.id, 130 | repo=wfrun.repository.name, 131 | owner=wfrun.repository.owner.login, 132 | org=org, 133 | labels=job.labels 134 | ) 135 | ) 136 | log.warning("Submitted %s pending run events", len(events)) 137 | for evt in events: 138 | self.queue_evt(evt) 139 | 140 | def get_runner_pkg(self, rc): 141 | " Get runner package for given runner config " 142 | 143 | return next( 144 | filter( 145 | lambda pkg: (pkg.os, pkg.architecture) == 146 | (rc.runner_os, rc.runner_arch), self.pkgs 147 | ) 148 | ) 149 | 150 | def get_packages(self): 151 | " Get list of runner packages from actions/runner releases" 152 | 153 | def asset2pkg(asset): 154 | " Convert release assets to obj like list_runner_applications " 155 | parts = asset.name.split('-') 156 | os, arch = parts[2:4] 157 | linkname = "-".join(parts[:4] + ["latest"]) 158 | return dtypes.RunnerPackage( 159 | filename=asset.name, 160 | linkname=linkname, 161 | os=os, 162 | architecture=arch, 163 | download_url=asset.browser_download_url 164 | ) 165 | 166 | rels = self.ghapi.repos.list_releases('actions', 'runner') 167 | rels = rels.filter(lambda rel: not rel.prerelease) 168 | self.pkgs = list(map(asset2pkg, rels[0].assets)) 169 | return self.pkgs 170 | 171 | def update_pkg_cache(self): 172 | " Update runner package cache to latest version " 173 | 174 | log.info("Updating runner package cache") 175 | 176 | self.get_packages() 177 | 178 | if not cfg.dirs.pkgdir.exists(): 179 | cfg.dirs.pkgdir.mkdir(exist_ok=True, parents=True) 180 | 181 | pkgfiles = set() 182 | 183 | for pkg in self.pkgs: 184 | pkgfiles.update((pkg.filename, pkg.linkname)) 185 | filepath = os.path.join(cfg.dirs.pkgdir, pkg.filename) 186 | linkpath = os.path.join(cfg.dirs.pkgdir, pkg.linkname) 187 | 188 | if not os.path.exists(filepath): 189 | log.info("Downloading: " + pkg.filename) 190 | urllib.request.urlretrieve(pkg.download_url, filepath) 191 | 192 | # Create a 
symlink to runner package making it available 193 | # under a persistent name. In the event pkg is updated 194 | # during launch event. 195 | 196 | # Create temp name for symlink 197 | temp_linkpath = tempfile.mktemp(dir=cfg.dirs.pkgdir) 198 | # Symlink to pkg.filename 199 | os.symlink(pkg.filename, temp_linkpath) 200 | # Atomic replace existing symlink 201 | os.replace(temp_linkpath, linkpath) 202 | 203 | dirfiles = set(os.listdir(cfg.dirs.pkgdir)) 204 | delfiles = dirfiles - pkgfiles 205 | 206 | for fname in delfiles: 207 | log.info(f"Deleting : {fname}") 208 | os.unlink(os.path.join(cfg.dirs.pkgdir, fname)) 209 | 210 | def cleanup_runners(self, ghargs): 211 | " Delete offline runners for given org or repo " 212 | 213 | try: 214 | runners = self.get_runners(ghargs) 215 | except fastcore.net.ExceptionsHTTP[403]: 216 | log.warning(f"Get Runners: Access Denied { ghargs }") 217 | return 218 | 219 | runners = [ 220 | run for run in runners.runners 221 | if run.status == "offline" and util.has_prefix(run.name) 222 | ] 223 | 224 | for run in runners: 225 | ghargs['runner_id'] = run.id 226 | if ghargs.get("org"): 227 | log.info( 228 | "Remove offline runner {org} {runner_id}".format(**ghargs) 229 | ) 230 | self.ghapi.actions.delete_self_hosted_runner_from_org(**ghargs) 231 | else: 232 | log.info( 233 | "Remove offline runner {owner}/{repo} {runner_id}".format( 234 | **ghargs 235 | ) 236 | ) 237 | self.ghapi.actions.delete_self_hosted_runner_from_repo( 238 | **ghargs 239 | ) 240 | 241 | def cleanup(self): 242 | " Run Github cleanup tasks " 243 | 244 | self.get_orgs() 245 | self.get_repos() 246 | 247 | # Only get User scope repos 248 | repos = [repo for repo in self.repos if repo.owner.type == "User"] 249 | 250 | args = [] 251 | 252 | for org in self.orgs: 253 | args.append(dict(org=org.login)) 254 | 255 | for repo in repos: 256 | args.append(dict(owner=repo.owner.login, repo=repo.name)) 257 | 258 | for arg in args: 259 | self.cleanup_runners(arg) 260 | 261 | def queue_evt(self, evt): 262 | " Queue GH webhook event " 263 | labels = frozenset(evt['labels']) 264 | 265 | if not labels in self.runnermap: 266 | log.warn(f"No matching config for labels {labels}") 267 | return 268 | evt['rc'] = self.runnermap.get(labels) 269 | 270 | evt = dtypes.RunnerEvent(**evt) 271 | self.queues[labels].put((time.time(), evt)) 272 | log.info( 273 | f"Queueing: job run id={evt.wf_job_id} {evt.owner}/{evt.repo}" 274 | ) 275 | 276 | def process_evt(self, evt: dtypes.RunnerEvent): 277 | " Process RunnerEvent" 278 | log.info( 279 | f"Processing: check_run id={evt.wf_job_id} {evt.owner}/{evt.repo}" 280 | ) 281 | 282 | evt.token = self.get_reg_token(evt.dict()).token 283 | evt.pkg = self.get_runner_pkg(evt.rc) 284 | 285 | self.lxd.launch(evt) 286 | 287 | def runqueue(self): 288 | " Process event queue " 289 | while True: 290 | for labels, evtq in self.queues.items(): 291 | while not evtq.empty( 292 | ) and self.runnermap[labels].worksem.acquire(): 293 | print(f"Processing Queue {labels}") 294 | try: 295 | (ts, evt) = evtq.get() 296 | self.process_evt(evt) 297 | except queue.Empty as e: 298 | break 299 | except Exception as e: 300 | log.error("Error processing queue") 301 | log.exception(e) 302 | time.sleep(1) 303 | 304 | def start_queue_task(self): 305 | " Start queue runner task " 306 | self.queuetask = util.threadit(self.runqueue, name="Queue") 307 | 308 | def start_web_task(self): 309 | " Start webhooks task " 310 | self.webtask = util.threadit( 311 | web.startserver, args=(self.queue_evt, ), name="Web" 312 | ) 313 | 314 
| def start_schedule_task(self): 315 | " Start scheduler task " 316 | self.schedtask = util.threadit(self.run_scheduler, name="Scheduler") 317 | 318 | def run_scheduler(self): 319 | " Run job scheduler " 320 | while True: 321 | schedule.run_pending() 322 | time.sleep(1) 323 | 324 | def startup_init(self): 325 | " Startup initialization " 326 | 327 | self.lxd.connect() 328 | self.lxd.start_tasks() 329 | 330 | self.update_pkg_cache() 331 | self.cleanup() 332 | self.submit_pending_runs() 333 | 334 | schedule.every().day.do(self.update_pkg_cache) 335 | schedule.every(12).hours.do(self.cleanup) 336 | -------------------------------------------------------------------------------- /lxdrunner/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jonans/lxdrunner/d407dacc31ff443aeaf10351cf31605f50bdef09/lxdrunner/scripts/__init__.py -------------------------------------------------------------------------------- /lxdrunner/scripts/setuprunner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | set -eu 4 | exec 2>&1 5 | 6 | PKGDIR="/opt/runner" 7 | PKGNAME=actions-runner.tgz 8 | PKGFILE="$PKGDIR/$PKGNAME" 9 | SETUPFILE="$PKGDIR/setupvars.conf" 10 | 11 | RUNNERUSER=runner 12 | RUNNERHOME="/home/$RUNNERUSER" 13 | 14 | CHECK_ARGS="GHA_TOKEN GHA_URL GHA_NAME" 15 | 16 | fail() { 17 | printf "$1" 18 | exit 1 19 | } 20 | 21 | [ -f $SETUPFILE ] && source $SETUPFILE 22 | 23 | # Ensure required vars are set 24 | ERRS="" 25 | for var in $CHECK_ARGS ; do 26 | eval val=\$$var 27 | if [ -z "$val" ] ; then 28 | ERRS="${ERRS}${var} is not set\n" 29 | fi 30 | done 31 | 32 | [ -n "$ERRS" ] && fail "$ERRS" 33 | 34 | # User handling 35 | 36 | setup_user(){ 37 | 38 | # Avoid adduser race condition with cloud-init. Wait till done. 39 | which cloud-init && cloud-init status -w 40 | 41 | adduser runner --disabled-password --gecos "" 42 | [ "$?" -eq "0" ] || fail "Add user failed" 43 | adduser runner sudo 44 | [ "$?" -eq "0" ] || fail "Add group failed" 45 | echo "runner ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/runner 46 | } 47 | 48 | # Package handling 49 | 50 | unpack(){ 51 | [ -d "$RUNNERHOME" ] || fail "Home $RUNNERHOME doesn't exist" 52 | [ -f "$PKGFILE" ] || fail "Package $PKGFILE doesn't exist" 53 | 54 | sudo -u $RUNNERUSER tar -xvf $PKGFILE -C $RUNNERHOME >/dev/null || fail "Unpack failed" 55 | } 56 | 57 | delaypoweroff(){ 58 | sleep 5 59 | poweroff -f 60 | } 61 | 62 | background(){ 63 | local cmd=$@ 64 | set +e 65 | echo "Backgrounding: $*" 66 | # Redirect FDs so the detached command does not hold this script's stdio 67 | $cmd </dev/null >/dev/null 2>&1 & disown 68 | } 69 | 70 | start_runner(){ 71 | sudo -u $RUNNERUSER ./run.sh 72 | delaypoweroff 73 | } 74 | 75 | reg_runner(){ 76 | sudo -u $RUNNERUSER ./config.sh --unattended --url "$GHA_URL" --token "$GHA_TOKEN" \ 77 | --replace --name "$GHA_NAME" --labels "$GHA_EXTRA_LABELS" --ephemeral 78 | } 79 | 80 | dereg_runner(){ 81 | sudo -u $RUNNERUSER ./config.sh remove --unattended --token "$GHA_TOKEN" 82 | } 83 | 84 | begin_runner(){ 85 | cd $RUNNERHOME 86 | 87 | if reg_runner ; then 88 | echo "Runner registered. Starting up."
89 | background start_runner 90 | else 91 | echo "Error registering runner" 92 | exit 1 93 | fi 94 | } 95 | 96 | 97 | setup_user 98 | unpack 99 | begin_runner 100 | 101 | exit 102 | -------------------------------------------------------------------------------- /lxdrunner/tls.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import getpass 3 | import platform 4 | import ssl 5 | from urllib.parse import urlparse 6 | 7 | import pylxd 8 | from cryptography import x509 9 | from cryptography.hazmat.backends import default_backend 10 | from cryptography.hazmat.primitives import hashes, serialization 11 | from cryptography.hazmat.primitives.asymmetric import rsa 12 | from cryptography.x509.oid import NameOID 13 | 14 | from . import lxd 15 | from .appconf import config as cfg 16 | from .applog import log 17 | 18 | 19 | def gen_priv_key(): 20 | key = rsa.generate_private_key( 21 | public_exponent=65537, 22 | key_size=2048, 23 | ) 24 | return key 25 | 26 | 27 | def gen_pub_key(key): 28 | # Various details about who we are. For a self-signed certificate the 29 | # subject and issuer are always the same. 30 | subject = issuer = x509.Name( 31 | [ 32 | x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u""), 33 | x509.NameAttribute(NameOID.LOCALITY_NAME, u""), 34 | x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"LXDRunner"), 35 | x509.NameAttribute( 36 | NameOID.COMMON_NAME, f"lxdrunner@{platform.node()}" 37 | ), 38 | ] 39 | ) 40 | 41 | cert = x509.CertificateBuilder( 42 | ).subject_name(subject).issuer_name(issuer).public_key( 43 | key.public_key() 44 | ).serial_number(x509.random_serial_number()).not_valid_before( 45 | datetime.datetime.utcnow() 46 | ).not_valid_after( 47 | 48 | # Our certificate will be valid for 10 days 49 | datetime.datetime.utcnow() + datetime.timedelta(days=10) 50 | ).add_extension( 51 | x509.SubjectAlternativeName([x509.DNSName(u"localhost")]), 52 | critical=False, 53 | 54 | # Sign our certificate with our private key 55 | ).sign(key, hashes.SHA256()) 56 | 57 | return cert 58 | 59 | 60 | def gen_key_pair(): 61 | " Generate client key pair and save to disk " 62 | (crt_path, key_path) = cfg.key_pair_paths() 63 | 64 | if crt_path.exists(): 65 | return 66 | 67 | log.info("Generating key pair") 68 | 69 | key = gen_priv_key() 70 | cert = gen_pub_key(key) 71 | with key_path.open("wb") as f: 72 | f.write( 73 | key.private_bytes( 74 | encoding=serialization.Encoding.PEM, 75 | format=serialization.PrivateFormat.TraditionalOpenSSL, 76 | encryption_algorithm=serialization.NoEncryption(), 77 | ) 78 | ) 79 | 80 | with crt_path.open("wb") as f: 81 | f.write(cert.public_bytes(serialization.Encoding.PEM)) 82 | 83 | 84 | def authenticate(client): 85 | " Authenticate with remote LXD server" 86 | while not client.trusted: 87 | trust_pass = getpass.getpass(prompt="Trust Password:") 88 | try: 89 | client.authenticate(trust_pass) 90 | except pylxd.exceptions.LXDAPIException as err: 91 | if str(err) != 'not authorized': 92 | raise 93 | log.error("Authentication Error") 94 | 95 | 96 | def get_peer_cert(host, port): 97 | " Get cert from server " 98 | pemcert = ssl.get_server_certificate((host, port)) 99 | return pemcert 100 | 101 | 102 | def show_fingerprint(pemcert): 103 | " Show cert subject and fingerprint " 104 | pemutf = pemcert.encode('utf-8') 105 | cert = x509.load_pem_x509_certificate(pemutf, default_backend()) 106 | print("Subject:", cert.subject.rfc4514_string()) 107 | print( 108 | "Fingerprint:", 109 | 
cert.fingerprint(cert.signature_hash_algorithm).hex() 110 | ) 111 | 112 | 113 | def confirm_accept_peer(): 114 | res = input("Accept remote y/n : ") 115 | if res.startswith('y'): 116 | return True 117 | return False 118 | 119 | 120 | def confirm_certs(): 121 | " Confirm all LXD remotes " 122 | for rname, remote in cfg.remotes.items(): 123 | path = cfg.dirs.servcerts / f"{rname}.crt" 124 | if not ( 125 | remote.protocol == 'lxd' and remote.addr 126 | and "https://" in remote.addr 127 | ): 128 | continue 129 | if not path.exists(): 130 | url = urlparse(remote.addr) 131 | host = url.netloc 132 | port = 443 133 | if ":" in url.netloc: 134 | host, port = url.netloc.split(":") 135 | pem = get_peer_cert(host, port) 136 | show_fingerprint(pem) 137 | if not confirm_accept_peer(): 138 | continue 139 | # FIX TLS VERIFICATION 140 | client = lxd.get_client(rname, verify=False) 141 | authenticate(client) 142 | with path.open("wb") as certfile: 143 | certfile.write(pem.encode('utf-8')) 144 | -------------------------------------------------------------------------------- /lxdrunner/util.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import secrets 4 | import threading 5 | 6 | from .appconf import config as cfg 7 | 8 | # 9 | # Helper Functions 10 | # 11 | 12 | 13 | def has_prefix(name): 14 | " Check if name is managed by LXDRunner " 15 | return name.startswith(cfg.prefix + "-") 16 | 17 | 18 | def make_name(): 19 | " Generate name based on prefix and random token " 20 | return "{}-{}".format(cfg.prefix, secrets.token_hex(3)) 21 | 22 | 23 | def threadit(func, **kwargs): 24 | thread = threading.Thread(target=func, daemon=True, **kwargs) 25 | thread.start() 26 | return thread 27 | 28 | 29 | def env_str(data): 30 | 31 | sdata = "" 32 | for key, val in data.items(): 33 | sdata += f"{key}={val}\n" 34 | return sdata 35 | 36 | 37 | def image_to_source(image): 38 | " Convert image resource [remote:]alias to source object " 39 | alias = image 40 | source = dict(type="image", mode="pull") 41 | 42 | if ":" in image: 43 | remote_name, alias = image.split(":", 1) 44 | remote = cfg.remotes.get(remote_name) 45 | source['protocol'] = remote.protocol 46 | source['server'] = remote.addr 47 | 48 | source['alias'] = alias 49 | return source 50 | -------------------------------------------------------------------------------- /lxdrunner/web.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import hmac 4 | import logging 5 | 6 | from flask import Flask, current_app, request 7 | 8 | from .appconf import config as cfg 9 | from .applog import log 10 | 11 | app = Flask("LXDRun") 12 | 13 | 14 | def validate_webhook(): 15 | " Validate Github webhook signature " 16 | 17 | evt = request.headers.get("X-GitHub-Event") 18 | failmsg = f"Webhook HMAC failed {evt}" 19 | sig = request.headers.get("X-Hub-Signature-256") 20 | 21 | if not evt: 22 | log.warning("Not a Github webhook event") 23 | return False 24 | 25 | if not sig: 26 | log.warning(failmsg + ": missing X-Hub-Signature-256") 27 | return False 28 | 29 | mac = "sha256=" + hmac.new( 30 | cfg.hooksecret.encode('utf8'), request.data, "sha256" 31 | ).hexdigest() 32 | 33 | result = hmac.compare_digest(mac, sig) 34 | if not result: 35 | log.warning(failmsg) 36 | log.debug(f"- HMAC: { mac }") 37 | log.debug(f"- GHSIG: { sig }") 38 | return result 39 | 40 | 41 | @app.route("/hooks/lxdrunner", methods=["POST", "GET"]) 42 | def githubhook(): 43 | ghevt =
request.headers.get("X-GitHub-Event") 44 | 45 | if not validate_webhook(): 46 | return "UNAUTHORIZED", 401 47 | 48 | data = request.json 49 | 50 | job = data.get("workflow_job", {}) 51 | # Event should be workflow_job, status == queued, self-hosted 52 | if ( 53 | ghevt != "workflow_job" or job.get("status") != "queued" 54 | or "self-hosted" not in job.get("labels") 55 | ): 56 | 57 | log.debug(f"Skipping event: {ghevt} , action={data.get('action')}") 58 | return "Skipping Event" 59 | 60 | gh = dict( 61 | wf_job_id=job.get("id"), 62 | repo=data.get("repository", {}).get("name"), 63 | owner=data.get("repository", {}).get("owner", {}).get("login"), 64 | org=data.get("organization", {}).get("login"), 65 | labels=job.get("labels") 66 | ) 67 | log.info(f"Accepted event: {ghevt} , action={data['action']}") 68 | current_app.queue_evt(gh) 69 | 70 | return "OK" 71 | 72 | 73 | def startserver(queue_evt): 74 | app.queue_evt = queue_evt 75 | tls = 'adhoc' if cfg.web_tls else None 76 | app.run(host=str(cfg.web_host), port=cfg.web_port, ssl_context=tls) 77 | -------------------------------------------------------------------------------- /requirements.dev.in: -------------------------------------------------------------------------------- 1 | yapf 2 | jedi 3 | pytest 4 | build 5 | mypy 6 | flake8 7 | pip-tools 8 | -------------------------------------------------------------------------------- /requirements.dev.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with python 3.8 3 | # To update, run: 4 | # 5 | # pip-compile --output-file=requirements.dev.txt requirements.dev.in requirements.in 6 | # 7 | attrs==20.3.0 8 | # via pytest 9 | build==0.3.1.post1 10 | # via -r requirements.dev.in 11 | certifi==2021.5.30 12 | # via requests 13 | cffi==1.14.6 14 | # via cryptography 15 | charset-normalizer==2.0.4 16 | # via requests 17 | click==8.0.1 18 | # via 19 | # flask 20 | # pip-tools 21 | cryptography==3.4.8 22 | # via pylxd 23 | fastcore==1.3.26 24 | # via ghapi 25 | flake8==3.9.1 26 | # via -r requirements.dev.in 27 | flask==2.0.1 28 | # via -r requirements.in 29 | ghapi==0.1.19 30 | # via -r requirements.in 31 | goodconf[yaml]==2.0.1 32 | # via -r requirements.in 33 | idna==3.2 34 | # via requests 35 | iniconfig==1.1.1 36 | # via pytest 37 | itsdangerous==2.0.1 38 | # via flask 39 | jedi==0.18.0 40 | # via -r requirements.dev.in 41 | jinja2==3.0.1 42 | # via flask 43 | markupsafe==2.0.1 44 | # via jinja2 45 | mccabe==0.6.1 46 | # via flake8 47 | mypy==0.812 48 | # via -r requirements.dev.in 49 | mypy-extensions==0.4.3 50 | # via mypy 51 | packaging==21.0 52 | # via 53 | # build 54 | # fastcore 55 | # ghapi 56 | # pytest 57 | parso==0.8.2 58 | # via jedi 59 | pep517==0.10.0 60 | # via 61 | # build 62 | # pip-tools 63 | pip-tools==6.3.0 64 | # via -r requirements.dev.in 65 | pluggy==0.13.1 66 | # via pytest 67 | py==1.10.0 68 | # via pytest 69 | pycodestyle==2.7.0 70 | # via flake8 71 | pycparser==2.20 72 | # via cffi 73 | pydantic==1.8.2 74 | # via 75 | # -r requirements.in 76 | # goodconf 77 | pyflakes==2.3.1 78 | # via flake8 79 | pylxd==2.3.0 80 | # via -r requirements.in 81 | pyparsing==2.4.7 82 | # via packaging 83 | pytest==6.2.3 84 | # via -r requirements.dev.in 85 | python-dateutil==2.8.2 86 | # via pylxd 87 | requests==2.26.0 88 | # via 89 | # pylxd 90 | # requests-toolbelt 91 | # requests-unixsocket 92 | requests-toolbelt==0.9.1 93 | # via pylxd 94 | requests-unixsocket==0.2.0 95 | # via pylxd 96 | 
ruamel.yaml==0.17.16 97 | # via goodconf 98 | ruamel.yaml.clib==0.2.6 99 | # via ruamel.yaml 100 | schedule==1.1.0 101 | # via -r requirements.in 102 | six==1.16.0 103 | # via python-dateutil 104 | toml==0.10.2 105 | # via 106 | # build 107 | # pep517 108 | # pytest 109 | typed-ast==1.4.3 110 | # via mypy 111 | typing-extensions==3.7.4.3 112 | # via 113 | # mypy 114 | # pydantic 115 | urllib3==1.26.6 116 | # via 117 | # requests 118 | # requests-unixsocket 119 | werkzeug==2.0.1 120 | # via flask 121 | wheel==0.37.0 122 | # via pip-tools 123 | ws4py==0.5.1 124 | # via pylxd 125 | xdg==5.1.1 126 | # via -r requirements.in 127 | yapf==0.31.0 128 | # via -r requirements.dev.in 129 | 130 | # The following packages are considered to be unsafe in a requirements file: 131 | # pip 132 | # setuptools 133 | -------------------------------------------------------------------------------- /requirements.in: -------------------------------------------------------------------------------- 1 | # toml needed for broken ubuntu pip. missing toml 2 | 3 | pylxd 4 | ghapi 5 | Flask 6 | pydantic 7 | goodconf[yaml]>=2.0.* 8 | schedule 9 | xdg 10 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with python 3.8 3 | # To update, run: 4 | # 5 | # pip-compile 6 | # 7 | certifi==2021.5.30 8 | # via requests 9 | cffi==1.14.6 10 | # via cryptography 11 | charset-normalizer==2.0.4 12 | # via requests 13 | click==8.0.1 14 | # via flask 15 | cryptography==3.4.8 16 | # via pylxd 17 | fastcore==1.3.26 18 | # via ghapi 19 | flask==2.0.1 20 | # via -r requirements.in 21 | ghapi==0.1.19 22 | # via -r requirements.in 23 | goodconf[yaml]==2.0.1 24 | # via -r requirements.in 25 | idna==3.2 26 | # via requests 27 | itsdangerous==2.0.1 28 | # via flask 29 | jinja2==3.0.1 30 | # via flask 31 | markupsafe==2.0.1 32 | # via jinja2 33 | packaging==21.0 34 | # via 35 | # fastcore 36 | # ghapi 37 | pycparser==2.20 38 | # via cffi 39 | pydantic==1.8.2 40 | # via 41 | # -r requirements.in 42 | # goodconf 43 | pylxd==2.3.0 44 | # via -r requirements.in 45 | pyparsing==2.4.7 46 | # via packaging 47 | python-dateutil==2.8.2 48 | # via pylxd 49 | requests==2.26.0 50 | # via 51 | # pylxd 52 | # requests-toolbelt 53 | # requests-unixsocket 54 | requests-toolbelt==0.9.1 55 | # via pylxd 56 | requests-unixsocket==0.2.0 57 | # via pylxd 58 | ruamel.yaml==0.17.16 59 | # via goodconf 60 | ruamel.yaml.clib==0.2.6 61 | # via ruamel.yaml 62 | schedule==1.1.0 63 | # via -r requirements.in 64 | six==1.16.0 65 | # via python-dateutil 66 | typing-extensions==3.10.0.2 67 | # via pydantic 68 | urllib3==1.26.6 69 | # via 70 | # requests 71 | # requests-unixsocket 72 | werkzeug==2.0.1 73 | # via flask 74 | ws4py==0.5.1 75 | # via pylxd 76 | xdg==5.1.1 77 | # via -r requirements.in 78 | 79 | # The following packages are considered to be unsafe in a requirements file: 80 | # pip 81 | -------------------------------------------------------------------------------- /scripts/build-alpine-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | CONTNAME=lxdrunner-build 5 | USER=app 6 | 7 | if ! 
lxc info $CONTNAME >/dev/null ; then 8 | lxc launch images:alpine/3.14 $CONTNAME 9 | sleep 3 10 | fi 11 | 12 | lxc exec $CONTNAME -- sh <<-END 13 | set -eux 14 | apk update 15 | apk add python3 py3-pip py3-setuptools py3-cryptography 16 | id $USER || adduser -D $USER 17 | END 18 | 19 | MY_UID=$(lxc exec $CONTNAME -- id -u $USER ) 20 | VERSION=$(python3 setup.py --version) 21 | WHEEL=$(basename dist/*.whl) 22 | 23 | lxc file push dist/$WHEEL $CONTNAME/home/$USER/ 24 | lxc file push config.yml.example $CONTNAME/home/$USER/config.yml 25 | 26 | lxc exec $CONTNAME --user $MY_UID --group $MY_UID -- sh <<-END 27 | cd ~$USER 28 | echo 'PATH=~/.local/bin/:$PATH' > ~$USER/.profile 29 | pip3 install ./$WHEEL 30 | END 31 | 32 | lxc file push service/lxdrunner.openrc $CONTNAME/etc/init.d/lxdrunner 33 | lxc exec $CONTNAME -- sh <<-END 34 | rc-update add lxdrunner 35 | END 36 | 37 | -------------------------------------------------------------------------------- /scripts/build-debian-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | CONTNAME=lxdrunner-build 5 | USER=app 6 | 7 | if ! lxc info $CONTNAME >/dev/null ; then 8 | lxc launch images:debian/11 $CONTNAME 9 | sleep 3 10 | fi 11 | 12 | lxc exec $CONTNAME -- sh <<-END 13 | set -eux 14 | apt update 15 | apt install -y python3-dev python3-pip 16 | id $USER || adduser --disabled-password --gecos "" $USER 17 | loginctl enable-linger $USER 18 | END 19 | 20 | MY_UID=$(lxc exec $CONTNAME -- id -u $USER ) 21 | VERSION=$(python3 setup.py --version) 22 | WHEEL=lxdrunner-$VERSION-py3-none-any.whl 23 | 24 | lxc file push dist/$WHEEL $CONTNAME/home/$USER/ 25 | 26 | lxc exec $CONTNAME --user $MY_UID --group $MY_UID -- sh <<-END 27 | cd ~$USER 28 | pip install ./$WHEEL 29 | END 30 | 31 | -------------------------------------------------------------------------------- /scripts/setup-lxd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | snap install lxd 5 | lxd init --auto 6 | 7 | -------------------------------------------------------------------------------- /service/lxdrunner.openrc: -------------------------------------------------------------------------------- 1 | #!/sbin/openrc-run 2 | 3 | name="LXDRunner" 4 | description="Emphemeral selfhosted runners" 5 | command="/home/app/.local/bin/lxdrunner" 6 | pidfile="/var/run/$SVCNAME.pid" 7 | command_background=true 8 | command_user="app:app" 9 | 10 | depend() { 11 | need logger 12 | } 13 | 14 | -------------------------------------------------------------------------------- /service/lxdrunner.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LXDRunner 3 | 4 | [Service] 5 | ExecStart=%h/.local/bin/lxdrunner 6 | Restart=on-failure 7 | RestartSec=10 8 | 9 | # Hardening 10 | SystemCallArchitectures=native 11 | MemoryDenyWriteExecute=true 12 | NoNewPrivileges=true 13 | 14 | [Install] 15 | WantedBy=default.target 16 | 17 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = lxdrunner 3 | version = attr: lxdrunner.__version__ 4 | author = Jonan Santiago 5 | author_email = dont@spam 6 | description = Automatically deploy ephemeral Github self-hosted runners on LXD 7 | long_description = file: README.md 8 | long_description_content_type = text/markdown 9 | url = 
https://github.com/jonans/lxdrunner 10 | license = GPLv2 11 | keywords = lxd, github 12 | classifiers = 13 | Intended Audience :: Developers 14 | 15 | [options] 16 | packages = find: 17 | python_requires = >=3.8 18 | setup_requires = setuptools-git-versioning 19 | install_requires = 20 | pylxd 21 | ghapi 22 | Flask 23 | pydantic 24 | goodconf[yaml]>=2.0.* 25 | schedule 26 | xdg 27 | 28 | [options.packages.find] 29 | exclude = tests 30 | 31 | [options.extras_require] 32 | dev = 33 | pytest 34 | yapf 35 | jedi 36 | pytest 37 | build 38 | mypy 39 | flake8 40 | pip-tools 41 | isort 42 | 43 | [options.package_data] 44 | lxdrunner = scripts/setuprunner.sh 45 | 46 | [options.entry_points] 47 | console_scripts= 48 | lxdrunner = lxdrunner.__main__:main 49 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import setuptools 4 | 5 | # Work around for https://github.com/pypa/pip/issues/7953 6 | import site 7 | import sys 8 | 9 | site.ENABLE_USER_SITE = "--user" in sys.argv[1:] 10 | 11 | version_config = { 12 | "dev_template": "{tag}.dev{ccount}+{branch}.{sha}", 13 | "dirty_template": "{tag}.dev{ccount}+{branch}.{sha}", 14 | } 15 | 16 | setuptools.setup(version_config=version_config, ) 17 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jonans/lxdrunner/d407dacc31ff443aeaf10351cf31605f50bdef09/tests/__init__.py -------------------------------------------------------------------------------- /tests/config.yml: -------------------------------------------------------------------------------- 1 | # Github Configuration 2 | 3 | pat: "fake_github_pat" 4 | hooksecret: "fake_webhook_secret" 5 | 6 | # LXD Runner Configuration 7 | 8 | # Prefix used for naming resources ( GH runner names, LXD instances, etc ) 9 | prefix: lxdrunner 10 | # Maximum number of concurrent workers 11 | max_workers: 10 12 | 13 | web_host: 0.0.0.0 14 | web_port: 5000 15 | web_tls: True 16 | 17 | # Remotes for LXD servers. 18 | # 19 | # addr: should be https://: or unix socket path. 20 | # main should be primary LXD server to connect with. Default: local unix socket. 21 | 22 | remotes: 23 | main: 24 | addr: 25 | protocol: lxd 26 | images: 27 | addr: https://images.linuxcontainers.org 28 | protocol: simplestreams 29 | ubuntu: 30 | addr: https://cloud-images.ubuntu.com/releases 31 | protocol: simplestreams 32 | 33 | # The config below maps a set of runner labels to specific LXD 34 | # settings (image, profile, and type). 
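# Note on matching: per queue_evt() in lxdrunner/mngr.py, an incoming job is looked
# up by the frozenset of its labels, so the job's label set must equal one of the
# entries below exactly. A job requesting [ self-hosted, vm ] therefore selects the
# virtual-machine entry, not the plain [ self-hosted ] container entry.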
35 | 36 | runnermap: 37 | 38 | # name: required 39 | # labels: required 40 | # image: required 41 | # type: required ( container, virtual-machine ) 42 | # runner_os: required ( linux, win, osx ) 43 | # runner_arch: required ( x64, arm, arm64 ) 44 | # profiles: default = default 45 | # setup_script: default = internal script in lxdrunner/scripts/setuprunner.sh 46 | 47 | - name: Ubuntu - Container Runner 48 | labels: [ self-hosted ] 49 | image: ubuntu:focal 50 | type: container 51 | runner_arch: x64 52 | runner_os: linux 53 | 54 | - name: Ubuntu - Virtual Machine Runner 55 | labels: [ self-hosted, vm ] 56 | image: ubuntu:focal 57 | type: virtual-machine 58 | runner_arch: x64 59 | runner_os: linux 60 | 61 | - name: Debian 11 - Container Runner 62 | labels: [ self-hosted, debian ] 63 | image: images:debian/11/cloud 64 | profiles: [ default ] 65 | type: container 66 | runner_arch: x64 67 | runner_os: linux 68 | 69 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def pytest_configure(config): 5 | print("Setting up test config file ") 6 | os.environ['LXDRCFG'] = "tests/config.yml" 7 | from lxdrunner.appconf import config as cfg 8 | cfg.load("tests/config.yml") 9 | -------------------------------------------------------------------------------- /tests/data.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import dataclasses 3 | 4 | from lxdrunner.appconf import config as cfg 5 | import lxdrunner.dtypes as dtypes 6 | # 7 | # Test Data 8 | # 9 | 10 | org_args = dict(org="testorg", target="testorg") 11 | repo_args = dict( 12 | owner="testowner", repo="testrepo", target="testowner/testrepo" 13 | ) 14 | 15 | now = datetime.datetime.now().astimezone() 16 | 17 | 18 | @dataclasses.dataclass 19 | class Token: 20 | token: str = 'FAKE_TOKEN' 21 | expires_at: str = (now + datetime.timedelta(minutes=60)).isoformat() 22 | 23 | 24 | valid_token = Token() 25 | expired_token = Token(expires_at=now.isoformat()) 26 | 27 | 28 | @dataclasses.dataclass 29 | class Package: 30 | os: str 31 | architecture: str 32 | download_url: str 33 | filename: str 34 | linkname: str 35 | 36 | 37 | pkg0 = dtypes.RunnerPackage( 38 | os='osx', 39 | architecture='x64', 40 | download_url='https://localhost/actions-runner-osx-x64-2.277.1.tar.gz', 41 | filename='actions-runner-osx-x64-2.277.1.tar.gz', 42 | linkname='actions-runner-osx-x64-latest' 43 | ) 44 | pkg0_expected_linkname = "actions-runner-osx-x64-latest" 45 | 46 | pkg1 = dtypes.RunnerPackage( 47 | os='linux', 48 | architecture='arm', 49 | download_url='https://localhost/actions-runner-linux-arm-2.277.1.tar.gz', 50 | filename='actions-runner-linux-arm-2.277.1.tar.gz', 51 | linkname='actions-runner-linux-arm-latest' 52 | ) 53 | pkg1_expected_linkname = "actions-runner-linux-arm-latest" 54 | 55 | pkgs = (pkg0, pkg1) 56 | -------------------------------------------------------------------------------- /tests/gvars.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | testroot = Path(__file__).parent 4 | -------------------------------------------------------------------------------- /tests/test_appconf.py: -------------------------------------------------------------------------------- 1 | from unittest import mock 2 | from pathlib import Path 3 | import lxdrunner.appconf as appconf 4 | 5 | cfg = 
appconf.config 6 | 7 | # 8 | # Tests 9 | # 10 | 11 | 12 | class TestAppConf: 13 | def test_key_pair_paths(self): 14 | (p1, p2) = cfg.key_pair_paths() 15 | assert p1.with_name("client.crt") 16 | assert p2.with_name("client.key") 17 | -------------------------------------------------------------------------------- /tests/test_lxd.py: -------------------------------------------------------------------------------- 1 | import unittest.mock as mock 2 | 3 | import pytest 4 | from pydantic import BaseModel 5 | import pylxd.exceptions 6 | 7 | from lxdrunner.appconf import config as cfg 8 | from lxdrunner import util 9 | import lxdrunner.lxd 10 | from lxdrunner.dtypes import RunnerEvent 11 | from lxdrunner.appconf import RunnerConf 12 | 13 | # 14 | # Test Data 15 | # 16 | 17 | 18 | class FakeLXDInstance(BaseModel): 19 | name: str 20 | 21 | 22 | non_workers = [FakeLXDInstance(name=x) for x in range(3)] 23 | lxd_workers = [FakeLXDInstance(name=util.make_name()) for x in range(5)] 24 | 25 | goodrc = RunnerConf( 26 | name="Test Runner Conf", 27 | labels=['self-hosted', 'linux'], 28 | image="ubuntu/latest", 29 | profiles=['default'], 30 | runner_os='linux', 31 | runner_arch='x64', 32 | type='container' 33 | ) 34 | 35 | evt = RunnerEvent(owner='owner', repo='repo', org='testorg', rc=goodrc) 36 | 37 | 38 | @pytest.fixture 39 | def lxdm(): 40 | lxd = lxdrunner.lxd.LXDRunner(connect=False) 41 | with mock.patch.object(lxd, 'client'): 42 | yield lxd 43 | 44 | 45 | def test_push_file(lxdm): 46 | instance = mock.Mock() 47 | lxdm.pushfile("/dev/null", instance, "/root/file", mode="0600") 48 | instance.files.put.assert_called_with("/root/file", b'', mode="0600") 49 | 50 | 51 | @mock.patch("lxdrunner.lxd.pylxd.Client") 52 | def test_get_client(m_client, lxdm): 53 | lxdrunner.lxd.get_client("main") 54 | remote = cfg.remotes['main'] 55 | assert m_client.call_args.kwargs == dict( 56 | endpoint=remote.addr, cert=None, verify=False 57 | ) 58 | 59 | m_client.reset_mock() 60 | lxdrunner.lxd.get_client("ubuntu") 61 | remote = cfg.remotes['ubuntu'] 62 | assert m_client.call_args.kwargs == dict( 63 | endpoint=remote.addr, cert=cfg.key_pair_paths(), verify=False 64 | ) 65 | 66 | 67 | def test_get_workers(lxdm): 68 | lxdm.client.instances.all.return_value = lxd_workers + non_workers 69 | workers = lxdm.get_workers() 70 | assert lxdm.client.instances.all.called 71 | assert workers == lxd_workers 72 | 73 | 74 | def test_verify_launch(lxdm): 75 | assert lxdm.verify_launch(evt) 76 | 77 | 78 | def test_verify_launch_missing_image(lxdm): 79 | lxdm.client.images.get_by_alias.side_effect = pylxd.exceptions.NotFound( 80 | 'missing' 81 | ) 82 | assert lxdm.verify_launch(evt) is False 83 | 84 | 85 | def test_verify_launch_missing_profile(lxdm): 86 | lxdm.client.profiles.exists.return_value = False 87 | assert lxdm.verify_launch(evt) is False 88 | 89 | 90 | def test_launch_instance(lxdm): 91 | instname = util.make_name() 92 | expected_cfg = dict( 93 | name=instname, 94 | ephemeral=True, 95 | profiles=goodrc.profiles, 96 | source=dict(type="image", alias=goodrc.image, mode="pull"), 97 | type=goodrc.type 98 | ) 99 | lxdm.launch_instance(instname, goodrc) 100 | assert lxdm.client.instances.create.called 101 | called_cfg = lxdm.client.instances.create.call_args.args[0] 102 | assert called_cfg == expected_cfg 103 | 104 | 105 | def test__launch(lxdm): 106 | 107 | lxdm.start_gha_runner = mock.Mock() 108 | assert lxdm._launch(evt) is True, "Launch should succeed" 109 | 110 | lxdm.start_gha_runner.reset_mock() 111 | 
lxdm.start_gha_runner.side_effect = Exception("Random exception") 112 | lxdm._cleanup_instance = mock.Mock() 113 | assert lxdm._launch(evt) is False, "Launch should have failed" 114 | assert lxdm._cleanup_instance.called, "Cleanup should have been called" 115 | 116 | 117 | def test__cleanup_instance(lxdm): 118 | assert lxdm._cleanup_instance( 119 | "randomname" 120 | ) is True, "Inst should be stopped, deleted" 121 | 122 | lxdm.client.instances.get.side_effect = pylxd.exceptions.LXDAPIException( 123 | "LXD Failure" 124 | ) 125 | assert lxdm._cleanup_instance( 126 | "randomname" 127 | ) is False, "LXD lookup should have failed" 128 | 129 | 130 | def test_cleanup_instance(lxdm): 131 | res = lxdm._cleanup_instance("fake-instance") 132 | assert res == True, "Cleanup should succeed" 133 | 134 | lxdm.client.reset_mock() 135 | 136 | cfg.cleanup = False 137 | res = lxdm._cleanup_instance("fake-instance") 138 | assert res == False, "Cleanup should be disabled" 139 | 140 | lxdm.client.reset_mock() 141 | 142 | cfg.cleanup = True 143 | lxdm.client.instances.get.side_effect = pylxd.exceptions.LXDAPIException( 144 | "Instance does not exist" 145 | ) 146 | res = lxdm._cleanup_instance("fake-instance") 147 | assert res == False, "Cleanup should fail due to exception." 148 | -------------------------------------------------------------------------------- /tests/test_mngr.py: -------------------------------------------------------------------------------- 1 | import unittest.mock as mock 2 | import pytest 3 | import dataclasses 4 | import os 5 | from pathlib import Path 6 | 7 | from lxdrunner.appconf import config as cfg 8 | 9 | import lxdrunner.mngr 10 | 11 | # 12 | # Test Data 13 | # 14 | 15 | from . import data 16 | from fastcore.foundation import L 17 | 18 | 19 | @dataclasses.dataclass 20 | class GHReleaseAsset: 21 | name: str 22 | browser_download_url: str 23 | 24 | 25 | @dataclasses.dataclass 26 | class GHRelease: 27 | prerelease: bool 28 | assets: list 29 | 30 | 31 | asset = GHReleaseAsset( 32 | name='actions-runner-osx-x64-2.277.1.tar.gz', 33 | browser_download_url= 34 | "https://localhost/actions-runner-osx-x64-2.277.1.tar.gz" 35 | ) 36 | 37 | ghrels = L( 38 | GHRelease(prerelease=True, assets=[asset]), 39 | GHRelease(prerelease=False, assets=[asset]) 40 | ) 41 | 42 | # 43 | # Tests 44 | # 45 | 46 | 47 | @pytest.fixture 48 | def mngr(): 49 | " Return RunManager() object with mocked GHAPI " 50 | mn = lxdrunner.mngr.RunManager() 51 | with mock.patch.object(mn, 'ghapi'): 52 | yield mn 53 | 54 | 55 | def test_get_reg_token_doesnt_exist(mngr): 56 | mngr.get_reg_token(data.org_args) 57 | assert mngr.ghapi.actions.create_registration_token_for_org.called 58 | mngr.ghapi.reset_mock() 59 | mngr.get_reg_token(data.repo_args) 60 | assert mngr.ghapi.actions.create_registration_token_for_repo.called 61 | 62 | 63 | def test_get_reg_token_valid(mngr): 64 | mngr.reg_tokens = {data.org_args['org']: data.valid_token} 65 | mngr.get_reg_token(data.org_args) 66 | assert not mngr.ghapi.actions.create_registration_token_for_org.called 67 | 68 | 69 | def test_get_reg_token_expired(mngr): 70 | mngr.reg_tokens = {data.org_args['org']: data.expired_token} 71 | mngr.get_reg_token(data.org_args) 72 | assert mngr.ghapi.actions.create_registration_token_for_org.called 73 | 74 | 75 | def test_get_runners(mngr): 76 | mngr.get_runners(data.repo_args) 77 | assert mngr.ghapi.actions.list_self_hosted_runners_for_repo.called 78 | 79 | mngr.ghapi.reset_mock() 80 | 81 | mngr.get_runners(data.org_args) 82 | assert 
mngr.ghapi.actions.list_self_hosted_runners_for_org.called 83 | 84 | 85 | def test_get_packages(mngr): 86 | mngr.ghapi.repos.list_releases.return_value = ghrels 87 | mngr.get_packages() 88 | assert mngr.ghapi.repos.list_releases.called 89 | assert len(mngr.pkgs) == 1, "Pre-release not filtered" 90 | assert mngr.pkgs[0].filename == asset.name 91 | assert mngr.pkgs[0].download_url == asset.browser_download_url 92 | assert mngr.pkgs[0].linkname[:-6] in asset.name, "pkg.linkname incorrect" 93 | 94 | 95 | def touchfile(url, fname): 96 | Path(fname).touch() 97 | 98 | 99 | @mock.patch.object(cfg.dirs, 'pkgdir') 100 | @mock.patch('urllib.request.urlretrieve', side_effect=touchfile) 101 | def test_update_pkg_cache(m_url, m_pkgdir, mngr, tmp_path): 102 | os.chdir(tmp_path) 103 | pkgdir = cfg.dirs.pkgdir = tmp_path 104 | print("ARGS", type(tmp_path), cfg.dirs.pkgdir) 105 | 106 | # Inject list of packages 107 | mngr.get_packages = mock.Mock() 108 | mngr.pkgs = data.pkgs 109 | 110 | pkg_cnt = len(data.pkgs) 111 | 112 | mngr.update_pkg_cache() 113 | # Files will be downloaded 114 | assert m_url.call_count == pkg_cnt, "Download count != pkg count" 115 | # pkgs * 2 files created 116 | assert len( 117 | list(pkgdir.iterdir()) 118 | ) == pkg_cnt * 2, "File count != 2*pkg count" 119 | 120 | m_url.reset_mock() 121 | 122 | fp = Path(pkgdir / "extra_file") 123 | fp.touch() 124 | assert fp.exists(), "extra_file is not present" 125 | 126 | mngr.update_pkg_cache() 127 | # Files exist, no downloaded 128 | assert m_url.call_count == 0, "No files should be downloaded" 129 | assert not fp.exists(), "extra_file should not exist" 130 | -------------------------------------------------------------------------------- /tests/test_tls.py: -------------------------------------------------------------------------------- 1 | from unittest import mock 2 | import pytest 3 | import os 4 | 5 | import lxdrunner.appconf 6 | from lxdrunner.appconf import config as cfg 7 | 8 | import lxdrunner.tls 9 | # 10 | # Tests 11 | # 12 | 13 | 14 | @pytest.fixture 15 | def certfiles(): 16 | with mock.patch( 17 | 'lxdrunner.appconf.AppConfig.key_pair_paths', 18 | return_value=(mock.MagicMock(), mock.MagicMock()) 19 | ): 20 | yield cfg.key_pair_paths() 21 | 22 | 23 | class TestTLS: 24 | 25 | #@mock.patch('lxdrunner.tls.Path') 26 | def test_gen_key_pair(self, certfiles): 27 | mPath = certfiles[0] 28 | lxdrunner.tls.gen_key_pair() 29 | print("CALLS", mPath.mock_calls) 30 | assert mPath.exists.call_count == 1 31 | assert mPath.open.call_count == 0, "keys exist, should not be written" 32 | mPath.exists.return_value = False 33 | lxdrunner.tls.gen_key_pair() 34 | assert mPath.open.call_count == 1, "open key file not called" 35 | assert certfiles[1].open.call_count == 1, "open crt file not called" 36 | -------------------------------------------------------------------------------- /tests/test_util.py: -------------------------------------------------------------------------------- 1 | import secrets 2 | 3 | from lxdrunner.appconf import config as cfg 4 | import lxdrunner.util 5 | 6 | # 7 | # Test Data 8 | # 9 | 10 | from . 
import data 11 | 12 | # 13 | # Tests 14 | # 15 | 16 | 17 | class TestUtils: 18 | def test_make_name(self): 19 | name = lxdrunner.util.make_name() 20 | assert lxdrunner.util.has_prefix(name) 21 | 22 | def test_has_prefix(self): 23 | failname = secrets.token_urlsafe(20) 24 | assert lxdrunner.util.has_prefix(failname) is False 25 | goodname = f"{cfg.prefix}-123456789" 26 | assert lxdrunner.util.has_prefix(goodname) 27 | 28 | def test_env_str(self): 29 | env = {"KEY": "VALUE"} 30 | assert lxdrunner.util.env_str(env) == "KEY=VALUE\n" 31 | 32 | def test_image_to_source(self): 33 | image = "debian/11" 34 | source = lxdrunner.util.image_to_source(image) 35 | assert source['alias'] == image 36 | 37 | image = "ubuntu:focal" 38 | source = lxdrunner.util.image_to_source(image) 39 | assert source['protocol'] in ("simplestreams", "lxd") 40 | assert source['server'] == cfg.remotes["ubuntu"].addr 41 | assert source['alias'] == 'focal' 42 | -------------------------------------------------------------------------------- /tests/test_web.py: -------------------------------------------------------------------------------- 1 | from unittest import mock 2 | import json 3 | from pathlib import Path 4 | 5 | import pytest 6 | import flask 7 | 8 | from . import gvars 9 | from lxdrunner.appconf import config as cfg 10 | from lxdrunner import web 11 | from lxdrunner.web import app 12 | import lxdrunner.web 13 | 14 | app.queue_evt = mock.MagicMock() 15 | 16 | headers = {'X-Hub-Signature-256': 'failedsig'} 17 | basedict = {'X-GitHub-Event': 'some-event'} 18 | 19 | 20 | @pytest.fixture 21 | def reqx(): 22 | with app.test_request_context("/myrequest") as treq: 23 | yield treq.request 24 | 25 | 26 | @pytest.fixture 27 | def hdrs(): 28 | return basedict.copy() 29 | 30 | 31 | @pytest.fixture 32 | def passhdrs(): 33 | return { 34 | 'X-GitHub-Event': 35 | 'workflow_job', 36 | 'X-Hub-Signature-256': 37 | 'sha256=c9ee377a883ea21b5f71e3a43a0e66fce9d9ddeb3c050632115536ac64d4cbac' 38 | } 39 | 40 | 41 | def test_verify_webhook_not_event(passhdrs): 42 | with app.test_request_context(headers=passhdrs, data="{}"): 43 | assert web.validate_webhook() is True 44 | del passhdrs['X-GitHub-Event'] 45 | with app.test_request_context(headers=passhdrs): 46 | assert web.validate_webhook() is False 47 | 48 | 49 | def test_verify_webhook_missing_sig(hdrs): 50 | with app.test_request_context(headers=hdrs): 51 | assert web.validate_webhook() is False 52 | 53 | 54 | def test_verify_webhook_wrong_sig(hdrs): 55 | hdrs.update({'X-Hub-Signature-256': 'incorrect_github_signature'}) 56 | with app.test_request_context(headers=hdrs): 57 | assert web.validate_webhook() is False 58 | 59 | 60 | def test_verify_webhook_correct_sig(hdrs): 61 | hdrs.update( 62 | { 63 | 'X-Hub-Signature-256': 64 | 'sha256=d96602158aa0d59b65d26942515163691680544bfa57e44c470712cd4aa800ae' 65 | } 66 | ) 67 | with app.test_request_context(headers=hdrs): 68 | assert web.validate_webhook() is True, "Sig does not match computed" 69 | hdrs.update( 70 | { 71 | 'X-Hub-Signature-256': 72 | 'sha256=986a36d904cc895eaec3f9b14041f7d63a2fddb2076485ea5ada781d176e89a2' 73 | } 74 | ) 75 | with app.test_request_context(headers=hdrs, data='different_payload'): 76 | assert web.validate_webhook() is True, "Sig doest not match computed" 77 | 78 | 79 | wf_job = json.load((gvars.testroot / "wf_job.json").open()) 80 | 81 | 82 | @mock.patch('lxdrunner.web.validate_webhook', return_value=False) 83 | def test_githubhook(m_validate, passhdrs): 84 | 85 | with app.test_request_context(headers=passhdrs, 
json=wf_job): 86 | res = web.githubhook() 87 | assert res == ("UNAUTHORIZED", 401), "Should be unauthorized" 88 | 89 | m_validate.return_value = True 90 | with app.test_request_context(headers=passhdrs, json=wf_job): 91 | res = web.githubhook() 92 | assert res == "Skipping Event", "Not self-hosted, should be skipped" 93 | 94 | wf_job['workflow_job']['labels'] = ['self-hosted'] 95 | 96 | with app.test_request_context(headers=passhdrs, json=wf_job): 97 | res = web.githubhook() 98 | assert res == "OK", "Event not enqueued" 99 | -------------------------------------------------------------------------------- /tests/wf_job.json: -------------------------------------------------------------------------------- 1 | { 2 | "action": "queued", 3 | "workflow_job": { 4 | "id": 2832853555, 5 | "run_id": 940463255, 6 | "run_url": "https://api.github.com/repos/octo-org/example-workflow/actions/runs/940463255", 7 | "node_id": "MDg6Q2hlY2tSdW4yODMyODUzNT55", 8 | "head_sha": "e3103f8eb03e1ad7f2331c5446b23c070fc54055", 9 | "url": "https://api.github.com/repos/octo-org/example-workflow/actions/jobs/2832853555", 10 | "html_url": "https://github.com/octo-org/example-workflow/runs/2832853555", 11 | "status": "queued", 12 | "conclusion": null, 13 | "started_at": "2021-06-15T19:22:27Z", 14 | "completed_at": null, 15 | "name": "Test workflow", 16 | "steps": [], 17 | "check_run_url": "https://api.github.com/repos/octo-org/example-workflow/check-runs/2832853555", 18 | "labels": [ 19 | "gpu", 20 | "db-app", 21 | "dc-03" 22 | ] 23 | }, 24 | "repository": { 25 | "id": 376034443, 26 | "node_id": "MDEwOlJlcG9zaXRvcnkzNzYwMzQ0ND55", 27 | "name": "example-workflow", 28 | "full_name": "octo-org/example-workflow", 29 | "private": true, 30 | "owner": { 31 | "login": "octo-org", 32 | "id": 33435655, 33 | "node_id": "MDEyOk9yZ2FuaXphdGlvbjMzNDM1Nj55", 34 | "avatar_url": "https://avatars.githubusercontent.com/u/21031067?s=460&u=d851e01410b4f1674f000ba7e0dc94e0b82cd9cc&v=4", 35 | "gravatar_id": "", 36 | "url": "https://api.github.com/users/octo-org", 37 | "html_url": "https://github.com/octo-org", 38 | "followers_url": "https://api.github.com/users/octo-org/followers", 39 | "following_url": "https://api.github.com/users/octo-org/following{/other_user}", 40 | "gists_url": "https://api.github.com/users/octo-org/gists{/gist_id}", 41 | "starred_url": "https://api.github.com/users/octo-org/starred{/owner}{/repo}", 42 | "subscriptions_url": "https://api.github.com/users/octo-org/subscriptions", 43 | "organizations_url": "https://api.github.com/users/octo-org/orgs", 44 | "repos_url": "https://api.github.com/users/octo-org/repos", 45 | "events_url": "https://api.github.com/users/octo-org/events{/privacy}", 46 | "received_events_url": "https://api.github.com/users/octo-org/received_events", 47 | "type": "Organization", 48 | "site_admin": false 49 | }, 50 | "html_url": "https://github.com/octo-org/example-workflow", 51 | "description": "Test workflow", 52 | "fork": false, 53 | "url": "https://api.github.com/repos/octo-org/example-workflow", 54 | "forks_url": "https://api.github.com/repos/octo-org/example-workflow/forks", 55 | "keys_url": "https://api.github.com/repos/octo-org/example-workflow/keys{/key_id}", 56 | "collaborators_url": "https://api.github.com/repos/octo-org/example-workflow/collaborators{/collaborator}", 57 | "teams_url": "https://api.github.com/repos/octo-org/example-workflow/teams", 58 | "hooks_url": "https://api.github.com/repos/octo-org/example-workflow/hooks", 59 | "issue_events_url": 
"https://api.github.com/repos/octo-org/example-workflow/issues/events{/number}", 60 | "events_url": "https://api.github.com/repos/octo-org/example-workflow/events", 61 | "assignees_url": "https://api.github.com/repos/octo-org/example-workflow/assignees{/user}", 62 | "branches_url": "https://api.github.com/repos/octo-org/example-workflow/branches{/branch}", 63 | "tags_url": "https://api.github.com/repos/octo-org/example-workflow/tags", 64 | "blobs_url": "https://api.github.com/repos/octo-org/example-workflow/git/blobs{/sha}", 65 | "git_tags_url": "https://api.github.com/repos/octo-org/example-workflow/git/tags{/sha}", 66 | "git_refs_url": "https://api.github.com/repos/octo-org/example-workflow/git/refs{/sha}", 67 | "trees_url": "https://api.github.com/repos/octo-org/example-workflow/git/trees{/sha}", 68 | "statuses_url": "https://api.github.com/repos/octo-org/example-workflow/statuses/{sha}", 69 | "languages_url": "https://api.github.com/repos/octo-org/example-workflow/languages", 70 | "stargazers_url": "https://api.github.com/repos/octo-org/example-workflow/stargazers", 71 | "contributors_url": "https://api.github.com/repos/octo-org/example-workflow/contributors", 72 | "subscribers_url": "https://api.github.com/repos/octo-org/example-workflow/subscribers", 73 | "subscription_url": "https://api.github.com/repos/octo-org/example-workflow/subscription", 74 | "commits_url": "https://api.github.com/repos/octo-org/example-workflow/commits{/sha}", 75 | "git_commits_url": "https://api.github.com/repos/octo-org/example-workflow/git/commits{/sha}", 76 | "comments_url": "https://api.github.com/repos/octo-org/example-workflow/comments{/number}", 77 | "issue_comment_url": "https://api.github.com/repos/octo-org/example-workflow/issues/comments{/number}", 78 | "contents_url": "https://api.github.com/repos/octo-org/example-workflow/contents/{+path}", 79 | "compare_url": "https://api.github.com/repos/octo-org/example-workflow/compare/{base}...{head}", 80 | "merges_url": "https://api.github.com/repos/octo-org/example-workflow/merges", 81 | "archive_url": "https://api.github.com/repos/octo-org/example-workflow/{archive_format}{/ref}", 82 | "downloads_url": "https://api.github.com/repos/octo-org/example-workflow/downloads", 83 | "issues_url": "https://api.github.com/repos/octo-org/example-workflow/issues{/number}", 84 | "pulls_url": "https://api.github.com/repos/octo-org/example-workflow/pulls{/number}", 85 | "milestones_url": "https://api.github.com/repos/octo-org/example-workflow/milestones{/number}", 86 | "notifications_url": "https://api.github.com/repos/octo-org/example-workflow/notifications{?since,all,participating}", 87 | "labels_url": "https://api.github.com/repos/octo-org/example-workflow/labels{/name}", 88 | "releases_url": "https://api.github.com/repos/octo-org/example-workflow/releases{/id}", 89 | "deployments_url": "https://api.github.com/repos/octo-org/example-workflow/deployments", 90 | "created_at": "2021-06-11T13:29:13Z", 91 | "updated_at": "2021-06-11T13:33:01Z", 92 | "pushed_at": "2021-06-11T13:32:58Z", 93 | "git_url": "git://github.com/octo-org/example-workflow.git", 94 | "ssh_url": "git@github.com:octo-org/example-workflow.git", 95 | "clone_url": "https://github.com/octo-org/example-workflow.git", 96 | "svn_url": "https://github.com/octo-org/example-workflow", 97 | "homepage": null, 98 | "size": 1, 99 | "stargazers_count": 0, 100 | "watchers_count": 0, 101 | "language": null, 102 | "has_issues": true, 103 | "has_projects": true, 104 | "has_downloads": true, 105 | "has_wiki": true, 106 | 
"has_pages": false, 107 | "forks_count": 0, 108 | "mirror_url": null, 109 | "archived": false, 110 | "disabled": false, 111 | "open_issues_count": 0, 112 | "license": null, 113 | "forks": 0, 114 | "open_issues": 0, 115 | "watchers": 0, 116 | "default_branch": "main" 117 | }, 118 | "organization": { 119 | "login": "octo-org", 120 | "id": 33435655, 121 | "node_id": "MDEyOk9yZ2FuaXphdGlvbjMzNDM1Nj55", 122 | "url": "https://api.github.com/orgs/octo-org", 123 | "repos_url": "https://api.github.com/orgs/octo-org/repos", 124 | "events_url": "https://api.github.com/orgs/octo-org/events", 125 | "hooks_url": "https://api.github.com/orgs/octo-org/hooks", 126 | "issues_url": "https://api.github.com/orgs/octo-org/issues", 127 | "members_url": "https://api.github.com/orgs/octo-org/members{/member}", 128 | "public_members_url": "https://api.github.com/orgs/octo-org/public_members{/member}", 129 | "avatar_url": "https://avatars.githubusercontent.com/u/21031067?s=460&u=d851e01410b4f1674f000ba7e0dc94e0b82cd9cc&v=4", 130 | "description": "octo-org" 131 | }, 132 | "sender": { 133 | "login": "octocat", 134 | "id": 319655, 135 | "node_id": "MDQ6VXNlcjMxOTY1NQ55", 136 | "avatar_url": "https://avatars.githubusercontent.com/u/21031067?s=460&u=d851e01410b4f1674f000ba7e0dc94e0b82cd9cc&v=4", 137 | "gravatar_id": "", 138 | "url": "https://api.github.com/users/octocat", 139 | "html_url": "https://github.com/octocat", 140 | "followers_url": "https://api.github.com/users/octocat/followers", 141 | "following_url": "https://api.github.com/users/octocat/following{/other_user}", 142 | "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", 143 | "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", 144 | "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", 145 | "organizations_url": "https://api.github.com/users/octocat/orgs", 146 | "repos_url": "https://api.github.com/users/octocat/repos", 147 | "events_url": "https://api.github.com/users/octocat/events{/privacy}", 148 | "received_events_url": "https://api.github.com/users/octocat/received_events", 149 | "type": "User", 150 | "site_admin": true 151 | } 152 | } 153 | --------------------------------------------------------------------------------