├── .gitignore ├── COPYING ├── LICENSE ├── Makefile ├── README.md ├── chantal ├── __init__.py ├── __main__.py ├── controlfile │ ├── __init__.py │ ├── makeish.py │ └── python.py ├── error.py ├── job.py ├── msg.py └── util.py ├── doc ├── ideas.md ├── justin.md ├── machine │ ├── lxd.md │ ├── podman.md │ └── vm.md ├── setup.md ├── simulator.md └── states.md ├── etc ├── justin.conf.example ├── justin.service ├── kevin.conf.example ├── kevin.service ├── kevinfile.example ├── project.conf.example └── tmpfiles.d │ └── kevin.conf ├── justin ├── __init__.py ├── __main__.py ├── config.py ├── machine │ ├── __init__.py │ ├── custom.py │ ├── lxd.py │ ├── podman.py │ └── qemu.py ├── manage.py ├── messages.py ├── protocol.py └── shell.py ├── kevin ├── __init__.py ├── __main__.py ├── action.py ├── build.py ├── build_manager.py ├── chantal.py ├── config.py ├── httpd.py ├── job.py ├── job_manager.py ├── justin.py ├── justin_machine.py ├── kevin.py ├── lrustore.py ├── process.py ├── project.py ├── project_config.py ├── service │ ├── __init__.py │ ├── badge │ │ ├── __init__.py │ │ ├── action.py │ │ └── generator.py │ ├── github │ │ ├── __init__.py │ │ ├── action.py │ │ ├── pull_manager.py │ │ ├── status.py │ │ ├── update.py │ │ ├── util.py │ │ └── webhook.py │ └── symlink.py ├── service_meta.py ├── simulator │ ├── __init__.py │ ├── __main__.py │ ├── github.py │ ├── service.py │ └── util.py ├── task_queue.py ├── trigger.py ├── update.py ├── util.py ├── watchable.py └── watcher.py ├── mandy ├── favicon-error.png ├── favicon-failure.png ├── favicon-running.png ├── favicon-skipped.png ├── favicon-success.png ├── favicon-waiting.png ├── index.html ├── jshint.config ├── mandy.css ├── mandy.js ├── robots.txt ├── terminal.css └── terminal.js └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.pyc 3 | *.pyo 4 | ~* 5 | .*.swp 6 | \#* 7 | .#* 8 | 9 | MANIFEST 10 | dist/ 11 | 12 | /*.conf 13 | /test-* 14 | /build 15 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | Kevin CI, the stupid continuous integration toolkit. 2 | 3 | Copyright (C) 2015-2015 Michael Enßlin 4 | Copyright (C) 2015-2025 Jonas Jelten 5 | Copyright (C) 2015-2015 Markus Otto 6 | Copyright (C) 2016 Markus Teich 7 | 8 | This program is free software: you can redistribute it and/or modify 9 | it under the terms of the GNU Affero General Public License as published by 10 | the Free Software Foundation, either version 3 of the License, or 11 | (at your option) any later version. 12 | 13 | This program is distributed in the hope that it will be useful, 14 | but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | GNU Affero General Public License for more details. 17 | 18 | You should have received a copy of the GNU Affero General Public License 19 | along with this program. If not, see . 
20 | 21 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | pylint: 2 | python3 -m pylint --reports=n --disable=locally-disabled,fixme,too-many-arguments kevin justin chantal 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kevin-CI 2 | 3 | Kevin-CI is a self-hosted continuous integration service. 4 | 5 | With Kevin you have **maximum-speed builds**, spectacular GitHub integration and the best™ CI experience ever. 6 | 7 | Kevin-CI supports [QEMU](https://qemu.org), [LXD](https://canonical.com/lxd) and [Podman](https://podman.io). 8 | 9 | 10 | ### Dafuq? 11 | 12 | Kevin is a self-hostable CI daemon to build [pull requests](https://help.github.com/articles/using-pull-requests/) inside temporary containers. 13 | 14 | 15 | It was mainly developed for [openage](http://github.com/SFTtech/openage/), 16 | but you can use it for _any_ project! 17 | 18 | Kevin can create doc files, bundle software, run tests, make screenshots, 19 | end world hunger, calculate the last digits of pi: all in a custom container. 20 | 21 | Requires: 22 | - [Python >=3.11](https://www.python.org/) 23 | - [aiohttp](https://aiohttp.org/) 24 | - and some container/vm to run jobs in 25 | 26 | 27 | ### Components 28 | 29 | * **Kevin**: Receives triggers and launches the builds 30 | * **Justin**: Provides temporary containers to Kevin 31 | * **Chantal**: Run inside the container to execute the Job 32 | * **Mandy**: Webinterface to view live-results 33 | 34 | 35 | ### How? 36 | 37 | * `kevin` is notified by a GitHub webhook 38 | * It spawns a temporary Container/VM from a template to run the job 39 | * The repo is cloned and the build/test steps in `kevinfile` are executed 40 | * Progress can be viewed live via Web-UI, GitHub, `curl` or websocket API 41 | * Results are instantly reported to GitHub 42 | 43 | 44 | ### Features 45 | 46 | * Makefile-like [control file (`kevinfile`)](etc/kevinfile.example) 47 | * Directly specify command dependencies of your build 48 | * Report the step results and timing back to github 49 | 50 | * Live-view of build console output 51 | * See what the machine builds in real-time 52 | * Store and download resulting files (e.g. releases) 53 | 54 | * GitHub pull requests 55 | * A build is triggered for each new and updated pull request 56 | * When you push to a currently-in-build branch, 57 | the previous build is canceled 58 | 59 | * File output 60 | * Let your project generate files and folders 61 | * They're saved to the static web folder 62 | * Use it to generate documentation, releases, ... 63 | 64 | * Container management 65 | * Jobs are built in temporary throwaway VMs 66 | * Easily change and update the base images 67 | 68 | 69 | ### Setup 70 | 71 | **How?** [Lurk into our setup guide](doc/setup.md). 72 | 73 | 74 | ### Supported Services 75 | 76 | * Hosting [services](/kevin/service/): 77 | * [X] [GitHub](https://github.com/), 78 | * [ ] [GitLab](https://gitlab.com/), 79 | * [ ] ... 
80 | * [Supported container/VM tools](/justin/machine/) 81 | * [X] [qemu](http://qemu-project.org) 82 | * [X] [podman](https://podman.io/) 83 | * [X] [lxd](https://linuxcontainers.org/lxd) 84 | * [ ] [docker](https://www.docker.com/) 85 | * [ ] [libvirt](https://libvirt.org/) 86 | * [ ] [xen](https://www.xenproject.org/) 87 | * [ ] [nspawn](http://www.freedesktop.org/software/systemd/man/systemd-nspawn.html) 88 | * [ ] ... 89 | 90 | 91 | ### Contact 92 | 93 | If you have questions or suggestions, or encounter any problem, 94 | please join our [Matrix channel](https://matrix.to/#/#sfttech:matrix.org) and ask! 95 | 96 | Of course, create [issues](https://github.com/SFTtech/kevin-ci/issues) 97 | and [pull requests](https://github.com/SFTtech/kevin-ci/pulls). 98 | 99 | 100 | ### License 101 | 102 | Released under the **GNU Affero General Public License** version 3 or later, 103 | see [COPYING](COPYING) and [LICENSE](LICENSE) for details. 104 | -------------------------------------------------------------------------------- /chantal/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The kevin-ci component that runs inside the builder machine. 3 | """ 4 | 5 | import argparse 6 | import typing 7 | 8 | Args = typing.NewType('Args', argparse.Namespace) 9 | -------------------------------------------------------------------------------- /chantal/__main__.py: -------------------------------------------------------------------------------- 1 | """ 2 | CLI entry point for chantal. 3 | """ 4 | 5 | import argparse 6 | import traceback 7 | import typing 8 | 9 | from . import Args 10 | 11 | from .job import run_job 12 | from .error import FatalJobError, CommandError 13 | from .msg import job_state, stdout 14 | 15 | 16 | def main() -> None: 17 | """ 18 | Takes clone url and commit sha from sys.argv, 19 | builds the project, 20 | and reports progress/stdout via status messages 21 | on its stdout stream. 22 | Takes no stdin; the message stream is its only stdout output. 23 | """ 24 | try: 25 | cmd = argparse.ArgumentParser( 26 | description=("clone a repo and process the kevinfile with " 27 | "build instructions.") 28 | ) 29 | 30 | cmd.add_argument("--clone", dest="clone_source", 31 | help=("Location to clone the git repo from. " 32 | "If not given, don't clone.")) 33 | cmd.add_argument("--branch", 34 | help=("Branch to clone. If given, clone only " 35 | "this branch and no other repo history.")) 36 | cmd.add_argument("--checkout", dest="treeish", 37 | help=("Treeish (branch, hash, ...) to check out " 38 | "after clone. If not given, just clone.")) 39 | cmd.add_argument("--desc-file", dest="filename", default="kevinfile", 40 | help=("Filename of the control file ('%(default)s') " 41 | "within the repo folder")) 42 | cmd.add_argument("--desc-format", dest="format", default="makeish", 43 | help="Format of the control file ('%(default)s')") 44 | cmd.add_argument("--job", default="", 45 | help=("Job name to let the control file " 46 | "perform conditionals")) 47 | cmd.add_argument("--fetch-depth", type=int, default=0, 48 | help=("Depth of commits to clone the repo, " 49 | "use 1 to only download the latest commit")) 50 | cmd.add_argument("--dir", dest="work_location", default="repo", 51 | help=("Directory where the git repo will be " 52 | "cloned and chantal will `cd` to. 
" 53 | "default is ./%(default)s")) 54 | 55 | args: Args = typing.cast(Args, cmd.parse_args()) 56 | 57 | run_job(args) 58 | 59 | except (FatalJobError, CommandError) as exc: 60 | job_state("error", str(exc)) 61 | stdout("\x1b[31;1mFATAL\x1b[m %s\n" % str(exc)) 62 | 63 | except SystemExit as exc: 64 | if exc.code != 0: 65 | job_state("error", f"chantal exited with {exc.code}") 66 | stdout(f"\x1b[31;1mexit with\x1b[m {exc.code}\n") 67 | 68 | except BaseException as exc: 69 | job_state("error", f"Internal error in Chantal: {exc!r}") 70 | stdout("\x1b[31;1;5minternal error\x1b[m\n") 71 | traceback.print_exc() 72 | 73 | else: 74 | stdout("\n\x1b[1mDone.\x1b[m\n") 75 | 76 | 77 | if __name__ == '__main__': 78 | main() 79 | -------------------------------------------------------------------------------- /chantal/controlfile/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | build job configuration 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | from abc import ABC, abstractmethod 8 | from dataclasses import dataclass, field 9 | 10 | 11 | class ControlFile(ABC): 12 | def __init__(self) -> None: 13 | pass 14 | 15 | @abstractmethod 16 | def get_steps(self) -> list[Step]: 17 | raise NotImplementedError() 18 | 19 | 20 | @dataclass 21 | class Step: 22 | name: str 23 | depends: list[str] 24 | cwd: str | None 25 | commands: list[str] 26 | env: dict[str, str] = field(default_factory=dict) 27 | # output_source, output_destination 28 | outputs: list[tuple[str, str]] = field(default_factory=list) 29 | skip: bool = False 30 | hidden: bool = False 31 | 32 | 33 | class ParseError(ValueError): 34 | """ Control file parsing error """ 35 | def __init__(self, lineno, text): 36 | super().__init__(lineno + 1, text) 37 | -------------------------------------------------------------------------------- /chantal/controlfile/python.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from . import ControlFile, Step 4 | 5 | import typing 6 | if typing.TYPE_CHECKING: 7 | from pathlib import Path 8 | from .. import Args 9 | 10 | 11 | class PythonControlFile(ControlFile): 12 | def __init__(self, file: Path, args: Args) -> None: 13 | super().__init__() 14 | 15 | # read and evaluate python control file 16 | raise NotImplementedError() 17 | 18 | def get_steps(self) -> list[Step]: 19 | raise NotImplementedError() 20 | -------------------------------------------------------------------------------- /chantal/error.py: -------------------------------------------------------------------------------- 1 | class FatalJobError(Exception): 2 | """ 3 | Used to terminate the build with a nice error message. 4 | (as opposed to internal build errors, which yield a stack trace) 5 | """ 6 | pass 7 | 8 | 9 | class CommandError(Exception): 10 | """ 11 | Raised when a command to be executed fails. 12 | """ 13 | pass 14 | 15 | 16 | class OutputError(Exception): 17 | """ 18 | Raised when a file output fails. 19 | """ 20 | pass 21 | -------------------------------------------------------------------------------- /chantal/job.py: -------------------------------------------------------------------------------- 1 | """ 2 | Driver for building a job. 3 | Status and output is reported on stdout via json messages. 
4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | import os 9 | import shlex 10 | import typing 11 | from pathlib import Path 12 | from time import time 13 | 14 | from .controlfile import ControlFile 15 | from .controlfile.python import PythonControlFile 16 | from .controlfile.makeish import MakeishControlFile 17 | 18 | from .msg import ( 19 | job_state, step_state, stdout, 20 | output_item as msg_output_item, 21 | output_dir as msg_output_dir, 22 | output_file as msg_output_file, 23 | raw_msg 24 | ) 25 | from .util import run_command, filter_t 26 | from .error import CommandError, OutputError 27 | 28 | if typing.TYPE_CHECKING: 29 | from . import Args 30 | from .controlfile import Step 31 | 32 | 33 | def run_job(args: Args) -> None: 34 | """ 35 | Main entry point for building a job. 36 | """ 37 | base_env = os.environ.copy() 38 | base_env.update({ 39 | "TERM": "xterm", 40 | "GCC_COLORS": "yes", 41 | "KEVIN": "true", 42 | "CHANTAL": "true", 43 | "KEVIN_JOB": args.job or "", 44 | }) 45 | 46 | if args.clone_source: 47 | _clone_repo(args, base_env) 48 | elif args.work_location: 49 | os.chdir(args.work_location) 50 | 51 | control_file: ControlFile 52 | if args.format == "python": 53 | control_file = PythonControlFile(Path(args.filename), args) 54 | elif args.format == "makeish": 55 | control_file = MakeishControlFile(Path(args.filename), args) 56 | else: 57 | raise ValueError(f"unhandled control file format {args.control_file_format!r}") 58 | 59 | steps = control_file.get_steps() 60 | 61 | _process_steps(steps, base_env, args) 62 | 63 | 64 | def _clone_repo(args: Args, env) -> None: 65 | job_state("running", "cloning repo") 66 | 67 | if args.fetch_depth > 0: 68 | shallow = ("--depth %d" % args.fetch_depth) 69 | else: 70 | shallow = None 71 | 72 | if args.treeish: 73 | refname = f"kevin-{args.branch.replace(':', '/')}" if args.branch else "kevin-build" 74 | 75 | # to silence main/master warnings (must be distinct from refname...) 76 | run_command("git config --global init.defaultBranch stuff-default", env=env, hide_invoc=True) 77 | 78 | # don't clone, instead fetch from remote to select treeish 79 | run_command(f"git init '{args.work_location}'", env=env) 80 | os.chdir(args.work_location) 81 | run_command(f"git remote add origin {shlex.quote(args.clone_source)}", env=env) 82 | run_command(filter_t(("git", "fetch", shallow, "--no-tags", "--prune", "origin", 83 | f"{args.treeish}:{refname}")), 84 | env=env) 85 | run_command(f"git checkout -q '{refname}'", env=env) 86 | 87 | else: 88 | if args.branch: 89 | branch = f"--branch '{args.branch}' --single-branch" 90 | else: 91 | branch = None 92 | 93 | run_command(filter_t(("git clone", shallow, branch, shlex.quote(args.clone_source), args.work_location)), env=env) 94 | os.chdir(args.work_location) 95 | 96 | 97 | def _process_steps(steps: list[Step], base_env: dict[str, str], args: Args) -> None: 98 | for step in steps: 99 | if not step.skip: 100 | step_state(step, "waiting", "waiting") 101 | 102 | jobtimer = time() 103 | 104 | # steps that errored 105 | errors: list[str] = [] 106 | # steps that succeeded 107 | success: set[str] = set() 108 | 109 | for step in steps: 110 | depend_issues = set(step.depends) - success 111 | 112 | if step.skip: 113 | # the step has been marked to be skipped in the control file. 114 | # do not run it or produce any output. 
115 | if not depend_issues: 116 | success.add(step.name) 117 | continue 118 | 119 | if not errors: 120 | job_state("running", "running (" + step.name + ")") 121 | 122 | if depend_issues: 123 | text = "depends failed: " + ", ".join(depend_issues) 124 | step_state(step, "error", text) 125 | stdout("\n\x1b[36;1m[%s]\x1b[m\n\x1b[31;1m%s\x1b[m\n" % 126 | (step.name, text)) 127 | continue 128 | 129 | if step.commands or step.outputs: 130 | step_state(step, "running", "running") 131 | 132 | steptimer = time() 133 | stdout("\n\x1b[36;1m[%s]\x1b[m\n" % step.name) 134 | 135 | try: 136 | step_env = base_env.copy() 137 | step_env.update(step.env) 138 | 139 | # execute commands 140 | for command in step.commands: 141 | run_command(command, env=step_env, cwd=step.cwd, shell=True) 142 | 143 | # then, transfer output files 144 | for output_src, output_dst in step.outputs: 145 | output_item(output_src, output_dst) 146 | 147 | except (CommandError, OutputError) as exc: 148 | # failure in step command. 149 | step_state(step, "failure", str(exc.args[0])) 150 | 151 | if not step.hidden: 152 | errors.append(step.name) 153 | job_state( 154 | "failure", 155 | "steps failed: " + ", ".join(sorted(errors)) 156 | ) 157 | else: 158 | step_state( 159 | step, "success", 160 | "completed in %.2f s" % (time() - steptimer) 161 | ) 162 | success.add(step.name) 163 | 164 | if not errors: 165 | job_state("success", "completed in %.2f s" % (time() - jobtimer)) 166 | 167 | 168 | def output_item(source_name: str, output_name: str) -> None: 169 | """ 170 | Outputs one output item, as listed in the config. 171 | """ 172 | source_path = Path(source_name) 173 | 174 | # announce file or dir transfer 175 | if source_path.is_file(): 176 | output_file(source_path, output_name) 177 | elif source_path.is_dir(): 178 | output_dir(source_path, output_name) 179 | else: 180 | raise OutputError("non-existing output: %s" % source_path) 181 | 182 | # finalize the file transfer 183 | msg_output_item(output_name) 184 | 185 | 186 | def output_file(path: Path, targetpath: str) -> None: 187 | """ 188 | Outputs a single raw file. Temporarily switches the control stream 189 | to binary mode. 190 | """ 191 | size = path.stat().st_size 192 | with path.open('rb') as fileobj: 193 | # change to binary mode 194 | msg_output_file(targetpath, size) 195 | 196 | remaining = size 197 | while remaining > 0: 198 | # read max 8MiB at once 199 | chunksize = min(remaining, 8 * 1024**2) 200 | data = fileobj.read(chunksize) 201 | if not data: 202 | # the file size has changed... but we promised to deliver! 203 | data = b'\0' * chunksize 204 | 205 | raw_msg(data) 206 | remaining -= len(data) 207 | 208 | 209 | def output_dir(path: Path, targetpath: str) -> None: 210 | """ 211 | Recursively outputs a directory. 212 | """ 213 | msg_output_dir(targetpath) 214 | for entry in path.iterdir(): 215 | entrytargetpath = targetpath + '/' + entry.name 216 | if entry.is_file(): 217 | output_file(entry, entrytargetpath) 218 | elif entry.is_dir(): 219 | output_dir(entry, entrytargetpath) 220 | else: 221 | pass 222 | -------------------------------------------------------------------------------- /chantal/msg.py: -------------------------------------------------------------------------------- 1 | """ 2 | Code for sending messages to Kevin on stdout. 
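The framing is line-delimited JSON: one object per line, flushed immediately so Kevin can process it as it streams in. Raw binary data may be interleaved after an announcing message (see raw_msg).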
3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import json 8 | import sys 9 | import typing 10 | 11 | if typing.TYPE_CHECKING: 12 | from .controlfile import Step 13 | 14 | 15 | def msg(**kwargs) -> None: 16 | """ 17 | Writes a JSON-ified version of kwargs to the msg stream. 18 | """ 19 | sys.stdout.buffer.write(json.dumps(kwargs).encode()) 20 | sys.stdout.buffer.write(b'\n') 21 | sys.stdout.buffer.flush() 22 | 23 | 24 | def raw_msg(data: bytes): 25 | """ 26 | Writes a raw bytes object to the msg stream. 27 | 28 | The other side must be informed about the raw data with an appropriate 29 | json message, or the behavior will obviously be undefined. 30 | """ 31 | sys.stdout.buffer.write(data) 32 | sys.stdout.buffer.flush() 33 | 34 | 35 | def stdout(text: str): 36 | """ 37 | Sends a stdout message. 38 | """ 39 | msg(cmd="stdout", text=text) 40 | 41 | 42 | def job_state(state: str, text: str): 43 | """ 44 | Sends a job state message. 45 | """ 46 | msg(cmd="job-state", state=state, text=text) 47 | 48 | 49 | def step_state(step: Step, state: str, text: str): 50 | """ 51 | Sends a step state message, if the step was not marked hidden. 52 | """ 53 | if step.hidden: 54 | return 55 | 56 | msg(cmd="step-state", step=step.name, state=state, text=text) 57 | 58 | 59 | def output_item(name: str): 60 | """ 61 | Sends an output-item message. 62 | """ 63 | msg(cmd="output-item", name=name) 64 | 65 | 66 | def output_file(path: str, size: int): 67 | """ 68 | Sends an output file message. 69 | Send 'size' bytes of raw data using raw_msg immediately afterwards. 70 | """ 71 | msg(cmd="output-file", path=path, size=size) 72 | 73 | 74 | def output_dir(path: str): 75 | """ 76 | Sends an output dir message. 77 | """ 78 | msg(cmd="output-dir", path=path) 79 | -------------------------------------------------------------------------------- /chantal/util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility routines. 3 | """ 4 | 5 | import codecs 6 | import os 7 | import shlex 8 | 9 | from .msg import stdout 10 | from .error import CommandError 11 | 12 | 13 | def filter_t(input: list[str | None] | tuple[str | None, ...]) -> list[str]: 14 | return [elem for elem in input if elem] 15 | 16 | 17 | def run_command(cmd: str | list[str], 18 | env: dict[str, str], 19 | cwd: str | None = None, 20 | shell: bool = False, 21 | hide_invoc: bool = False): 22 | """ 23 | Prints the command name, then runs it. 24 | Throws CommandError on retval != 0. 25 | 26 | Env is the environment variables that are passed. 27 | """ 28 | cmd_list: list 29 | cmd_str: str 30 | if isinstance(cmd, str): 31 | cmd_list = shlex.split(cmd) 32 | cmd_str = cmd 33 | elif isinstance(cmd, list): 34 | cmd_list = cmd 35 | cmd_str = shlex.join(cmd) 36 | else: 37 | raise ValueError(f"cmd not list or str: {cmd!r}") 38 | 39 | if not hide_invoc: 40 | stdout(f"\x1b[32;1m$\x1b[m {cmd_str}\n") 41 | 42 | child_pid, tty_fd = os.forkpty() 43 | if child_pid < 0: 44 | raise OSError("could not fork") 45 | 46 | if child_pid == 0: 47 | # we're the child 48 | 49 | # enter a custom work dir 50 | if cwd: 51 | tgt = os.path.expanduser(os.path.expandvars(cwd)) 52 | os.chdir(tgt) 53 | 54 | # launch the subprocess here. 
55 | if shell: 56 | os.execve("/bin/sh", ["sh", "-c", cmd_str], env) 57 | else: 58 | os.execvpe(cmd_list[0], cmd_list, env) 59 | # we only reach this point if the exec has failed 60 | print("\x1b[31;1mcould not execve\x1b[m") 61 | raise SystemExit(1) 62 | 63 | # we're the parent; process the child's stdout and wait for it to 64 | # terminate. 65 | output_decoder = codecs.getincrementaldecoder('utf-8')(errors='replace') 66 | while True: 67 | try: 68 | stdout(output_decoder.decode(os.read(tty_fd, 65536))) 69 | except OSError: 70 | # slave has been closed 71 | os.close(tty_fd) 72 | _, status = os.waitpid(child_pid, 0) 73 | retval = status % 128 + status // 256 74 | break 75 | 76 | if retval != 0: 77 | stdout("\x1b[31;1mcommand returned %d\x1b[m\n" % retval) 78 | raise CommandError("command failed: %s [%d]" % (cmd_str, retval)) -------------------------------------------------------------------------------- /doc/ideas.md: -------------------------------------------------------------------------------- 1 | Ideas 2 | ===== 3 | 4 | 5 | These are things that might come in handy for kevin someday. 6 | If you'd like to start implementing one of them, you can 7 | ask in our [Matrix channel](https://matrix.to/#/#sfttech:matrix.org) for assistance. 8 | 9 | 10 | #### Job finish actions 11 | 12 | * Notifications via IRC, email, ... 13 | 14 | 15 | #### Local development 16 | 17 | * Prepare kevin for usage on your local machine 18 | * Any project can be built with a simple command only (`kevin run`?) 19 | * This doesn't even have to take place in a VM since it's your code 20 | 21 | #### More services 22 | 23 | * `gitolite`: git-post-receive hook to trigger kevin 24 | * Any other github-like or git-related hosting service 25 | 26 | 27 | #### More containers 28 | 29 | * Implement the stubs in the `justin/machine` folder to support other machines 30 | 31 | 32 | #### VM management 33 | 34 | * Direct management console via control connection 35 | * Resource limitations (e.g. vm memory, max running vms) 36 | 37 | 38 | #### Compiler interaction 39 | 40 | * Parse compiler output (asan, errors, warnings, ...) 41 | * Project setting: `compilers=gcc,clang,ghc, ...`, `parse=True` 42 | * Perform some actions 43 | * Directly comment offending line on github 44 | * Collect statistics, ... 45 | -------------------------------------------------------------------------------- /doc/justin.md: -------------------------------------------------------------------------------- 1 | Justin machine provider 2 | ======================= 3 | 4 | Justin is the daemon which executes the containers/virtual machines. 5 | 6 | Justin starts them when Kevin requests a machine and cleans them up afterwards. 7 | 8 | The backends are implemented in [`justin/machine/`](/justin/machine), and there's a [configuration guide for each backend](machine/). 9 | 10 | 11 | Managing VMs 12 | ------------ 13 | 14 | Once your VM is created (see [setup.md](setup.md)) and justin is running, 15 | you can launch and SSH into it. 16 | 17 | The `justin.manage` helper boots the machine in management mode 18 | and opens an ssh shell in it. 19 | 20 | ``` bash 21 | python -m justin.manage unix:///run/kevin/justin my-machine-name $optional-command 22 | ``` 23 | 24 | It uses the exact same access kevin would use, 25 | except that the machine is persistent. 26 | 27 | In there, update the machine, install packages, whatever. 28 | All jobs will then run on a copy of that image. 29 | 30 | If you want a temporary machine (like a job gets), call `justin.manage --volatile`. 
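For example, to get a throwaway machine just like a job gets (a sketch, assuming the same socket and machine name as above; all changes are discarded on shutdown):

``` bash
python -m justin.manage --volatile unix:///run/kevin/justin my-machine-name
```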
31 | -------------------------------------------------------------------------------- /doc/machine/lxd.md: -------------------------------------------------------------------------------- 1 | LXD Containers 2 | ============== 3 | 4 | Justin can manage [LXD](https://linuxcontainers.org/lxd/) containers. 5 | The launched container must run a SSH server such that the buildsystem is able to execute the build steps via remote commands. 6 | 7 | 8 | ## Image Creation 9 | 10 | A minimal example to create such a container follows. 11 | If you execute this as a script, it's something like a `Dockerfile`, just without the Docker stuff :) 12 | 13 | ```python 14 | import subprocess 15 | import shlex 16 | 17 | def ex(cmd, output=False): 18 | print(f"# {cmd}") 19 | cmd = shlex.split(cmd) 20 | if output: 21 | return subprocess.check_output(cmd) 22 | else: 23 | return subprocess.check_call(cmd) 24 | 25 | # this key can then log into the containers 26 | with open("/home/kevin/.ssh/id_rsa.pub") as hdl: 27 | authorized_keys = hdl.read() 28 | 29 | # this is the container we build the template in 30 | template_container = "kevincitemplate" 31 | 32 | # after the template container is done, store the filesystem as this image 33 | template_image = "kevin/debian-sid" 34 | 35 | # base the image on debian sid 36 | ex(f"lxc launch images:debian/sid {template_container}") 37 | 38 | # install packages 39 | ex(f"lxc exec {template_container} -- apt-get update") 40 | ex(f"lxc exec {template_container} -- apt-get install -y git openssh-server") 41 | 42 | # setup user and ssh 43 | ex(f"lxc exec {template_container} -- useradd -m chantal") 44 | ex(f"lxc exec {template_container} -- mkdir /home/chantal/.ssh") 45 | ex(f"lxc exec {template_container} -- sh -c \"echo '{authorized_keys}' > /home/chantal/.ssh/authorized_keys\"") 46 | ex(f"lxc exec {template_container} -- chmod 700 /home/chantal/.ssh") 47 | ex(f"lxc exec {template_container} -- chmod 644 /home/chantal/.ssh/authorized_keys") 48 | ex(f"lxc exec {template_container} -- chown -R chantal:root /home/chantal/.ssh") 49 | ex(f"lxc exec {template_container} -- systemctl enable --now ssh") 50 | 51 | print("public key of the container's ssh daemon:") 52 | print(ex(f"lxc exec {template_container} -- cat /etc/ssh/ssh_host_ed25519_key.pub", output=True).decode()) 53 | 54 | ## now that ssh is available, you could do further setup steps via Ansible, for example. 55 | 56 | # stop the container and convert it to an image 57 | ex(f"lxc stop {template_container}") 58 | ex(f"lxc publish {template_container} --alias '{template_image}'") 59 | ex(f"lxc rm {template_container}") 60 | ``` 61 | 62 | Running this script creates you a container image (named after `template_image`), which Justin can then use to spawn temporary CI containers from. 63 | 64 | 65 | ## Run a Temporary CI Container 66 | 67 | You can test-launch such a temporary container with: 68 | 69 | ```bash 70 | lxc launch kevin/debian-sid lolkevintest --ephemeral 71 | 72 | # log into the container 73 | lxc exec lolkevintest -- bash 74 | ``` 75 | 76 | When it's turned off (`poweroff` in the container or with `lxc stop lolkevintest` on the host), it's automatically deleted! 77 | 78 | 79 | ## Container Image Updating 80 | 81 | You will see the image we created here: 82 | ```bash 83 | lxc image ls 84 | 85 | lxc image info kevin/debian-sid 86 | ``` 87 | 88 | There you can also remove it (`lxc image delete kevin/debian-sid`) and create a new one with the script above. 
89 | 90 | To update the image without deleting it, do these steps: 91 | ```bash 92 | # create non-ephemeral container to update the image 93 | lxc launch kevin/debian-sid tmp-imageupdate 94 | 95 | # do the update steps interactively (or script the steps like above) 96 | lxc exec tmp-imageupdate -- bash 97 | 98 | # once you're done, stop the container so we can export it as an image again 99 | lxc stop tmp-imageupdate 100 | lxc publish tmp-imageupdate --alias kevin/debian-sid 101 | lxc rm tmp-imageupdate 102 | 103 | # you can clean up the old images in the image list 104 | lxc image ls 105 | lxc image delete 106 | ``` 107 | -------------------------------------------------------------------------------- /doc/machine/podman.md: -------------------------------------------------------------------------------- 1 | Podman Containers 2 | ================= 3 | 4 | To let justin manage podman containers, you need to provide it with 5 | a suitable container image. This image must run an SSH server so that the buildsystem 6 | is able to execute the build steps via remote commands. 7 | 8 | A minimal example of a Dockerfile building a usable debian sid image would be: 9 | 10 | ```dockerfile 11 | FROM debian:sid 12 | 13 | ARG authorized_keys 14 | ENV AUTHORIZED_KEYS=$authorized_keys 15 | 16 | # install packages 17 | RUN apt-get update 18 | RUN apt-get install -y git openssh-server 19 | 20 | # setup user and ssh 21 | RUN useradd -m chantal 22 | RUN mkdir /home/chantal/.ssh 23 | RUN echo "$AUTHORIZED_KEYS" > /home/chantal/.ssh/authorized_keys 24 | RUN chmod 700 /home/chantal/.ssh 25 | RUN chmod 644 /home/chantal/.ssh/authorized_keys 26 | RUN chown -R chantal:root /home/chantal/.ssh 27 | RUN mkdir -p /var/run/sshd /run/sshd 28 | 29 | # SSH login fix. Otherwise user is kicked off after login 30 | RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd 31 | 32 | ENV NOTVISIBLE "in users profile" 33 | RUN echo "export VISIBLE=now" >> /etc/profile 34 | 35 | RUN cat /etc/ssh/ssh_host_ed25519_key.pub 36 | 37 | EXPOSE 22 38 | CMD ["/usr/sbin/sshd", "-D", "-p", "22"] 39 | ``` 40 | 41 | Podman needs its [own preparation](https://github.com/containers/libpod/tree/master/docs/tutorials): 42 | * e.g. the subuids and subgids described in [their rootless guide](https://github.com/containers/libpod/blob/master/docs/tutorials/rootless_tutorial.md). 43 | 44 | This image would then be built as the user running the justin daemon, like: 45 | ```shell script 46 | su podman build -t --build-arg authorized_keys="" . 47 | ``` 48 | Instead of passing the build user's (the one running the kevin daemon) ssh key via container build args, it is 49 | also possible to just copy the public key from the host system. -------------------------------------------------------------------------------- /doc/machine/vm.md: -------------------------------------------------------------------------------- 1 | VM container setup 2 | ================== 3 | 4 | Use this guide if your container will run as a real virtual machine (QEMU, ...). 5 | 6 | You'll set up the system that will process a build job. 7 | Because of that, you should prepare it in such a way that this VM is well suited for building your job! 8 | 9 | Basically, you'll have a full system installation (Linux, ...) that is then reached and controlled via SSH and Python. 
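Once the checklist below is done, you can sanity-check the access path Kevin will use. A sketch, assuming the VM's SSH port was forwarded to 7888 on localhost as in the `justin.conf` example:

```bash
# should connect without a password prompt and print a python3 version
ssh -p 7888 chantal@localhost -- python3 --version
```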
10 | 11 | - [Setup the OS](https://wiki.archlinux.org/index.php/QEMU#Creating_new_virtualized_system) 12 | - Install 13 | - `python >=3.4` 14 | - `git` 15 | - `ssh` daemon 16 | - `sudo` 17 | - In the VM, create user `chantal` 18 | - In `visudo`, give `NOPASSWD: ALL` permissions to `chantal` 19 | - That way, `chantal` easily gets root permissions in the machine 20 | - Enable and run the `sshd` service 21 | - Setup password-less SSH access (`ssh-copy-id`) for above `kevin` user to `chantal@container_vm` 22 | - Add `kevin`'s `id_rsa.pub` into `~chantal/.ssh/authorized_keys` 23 | - Store the contents of the container's `/etc/ssh/ssh_host_ed25519_key.pub` 24 | to the `justin.conf` so the key for this VM can be verified 25 | - **Set up the container** in the way you'd like to test your project 26 | - If your build involves graphical things, you could set up `tigervnc` or `x11vnc` 27 | -------------------------------------------------------------------------------- /doc/setup.md: -------------------------------------------------------------------------------- 1 | Kevin setup guide 2 | ================= 3 | 4 | Components needed for a smooth experience\*: 5 | 6 | * `Kevin`: Main daemon 7 | * `Justin`: Container provider daemon 8 | * `Chantal`: In-container agent to run the job 9 | * `Mandy`: Webinterface 10 | 11 | \* Disclaimer: The experience may not actually be smooth and likely more components are needed. 12 | 13 | 14 | tl;dr 15 | ----- 16 | 17 | * Install `kevin` on a server 18 | * Create/edit `kevin.conf`, `justin.conf` and add a config for all your projects to be built 19 | * Set up a VM or container with active SSH and add it to `justin.conf` 20 | * Add `kevin-ci` as github webhook 21 | * Run `kevin` and `justin` (e.g. as `systemd` service) 22 | * Add the `kevinfile` control file to your project repo 23 | * Pull requests are built inside a temporary container copy 24 | * Set up a webserver to serve the `mandy` webinterface and your build output folder 25 | 26 | 27 | Data flow 28 | --------- 29 | 30 | * `kevin` interacts with the outside world (your git hoster). 31 | It gets notified by pull requests. You need a server for it. 32 | 33 | * `kevin` contacts `justin` to start a VM. You need a server for `justin` again, 34 | but it can be the same machine where `kevin` is running on. 35 | There can be multiple `justin`s. 36 | 37 | * `justin` launches a container/virtual machine provided by **you**. You 38 | create the machine template (just set up a debian...) or use docker, etc. 39 | Your pull request is then built inside that container by `chantal`. 40 | 41 | * After the job was run, the machine is reverted back to the template. 42 | 43 | * `mandy` presents the build progress in your browser in real-time. 44 | 45 | You are responsible for managing and updating the container :smile:. 46 | Which is one of the main reasons we created `kevin`. 47 | 48 | 49 | Security design 50 | --------------- 51 | 52 | `kevin` is designed in a way that arbitrary code can be executed in the 53 | container and will not harm your host system, apart from the usual virtual 54 | machine/container exit exploits that we can't prevent. 55 | 56 | Our code is of course 100% bugfree, we even have created a program 57 | to verify that kevin can never hang up. Hrr hrr... 58 | 59 | 60 | Project config 61 | -------------- 62 | 63 | Kevin supports hosting multiple projects. 64 | Each project can spawn as many "jobs" as you like to have. 65 | 66 | * **Build**: Equal to a commit hash. Spawns jobs which must succeed. 
67 | * **Job**: Performed by *chantal* inside a container. 68 | * **Step**: One step in a job, as defined in the in-repo `kevinfile` control file. 69 | * **Command**: Each step runs one or more commands. 70 | 71 | 72 | #### Component setup 73 | 74 | We support different container backends. 75 | Have a look inside [`justin/machine/`](/justin/machine) to see which ones and to add more container types. 76 | 77 | 78 | ##### Host system (Kevin) 79 | 80 | Kevin gets a notification that it should build something, and kevin then notifies justin to provide a container. 81 | 82 | - Install 83 | - `python >=3.11` 84 | - `aiohttp >=2.0` 85 | - Create user `kevin` (you can, of course, change that) 86 | - Create `/etc/kevin/kevin.conf` from [`kevin.conf.example`](/etc/kevin.conf.example) 87 | - Create a password-less SSH key with `ssh-keygen -t rsa -b 4096` for the user `kevin` 88 | - Install the `kevin` Python module (ideally, as a [systemd unit](/etc/kevin.service) or whatever) 89 | 90 | 91 | ##### Container provider (Justin) 92 | 93 | Justin starts and cleans up containers when Kevin requests them. 94 | 95 | - Install 96 | - `python >=3.6` 97 | - your container implementation of choice: `qemu`, `libpod`, `docker`, ... 98 | 99 | - Create `/etc/kevin/justin.conf` from [`justin.conf.example`](/etc/justin.conf.example) 100 | 101 | - Register this `justin` by adding it to the `kevin.conf` `[justin]` section. 102 | - If this `justin` is on the same machine as `kevin`: 103 | - add `justin_name=userid@/run/kevin/justin` 104 | and `kevin` will use this Unix socket to contact `justin` 105 | 106 | - If this `justin` is a **different physical machine** than the host for `kevin`: 107 | - Create user `justin` on the `justin` host 108 | - In `kevin.conf`, section `[justin]`, add `justin_name=justin@vmserver.name`, 109 | `kevin` will then contact justin via SSH 110 | - In `~justin/.ssh/authorized_keys`, force the ssh command to 111 | `command="python3 -m justin.shell useridentifier" ssh-rsa kevinkeyblabla...` 112 | This sets up password-less SSH access (`ssh-copy-id`..) 113 | for `kevin` to `justin@vmserver.name` and forces the justin shell. 114 | 115 | - optional: create firewall rules to prevent the VMs launched by `justin` 116 | from talking to internals of your network 117 | 118 | 119 | ##### Guest systems (Chantal) 120 | 121 | Setting up the guest system depends on the container technology you use. 122 | 123 | * [Qemu](machine/vm.md) 124 | * [Podman](machine/podman.md) 125 | * [LXD](machine/lxd.md) 126 | * Your [favorite-to-be-implemented](/justin/machine) backend 127 | 128 | 129 | ##### Project 130 | 131 | The project config determines active Kevin plugins and their configuration. 132 | 133 | - On the `kevin` machine, 134 | create a folder where project configurations reside in 135 | - `/etc/kevin/projects/` may be a good location 136 | - In there, create `lolmyproject.conf` from the 137 | [`etc/project.conf.example`](/etc/project.conf.example) file 138 | - For each project, create a file with a suitable name in `/etc/kevin/projects/` 139 | 140 | - For the project you want to test, 141 | create [kevin control file](/etc/kevinfile.example) `kevinfile` 142 | in the project repo root (e.g. 
`~/dev/openage/kevinfile`) 143 | - You can change the name to anything you want, even [`kartoffelsalat`](https://www.youtube.com/watch?v=idKxckZiCsU) 144 | - If you change it, set the name in the `lolmyproject.conf` 145 | 146 | - [Create GitHub webhook](https://developer.github.com/webhooks/creating/): 147 | - Settings > Webhooks and Services > Add Webhook 148 | - content type: JSON, URL: `http://your-kevin-host:webhook_port/hook-github` 149 | - Create a secret with e.g. `pwgen 1 30` and save it in the github webhook 150 | and your `projects/yourproject.conf` section `[github_webhook]` 151 | - Select events: **Pull Request** and **Push** 152 | 153 | 154 | ##### Webinterface (Mandy) 155 | 156 | - Just serve the `mandy` folder on any machine in the world. 157 | - Use `nginx` or `lighttpd` or `apache`, it does not matter. 158 | - Enter the location where `mandy` can be reached in `kevin.conf` 159 | - To allow output file downloads, you have to serve your static folder with 160 | another (or the same) webserver. 161 | 162 | 163 | #### Testing 164 | 165 | * You can **directly run Chantal** without all the other fuss and see how it will build stuff. 166 | * Kevin would do the same thing in the container/VM. 167 | * To test, invoke `python3 -m chantal --help` and be enlightened. 168 | * You can run Chantal inside your project without cloning it: 169 | * `python3 -m chantal --dir /your/project/repo --job $jobname` 170 | * `$jobname` is used for evaluating conditions in the `kevinfile`. 171 | Later, the `jobname` passed to Chantal will be taken from the job name 172 | you set up for a project in its config file `lolmyproject.conf`. 173 | 174 | * The same test can be done within your VM/container: 175 | * `python3 -m chantal --dir /tmp/clonedest --clone $clone_url --checkout $branchname_or_hashname --job $jobname` 176 | 177 | 178 | 179 | #### Running 180 | 181 | * Persistent storage for `kevin` is done in `[projects]`/`output_folder`. 182 | * All build data will be stored there. 183 | * There's no cleanup mechanism yet (you may implement it :) 184 | * [systemd](https://www.freedesktop.org/wiki/Software/systemd/) setup 185 | * copy and adjust `etc/kevin.service` to `/etc/systemd/system/kevin.service` 186 | * copy and adjust `etc/justin.service` to `/etc/systemd/system/justin.service` 187 | * copy and adjust `etc/tmpfiles.d/kevin.conf` to `/etc/tmpfiles.d/kevin.conf` 188 | * enable the services with `systemctl enable $name.service` 189 | * start them with `systemctl start $name.service` 190 | * Non-daemon launch 191 | * Run `kevin` with `python3 -m kevin` 192 | * Run `justin` with `python3 -m justin` 193 | * After setup, [manage a container](justin.md#managing-vms) with `python3 -m justin.manage` 194 | * For example: `python3 -m justin.manage unix:///run/kevin/justin your_vm_id` 195 | 196 | * We recommend first testing with a dummy repository that just contains a simple `kevinfile`, instead of the "real" project. 197 | * Test without `github`: Try using the [Kevin Simulator](simulator.md) 198 | * Test with `github`: Just make a pull request! 199 | 200 | 201 | ### Contact 202 | 203 | If you encounter any problem, please [contact us](/README.md#contact) and ask! 204 | 205 | If things crash, bug, or whatever, [create an issue](https://github.com/SFTtech/kevin-ci/issues)! 206 | 207 | If you think this guide or anything else in this project is crap and sucks, 208 | [just do better](https://github.com/SFTtech/kevin-ci/pulls)! 
209 | -------------------------------------------------------------------------------- /doc/simulator.md: -------------------------------------------------------------------------------- 1 | Hosting site simulator 2 | ====================== 3 | 4 | 5 | To allow development without a test repo e.g. at github, 6 | we provide a simulator that mimics the api of that service. 7 | 8 | These are located in `kevin/simulator`. 9 | 10 | 11 | Example for building `some-repo` with a kevin currently running with 12 | the given config file: 13 | 14 | ``` 15 | python -m kevin.simulator http://github.com/SFTtech/openage projectname /some/kevin.conf github 16 | ``` 17 | 18 | Alternatively, for a local repo on your machine: 19 | 20 | ``` 21 | python -m kevin.simulator --local-repo ~/devel/some-repo/.git projectname /some/kevin.conf github 22 | ``` 23 | 24 | 25 | This command delivers a webhook as if somebody had pushed to a repo, 26 | then the simulator waits for kevins status updates etc. 27 | -------------------------------------------------------------------------------- /doc/states.md: -------------------------------------------------------------------------------- 1 | Job states 2 | ========== 3 | 4 | | Job state | In Filesystem | In Python interpreter | 5 | |------------------------------|-----------------------------------|-----------------------| 6 | | Freshly received | job_id doesn't exist | in jobs.ACTIVE | 7 | | | | in httpd.job_queue | 8 | |------------------------------|-----------------------------------|-----------------------| 9 | | Building | job_id/generated exists | in jobs.ACTIVE | 10 | | | | job.build running | 11 | |------------------------------|-----------------------------------|-----------------------| 12 | | Completed-Cached | job_id/generated exists | in jobs.CACHED | 13 | | (failure or success) | job_id/completed exists | | 14 | | | job_id/log exists | | 15 | |------------------------------|-----------------------------------|-----------------------| 16 | | Completed | job_id/generated exists | nowhere | 17 | | (failure or success) | job_id/completed exists | | 18 | | | job_id/log exists | | 19 | |------------------------------|-----------------------------------|-----------------------| 20 | 21 | Transitions 22 | =========== 23 | 24 | | Event | State from | State to | Actions | 25 | |-------------------|------------------|------------------|--------------------------------------------------------------| 26 | | Webhook | | Freshly received | Add to jobs.ACTIVE, httpd.job_queue | 27 | | Builder available | Freshly received | Building | call job.build(), mkdir job_id/generated | 28 | | Abort, Finish | Building | Completed-cached | Add to jobs.CACHED, touch job_id/completed, write job_id/log | 29 | | Cache full | Completed-Cached | Completed | Remove from jobs.CACHED | 30 | | Retry | Completed/Cached | Freshly received | rm -r job_id, add to jobs.ACTIVE, httpd.job_queue | 31 | |-------------------|------------------|------------------|--------------------------------------------------------------| 32 | -------------------------------------------------------------------------------- /etc/justin.conf.example: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # configuration for justin, which provides build machines 3 | ############################################################################### 4 | [justin] 5 | name = test-justin 6 | control_socket = 
/run/kevin/justin 7 | control_socket_permissions = 770 8 | #control_socket_group = kevin 9 | 10 | # range of ports where VMs will listen for SSH 11 | # when the vm is spawned, it selects a port from this range. 12 | # when you don't have VMs, specify the port where to connect to here. 13 | # (can be set to [22,22] to only use port 22) 14 | # 15 | # the selected port is passed below as {SSHPORT} to the incovation. 16 | vm_ports = [7888,8000] 17 | 18 | 19 | ############################################################################### 20 | # Qemu VM 21 | 22 | #[some-random-vm-id] 23 | 24 | # machine name, which is requested by a job of some project. 25 | #name = machine-which-the-job-requests 26 | 27 | # machine type, see justin/machine/ for supported backends. 28 | #type = qemu 29 | 30 | # user used for ssh login 31 | #ssh_user = chantal 32 | 33 | # host used by kevin to connect to. 34 | # special value: "localhost" => kevin connects to justin machine 35 | #ssh_host = localhost 36 | 37 | # public key of the VM's ssh server so we can verify we connected to the right one. 38 | # you can either: 39 | # * specify the ssh server public key of the vm directly 40 | # (ssh-rsa AAAAB3Nza... or ecdsa-sha2-nistp256 AAAAE2..., etc) 41 | # * provide a path to the public key file or known hosts file 42 | # (~/.ssh/known_hosts, /etc/ssh/ssh_*_key.pub) 43 | # 44 | # comment out both to disable host key verification. 45 | #ssh_known_host_key = ssh-ed25519 AAAAAB3Nza_The_VM's_/etc/ssh/ssh_host_ed25519_key.pub_CONTENT 46 | #ssh_known_host_key_file = /some/mountpoint/of/vm/etc/ssh/ssh_host_ed25519_key.pub 47 | 48 | # VM image that you created, is used to create working copies from. 49 | #base_image = /path/to/permanent/diskimage.qcow2 50 | 51 | # VM image file name that will be stored for copy-on-write data. 52 | # This path will be suffixed with ".someid" for each parallel VM run 53 | #overlay_image = /tmp/kevin-vm-tmp.img 54 | 55 | # invocation: 2 CPUs, 2G RAM, no display 56 | #command = qemu-system-x86_64 -machine type=q35,accel=kvm -cpu host -smp 2 -m 2G -drive file={IMAGENAME},if=virtio,format=qcow2 -device virtio-balloon -display none -net nic,model=virtio -net user,hostfwd=tcp:127.0.0.1:{SSHPORT}-:22 -object rng-random,id=rng0,filename=/dev/urandom -device virtio-rng-pci,rng=rng0 57 | # folder sharing from host to VM: 58 | # QEMU: -virtfs local,path=/shared/host/path/maybe/for/ccache,mount_tag=yourtag,security_model=mapped-xattr,id=p9net0 59 | # fstab in VM: yourtag /wherever 9p trans=virtio,version=9p2000.L 0 0 60 | # remote desktop: 61 | # QEMU: -vnc 127.0.0.1:1 instead of -display none 62 | # then use a vncclient to connect to port 5901 63 | # (maybe forward the port to your machine with `ssh -L 12345:localhost:5901 youruser@justinhost`, 64 | # then connect with a vncviewer: e.g. 
`vinagre localhost:12345`) 65 | 66 | 67 | ############################################################################### 68 | ## LXD containers 69 | 70 | #[random-container-id] 71 | #name = name-requested-by-job 72 | #type = lxd 73 | 74 | ## lxd image to create each CI container from 75 | ## it has to listen on port 22 for ssh 76 | #base_image = lxd-image-name 77 | 78 | #ssh_user = chantal 79 | ## ssh_host can be '__dynamic__', then the container IP is dynamically fetched 80 | #ssh_host = __dynamic__ 81 | #ssh_port = 22 82 | #ssh_known_host_key = ssh-ed25519 AAAA_lxd-container's_/etc/ssh/ssh_host_ed25519_key.pub_CONTENT 83 | ## alternatively, load host key from filesystem dynamically: 84 | #ssh_known_host_key_file = /the/mounted/container/filesystem/etc/ssh/ssh_host_ed25519_key.pub 85 | 86 | 87 | ############################################################################### 88 | # Podman containers 89 | 90 | #[random-container-id] 91 | #name = name-requested-by-job 92 | #type = podman 93 | 94 | # docker base image for each CI container 95 | #base_image = my-container-name:my-container-version 96 | 97 | #ssh_user = chantal 98 | #ssh_host = localhost 99 | #ssh_known_host_key = ssh-ed25519 AAAA_podmain-container's_/etc/ssh/ssh_host_ed25519_key.pub_CONTENT 100 | #ssh_known_host_key_file = /the/mounted/container/filesystem/etc/ssh/ssh_host_ed25519_key.pub 101 | 102 | # configure how to start the container, BASE_IMAGE will be 103 | # substituted with the docker image provided above 104 | #command = podman run -dt -p {SSHPORT}:22/tcp --name {IMAGENAME} {BASE_IMAGE} 105 | 106 | 107 | ############################################################################### 108 | # Custom container/command invocation 109 | 110 | #[custom-commands] 111 | 112 | #name = machine-name-for-job 113 | #type = custom 114 | 115 | # when kevin connects to container via ssh, he uses this user 116 | #ssh_user = chantal 117 | 118 | # kevin's ssh connects to this host. 119 | # special value: "localhost" => kevin connects to justin machine 120 | #ssh_host = localhost 121 | 122 | # custom commands for container control. 123 | # when the launch command terminates, the container is assumed to be shut down. 124 | # see justin/machine/custom.py for implementation. 125 | #prepare = sh -c 'echo preparing manage=$JUSTIN_MANAGE' 126 | #launch = sh -c 'echo launching manage=$JUSTIN_MANAGE; nc -lp $JUSTIN_SSH_PORT' 127 | #cleanup = echo "cleaning up" 128 | 129 | 130 | ############################################################################### 131 | #[some-other-vm-id] 132 | #... 
definitions for another vm, just like the ones under [some-random-vm-id] or [custom-commands] 133 | -------------------------------------------------------------------------------- /etc/justin.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Justin machine provider for Kevin 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=justin 8 | ExecStart=/usr/bin/env python3 -u -m justin 9 | Restart=on-failure 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /etc/kevin.conf.example: -------------------------------------------------------------------------------- 1 | # kevin ci main config file 2 | 3 | [kevin] 4 | name = kevin-ci 5 | max_jobs_queued = 50 6 | max_jobs_running = 2 7 | # least recently accessed builds to keep in memory 8 | builds_cached_max = 200 9 | 10 | [projects] 11 | config_folder = /etc/kevin/projects 12 | output_folder = /your/writable/static/web/folder/served/below 13 | 14 | # the internet interaction: 15 | [web] 16 | # url prefix for build output stuff 17 | static_url = http://your.kevin.host/output_folder/statically/ 18 | 19 | # url prefix for build status links 20 | mandy_url = http://your.kevin.host/mandy/ 21 | 22 | # kevin's web worker will listen on that port 23 | dyn_port = 7777 24 | # dyn_host = 0.0.0.0 25 | 26 | # set those to the host/port where mandy can reach kevin 27 | # if kevin is behind a proxy, set the values to reach that proxy 28 | dyn_frontend_host = your.kevin.host 29 | dyn_frontend_port = 7777 30 | dyn_frontend_ssl = false 31 | 32 | # configure available justin instances 33 | # these are programs running on some machine to provide and launch VMs 34 | [justin] 35 | 36 | # chose any name for a justin you wanna register here 37 | #some_name = how-can-it-be-reached 38 | 39 | # when justin is running on the same physical machine: 40 | #lol_unix_justin = unix://me@/run/kevin/justin 41 | 42 | # reuse your known hosts file 43 | #lol_ssh_justin_keyfile = ssh://yourmom@your.justin.host:22 = ~/.ssh/known_hosts 44 | 45 | # specify public key directly 46 | #lol_ssh_justin_direct = ssh://yourdad@your.justin.host:22 = ssh-rsa ROFLOLKEY... 47 | -------------------------------------------------------------------------------- /etc/kevin.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kevin Continuous Integration 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=kevin 8 | ExecStart=/usr/bin/env python3 -u -m kevin 9 | Restart=on-failure 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /etc/kevinfile.example: -------------------------------------------------------------------------------- 1 | # kevin control file 2 | # syntax similar to makefiles 3 | 4 | # * each step can depend on other steps 5 | # * it can have optional settings like cwd or env variables 6 | # * the step consists of one or more commands 7 | # * after all commands of a step, output files can be saved 8 | # 9 | # EXAMPLE: 10 | # 11 | # stepname: other-stepname other-stepname-that-must-suceed-before bla bla 12 | # - optional rule setup stuff, e.g.: 13 | # - env: somekey=somevalue text="can be quoted" # set environment variables 14 | # - hidden # hide the step from the active step list 15 | # - skip # skip this step 16 | # - skip (? if job == "some_jobname" ?) 
# skip only if jobname matches 17 | # # available vars: all args of chantal. 18 | # 19 | # # there can be one or more output file declarations. 20 | # # those also support the if-conditionals 21 | # - output: "some/file/to/save/in/static/output/folder" as "output_name" 22 | # - output: "or/some/directory/to/send/" as "output_dir_name" 23 | # - cwd: "~/dir/to/run/the/commands/in/" 24 | # 25 | # # then, the step commands follow: 26 | # echo "some command executed in sh" 27 | # echo "if one command fails, the step will fail" 28 | # echo "it fails when returncode != 0" 29 | # 30 | # cd somewhere/ && echo "each line is in its own shell!" 31 | # echo "so the following command is no longer in 'somewhere/'" 32 | 33 | # the steps are executed in order. 34 | # any step with no more pending dependencies is executed. 35 | # if multiple are ready, the first in the order will be run first. 36 | 37 | tamale: 38 | - env: STUFF="hot hot" 39 | echo "do you like tamale?" 40 | echo "$STUFF tamale?" 41 | 42 | outputgen: 43 | - hidden 44 | - output: "/tmp/produced_file" as "root_result" 45 | echo "ohai" | tee /tmp/produced_file 46 | echo "running on whitespaceOS" (? if job == "whitespaceOS" ?) 47 | 48 | cpuinfo: tamale outputgen 49 | echo "we're running on:" 50 | lscpu 51 | 52 | great_success: cpuinfo 53 | echo "raus raus raus, die nyan-katze" 54 | 55 | echo "now, copy this file in your project and modify it!" 56 | -------------------------------------------------------------------------------- /etc/project.conf.example: -------------------------------------------------------------------------------- 1 | # kevin project config. 2 | # enables and configures kevin features 3 | # 4 | # each [section] configures a module 5 | # to have multiple instances of the same module, 6 | # the module name is suffixed with .0, .1, ... 7 | # e.g. to have multiple compile jobs (maybe on different justin machines) 8 | 9 | [project] 10 | # general project configuration 11 | name = murrica-freedom-simulator-9001 12 | job_timeout = 10m 13 | job_silence_timeout = 5min 14 | job_max_output = 100MiB 15 | 16 | # name of the build job task configuration file within the repo 17 | job_desc_file = kevinfile 18 | # file format: 'makeish' (declarative make-like steps) or 'python' 19 | job_desc_format = makeish 20 | 21 | # git configuration 22 | git_fetch_depth = 0 23 | 24 | [github_webhook] 25 | # github delivers an authenticated captain hook to kevin 26 | hooksecret = roflsharedsecret 27 | # these repos are allowed to send captain webhooks 28 | repos = yourmom/repo, yourdad/somerepo 29 | # kevin can be controlled through github label values. 30 | # control labels are automatically removed upon acceptance only 31 | # if there's a suitable [github_status] which has permissions. 32 | # comment this to disable. 
33 | ctrl_labels_rebuild = kevin-rebuild-pl0x 34 | 35 | [github_status] 36 | # used to display the progress status on github 37 | # create that access token for a github account 38 | user = yourmom 39 | token = tokentokentokenbaby 40 | # to use this status reporter (and its login token) for specific repos only, 41 | # set the repo names here (the default, "any", is for all repos) 42 | repos = any 43 | #repos = yourkitteh/mice_reloaded, macrohard/doors 44 | 45 | #[status_badge] 46 | ## create status.svg for a build 47 | #base_text = build status 48 | #success_text = success 49 | #fail_text = failed 50 | #error_text = errored 51 | 52 | #[symlink_branch] 53 | ## link commits to branch names 54 | ## 55 | ## specify branches separated by comma 56 | ## branch spec: github/yourorg/yourrepo/master, ... 57 | #only = 58 | #exclude = 59 | ## how to alias branches: :, ... 60 | #alias = github/yourorg/yourrepo/master:master 61 | ## where the branch symlinks will be placed, 62 | ## relative to project directory (or absolute) 63 | #target_dir = branches/ 64 | 65 | # "job" module configuration, which performs a build via justin. 66 | [job.0] 67 | # invokes a container launch 68 | name = GNU-debian-hurd # this name is passed to chantal! 69 | description = launch the vm and give me freedom 70 | machine = name-which-the-job-requests-from-justin 71 | # use a different control file for this job. 72 | # default otherwise: use project setting. 73 | #job_desc_file = kevin/hurdbuild 74 | 75 | [job.1] 76 | # another job, triggered by the same webhook. 77 | name = whitespaceOS # this name is also passed to chantal! 78 | description = brainfuck based emulator running haiku 79 | machine = haiku 80 | -------------------------------------------------------------------------------- /etc/tmpfiles.d/kevin.conf: -------------------------------------------------------------------------------- 1 | D /run/kevin 0771 justin kevin - 2 | -------------------------------------------------------------------------------- /justin/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Justin module properties 3 | """ 4 | 5 | import asyncio 6 | import logging 7 | import shutil 8 | import os 9 | from collections import defaultdict 10 | 11 | from .config import CFG 12 | from .protocol import JustinProto 13 | 14 | 15 | class Justin: 16 | """ 17 | Global state storage for this justin daemon. 
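Typical lifecycle, mirroring what justin.__main__ does (illustrative sketch):

    justin = Justin()          # set up the state storage
    justin.prepare_socket()    # make sure the unix socket path is usable
    asyncio.run(justin.run())  # serve control connections until cancelled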
18 | """ 19 | 20 | def __init__(self): 21 | # next used handle_id to identify a running container 22 | self.handle_id = 0 23 | 24 | # next free connection id to identify incoming control connections 25 | self.connection_id = 0 26 | 27 | # handle_id -> Container instance 28 | self.running = dict() 29 | 30 | # contains all used ssh ports 31 | # hostname -> used ports 32 | self.used_ports = defaultdict(set) 33 | 34 | def prepare_socket(self): 35 | try: 36 | os.unlink(CFG.control_socket) 37 | except OSError: 38 | if os.path.exists(CFG.control_socket): 39 | raise 40 | else: 41 | sockdir = os.path.dirname(CFG.control_socket) 42 | if not os.path.exists(sockdir): 43 | try: 44 | logging.info("creating socket directory '%s'", sockdir) 45 | os.makedirs(sockdir, exist_ok=True) 46 | except PermissionError as exc: 47 | raise exc from None 48 | 49 | async def run(self): 50 | logging.warning("listening on '%s'...", CFG.control_socket) 51 | 52 | loop = asyncio.get_running_loop() 53 | proto_tasks = set() 54 | 55 | def create_proto(): 56 | """ creates the asyncio protocol instance """ 57 | proto = JustinProto(self) 58 | 59 | # create message "worker" task 60 | proto_task = loop.create_task(proto.process_messages()) 61 | proto_tasks.add(proto_task) 62 | 63 | proto_task.add_done_callback( 64 | lambda fut: proto_tasks.remove(proto_task)) 65 | 66 | return proto 67 | 68 | server = await loop.create_unix_server(create_proto, CFG.control_socket) 69 | 70 | if CFG.control_socket_group: 71 | # this only works if the current user is a member of the 72 | # target group! 73 | shutil.chown(CFG.control_socket, None, CFG.control_socket_group) 74 | 75 | if CFG.control_socket_permissions: 76 | mode = int(CFG.control_socket_permissions, 8) 77 | os.chmod(CFG.control_socket, mode) 78 | 79 | try: 80 | await server.serve_forever() 81 | except asyncio.CancelledError: 82 | logging.info("exiting...") 83 | logging.warning("served %d connections", self.handle_id) 84 | 85 | for proto_task in proto_tasks: 86 | proto_task.cancel() 87 | 88 | await asyncio.gather(*proto_tasks, 89 | return_exceptions=True) 90 | 91 | raise 92 | 93 | def register_free_port(self, hostname): 94 | """ 95 | Return a free ssh port. 96 | `port` is (lower, upper), this function returns the next 97 | available port in that range. 98 | 99 | if no free port can be found, returns None 100 | """ 101 | 102 | lower, upper = CFG.ssh_port_range 103 | 104 | ret = None 105 | 106 | current = lower 107 | while True: 108 | if current in self.used_ports[hostname]: 109 | current += 1 110 | if current > upper: 111 | raise RuntimeError("no free port found") 112 | else: 113 | # TODO: test if the socket is in use by other process? 114 | ret = current 115 | break 116 | 117 | self.used_ports[hostname].add(ret) 118 | 119 | return ret 120 | 121 | def get_connection_id(self): 122 | """ return the next free connection id """ 123 | ret = self.connection_id 124 | self.connection_id += 1 125 | return ret 126 | 127 | def create_handle(self, new_machine): 128 | """ 129 | create a new machine handle 130 | """ 131 | 132 | new_handle = self.handle_id 133 | self.handle_id += 1 134 | self.running[new_handle] = new_machine 135 | 136 | return new_handle 137 | 138 | def delete_handle(self, machine_id): 139 | """ 140 | remove the machine handle with given id. 
141 | """ 142 | machine = self.running[machine_id] 143 | 144 | if not machine.dynamic_ssh_config(): 145 | self.used_ports[machine.ssh_host].remove(machine.ssh_port) 146 | 147 | del self.running[machine_id] 148 | 149 | def get_machine(self, machine_id): 150 | """ 151 | return the handle for machine with given id. 152 | """ 153 | return self.running[machine_id] 154 | -------------------------------------------------------------------------------- /justin/__main__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Justin is the machine (container/vm) provider for Kevin CI. 3 | """ 4 | 5 | import argparse 6 | import asyncio 7 | import logging 8 | 9 | from kevin.util import log_setup 10 | 11 | from . import Justin 12 | from .config import CFG 13 | 14 | 15 | def main(): 16 | """ Justin service launch """ 17 | 18 | cmd = argparse.ArgumentParser( 19 | description="Kevin CI Justin - machine provider") 20 | 21 | cmd.add_argument("-c", "--config", default="/etc/kevin/justin.conf", 22 | help="file name of the configuration to use.") 23 | cmd.add_argument("-d", "--debug", action="store_true", 24 | help="enable asyncio debugging") 25 | cmd.add_argument("-v", "--verbose", action="count", default=0, 26 | help="increase program verbosity") 27 | cmd.add_argument("-q", "--quiet", action="count", default=0, 28 | help="decrease program verbosity") 29 | 30 | args = cmd.parse_args() 31 | 32 | print("\x1b[1;32mJustin machine service initializing...\x1b[m") 33 | 34 | log_setup(args.verbose - args.quiet) 35 | 36 | loop = asyncio.new_event_loop() 37 | 38 | # enable asyncio debugging 39 | loop.set_debug(args.debug) 40 | 41 | # parse config 42 | logging.debug("[cfg] loading...") 43 | CFG.load(args.config) 44 | 45 | logging.error("\x1b[1;32mstarting justin...\x1b[m") 46 | 47 | # state storage 48 | justin = Justin() 49 | justin.prepare_socket() 50 | try: 51 | asyncio.run(justin.run(), debug=args.debug) 52 | except KeyboardInterrupt: 53 | pass 54 | 55 | print("cya!") 56 | 57 | 58 | if __name__ == "__main__": 59 | main() 60 | -------------------------------------------------------------------------------- /justin/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Justin daemon config parsing 3 | """ 4 | 5 | from configparser import ConfigParser 6 | from pathlib import Path 7 | import logging 8 | import re 9 | 10 | from .machine import CONTAINERS, ContainerConfigFile 11 | 12 | class Config: 13 | def __init__(self): 14 | self.control_socket = None 15 | self.name = None 16 | self.control_socket_permissions = None 17 | self.control_socket_group = None 18 | self.ssh_port_range = (None, None) 19 | 20 | # machinename -> machineconfig 21 | # config created by Container.config() 22 | self.machines = dict() 23 | 24 | def load(self, filename, shell=False): 25 | cfg = ConfigParser() 26 | 27 | if not Path(filename).exists(): 28 | print("\x1b[31mConfig file '%s' does not exist.\x1b[m" % ( 29 | filename)) 30 | exit(1) 31 | 32 | cfg.read(filename) 33 | 34 | try: 35 | justincfg = cfg["justin"] 36 | 37 | self.name = justincfg["name"] 38 | self.control_socket = justincfg["control_socket"] 39 | 40 | self.control_socket_permissions = ( 41 | justincfg.get("control_socket_permissions")) 42 | 43 | self.control_socket_group = ( 44 | justincfg.get("control_socket_group")) 45 | 46 | # ssh ports may be a range or a single port 47 | ssh_port_range = justincfg["vm_ports"] 48 | mat = re.match(r"\[(\d+),(\d+)\]", ssh_port_range) 49 | if mat: 50 | # port 
range 51 | lower, upper = int(mat.group(1)), int(mat.group(2)) 52 | if not lower <= upper: 53 | raise ValueError("invalid port range (>): [%d,%d]" % ( 54 | lower, upper)) 55 | self.ssh_port_range = lower, upper 56 | else: 57 | raise ValueError("vm_ports malformed, should be =[from,to]") 58 | 59 | # further config ideas: 60 | # max parallel machines, memory usage checking 61 | 62 | except KeyError as exc: 63 | print("\x1b[31mConfig file is missing entry: %s\x1b[m" % (exc)) 64 | exit(1) 65 | 66 | if not shell: 67 | cfgpath = Path(filename).parent 68 | self.load_machines(cfg, cfgpath) 69 | 70 | self.verify() 71 | 72 | def load_machines(self, cfg, cfgpath): 73 | for machineid, machinecfg in cfg.items(): 74 | if machineid in ("justin", "DEFAULT"): 75 | # is for the main config above. 76 | continue 77 | elif machineid in self.machines: 78 | raise ValueError("Machine %s specified more than once" % ( 79 | machineid)) 80 | 81 | if "type" not in machinecfg: 82 | raise KeyError("Machine %s has no type=" % (machineid)) 83 | 84 | machineclassname = machinecfg["type"] 85 | try: 86 | machineclass = CONTAINERS[machineclassname] 87 | except KeyError: 88 | raise ValueError(f"Unknown Machine type {machineclassname!r}") from None 89 | 90 | # each machine type requests different config options, 91 | # these are parsed here. 92 | machineconfig = machineclass.config(machineid, 93 | machinecfg, 94 | cfgpath) 95 | 96 | if not isinstance(machineconfig, ContainerConfigFile): 97 | raise Exception("'%s' did not return ContainerConfigFile" % ( 98 | machineclassname)) 99 | 100 | self.machines[machineid] = (machineconfig, machineclass) 101 | logging.debug("[cfg] loaded machine %s (%s)", 102 | machineid, 103 | machineclassname) 104 | 105 | def verify(self): 106 | """ Verifies the validity of the loaded config attributes """ 107 | # TODO 108 | pass 109 | 110 | 111 | # global justin configuration instance 112 | CFG = Config() 113 | -------------------------------------------------------------------------------- /justin/machine/custom.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom container, just shell scripts are invoked. 3 | """ 4 | 5 | import asyncio 6 | import logging 7 | import os 8 | import shlex 9 | import subprocess 10 | 11 | from . import Container, ContainerConfigFile 12 | 13 | 14 | class Custom(Container): 15 | """ 16 | Represents a custom virtual machine/container 17 | launched by custom shell scripts. 
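The three hooks (prepare/launch/cleanup) come from the machine's config
section; {SSHPORT} in the launch command and the JUSTIN_SSH_PORT /
JUSTIN_MANAGE environment variables are filled in by this class.
A minimal justin.conf section might look like this (illustrative sketch;
the exact `type` value is an assumption based on the config loader):

    [my-custom-machine]
    type = custom
    prepare = /opt/ci/prepare.sh
    launch = /opt/ci/launch.sh --ssh-port {SSHPORT}
    cleanup = /opt/ci/cleanup.sh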
18 | """ 19 | 20 | def __init__(self, cfg): 21 | super().__init__(cfg) 22 | self.manage = False 23 | self.process = None 24 | 25 | @classmethod 26 | def dynamic_ssh_config(cls) -> bool: 27 | return False 28 | 29 | @classmethod 30 | def config(cls, machine_id, cfgdata, cfgpath): 31 | cfg = ContainerConfigFile(machine_id, cfgdata, cfgpath) 32 | 33 | cfg.prepare = cfgdata["prepare"] 34 | cfg.launch = cfgdata["launch"] 35 | cfg.cleanup = cfgdata["cleanup"] 36 | 37 | return cfg 38 | 39 | async def prepare(self, manage=False): 40 | self.manage = manage 41 | 42 | prepare_env = os.environ.copy() 43 | prepare_env["JUSTIN_MANAGE"] = "true" if self.manage else "" 44 | 45 | command = shlex.split(self.cfg.prepare) 46 | proc = await asyncio.create_subprocess_exec(*command, env=prepare_env) 47 | 48 | try: 49 | ret = await asyncio.wait_for(proc.wait(), timeout=60) 50 | except asyncio.TimeoutError as exc: 51 | raise RuntimeError("timeout waiting for " 52 | "container preparation") from exc 53 | 54 | if ret != 0: 55 | raise RuntimeError(f"could not prepare container: returned {ret}") 56 | 57 | async def launch(self): 58 | logging.debug("Launching container which shall listen " 59 | "on ssh port %d", self.ssh_port) 60 | 61 | launch_env = os.environ.copy() 62 | launch_env["JUSTIN_SSH_PORT"] = str(self.ssh_port) 63 | launch_env["JUSTIN_MANAGE"] = "true" if self.manage else "" 64 | 65 | command = [] 66 | for part in shlex.split(self.cfg.launch): 67 | part = part.replace("{SSHPORT}", str(self.ssh_port)) 68 | command.append(part) 69 | 70 | self.process = await asyncio.create_subprocess_exec( 71 | *command, 72 | stdin=subprocess.PIPE, 73 | stdout=None, 74 | stderr=None, 75 | env=launch_env 76 | ) 77 | self.process.stdin.close() 78 | 79 | async def is_running(self): 80 | if self.process: 81 | return self.process.returncode is None 82 | 83 | return False 84 | 85 | async def wait_for_shutdown(self, timeout): 86 | if not self.process: 87 | return 88 | 89 | try: 90 | await asyncio.wait_for(self.process.wait(), timeout) 91 | return True 92 | 93 | except asyncio.TimeoutError: 94 | logging.warning("shutdown wait timed out.") 95 | return False 96 | 97 | async def terminate(self): 98 | if not self.process: 99 | return 100 | 101 | if self.process.returncode is not None: 102 | return 103 | 104 | try: 105 | self.process.terminate() 106 | await asyncio.wait_for(self.process.wait(), timeout=10) 107 | 108 | except asyncio.TimeoutError: 109 | self.process.kill() 110 | await self.process.wait() 111 | 112 | self.process = None 113 | 114 | async def cleanup(self): 115 | command = shlex.split(self.cfg.cleanup) 116 | cleanup_env = os.environ.copy() 117 | cleanup_env["JUSTIN_MANAGE"] = "true" if self.manage else "" 118 | 119 | proc = await asyncio.create_subprocess_exec(*command, env=cleanup_env) 120 | 121 | try: 122 | ret = await asyncio.wait_for(proc.wait(), timeout=60) 123 | except asyncio.TimeoutError as exc: 124 | raise RuntimeError("timeout cleaning up container") from exc 125 | 126 | if ret != 0: 127 | raise RuntimeError(f"could not clean up container: {ret}") 128 | -------------------------------------------------------------------------------- /justin/machine/lxd.py: -------------------------------------------------------------------------------- 1 | """ 2 | LXD containers. 
3 | 4 | https://linuxcontainers.org/lxd/ 5 | """ 6 | 7 | from __future__ import annotations 8 | 9 | import asyncio 10 | import logging 11 | import uuid 12 | import shlex 13 | import subprocess 14 | import time 15 | import typing 16 | import yaml 17 | 18 | from . import Container, ContainerConfigFile 19 | 20 | if typing.TYPE_CHECKING: 21 | from typing import Any 22 | 23 | 24 | class LXD(Container): 25 | """ 26 | Represents a LXD container. 27 | """ 28 | 29 | def __init__(self, cfg) -> None: 30 | super().__init__(cfg) 31 | self._container_id: str | None = None 32 | self._manage = False 33 | 34 | @classmethod 35 | def dynamic_ssh_config(cls) -> bool: 36 | return True 37 | 38 | @classmethod 39 | def config(cls, machine_id, cfgdata, cfgpath): 40 | cfg = ContainerConfigFile(machine_id, cfgdata, cfgpath) 41 | 42 | cfg.base_image = cfgdata["base_image"] 43 | 44 | return cfg 45 | 46 | @staticmethod 47 | async def _run( 48 | cmd: str, 49 | output: bool = False, 50 | shell: bool = False, 51 | ) -> str | asyncio.subprocess.Process: 52 | 53 | print(f"[lxd] $ {cmd}") 54 | stdout = subprocess.PIPE if output else None 55 | 56 | if shell: 57 | proc = await asyncio.create_subprocess_shell(cmd, stdout=stdout) 58 | else: 59 | proc = await asyncio.create_subprocess_exec(*shlex.split(cmd), stdout=stdout) 60 | 61 | out, err = await proc.communicate() 62 | 63 | if output: 64 | return out.decode() 65 | else: 66 | return proc 67 | 68 | async def _rinc( 69 | self, 70 | cmd: str, 71 | output: bool = False, 72 | shell: bool = False, 73 | ) -> str | asyncio.subprocess.Process: 74 | if self._container_id is None: 75 | raise Exception("container id not running") 76 | 77 | if shell: 78 | return await self._run(f"lxc exec {self._container_id} -- bash -c {shlex.quote(cmd)}", output=output) 79 | else: 80 | return await self._run(f"lxc exec {self._container_id} -- {cmd}", output=output) 81 | 82 | async def _lxc_info(self) -> dict[str, Any]: 83 | out = await self._run(f"lxc info {self._container_id}", output=True) 84 | info = yaml.safe_load(out) 85 | return info 86 | 87 | async def _get_ip( 88 | self, 89 | want_v4: bool = True, want_v6: bool = False, 90 | timeout: float = 10.0, 91 | retry_delay: float = 0.5, 92 | ) -> tuple[str | None, str | None]: 93 | """ 94 | queries the ip address for the container. 95 | it's fetched from the lxd agent. 
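Illustrative usage (addresses are made up):

    ipv4, ipv6 = await self._get_ip()              # -> ("10.117.204.32", None)
    ipv4, ipv6 = await self._get_ip(want_v6=True)  # also waits for a global v6

Polls `lxc info` every `retry_delay` seconds until the wanted global
addresses are assigned, and raises TimeoutError after `timeout` seconds.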
96 | """ 97 | 98 | ipv4: str | None = None 99 | ipv6: str | None = None 100 | 101 | attempt = 0 102 | start_time = time.time() 103 | end_time = start_time + timeout 104 | 105 | while True: 106 | if (ipv4 or not want_v4) and (ipv6 or not want_v6): 107 | break 108 | attempt += 1 109 | attempt_time = time.time() 110 | 111 | if attempt_time >= end_time: 112 | raise TimeoutError(f"couldn't fetch lxd container ip after {attempt_time - start_time:.02f}s in {attempt} attempts") 113 | 114 | cont = False 115 | info = await self._lxc_info() 116 | ips = info["Resources"]["Network usage"]["eth0"]["IP addresses"] 117 | 118 | inetv4 = ips.get("inet") 119 | if inetv4: 120 | ipv4_candidate, scope = inetv4.split(maxsplit=1) 121 | if scope == "(global)": 122 | # remove the /netsize 123 | ipv4 = ipv4_candidate.split("/", maxsplit=1)[0] 124 | cont = True 125 | 126 | inetv6 = ips.get("inet6") 127 | if inetv6: 128 | ipv6_candidate, scope = inetv6.split(maxsplit=1) 129 | # TODO: maybe using the link ipv6 is also fine 130 | if scope == "(global)": 131 | # remove the /netsize 132 | ipv6 = ipv6_candidate.split("/", maxsplit=1)[0] 133 | cont = True 134 | 135 | if cont: 136 | continue 137 | else: 138 | # TODO event-based waiting for container ip 139 | await asyncio.sleep(min(retry_delay, end_time - attempt_time)) 140 | 141 | return ipv4, ipv6 142 | 143 | async def prepare(self, manage: bool = False) -> None: 144 | """ 145 | Prepare the container image, or rather 146 | """ 147 | self._manage = manage 148 | 149 | out = await self._run("lxc version", output=True) 150 | lxc_version = yaml.safe_load(out) 151 | self.lxd_client_version = lxc_version["Client version"] 152 | 153 | async def launch(self): 154 | logging.debug("[lxd] launching container") 155 | 156 | self._container_id = (self.cfg.base_image.replace('/', '-').replace(':', '-') 157 | + "-" + str(uuid.uuid4())) 158 | 159 | cmd = f"lxc launch {self.cfg.base_image} {self._container_id}" 160 | if not self._manage: 161 | # remove after stop 162 | cmd += " --ephemeral" 163 | 164 | proc = await self._run(cmd) 165 | if proc.returncode != 0: 166 | raise Exception("failed to start lxd container") 167 | 168 | async def connection_info(self) -> dict[str, Any]: 169 | """ Return infos about how to connect to the container """ 170 | 171 | # fetch the container's ip 172 | if self.ssh_host == "__dynamic__": 173 | if not (await self.is_running()): 174 | raise Exception("failed getting ip for container - container not running") 175 | 176 | await self._rinc("systemctl is-system-running --wait") 177 | 178 | logging.debug("[lxd] fetching container ip...") 179 | ipv4, ipv6 = await self._get_ip() 180 | ssh_host = ipv4 181 | # TODO: ipv6 link local or global connection? 182 | logging.debug("[lxd] got container ip: %s", ssh_host) 183 | else: 184 | ssh_host = self.ssh_host 185 | 186 | return { 187 | "ssh_user": self.ssh_user, 188 | "ssh_host": ssh_host, 189 | "ssh_port": self.ssh_port, 190 | "ssh_known_host_key": self.ssh_known_host_key, 191 | } 192 | 193 | async def is_running(self) -> bool: 194 | if not self._container_id: 195 | return False 196 | 197 | info = await self._lxc_info() 198 | return "running" == info["Status"].lower() 199 | 200 | async def wait_for_shutdown(self, timeout=60): 201 | if not self._container_id: 202 | return 203 | 204 | # TODO doesn't seem to be supported yet except for active polling? 205 | # but lxc stop already waits for the stop anyway! 
206 | 207 | return True 208 | 209 | async def terminate(self): 210 | if not self._container_id: 211 | return 212 | 213 | # we don't run poweroff since that's up to Chantal's `cleanup` 214 | 215 | await asyncio.wait_for( 216 | self._run(f"lxc stop --timeout 10 {self._container_id}"), 217 | timeout=11, 218 | ) 219 | 220 | if self._manage: 221 | # export the container filesystem as image 222 | await asyncio.wait_for( 223 | self._run(f"lxc publish {self._container_id} --alias {self.cfg.base_image} --compression none --reuse"), 224 | timeout=600, 225 | ) 226 | 227 | await asyncio.wait_for( 228 | self._run(f"lxc rm {self._container_id}"), 229 | timeout=60 230 | ) 231 | self._container_id = None 232 | 233 | async def cleanup(self): 234 | # when we stop an ephemeral container it's removed anyway 235 | if not self._container_id: 236 | return 237 | 238 | await asyncio.wait_for( 239 | self._run(f"lxc rm --force {self._container_id}"), 240 | timeout=60 241 | ) 242 | -------------------------------------------------------------------------------- /justin/machine/podman.py: -------------------------------------------------------------------------------- 1 | """ 2 | Podman containers. 3 | 4 | https://podman.io/ 5 | """ 6 | 7 | import asyncio 8 | import logging 9 | import uuid 10 | import shlex 11 | import subprocess 12 | 13 | from . import Container, ContainerConfigFile 14 | 15 | 16 | class Podman(Container): 17 | """ 18 | Represents a podman container. 19 | """ 20 | 21 | def __init__(self, cfg): 22 | super().__init__(cfg) 23 | self.running_image = None 24 | self.container_id = None 25 | 26 | @classmethod 27 | def dynamic_ssh_config(cls) -> bool: 28 | return False 29 | 30 | @classmethod 31 | def config(cls, machine_id, cfgdata, cfgpath): 32 | cfg = ContainerConfigFile(machine_id, cfgdata, cfgpath) 33 | 34 | cfg.base_image = cfgdata["base_image"] 35 | cfg.command = cfgdata["command"] 36 | 37 | return cfg 38 | 39 | async def prepare(self, manage=False): 40 | """ 41 | No need to prepare the container image as we can directly run it 42 | """ 43 | if manage: 44 | raise RuntimeError("Podman container cannot be started in management mode") 45 | 46 | async def launch(self): 47 | logging.debug("[podman] launching container with ssh port %d", self.ssh_port) 48 | 49 | self.running_image = (self.cfg.base_image.replace('/', '-').replace(':', '-') 50 | + "-" + str(uuid.uuid4())) 51 | 52 | command = [] 53 | for part in shlex.split(self.cfg.command): 54 | part = part.replace("{BASE_IMAGE}", str(self.cfg.base_image)) 55 | part = part.replace("{SSHPORT}", str(self.ssh_port)) 56 | part = part.replace("{IMAGENAME}", str(self.running_image)) 57 | command.append(part) 58 | 59 | logging.debug(f"[podman] $ {' '.join(command)}") 60 | process = await asyncio.create_subprocess_exec( 61 | *command, 62 | stdin=subprocess.PIPE, 63 | stdout=subprocess.PIPE, 64 | stderr=None 65 | ) 66 | process.stdin.close() 67 | 68 | # podman echoes the container id 69 | line = await process.stdout.readline() 70 | if line: 71 | self.container_id = line.strip().decode() 72 | if self.container_id: 73 | logging.debug("[podman] spawned container with hash %s" % self.container_id) 74 | 75 | else: 76 | raise Exception("no container id was provided by podman, " 77 | "please investigate the launch command") 78 | 79 | ret = await process.wait() 80 | 81 | if ret != 0: 82 | self.running_image = None 83 | self.container_id = None 84 | 85 | raise Exception("failed to start podman container") 86 | 87 | async def is_running(self): 88 | if not self.running_image: 89 | return
False 90 | 91 | command = ['podman', 'inspect', '-f', '\'{{.State.Running}}\'', self.running_image] 92 | logging.debug(f"[podman] $ {' '.join(command)}") 93 | process = await asyncio.create_subprocess_exec( 94 | *command, stdout=subprocess.PIPE,  # pipe stdout so communicate() can capture it 95 | ) 96 | out, err = await process.communicate() 97 | return 'true' in out.decode() 98 | 99 | async def wait_for_shutdown(self, timeout=60): 100 | if not self.running_image: 101 | return 102 | 103 | command = ['podman', 'wait', self.running_image] 104 | logging.debug(f"[podman] $ {' '.join(command)}") 105 | process = await asyncio.create_subprocess_exec( 106 | *command, 107 | ) 108 | 109 | try: 110 | await asyncio.wait_for(process.wait(), timeout) 111 | return True 112 | 113 | except asyncio.TimeoutError: 114 | logging.warning("[podman] shutdown wait timed out " 115 | f"for container {self.running_image}") 116 | return False 117 | 118 | async def terminate(self): 119 | if not self.running_image: 120 | return 121 | 122 | command = ['podman', 'stop', self.running_image] 123 | logging.debug(f"[podman] $ {' '.join(command)}") 124 | process = await asyncio.create_subprocess_exec( 125 | *command, 126 | ) 127 | await asyncio.wait_for(process.wait(), timeout=20) 128 | 129 | self.process = None 130 | 131 | async def cleanup(self): 132 | if not self.running_image: 133 | return 134 | 135 | command = ["podman", "rm", "-f", self.running_image] 136 | logging.debug(f"[podman] $ {' '.join(command)}") 137 | process = await asyncio.create_subprocess_exec( 138 | *command, 139 | ) 140 | await asyncio.wait_for(process.wait(), timeout=20) 141 | -------------------------------------------------------------------------------- /justin/machine/qemu.py: -------------------------------------------------------------------------------- 1 | """ 2 | Wraps a QEMU virtual machine. 3 | """ 4 | 5 | import asyncio 6 | import logging 7 | import os 8 | from pathlib import Path 9 | import shlex 10 | import subprocess 11 | 12 | from . import Container, ContainerConfigFile 13 | 14 | 15 | class QEMU(Container): 16 | """ 17 | Represents a qemu virtual machine.
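Builds run on a throwaway copy-on-write overlay so the base image stays
untouched; prepare() below does roughly the equivalent of (illustrative):

    qemu-img create -o backing_file=base.qcow2 -f qcow2 overlay_00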
18 | """ 19 | 20 | def __init__(self, cfg): 21 | super().__init__(cfg) 22 | self.manage = False 23 | self.process = None 24 | self.running_image = None 25 | 26 | @classmethod 27 | def dynamic_ssh_config(cls) -> bool: 28 | return True 29 | 30 | @classmethod 31 | def config(cls, machine_id, cfgdata, cfgpath): 32 | cfg = ContainerConfigFile(machine_id, cfgdata, cfgpath) 33 | 34 | base_img = Path(cfgdata["base_image"]) 35 | if not base_img.is_absolute(): 36 | base_img = cfgpath / base_img 37 | cfg.base_image = base_img.absolute() 38 | 39 | overlay_img = Path(cfgdata["overlay_image"]) 40 | if not overlay_img.is_absolute(): 41 | overlay_img = cfgpath / overlay_img 42 | cfg.overlay_image = overlay_img.absolute() 43 | 44 | cfg.command = cfgdata["command"] 45 | 46 | if not cfg.base_image.is_file(): 47 | raise FileNotFoundError("base image: %s" % cfg.base_image) 48 | 49 | return cfg 50 | 51 | async def prepare(self, manage=False): 52 | self.manage = manage 53 | 54 | if not self.manage: 55 | # create a temporary runimage 56 | idx = 0 57 | while True: 58 | tmpimage = Path(str(self.cfg.overlay_image) + "_%02d" % idx) 59 | if not tmpimage.is_file(): 60 | break 61 | idx += 1 62 | 63 | self.running_image = tmpimage 64 | 65 | command = [ 66 | "qemu-img", "create", 67 | "-o", "backing_file=" + str(self.cfg.base_image), 68 | "-f", "qcow2", 69 | str(self.running_image), 70 | ] 71 | 72 | proc = await asyncio.create_subprocess_exec(*command) 73 | 74 | try: 75 | ret = await asyncio.wait_for(proc.wait(), timeout=60) 76 | except asyncio.TimeoutError as exc: 77 | raise RuntimeError("timeout when creating " 78 | "overlay image") from exc 79 | 80 | if ret != 0: 81 | raise RuntimeError(f"could not create overlay image: " 82 | f"qemu-img returned {ret}") 83 | 84 | else: 85 | # TODO: even in management mode, create a cow image, 86 | # but in the end merge it back into a new image and 87 | # perform an atomic rename(2) in order to atomically 88 | # replace the VM. 89 | # currently, builds that were triggered while the VM 90 | # is being managed may use a corrupted image. 91 | 92 | # TODO: disallow multiple management connections at once. 
93 | 94 | logging.info("QEMU VM launching in management mode!") 95 | # to manage, use the base image to run 96 | self.running_image = str(self.cfg.base_image) 97 | 98 | async def launch(self): 99 | if self.running_image is None: 100 | raise RuntimeError("runimage was not prepared!") 101 | 102 | logging.debug("Launching VM which shall listen " 103 | "on ssh port %d", self.ssh_port) 104 | 105 | command = [] 106 | for part in shlex.split(self.cfg.command): 107 | part = part.replace("{IMAGENAME}", str(self.running_image)) 108 | part = part.replace("{SSHPORT}", str(self.ssh_port)) 109 | command.append(part) 110 | 111 | self.process = await asyncio.create_subprocess_exec( 112 | *command, 113 | stdin=subprocess.PIPE, 114 | stdout=None, 115 | stderr=None 116 | ) 117 | self.process.stdin.close() 118 | 119 | async def is_running(self): 120 | if self.process: 121 | return self.process.returncode is None 122 | 123 | return False 124 | 125 | async def wait_for_shutdown(self, timeout): 126 | if not self.process: 127 | return 128 | 129 | try: 130 | await asyncio.wait_for(self.process.wait(), timeout) 131 | return True 132 | 133 | except asyncio.TimeoutError: 134 | logging.warning("shutdown wait timed out.") 135 | return False 136 | 137 | async def terminate(self): 138 | if not self.process: 139 | return 140 | 141 | if self.process.returncode is not None: 142 | return 143 | 144 | try: 145 | self.process.terminate() 146 | await asyncio.wait_for(self.process.wait(), timeout=10) 147 | 148 | except asyncio.TimeoutError: 149 | self.process.kill() 150 | await self.process.wait() 151 | 152 | self.process = None 153 | 154 | async def cleanup(self): 155 | if not self.manage and self.running_image is not None: 156 | try: 157 | os.unlink(str(self.running_image)) 158 | except FileNotFoundError: 159 | pass 160 | -------------------------------------------------------------------------------- /justin/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | SSH client for a machine managed by justin. 5 | """ 6 | 7 | import argparse 8 | import asyncio 9 | import logging 10 | 11 | from kevin.justin import JustinSSH, JustinSocket 12 | from kevin.process import SSHProcess 13 | from kevin.util import parse_listen_entry, log_setup 14 | 15 | 16 | async def spawn_shell(justin, machine_id, volatile, command): 17 | """ 18 | Spawns an interactive shell with justin. 19 | """ 20 | 21 | logging.debug("connecting to justin...") 22 | await justin.create() 23 | 24 | logging.debug("looking up machine '%s'...", machine_id) 25 | machine = await justin.create_machine(machine_id) 26 | 27 | if machine is None: 28 | raise Exception("machine '%s' was not found on justin '%s'. " 29 | "available:\n%s" % ( 30 | machine_id, justin, await justin.get_machines())) 31 | 32 | manage = not volatile 33 | logging.debug("preparing and launching machine (manage=%s)..." 
% manage) 34 | await machine.prepare(manage=manage) 35 | await machine.launch() 36 | 37 | logging.debug("machine launched, waiting for ssh...") 38 | await machine.wait_for_ssh_port() 39 | 40 | if manage: 41 | logging.warning("please shut down the machine gracefully " 42 | "to avoid data loss (=> `sudo poweroff`)") 43 | 44 | # ssh into the machine, force tty allocation 45 | async with SSHProcess(command, 46 | machine.ssh_user, machine.ssh_host, 47 | machine.ssh_port, machine.ssh_known_host_key, pipes=False, 48 | options=["-t"]) as proc: 49 | ret = await proc.wait() 50 | 51 | # wait for the machine to exit gracefully 52 | wait_time = 30 53 | logging.warning(f"waiting {wait_time}s for machine to shut down") 54 | await machine.wait_for_shutdown(wait_time) 55 | 56 | await machine.terminate() 57 | await machine.cleanup() 58 | 59 | return ret 60 | 61 | 62 | def main(): 63 | """ Connect to a pty of some machine provided by justin """ 64 | 65 | cmd = argparse.ArgumentParser() 66 | cmd.add_argument("--volatile", action="store_true", 67 | help="don't start the machine in management mode") 68 | cmd.add_argument("justin_id", 69 | help=("justin connection information: " 70 | "unix://socketpath, unix://user@socket " 71 | "or ssh://user@host:port")) 72 | cmd.add_argument("machine_id", help="machine identification") 73 | cmd.add_argument("command", nargs="*", 74 | help="command to execute. default: shell.") 75 | cmd.add_argument("-d", "--debug", action="store_true", 76 | help="enable asyncio debugging") 77 | cmd.add_argument("-v", "--verbose", action="count", default=0, 78 | help="increase program verbosity") 79 | cmd.add_argument("-q", "--quiet", action="count", default=0, 80 | help="decrease program verbosity") 81 | 82 | args = cmd.parse_args() 83 | 84 | # set up log level 85 | log_setup(args.verbose - args.quiet) 86 | 87 | loop = asyncio.get_event_loop() 88 | 89 | # enable asyncio debugging 90 | loop.set_debug(args.debug) 91 | 92 | user, connection, location, key = parse_listen_entry( 93 | "justin_id", args.justin_id, require_key=False) 94 | 95 | if connection == "ssh": 96 | host, port = location 97 | justin = JustinSSH("manage", host, port, user, key) 98 | 99 | elif connection == "unix": 100 | justin = JustinSocket("manage", location, user) 101 | 102 | else: 103 | raise Exception("unknown justin connection type: %s" % connection) 104 | 105 | ret = 1 106 | try: 107 | ret = loop.run_until_complete( 108 | spawn_shell(justin, args.machine_id, args.volatile, args.command)) 109 | 110 | except KeyboardInterrupt: 111 | print("\njustin.manage killed by keyboard interrupt\n") 112 | 113 | loop.stop() 114 | loop.run_forever() 115 | loop.close() 116 | 117 | exit(ret) 118 | 119 | if __name__ == '__main__': 120 | main() 121 | -------------------------------------------------------------------------------- /justin/shell.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | SSH -> unix socket bridge for justin. 5 | 6 | sends the ssh-forced user as login message to justin. 7 | """ 8 | 9 | import argparse 10 | import fcntl 11 | import os 12 | import selectors 13 | import socket 14 | import sys 15 | 16 | from enum import Enum 17 | 18 | from .protocol import JustinProto 19 | from .config import CFG 20 | from .messages import Login 21 | 22 | 23 | 24 | def main(): 25 | """ 26 | Spawns a shell that relays the messages to the justin unix socket. 
27 | """ 28 | 29 | cmd = argparse.ArgumentParser() 30 | cmd.add_argument("user", help="the user that connected to justin") 31 | cmd.add_argument("-c", "--config", default="/etc/kevin/justin.conf", 32 | help="corresponding justin daemon config file") 33 | 34 | args = cmd.parse_args() 35 | 36 | CFG.load(args.config, shell=True) 37 | 38 | user = args.user 39 | peer = (os.environ.get("SSH_CLIENT") or "local").split()[0] 40 | 41 | # connection relay 42 | sel = selectors.DefaultSelector() 43 | 44 | # connect to justin unix socket. 45 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 46 | 47 | try: 48 | sock.connect(CFG.control_socket) 49 | except FileNotFoundError: 50 | print("justin socket not found: '%s' missing" % CFG.control_socket) 51 | return 52 | 53 | class Buf(Enum): 54 | outbuf = 0 55 | inbuf = 1 56 | 57 | # relay buffers. 58 | # outbuf: unix -> stdout 59 | # inbuf: stdin -> unix 60 | outbuf = bytearray() 61 | inbuf = bytearray() 62 | 63 | # enum lookup to bytearray, as it is not hashable for the dict. 64 | ebuf = { 65 | Buf.inbuf: inbuf, 66 | Buf.outbuf: outbuf, 67 | } 68 | 69 | # store a maximum of 8 MiB per buffer 70 | # TODO: make configurable 71 | max_size = 8 * 1024 * 1024 72 | 73 | # define which fds and events are used for a buffer 74 | # buffer -> [(fd, event), ...] 75 | pipes = { 76 | Buf.outbuf: [(sys.stdout.fileno(), selectors.EVENT_WRITE), 77 | (sock.fileno(), selectors.EVENT_READ)], 78 | Buf.inbuf: [(sys.stdin.fileno(), selectors.EVENT_READ), 79 | (sock.fileno(), selectors.EVENT_WRITE)], 80 | } 81 | 82 | # maps {buf -> pipe}: which pipe to use to send buf to. 83 | write_pipes = dict() 84 | 85 | # {pipe -> buf}: which buffer is filled by pipe 86 | read_pipes = dict() 87 | 88 | # set streams to nonblocking and gather buffer-pipe assignments 89 | for buf, fdactions in pipes.items(): 90 | for pipe, event in fdactions: 91 | flags = fcntl.fcntl(pipe, fcntl.F_GETFL) 92 | fcntl.fcntl(pipe, fcntl.F_SETFL, flags | os.O_NONBLOCK) 93 | 94 | if event == selectors.EVENT_WRITE: 95 | write_pipes[buf] = pipe 96 | elif event == selectors.EVENT_READ: 97 | read_pipes[pipe] = buf 98 | 99 | def add_writer(pipe, buf, bufname): 100 | """ 101 | register a pipe to wait for a can-write-to event. 102 | passes data to the register event 103 | """ 104 | try: 105 | sel.register(pipe, selectors.EVENT_WRITE, buf) 106 | except KeyError: 107 | events = sel.get_key(pipe).events 108 | sel.modify(write_pipes[bufname], 109 | events | selectors.EVENT_WRITE, 110 | buf) 111 | 112 | def del_writer(pipe): 113 | """ Remove a pipe from write-to queue """ 114 | events = sel.get_key(pipe).events 115 | 116 | # only event: write -> remove. 117 | if events == selectors.EVENT_WRITE: 118 | sel.unregister(pipe) 119 | else: 120 | # just remove the wait-for-write. 121 | sel.modify(pipe, 122 | events & ~selectors.EVENT_WRITE) 123 | 124 | # register all wait-for-read events 125 | for pipe in read_pipes.keys(): 126 | sel.register(pipe, selectors.EVENT_READ) 127 | 128 | # send user login message to justin 129 | msg = Login(user, peer) 130 | data = msg.pack(JustinProto.DEFAULT_MODE) 131 | inbuf += data 132 | add_writer(write_pipes[Buf.inbuf], inbuf, Buf.inbuf) 133 | 134 | # process events 135 | # TODO: use asyncio, but it's rather complicated for this use case. 136 | while sel.get_map().keys(): 137 | events = sel.select() 138 | for event, mask in events: 139 | while True: 140 | try: 141 | # perform read or write action depending on flag. 
142 | if mask & selectors.EVENT_READ: 143 | bufname = read_pipes[event.fileobj] 144 | pipe = write_pipes[bufname] 145 | buf = ebuf[bufname] 146 | free = max_size - len(buf) 147 | 148 | data = os.read(event.fd, min(16384, free)) 149 | if not data: # end of file, cya! 150 | sel.unregister(event.fileobj) 151 | 152 | # one of the streams closed, let's exit 153 | exit() 154 | break 155 | 156 | buf.extend(data) 157 | 158 | # the buffer now has data, enqueue send: 159 | add_writer(pipe, buf, bufname) 160 | 161 | if mask & selectors.EVENT_WRITE: 162 | buf = event.data 163 | pipe = event.fileobj 164 | if not buf: 165 | # no more data to send, dequeue send. 166 | del_writer(pipe) 167 | break 168 | 169 | nwrote = os.write(event.fd, buf) 170 | del buf[:nwrote] 171 | 172 | except BlockingIOError: 173 | # we processed all currently available/writable data 174 | break 175 | 176 | 177 | if __name__ == '__main__': 178 | main() 179 | else: 180 | raise RuntimeError("this script must be run stand-alone") 181 | -------------------------------------------------------------------------------- /kevin/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The kevin-ci component that runs on the host system. 3 | """ 4 | -------------------------------------------------------------------------------- /kevin/__main__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Program entry point 3 | """ 4 | 5 | import argparse 6 | import asyncio 7 | import logging 8 | import os 9 | 10 | from . import kevin 11 | from .config import CFG 12 | from .util import log_setup 13 | 14 | 15 | def main(): 16 | """ Main entry point """ 17 | 18 | cmd = argparse.ArgumentParser( 19 | description="Kevin CI - the trashy continuous integration service") 20 | 21 | cmd.add_argument("-c", "--config", default="/etc/kevin/kevin.conf", 22 | help="file name of the configuration to use.") 23 | cmd.add_argument("--volatile", action="store_true", 24 | help=("disable persistent job storage, mainly for " 25 | "testing purposes")) 26 | cmd.add_argument("-d", "--debug", action="store_true", 27 | help="enable asyncio debugging") 28 | cmd.add_argument("-v", "--verbose", action="count", default=0, 29 | help="increase program verbosity") 30 | cmd.add_argument("-q", "--quiet", action="count", default=0, 31 | help="decrease program verbosity") 32 | 33 | args = cmd.parse_args() 34 | 35 | print("\x1b[1;32mKevin CI initializing...\x1b[m") 36 | 37 | # set up log level 38 | log_setup(args.verbose - args.quiet) 39 | 40 | # pass commandline args 41 | CFG.set_cmdargs(args) 42 | 43 | # load all config files 44 | CFG.load(args.config) 45 | 46 | # print proxy environment variables 47 | proxy_vars = [env_var for env_var in os.environ.keys() 48 | if env_var.lower().endswith("_proxy")] 49 | if proxy_vars: 50 | logging.info("active proxy config:") 51 | for proxy_var in proxy_vars: 52 | logging.info(f" {proxy_var}={os.environ[proxy_var]}") 53 | else: 54 | logging.info("no active proxy configuration.") 55 | 56 | logging.error("\x1b[1;32mKevin CI running...\x1b[m") 57 | 58 | try: 59 | asyncio.run(kevin.run(CFG), debug=args.debug) 60 | 61 | except KeyboardInterrupt: 62 | logging.info("exiting...") 63 | 64 | except Exception: 65 | logging.exception("\x1b[31;1mfatal internal exception\x1b[m") 66 | 67 | print("cya!") 68 | 69 | if __name__ == '__main__': 70 | main() 71 | -------------------------------------------------------------------------------- /kevin/action.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Build action base class definition. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import typing 8 | from abc import abstractmethod 9 | 10 | from .service import Service 11 | 12 | if typing.TYPE_CHECKING: 13 | from .watcher import Watcher 14 | from .project import Project 15 | from .build import Build 16 | 17 | 18 | class Action(Service): 19 | """ 20 | When a build produces updates, children of this class are used to perform 21 | some actions, e.g. sending mail, setting status, etc. 22 | """ 23 | 24 | def __init__(self, cfg: dict[str, str], project: Project): 25 | super().__init__(cfg, project) 26 | 27 | @abstractmethod 28 | async def get_watcher(self, build: Build, completed: bool) -> Watcher | None: 29 | """ 30 | Return a watcher object which is then registered for build updates. 31 | """ 32 | pass 33 | -------------------------------------------------------------------------------- /kevin/build_manager.py: -------------------------------------------------------------------------------- 1 | """ 2 | Build caching and creation. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import typing 8 | 9 | from .build import Build 10 | from .lrustore import LRUStore 11 | 12 | if typing.TYPE_CHECKING: 13 | from .project import Project 14 | from typing import Callable 15 | 16 | # (project, commit_hash) 17 | build_id_t = tuple[Project, str] 18 | 19 | 20 | class BuildManager: 21 | """ 22 | Manages which builds are in-memory. 23 | """ 24 | 25 | def __init__(self, max_cached: int) -> None: 26 | del_id = object() 27 | def delete_check(build: Build, deleter: Callable[[], None]) -> bool: 28 | if not build.completed: 29 | build.call_on_complete(del_id, lambda _build: deleter()) 30 | return False 31 | return True 32 | 33 | def revive(build: Build): 34 | build.rm_call_on_complete(del_id) 35 | 36 | # stores known builds by (project, hash) -> build 37 | self._builds: LRUStore[build_id_t, Build] = LRUStore(max_size=max_cached, 38 | max_killmap_size=10, 39 | delete_check=delete_check, 40 | revive=revive) 41 | 42 | async def _load_or_create_build(self, 43 | project: Project, 44 | commit_hash: str, 45 | create_new: bool, 46 | force_rebuild: bool = False) -> Build | None: 47 | cache_key: build_id_t = (project, commit_hash) 48 | 49 | cached_build = self._builds.get(cache_key) 50 | if cached_build and not force_rebuild: 51 | return cached_build 52 | 53 | newbuild = Build(project, commit_hash) 54 | 55 | if force_rebuild: 56 | if cached_build: 57 | del self._builds[cache_key] 58 | else: 59 | # try loading from filesystem 60 | await newbuild.load() 61 | 62 | # store build to the cache 63 | # newbuild.completed => it could be loaded from fs 64 | # create_new => as its a new build, always store it 65 | # force_rebuild => we deleted it before 66 | if newbuild.completed or create_new: 67 | self._builds[cache_key] = newbuild 68 | else: 69 | # if the build couldn't be loaded from fs 70 | # and it's not a new build 71 | return None 72 | 73 | return newbuild 74 | 75 | async def new_build(self, project: Project, commit_hash: str, 76 | force_rebuild: bool = False) -> Build: 77 | """ 78 | Create a new build or return it from the cache. 
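With force_rebuild=True, any cached or previously stored state is
dropped and a fresh Build object is created.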
79 | """ 80 | ret = await self._load_or_create_build(project, commit_hash, 81 | create_new=True, force_rebuild=force_rebuild) 82 | assert ret is not None 83 | return ret 84 | 85 | async def get_build(self, project: Project, commit_hash: str): 86 | """ 87 | Return an existing build from the cache or a completed build from the filessystem. 88 | Return None if it coultn't be found or it's not complete. 89 | """ 90 | 91 | return await self._load_or_create_build(project, commit_hash, create_new=False) 92 | -------------------------------------------------------------------------------- /kevin/chantal.py: -------------------------------------------------------------------------------- 1 | """ 2 | Code for creating and interfacing with Chantal instances. 3 | """ 4 | 5 | import asyncio 6 | import logging 7 | from pathlib import Path 8 | import subprocess 9 | 10 | from .util import INF, SSHKnownHostFile, AsyncWith 11 | from .process import Process, SSHProcess, ProcessFailed, ProcTimeoutError 12 | 13 | 14 | class Chantal(AsyncWith): 15 | """ 16 | Virtual machine instance, with ssh login data. 17 | For a proper clean-up, call cleanup() or use with 'with'. 18 | 19 | # TODO: different connection methods (e.g. agent, non-ssh commands) 20 | """ 21 | def __init__(self, machine, loop=None): 22 | self.machine = machine 23 | self._loop = loop or asyncio.get_event_loop() 24 | self._ssh_worked = self._loop.create_future() 25 | 26 | def can_connect(self): 27 | """ return if the vm ssh connection was successful once. """ 28 | if self._ssh_worked.done(): 29 | return self._ssh_worked.result() 30 | 31 | return False 32 | 33 | async def create(self): 34 | """ create and prepare the machine """ 35 | await self.machine.prepare() 36 | await self.machine.launch() 37 | 38 | async def upload(self, local_path, remote_folder=".", timeout=10): 39 | """ 40 | Uploads the file or directory from local_path to 41 | remote_folder (default: ~). 42 | """ 43 | 44 | with SSHKnownHostFile(self.machine.ssh_host, 45 | self.machine.ssh_port, 46 | self.machine.ssh_known_host_key) as hostfile: 47 | command = [ 48 | "scp", 49 | "-P", str(self.machine.ssh_port), 50 | "-q", 51 | ] + hostfile.get_options() + [ 52 | "-r", 53 | str(local_path), 54 | self.machine.ssh_user + "@" + 55 | self.machine.ssh_host + ":" + 56 | str(remote_folder), 57 | ] 58 | 59 | async with Process(command) as proc: 60 | ret = await proc.wait_for(timeout) 61 | 62 | if ret != 0: 63 | raise ProcessFailed(ret, "scp upload failed") 64 | 65 | async def download(self, remote_path, local_folder, timeout=10): 66 | """ 67 | Downloads the file or directory from remote_path to local_folder. 68 | Warning: Contains no safeguards regarding filesize. 69 | Clever arguments for remote_path or local_folder might 70 | allow break-outs. 71 | """ 72 | 73 | with SSHKnownHostFile(self.machine.ssh_host, 74 | self.machine.ssh_port, 75 | self.machine.ssh_known_host_key) as hostfile: 76 | command = [ 77 | "scp", "-q", 78 | "-P", str(self.machine.ssh_port), 79 | ] + hostfile.get_options() + [ 80 | "-r", 81 | self.machine.ssh_user + "@" + self.machine.ssh_host + ":" + remote_path, 82 | local_folder, 83 | ] 84 | 85 | async with Process(command) as proc: 86 | ret = await proc.wait_for(timeout) 87 | 88 | if ret != 0: 89 | raise ProcessFailed(ret, "scp down failed") 90 | 91 | def exec_remote(self, remote_command, 92 | timeout=INF, silence_timeout=INF, 93 | must_succeed=True): 94 | """ 95 | Runs the command via ssh, returns an Process handle. 
96 | """ 97 | 98 | return SSHProcess(remote_command, 99 | self.machine.ssh_user, self.machine.ssh_host, 100 | self.machine.ssh_port, self.machine.ssh_known_host_key, 101 | timeout=timeout, 102 | silence_timeout=silence_timeout, 103 | must_succeed=must_succeed) 104 | 105 | async def run_command(self, remote_command, 106 | timeout=INF, silence_timeout=INF, 107 | must_succeed=True): 108 | """ 109 | Raises subprocess.TimeoutExpired if the process has not terminated 110 | within 'timeout' seconds, or if it has not produced any output in 111 | 'silence_timeout' seconds. 112 | """ 113 | 114 | async with self.exec_remote(remote_command, 115 | timeout, silence_timeout, 116 | must_succeed) as proc: 117 | 118 | # ignore output, but this handles the timeouts. 119 | async for _, _ in proc.output(): 120 | pass 121 | 122 | return await proc.wait() 123 | 124 | async def cleanup(self): 125 | """ 126 | Waits for the VM to finish and cleans up. 127 | """ 128 | try: 129 | if self.can_connect(): 130 | await self.run_command(('sudo', 'poweroff'), timeout=10, 131 | must_succeed=False) 132 | except subprocess.TimeoutExpired: 133 | raise RuntimeError("VM shutdown timeout") 134 | finally: 135 | try: 136 | logging.debug("terminating machine %s..." % self.machine) 137 | await self.machine.terminate() 138 | logging.debug("cleaning up machine %s..." % self.machine) 139 | await self.machine.cleanup() 140 | logging.debug("cleanup complete for machine %s..." % self.machine) 141 | except subprocess.SubprocessError: 142 | logging.warning("[chantal] failed telling justin about VM " 143 | "teardown, but he'll do that on its own.") 144 | 145 | async def __aenter__(self): 146 | await self.create() 147 | return self 148 | 149 | async def __aexit__(self, exc, value, traceback): 150 | del exc, traceback # unused 151 | try: 152 | await self.cleanup() 153 | except Exception as new_exc: 154 | # the cleanup failed, throw the exception from the old one 155 | raise new_exc from value 156 | 157 | async def wait_for_connection(self, timeout=60, retry_delay=0.5, 158 | try_timeout=10): 159 | """ 160 | Wait until the vm can be controlled via ssh 161 | """ 162 | 163 | # TODO: support contacting chantal through 164 | # plain socket and not only ssh 165 | # and allow preinstallations of chantal 166 | # -> SSHChantal, ... 167 | try: 168 | await self.machine.wait_for_ssh_port(timeout, 169 | retry_delay, try_timeout) 170 | except ProcTimeoutError: 171 | self._ssh_worked.set_result(False) 172 | raise 173 | 174 | self._ssh_worked.set_result(True) 175 | 176 | async def install(self, timeout=10): 177 | """ 178 | Install chantal on the VM 179 | """ 180 | 181 | # TODO: allow to skip chantal installation 182 | kevindir = Path(__file__) 183 | await self.upload(kevindir.parent.parent / "chantal", 184 | timeout=timeout) 185 | 186 | def run(self, job): 187 | """ 188 | execute chantal in the container. 189 | return a state object, use its .output() function to get 190 | an async iterator. 
191 | """ 192 | 193 | chantal_command = [ 194 | "python3", "-u", "-m", "chantal", 195 | "--clone", job.build.clone_url, 196 | "--checkout", job.build.commit_hash, 197 | ] 198 | 199 | if depth := job.build.project.cfg.git_fetch_depth: 200 | chantal_command.extend(["--fetch-depth", depth]) 201 | 202 | if job.build.branch: 203 | chantal_command.extend([ 204 | "--branch", job.build.branch, 205 | ]) 206 | 207 | chantal_command.extend([ 208 | "--desc-file", job.build.project.cfg.job_desc_file, 209 | "--desc-format", job.build.project.cfg.job_desc_format, 210 | "--job", job.name, 211 | ]) 212 | 213 | return self.exec_remote( 214 | chantal_command, 215 | timeout=job.build.project.cfg.job_timeout, 216 | silence_timeout=job.build.project.cfg.job_silence_timeout, 217 | ) 218 | -------------------------------------------------------------------------------- /kevin/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Code for loading and parsing config options. 3 | """ 4 | 5 | from collections import defaultdict 6 | from configparser import ConfigParser 7 | from pathlib import Path 8 | 9 | import logging 10 | import os 11 | 12 | from .util import parse_connection_entry 13 | 14 | 15 | class Config: 16 | """ Global configuration for kevin. """ 17 | def __init__(self): 18 | self.ci_name = None 19 | self.max_jobs_queued = None 20 | self.max_jobs_running = None 21 | 22 | self.static_url = None 23 | 24 | # web listener config 25 | self.dyn_port = None 26 | self.dyn_address = None 27 | self.mandy_url = None 28 | self.dyn_ssl = None 29 | 30 | self.project_folder = None 31 | self.output_folder = None 32 | 33 | self.projects = dict() 34 | self.justins = dict() 35 | 36 | self.args = None 37 | self.volatile = True 38 | 39 | # maps {HookHandler class -> {kwargname -> argvalue}} 40 | # this basically determines the constructor arguments 41 | # for instanciated url handlers. 42 | # TODO: relocate as it's httpd.py-specific. 43 | # but moving requires a lot of code overhead. 44 | self.urlhandlers = defaultdict(lambda: defaultdict(list)) 45 | 46 | def set_cmdargs(self, args): 47 | """ Set runtime arguments """ 48 | self.args = args 49 | self.volatile = args.volatile 50 | 51 | if self.args.volatile: 52 | logging.warning("\x1b[1;31mYou are running in volatile mode, " 53 | "nothing will be stored on disk!\x1b[m") 54 | 55 | def load(self, filename): 56 | """ Loads the attributes from the config file """ 57 | raw = ConfigParser() 58 | 59 | if not Path(filename).exists(): 60 | logging.error("\x1b[31mConfig file '%s' does not exist.\x1b[m", 61 | filename) 62 | exit(1) 63 | 64 | raw.read(filename) 65 | 66 | # remember the current section for the error message below :) 67 | current_section = None 68 | 69 | try: 70 | cfglocation = Path(filename).parent 71 | 72 | # main config 73 | current_section = "kevin" 74 | kevin = raw[current_section] 75 | self.ci_name = kevin["name"] 76 | self.max_jobs_queued = int(kevin["max_jobs_queued"]) 77 | self.max_jobs_running = int(kevin["max_jobs_running"]) 78 | 79 | self.builds_cached_max = int(kevin["builds_cached_max"]) 80 | 81 | # project configurations. 
82 | from .project import Project 83 | 84 | current_section = "projects" 85 | projects = raw[current_section] 86 | 87 | # for each project, there's a projname.conf in that folder 88 | projfolder = Path(projects["config_folder"]) 89 | if not projfolder.is_absolute(): 90 | projfolder = cfglocation / projfolder 91 | 92 | if not projfolder.is_dir(): 93 | raise NotADirectoryError(str(projfolder)) 94 | 95 | self.project_folder = projfolder 96 | 97 | self.output_folder = Path(projects["output_folder"]) 98 | if not self.output_folder.is_absolute(): 99 | self.output_folder = cfglocation / self.output_folder 100 | 101 | # TODO: maybe explicitly require file paths to be listed 102 | # instead of iterating through all present files. 103 | for projectfile in self.project_folder.iterdir(): 104 | if not str(projectfile).endswith(".conf"): 105 | logging.warning("[projects] ignoring non .conf file '%s'", 106 | projectfile) 107 | continue 108 | 109 | # create the project 110 | newproj = Project(projectfile, self) 111 | if newproj.name in self.projects: 112 | raise NameError("Project '%s' defined twice!" % ( 113 | newproj.name)) 114 | 115 | logging.info("[projects] loaded %s", newproj.name) 116 | 117 | self.projects[newproj.name] = newproj 118 | 119 | # merge things required by projects 120 | self.project_postprocess() 121 | 122 | # web configuration 123 | current_section = "web" 124 | web = raw[current_section] 125 | self.dyn_port = int(web["dyn_port"]) 126 | self.dyn_address = web.get("dyn_address", "::1") 127 | self.static_url = web["static_url"] 128 | self.mandy_url = web["mandy_url"] 129 | 130 | self.dyn_frontend_host = web["dyn_frontend_host"] 131 | self.dyn_frontend_port = int(web["dyn_frontend_port"]) 132 | self.dyn_frontend_ssl = web["dyn_frontend_ssl"].lower() 133 | if self.dyn_frontend_ssl == "true": 134 | self.dyn_frontend_ssl = True 135 | elif self.dyn_frontend_ssl == "false": 136 | self.dyn_frontend_ssl = False 137 | else: 138 | raise ValueError("dyn_frontend_ssl must be either true or false.") 139 | 140 | # vm providers 141 | current_section = "justin" 142 | justin_entries = raw[current_section] 143 | for name, url in justin_entries.items(): 144 | if name in self.justins: 145 | raise ValueError("Justin double-defined: %s" % name) 146 | 147 | result = parse_connection_entry(name, url, cfglocation) 148 | 149 | self.justins[name] = { 150 | "user": result[0], 151 | "connection": result[1], 152 | "location": result[2], 153 | "key": result[3], 154 | } 155 | 156 | except KeyError as exc: 157 | logging.error("\x1b[31mMissing config entry " 158 | f"in section {current_section}: {exc}\x1b[m") 159 | exit(1) 160 | 161 | self.verify() 162 | 163 | def verify(self): 164 | """ 165 | Verifies the validity of the loaded attributes 166 | """ 167 | if not self.static_url.endswith('/'): 168 | raise ValueError("static_url must end in '/': '%s'" % 169 | self.static_url) 170 | if not self.output_folder.is_dir(): 171 | raise NotADirectoryError(str(self.output_folder)) 172 | if not os.access(str(self.output_folder), os.W_OK): 173 | raise OSError("output_folder is not writable") 174 | 175 | def project_postprocess(self): 176 | """ 177 | Postprocessing for all the project triggers/actions. 178 | 179 | Across projects, configurations may need merging. 180 | Namely, if there's only one webhook handler for multiple projects, 181 | the configs need to be prepared for that. 182 | """ 183 | # gather triggers to be installed. 184 | for _, project in self.projects.items(): 185 | # for each handler type (e.g.
github webhook), 186 | # collect all the configs 187 | for trigger in project.triggers: 188 | 189 | # install requested implicit watchers 190 | project.add_watchers(trigger.get_watchers()) 191 | 192 | # perform config merging operations 193 | trigger.merge_cfg(self.urlhandlers) 194 | 195 | 196 | # global config instance for the running kevin. 197 | CFG = Config() 198 | -------------------------------------------------------------------------------- /kevin/job_manager.py: -------------------------------------------------------------------------------- 1 | """ 2 | Find matching justins to distribute jobs on. 3 | """ 4 | 5 | import asyncio 6 | import logging 7 | import random 8 | 9 | from collections import defaultdict 10 | 11 | from .justin import JustinSSH, JustinSocket, JustinError 12 | from .justin_machine import MachineError 13 | 14 | 15 | class JobManager: 16 | """ 17 | Keeps an overview about the reachable justins and their VMs. 18 | 19 | Used by Job.run() to provide a JustinMachine. 20 | """ 21 | 22 | # TODO: better selection if this justin is suitable, 23 | # e.g. has the resources to spawn the machine. 24 | # may involve further queries to that justin. 25 | 26 | def __init__(self, loop, config): 27 | self.loop = loop 28 | 29 | # lookup which justins provide the machine 30 | # {machinename: {justin0: {machine_id0, ...}} 31 | self.machines = defaultdict(lambda: defaultdict(set)) 32 | 33 | # list of known justin connections: {name: Justin} 34 | self.justins = dict() 35 | 36 | # queue where lost connections will be stuffed into 37 | self.pending_justins = asyncio.Queue() 38 | 39 | # set of tasks that try to reconnect to disconnected justins 40 | self.running_reconnects = set() 41 | 42 | # create justins from the config 43 | for justinname, justincfg in config.justins.items(): 44 | if justincfg["connection"] == "ssh": 45 | host, port = justincfg["location"] 46 | justin = JustinSSH(justinname, 47 | host, port, 48 | justincfg["user"], 49 | justincfg["key"], 50 | loop=self.loop) 51 | 52 | elif justincfg["connection"] == "unix": 53 | justin = JustinSocket(justinname, 54 | justincfg["location"], justincfg["user"], 55 | loop=self.loop) 56 | 57 | # TODO: allow justin bypass by launching VM locally without a 58 | # justin daemon (that is the justin.JustinVirtual). 59 | # elif justincfg["connection"] == "virtual": 60 | # justin = JustinVirtual() 61 | 62 | else: 63 | raise Exception("unknown justin connection type: %s -> %s" % ( 64 | justinname, justincfg["connection"])) 65 | 66 | # remember the justin by name 67 | self.justins[justinname] = justin 68 | 69 | # when justin disconnects, perform the reconnect. 70 | justin.on_disconnect(self.justin_lost) 71 | 72 | # none of the justins is connected initially 73 | self.pending_justins.put_nowait(justin) 74 | 75 | async def run(self): 76 | try: 77 | async with asyncio.TaskGroup() as tg: 78 | tg.create_task(self._process()) 79 | except asyncio.CancelledError: 80 | await self._shutdown() 81 | raise 82 | 83 | async def _process(self): 84 | """ 85 | create control connections to all known justins. 86 | """ 87 | while True: 88 | # wait for any justin that got lost. 
89 | justin = await self.pending_justins.get()
90 |
91 | if not justin.active:
92 | logging.info(f"dropping deactivated justin {justin}")
93 | continue
94 |
95 | logging.info(f"connecting pending justin {justin}...")
96 |
97 | # TODO: drop it from the vm availability list
98 | reconnection = self.loop.create_task(self.reconnect(justin))
99 |
100 | # save the pending reconnects in a set
101 | self.running_reconnects.add(reconnection)
102 | reconnection.add_done_callback(
103 | lambda fut: self.running_reconnects.discard(fut)  # use the passed task; `reconnection` is rebound each loop iteration
104 | )
105 |
106 | async def reconnect(self, justin, try_interval=30):
107 | """ tries to reconnect a justin """
108 |
109 | # try reconnecting forever
110 | while True:
111 | try:
112 | await self.connect_to_justin(justin)
113 | break
114 |
115 | except JustinError as exc:
116 | # connection rejections, auth problems, ...
117 |
118 | logging.warning(f"failed communicating "
119 | f"with justin '{justin.name}'")
120 | logging.warning(f"\x1b[31merror\x1b[m: {exc}")
121 | logging.warning(" are you sure that justin entry "
122 | f"'{justin.name}' (= {justin}) "
123 | f"is valid and running?")
124 | logging.warning(" I'll retry connecting in "
125 | f"{try_interval} seconds...")
126 |
127 | # clean up justin
128 | await justin.close()
129 | await asyncio.sleep(try_interval)
130 |
131 | except asyncio.CancelledError:
132 | raise
133 |
134 | except Exception:
135 | logging.exception(f"Fatal error while reconnecting to {justin}")
136 | justin.active = False
137 | break
138 |
139 |
140 | async def connect_to_justin(self, justin):
141 | """
142 | Connect to a Justin machine provider.
143 | If this function does not raise,
144 | the connection is assumed to be successful.
145 | """
146 | # TODO: refresh machine lists for reconnect
147 |
148 | await justin.create()
149 |
150 | justin_machines = await justin.get_machines()
151 |
152 | # machine_id, (vmclassname, machine_name)
153 | for machine_id, (vm_type, machine_name) in justin_machines.items():
154 | del vm_type # unused
155 | if machine_name not in self.machines:
156 | logging.info("container '%s' now available!", machine_name)
157 | self.machines[machine_name][justin].add(machine_id)
158 |
159 | def justin_lost(self, justin):
160 | """
161 | Called when a Justin lost its connection.
162 |
163 | If a connection can't be established,
164 | this function is called as well!
165 | """
166 |
167 | # count how many justins provide a vm
168 | provided_count = defaultdict(lambda: 0)
169 | for machine_name, justin_providers in self.machines.items():
170 | justin_providers.pop(justin, None)
171 | provided_count[machine_name] += len(justin_providers.keys())
172 |
173 | # remove unavailable vms
174 | for machine_name, count in provided_count.items():
175 | if count == 0:
176 | logging.info("machine '%s' no longer available", machine_name)
177 | del self.machines[machine_name]
178 |
179 | # and queue the justin for reconnection
180 | self.pending_justins.put_nowait(justin)
181 |
182 | async def get_machine(self, name):
183 | """
184 | return a JustinMachine that matches the given name.
185 | """
186 |
187 | candidate_justins = self.machines.get(name)
188 | if not candidate_justins:
189 | raise MachineError(f"No justin could provide {name}")
190 |
191 | # this could need a separate datastructure....
192 | # no random dict choice available otherwise.
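# (random.choice() needs an indexable sequence, hence the list() and
#  sorted() materializations below; with the typically small number of
#  justins and machine ids per name, this is cheap.)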
193 | justin = random.choice(list(candidate_justins.keys())) 194 | 195 | # get a machine 196 | machine_id = random.sample(sorted(candidate_justins[justin]), 1)[0] 197 | 198 | # TODO: if this justin doesn't wanna spawn the machine, try another one. 199 | return await justin.create_machine(machine_id) 200 | 201 | async def _shutdown(self): 202 | """ 203 | Terminate Justin connections. 204 | """ 205 | 206 | expected_cancels = len(self.running_reconnects) 207 | 208 | # cancel the reconnects tasks 209 | for reconnect in self.running_reconnects: 210 | reconnect.cancel() 211 | 212 | # let all tasks run and gather their cancellation exceptions 213 | reconnects_canceled = [ 214 | res for res in await asyncio.gather( 215 | *self.running_reconnects, 216 | return_exceptions=True 217 | ) 218 | if isinstance(res, asyncio.CancelledError) 219 | ] 220 | 221 | if expected_cancels != len(reconnects_canceled): 222 | logging.warning("not all reconnects were canceled!") 223 | 224 | # close all justin connections 225 | for _, justin in self.justins.items(): 226 | await justin.close() 227 | -------------------------------------------------------------------------------- /kevin/justin_machine.py: -------------------------------------------------------------------------------- 1 | """ 2 | Controlling interface for machines hosted on justin 3 | """ 4 | 5 | import asyncio 6 | import logging 7 | import time 8 | 9 | from justin import messages 10 | from justin.machine import Container 11 | 12 | from .process import SSHProcess, ProcTimeoutError 13 | 14 | 15 | class JustinError(Exception): 16 | """ 17 | Error that occurs when Justin does something fishy, 18 | for example provide nonsense, talk garbage or cook salmon. 19 | """ 20 | 21 | def __init__(self, msg): 22 | super().__init__(msg) 23 | 24 | 25 | class MachineError(JustinError): 26 | """ 27 | Raised when a request to a Container was not successful. 28 | """ 29 | pass 30 | 31 | 32 | class JustinMachine(Container): 33 | """ 34 | Provides the same interface as any machine container, 35 | but instead relays the commands to a justin server. 36 | 37 | Use this handle to interact with the VM, i.e. boot it, terminate it, ... 38 | 39 | An instance of this class is created by justin, it also provides cfg. 40 | """ 41 | def __init__(self, cfg, run_id, justin): 42 | super().__init__(cfg) 43 | 44 | self.run_id = run_id 45 | self.justin = justin 46 | 47 | @classmethod 48 | def dynamic_ssh_config(cls): 49 | return True 50 | 51 | @classmethod 52 | def config(cls, machine_id, cfgdata, cfgpath): 53 | raise Exception("config() on the VM controller called") 54 | 55 | async def prepare(self, manage=False): 56 | msg = await self.justin.query(messages.Prepare(run_id=self.run_id, 57 | manage=manage)) 58 | if not isinstance(msg, messages.OK): 59 | raise MachineError(f"Failed to prepare: {msg.msg}") 60 | 61 | async def launch(self): 62 | msg = await self.justin.query(messages.Launch(run_id=self.run_id)) 63 | if not isinstance(msg, messages.OK): 64 | raise MachineError(f"Failed to launch machine: {msg.msg}") 65 | 66 | msg = await self.justin.query(messages.GetConnectionInfo(run_id=self.run_id)) 67 | if not isinstance(msg, messages.ConnectionInfo): 68 | raise MachineError(f"Failed to get connection info: {msg.msg}") 69 | 70 | # this is used to connect to the remote container instance! 
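# (these four fields are exactly what wait_for_ssh_port() and
#  SSHProcess below consume when dialing into the machine.)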
71 | self.ssh_host = msg.ssh_host 72 | self.ssh_port = msg.ssh_port 73 | self.ssh_known_host_key = msg.ssh_known_host_key 74 | self.ssh_user = msg.ssh_user 75 | 76 | async def status(self): 77 | return await self.justin.query(messages.Status(run_id=self.run_id)) 78 | 79 | async def is_running(self): 80 | # we have to implement it because @abstractmethod, but 81 | # we override `status` as well, so it's never called. 82 | raise Exception("VM proxy 'is_running' should never be called!") 83 | 84 | async def terminate(self): 85 | msg = await self.justin.query(messages.Terminate(run_id=self.run_id)) 86 | if not isinstance(msg, messages.OK): 87 | raise MachineError(f"Failed to kill machine: {msg.msg}") 88 | 89 | async def cleanup(self): 90 | msg = await self.justin.query(messages.Cleanup(run_id=self.run_id)) 91 | if not isinstance(msg, messages.OK): 92 | raise MachineError(f"Failed to clean up: {msg.msg}") 93 | return msg 94 | 95 | async def wait_for_ssh_port(self, timeout=60, retry_delay=0.2, 96 | try_timeout=15): 97 | """ 98 | Loops until the SSH port is open. 99 | raises ProcTimeoutError on timeout. 100 | """ 101 | 102 | # TODO: provide the loop as optional constructor argument 103 | loop = asyncio.get_event_loop() 104 | 105 | raw_acquired = False 106 | endtime = time.time() + timeout 107 | while True: 108 | await asyncio.sleep(retry_delay) 109 | 110 | if not raw_acquired: 111 | logging.debug("testing for ssh port %s:%d...", self.ssh_host, self.ssh_port) 112 | 113 | established = loop.create_future() 114 | 115 | def connection_made(reader, writer): 116 | """ called when the connection was made """ 117 | del reader, writer # unused 118 | established.set_result(True) 119 | 120 | try: 121 | transp, _ = await loop.create_connection( 122 | lambda: asyncio.StreamReaderProtocol( 123 | asyncio.StreamReader(), connection_made 124 | ), self.ssh_host, self.ssh_port) 125 | 126 | except ConnectionRefusedError: 127 | logging.debug(" \x1b[31;5mrefused\x1b[m!") 128 | 129 | except Exception as exc: 130 | logging.error("error creating connection: %s", exc) 131 | 132 | else: 133 | try: 134 | await asyncio.wait_for(established, 135 | timeout=try_timeout) 136 | raw_acquired = True 137 | logging.debug(" \x1b[32;5mopen\x1b[m!") 138 | transp.close() 139 | continue 140 | except asyncio.TimeoutError: 141 | logging.debug(" \x1b[31;5mtimeout\x1b[m!") 142 | 143 | else: 144 | logging.debug("testing for ssh service %s@%s:%d...", self.ssh_user, self.ssh_host, self.ssh_port) 145 | 146 | async with SSHProcess(["true"], 147 | self.ssh_user, 148 | self.ssh_host, 149 | self.ssh_port, 150 | self.ssh_known_host_key) as proc: 151 | 152 | try: 153 | ret = await proc.wait_for(try_timeout) 154 | 155 | if ret == 0: 156 | logging.debug(" \x1b[32;5;1msuccess\x1b[m!") 157 | break 158 | else: 159 | logging.debug(" \x1b[31;5;1mfailed\x1b[m!") 160 | 161 | except ProcTimeoutError: 162 | logging.debug(" \x1b[31;5;1mtimeout\x1b[m!") 163 | 164 | if time.time() > endtime: 165 | logging.debug("\x1b[31mTIMEOUT\x1b[m") 166 | if raw_acquired: 167 | logging.info("TCP connection established, but no SSH.") 168 | if self.ssh_known_host_key is not None: 169 | logging.info(" Are you sure the ssh key is correct?") 170 | logging.info(" -> %s", self.ssh_known_host_key) 171 | 172 | raise ProcTimeoutError(["ssh", "%s@%s:%s" % ( 173 | self.ssh_user, 174 | self.ssh_host, 175 | self.ssh_port)], timeout) 176 | 177 | async def wait_for_shutdown(self, timeout=20): 178 | """ 179 | Request from justin so he tells us when the machine is dead. 
180 | """ 181 | msg = await self.justin.query(messages.ShutdownWait(run_id=self.run_id, 182 | timeout=timeout)) 183 | if not isinstance(msg, messages.OK): 184 | raise MachineError(f"Failed to wait for shutdown: {msg.msg}") 185 | 186 | return msg 187 | -------------------------------------------------------------------------------- /kevin/kevin.py: -------------------------------------------------------------------------------- 1 | """ 2 | Main Queue and Justin management entity. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import asyncio 8 | import typing 9 | 10 | from .build_manager import BuildManager 11 | from .httpd import HTTPD 12 | from .job_manager import JobManager 13 | from .task_queue import TaskQueue 14 | 15 | if typing.TYPE_CHECKING: 16 | from .config import Config 17 | 18 | 19 | async def run(config: Config): 20 | """ 21 | This is Kevin. He will build your job. Guaranteed to be bug-free(*). 22 | 23 | Jobs from various sources are put into the Queue, 24 | which is processed by running each jobs via Justin with Chantal. 25 | 26 | (*) Disclaimer: May not actually be bug-free. 27 | 28 | Runs Kevin foreveeeeeerrrrrrrrrr! 29 | """ 30 | 31 | # job distribution 32 | job_manager = JobManager(asyncio.get_running_loop(), config) 33 | 34 | # build creation 35 | build_manager = BuildManager(max_cached=config.builds_cached_max) 36 | 37 | # queue where build jobs will end up in 38 | queue = TaskQueue(asyncio.get_running_loop(), job_manager, 39 | config.max_jobs_running, config.max_jobs_queued) 40 | 41 | # webserver: receives hooks and provides websocket api 42 | httpd = HTTPD(config.urlhandlers, queue, build_manager) 43 | 44 | try: 45 | async with asyncio.TaskGroup() as tg: 46 | tg.create_task(queue.run()) 47 | tg.create_task(job_manager.run()) 48 | 49 | except asyncio.CancelledError: 50 | # stop http listening 51 | await httpd.stop() 52 | raise 53 | -------------------------------------------------------------------------------- /kevin/lrustore.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections import OrderedDict 4 | 5 | from typing import Callable, TypeVar, Generic 6 | 7 | K = TypeVar("K") 8 | V = TypeVar("V") 9 | 10 | 11 | delete_check_t = Callable[[V, Callable[[], None]], bool] 12 | revive_t = Callable[[V], None] 13 | 14 | 15 | class LRUStore(Generic[K, V]): 16 | def __init__(self, max_size: int, 17 | max_killmap_size: int = 10, 18 | delete_check: delete_check_t[V] | None = None, 19 | revive: revive_t[V] | None = None) -> None: 20 | """ 21 | max_size: 22 | number of entries until the oldest gets deleted 23 | max_killmap_size: 24 | number of entries to keep that couldn't get deleted due to `delete_check` 25 | when exceeded, delete the oldest killed entry first anyway. 26 | delete_check: fun(value, deleter) -> bool: 27 | function that is called when item should be deleted. 28 | if it returns False, item can't be deleted yet. 29 | when the item is ready for deletion, it must call deleter() 30 | revive: fun(value) -> bool: 31 | function called when item is scheduled for deletion, but 32 | was revived while waiting. 33 | this is used to remove the somewhere-registered deleter set in `delete_check` again. 
34 | """ 35 | assert max_size >= 1 36 | 37 | self._max_size = max_size 38 | self._max_killmap_size = max_killmap_size 39 | self._map: OrderedDict[K, V] = OrderedDict() 40 | self._killmap: OrderedDict[K, V] = OrderedDict() 41 | 42 | self._delcheck: delete_check_t[V] | None = delete_check 43 | self._revive: revive_t[V] | None = revive 44 | 45 | def __delitem__(self, key: K): 46 | """ 47 | remove an item explicitly 48 | """ 49 | self._map.pop(key, None) 50 | self._killmap.pop(key, None) 51 | 52 | def __setitem__(self, key: K, val: V) -> None: 53 | """ 54 | store a new entry. drop the oldest if store is full. 55 | """ 56 | known = self._map.get(key) 57 | if known is not None: 58 | # move to end with new value 59 | self._map.move_to_end(key, last=True) 60 | return 61 | 62 | self._map[key] = val 63 | 64 | if len(self._map) > self._max_size: 65 | delkey, delval = self._map.popitem(last=False) 66 | print("xxx deleting") 67 | if self._delcheck: 68 | print("xxx in killmap") 69 | # if deletion should be postponed, hold reference in separate killmap 70 | def delete_item(): 71 | del self._killmap[delkey] 72 | print("xxx killed from killmap") 73 | if not self._delcheck(delval, delete_item): 74 | self._killmap[delkey] = delval 75 | 76 | if len(self._killmap) > self._max_killmap_size: 77 | # delete the oldest kill item anyway 78 | self._killmap.popitem(last=False) 79 | 80 | def get(self, key: K, default: V | None = None) -> V | None: 81 | item = self._map.get(key) 82 | # bump priority to max 83 | if item is not None: 84 | self._map.move_to_end(key, last=True) 85 | return item 86 | 87 | # revive if scheduled for deletion 88 | to_kill = self._killmap.get(key) 89 | if to_kill is not None: 90 | try: 91 | if self._revive: 92 | self._revive(to_kill) 93 | finally: 94 | del self._killmap[key] 95 | self[key] = to_kill 96 | return to_kill 97 | 98 | return None 99 | -------------------------------------------------------------------------------- /kevin/project.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project handling routines. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import typing 8 | 9 | from .action import Action 10 | from .project_config import Config as ProjectConfig 11 | from .trigger import Trigger 12 | 13 | if typing.TYPE_CHECKING: 14 | from pathlib import Path 15 | from .config import Config as KevinConfig 16 | from .build import Build 17 | from .watcher import Watcher 18 | 19 | 20 | class Project: 21 | """ 22 | Represents a CI project, which has Builds that are started from Triggers, 23 | and to process the build, Actions are run. 24 | Watchers are subscribed to updates of this build. 
25 | """ 26 | 27 | def __init__(self, proj_cfg_path: Path, config: KevinConfig): 28 | 29 | self.cfg = ProjectConfig(proj_cfg_path) 30 | 31 | self.storage_path: Path = config.output_folder / self.cfg.project_name 32 | self.name = self.cfg.project_name 33 | 34 | # these will invoke a project build 35 | self.triggers: list[Trigger] = list() 36 | 37 | # these will receive or distribute build updates 38 | self.actions: list[Action] = list() 39 | 40 | # additional watchers to be attached 41 | self._watchers: list[Watcher] = list() 42 | 43 | # sort the services from the config in the appropriate list 44 | for service in self.cfg.get_services(self): 45 | match service: 46 | case Trigger(): 47 | self.triggers.append(service) 48 | case Action(): 49 | self.actions.append(service) 50 | case _: 51 | raise Exception(f"configured project service is not a trigger or action: {service}") 52 | 53 | def add_watchers(self, watchers: list[Watcher]): 54 | """ Add actions manually as they may be created by e.g. triggers. """ 55 | self._watchers.extend(watchers) 56 | 57 | async def attach_actions(self, build: Build, completed: bool): 58 | """ 59 | Register all actions defined in this project 60 | so they receives updates from the build. 61 | The build may be complete already and some actions 62 | should not be attached then. 63 | """ 64 | for action in self.actions: 65 | watcher = await action.get_watcher(build, completed) 66 | if watcher: 67 | await build.register_watcher(watcher) 68 | 69 | # attach additional watchers which were created by some trigger. 70 | for watcher in self._watchers: 71 | await build.register_watcher(watcher) 72 | 73 | def __str__(self): 74 | return f"" 75 | -------------------------------------------------------------------------------- /kevin/project_config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project configuration file definition 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | from configparser import ConfigParser 8 | import logging 9 | import re 10 | import typing 11 | 12 | 13 | from .service_meta import get_service 14 | from .util import parse_size, parse_time 15 | 16 | if typing.TYPE_CHECKING: 17 | from pathlib import Path 18 | from .service_meta import Service 19 | from .project import Project 20 | 21 | 22 | class Config: 23 | """ 24 | Configuration options for one project. 25 | 26 | The config consists of [project] for project settings 27 | followed by a number of [$service] sections. 28 | 29 | These activate the specified service for the project 30 | with the followed config. 
31 | """ 32 | 33 | def __init__(self, cfg_path: Path): 34 | self._cfg_path = cfg_path 35 | 36 | # parse the project config file 37 | self._raw: ConfigParser | None = ConfigParser() 38 | self._raw.read(cfg_path) 39 | 40 | current_section = None 41 | 42 | try: 43 | # general project config in [project] 44 | current_section = "project" 45 | projcfg = self._raw[current_section] 46 | self.project_name = projcfg["name"] 47 | self.job_max_output = parse_size(projcfg["job_max_output"]) 48 | self.job_timeout = parse_time(projcfg["job_timeout"]) 49 | self.job_silence_timeout = parse_time( 50 | projcfg["job_silence_timeout"]) 51 | self.job_desc_file = projcfg["job_desc_file"] 52 | self.job_desc_format = projcfg.get("job_desc_format", "makeish") 53 | if self.job_desc_format not in ("makeish", "python"): 54 | raise ValueError("job_desc_format must be either 'makeish' or 'python', " 55 | f"not {self.job_desc_format}") 56 | self.git_fetch_depth = projcfg.get("git_fetch_depth") 57 | 58 | except KeyError as exc: 59 | logging.exception(f"\x1b[31mConfig file '{self._cfg_path}' section [{current_section}] " 60 | f"is missing entry {exc!r}\x1b[m") 61 | exit(1) 62 | 63 | def get_services(self, project: Project) -> list[Service]: 64 | services: list[Service] = list() 65 | 66 | if self._raw is None: 67 | raise Exception("config not parsed") 68 | try: 69 | current_section = None 70 | 71 | # configuration for triggers and actions 72 | # these define what can trigger a project job, 73 | # and what should be done then. 74 | for modulename, config in self._raw.items(): 75 | if modulename in {"DEFAULT", "project"}: 76 | continue 77 | 78 | # for the error message generation below 79 | current_section = modulename 80 | 81 | # To support more than one config for the same service, 82 | # the config can be suffixed with .00, .01, ... 83 | hassuffix = re.match(r"([^\.]+)\.(\d+)$", modulename) 84 | if hassuffix: 85 | modulename = hassuffix.group(1) 86 | 87 | # fetch the service class by the section name 88 | # this is a child class of "Service". 89 | modulecls = get_service(modulename) 90 | 91 | # TODO: cross references with some "include" statement 92 | # to use [modulename] of some other file stated 93 | # or: use a python config... 94 | 95 | # create the service with the config section 96 | # e.g. GitHubHook(config, project) 97 | # or job.0 becomes JobAction 98 | module = modulecls(dict(config), project) 99 | 100 | services.append(module) 101 | 102 | self._raw = None 103 | 104 | except KeyError as exc: 105 | logging.exception(f"\x1b[31mConfig file '{self._cfg_path}' section [{current_section}] " 106 | f"is missing entry {exc!r}\x1b[m") 107 | exit(1) 108 | 109 | return services 110 | -------------------------------------------------------------------------------- /kevin/service/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | In this module are implementations of supported services. 3 | Those can be triggers, actions or both. 4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | import typing 9 | from abc import ABC 10 | 11 | if typing.TYPE_CHECKING: 12 | from ..project import Project 13 | 14 | 15 | class Service(ABC): 16 | """ 17 | Base class for all services for a project. 18 | A service is e.g. a IRC notification, 19 | a build trigger via some webhook, etc. 20 | """ 21 | 22 | def __init__(self, cfg: dict[str, str], project: Project): 23 | del cfg # unused here. subclasses use it, though. 
24 | self.project = project 25 | 26 | def get_project(self) -> Project: 27 | """ Return the associated project """ 28 | return self.project 29 | -------------------------------------------------------------------------------- /kevin/service/badge/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ( 2 | 'StatusBadge', 3 | 'BadgeGenerator', 4 | ) 5 | 6 | from .action import StatusBadge 7 | from .generator import BadgeGenerator 8 | -------------------------------------------------------------------------------- /kevin/service/badge/action.py: -------------------------------------------------------------------------------- 1 | """ 2 | Status badge generation for a build. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import enum 8 | import logging 9 | import os.path 10 | import typing 11 | 12 | from pathlib import Path 13 | 14 | from .generator import BadgeGenerator 15 | 16 | from ...action import Action 17 | from ...config import CFG 18 | from ...update import BuildState, BuildFinished 19 | from ...watcher import Watcher 20 | 21 | 22 | if typing.TYPE_CHECKING: 23 | from ...build import Build 24 | from ...project import Project 25 | from ...update import Update 26 | 27 | 28 | class BadgeType(enum.Enum): 29 | success = enum.auto() 30 | fail = enum.auto() 31 | error = enum.auto() 32 | 33 | 34 | class StatusBadge(Action): 35 | """ 36 | GitHub status updater action, enable in a project to 37 | allow real-time build updates via the github api. 38 | """ 39 | 40 | def __init__(self, cfg: dict[str, str], project: Project): 41 | super().__init__(cfg, project) 42 | base_text = cfg.get("base_text", "build status") 43 | texts = { 44 | BadgeType.success: ("green", cfg.get("success_text", "success")), 45 | BadgeType.fail: ("red", cfg.get("fail_text", "failed")), 46 | BadgeType.error: ("blue", cfg.get("error_text", "errored")), 47 | } 48 | 49 | # create all badge files in static output directory 50 | # each build then links to it 51 | self._badges_path = project.storage_path / 'badge' 52 | 53 | if CFG.volatile: 54 | return 55 | 56 | logging.debug('[status_badge] perparing badge files in %s', self._badges_path) 57 | self._badges_path.mkdir(parents=True, exist_ok=True) 58 | 59 | for badge_type, (colorscheme, text) in texts.items(): 60 | gen = BadgeGenerator(base_text, text, right_color=colorscheme) 61 | badge_path = self._badges_path / f"{badge_type.name}.svg" 62 | with badge_path.open("w") as badge_file: 63 | badge_file.write(gen.get_svg()) 64 | 65 | async def get_watcher(self, build: Build, completed: bool) -> Watcher | None: 66 | if completed: 67 | return None 68 | 69 | return _BadgeCreator(build, self) 70 | 71 | class _BadgeCreator(Watcher): 72 | def __init__(self, build: Build, config: StatusBadge): 73 | self._build = build 74 | self._cfg = config 75 | 76 | async def on_update(self, update: Update) -> None: 77 | badge_type: BadgeType | None = None 78 | 79 | match update: 80 | case BuildState(): 81 | if update.is_succeeded(): 82 | badge_type = BadgeType.success 83 | else: 84 | match update.state: 85 | case "failure": 86 | badge_type = BadgeType.fail 87 | case "error": 88 | badge_type = BadgeType.error 89 | case _: 90 | return 91 | 92 | case BuildFinished(): 93 | self._build.deregister_watcher(self) 94 | 95 | case _: 96 | return 97 | 98 | if badge_type is not None: 99 | self._link_badge(badge_type) 100 | 101 | def _link_badge(self, badgetype: BadgeType) -> None: 102 | try: # try forming a relative path on the same FS 103 | # TODO 
python3.12 use relative_to(..., walk_up=True) 104 | badges_path = Path(os.path.relpath(self._cfg._badges_path.resolve(), self._build.path.resolve())) 105 | except ValueError: 106 | badges_path = self._cfg._badges_path 107 | 108 | build_badge = self._build.path / "status.svg" 109 | build_badge_tmp = build_badge.with_name(".status.svg.tmp") 110 | 111 | if CFG.volatile: 112 | logging.debug('[status_badge] would create %s badge as %s', badgetype.name, build_badge) 113 | return 114 | 115 | logging.debug('[status_badge] creating %s badge as %s', badgetype.name, build_badge) 116 | build_badge_tmp.symlink_to( 117 | badges_path / f"{badgetype.name}.svg" 118 | ) 119 | build_badge_tmp.rename(build_badge) 120 | -------------------------------------------------------------------------------- /kevin/service/badge/generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Status badge generation 3 | """ 4 | 5 | import argparse 6 | import inspect 7 | 8 | 9 | class BadgeGenerator: 10 | """ 11 | used for generating status badges. 12 | such a badge is usually embedded in a website so it shows 13 | the CI status with a shiny color. 14 | """ 15 | 16 | def __init__(self, text0, text1, right_color=None): 17 | """ 18 | store the badge texts. 19 | text0 is the first part, text1 the second. 20 | """ 21 | self._text0 = text0 22 | self._text1 = text1 23 | 24 | self.right_color = right_color 25 | 26 | def get_svg(self): 27 | """ 28 | simple svg badge generation 29 | """ 30 | 31 | text0 = self._text0 32 | text1 = self._text1 33 | 34 | # gray, the left part 35 | color0 = ("#444d56", "#1c1f22") 36 | 37 | # right part is customizable 38 | if self.right_color == "green": 39 | color1 = ("#34d058", "#269a3e") 40 | elif self.right_color == "red": 41 | color1 = ("#c65c64", "#cb2431") 42 | elif self.right_color == "blue": 43 | color1 = ("#008bc6", "#005eb6") 44 | else: 45 | raise Exception(f"unknown color scheme requested: {self.right_color!r}") 46 | 47 | # definitely correct text size calculation 48 | # determined by trial and error 49 | # only works for "usual" text lengths 50 | width0 = len(text0) * 6.8 + 10 51 | width1 = len(text1) * 6.8 + 10 52 | 53 | # best view and edit the svg with inkscape to test adjustments live... 54 | svg_content = inspect.cleandoc(f""" 55 | 57 | 58 | 59 | 62 | 70 | 71 | 72 | 73 | 74 | {text0 + ' ' + text1} 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 101 | 102 | 106 | {text0} 107 | 108 | 111 | {text0} 112 | 113 | 118 | {text1} 119 | 120 | 124 | {text1} 125 | 126 | 127 | 128 | """) 129 | return svg_content 130 | 131 | 132 | def main(): 133 | """ 134 | for testing, create a svg with any text 135 | """ 136 | cli = argparse.ArgumentParser(description='') 137 | cli.add_argument("--color", "-c", choices=['red', 'blue', 'green'], 138 | default="blue", 139 | help=("right hand side background color, " 140 | "default=%(default)s")) 141 | cli.add_argument("text0") 142 | cli.add_argument("text1") 143 | 144 | args = cli.parse_args() 145 | 146 | badge = Badge(args.text0, args.text1, args.color) 147 | 148 | print(badge.get_svg()) 149 | 150 | 151 | if __name__ == "__main__": 152 | main() 153 | -------------------------------------------------------------------------------- /kevin/service/github/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | GitHub integration for Kevin. 
3 | 4 | Can receive WebHooks (pull_request or push) 5 | Can send status updates to a pull requests or a pushed branch. 6 | """ 7 | 8 | __all__ = ( 9 | 'GitHubHook', 10 | 'GitHubStatus', 11 | ) 12 | 13 | from .action import GitHubHook, GitHubStatus 14 | -------------------------------------------------------------------------------- /kevin/service/github/action.py: -------------------------------------------------------------------------------- 1 | """ 2 | GitHub interaction entry points for Kevin. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import typing 8 | 9 | from .pull_manager import GitHubPullManager 10 | from .webhook import GitHubHookHandler 11 | from .status import GitHubBuildStatusUpdater 12 | from ...action import Action 13 | from ...httpd import HookTrigger 14 | from ...watcher import Watcher 15 | 16 | if typing.TYPE_CHECKING: 17 | from ...build import Build 18 | from ...project import Project 19 | 20 | 21 | class GitHubHook(HookTrigger): 22 | """ 23 | A trigger from a GitHub webhook. 24 | This class is instanced multiple times, maybe even more 25 | for one project. 26 | 27 | Having one of those for each project is the normal case. 28 | """ 29 | 30 | def __init__(self, cfg, project) -> None: 31 | super().__init__(cfg, project) 32 | 33 | # shared secret 34 | self.hooksecret: bytes = cfg["hooksecret"].encode() 35 | 36 | # allowed github repos 37 | self.repos: set[str] = set() 38 | for repo in cfg["repos"].split(","): 39 | repo = repo.strip() 40 | if repo: 41 | self.repos.add(repo) 42 | 43 | # assign labelname => action for custom control labels 44 | self.ctrl_labels = dict() 45 | for label in cfg.get("ctrl_labels_rebuild", "").split(','): 46 | label = label.strip() 47 | if label: 48 | self.ctrl_labels[label] = "rebuild" 49 | 50 | # pull request manager to detect build aborts 51 | self.pull_manager = GitHubPullManager(self.repos) 52 | 53 | def get_watchers(self): 54 | return [self.pull_manager] 55 | 56 | def get_handler(self): 57 | return ("/hooks/github", GitHubHookHandler) 58 | 59 | 60 | class GitHubStatus(Action): 61 | """ 62 | GitHub status updater action, enable in a project to 63 | allow real-time build updates via the github api. 64 | """ 65 | def __init__(self, cfg, project: Project) -> None: 66 | super().__init__(cfg, project) 67 | self.auth_user = cfg["user"] 68 | self.auth_pass = cfg["token"] 69 | self.repos: set[str] = set() 70 | for repo in cfg.get("repos", "any").split(","): 71 | repo = repo.strip() 72 | if repo: 73 | self.repos.add(repo) 74 | 75 | async def get_watcher(self, build: Build, completed: bool) -> Watcher | None: 76 | # we return a watcher even if the build is completed already 77 | # -> a second pull request for the same build is reported as completed without a rebuild.. 
78 | return GitHubBuildStatusUpdater(build, self) 79 | -------------------------------------------------------------------------------- /kevin/service/github/pull_manager.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cross-links between pull requests 3 | """ 4 | from __future__ import annotations 5 | 6 | import logging 7 | import typing 8 | 9 | from .update import GitHubPullRequest 10 | from ...update import (BuildState, QueueActions) 11 | from ...watcher import Watcher 12 | 13 | if typing.TYPE_CHECKING: 14 | from ...task_queue import TaskQueue 15 | from ...update import Update 16 | 17 | 18 | class GitHubPullManager(Watcher): 19 | """ 20 | Tracks running pull requests and aborts a running build 21 | if the same pull request gets an update. 22 | 23 | Subscribes to all builds. 24 | """ 25 | 26 | def __init__(self, repos: set[str]) -> None: 27 | # repos this pullmanager is responsible for 28 | self.repos = repos 29 | 30 | # all the pulls that we triggered 31 | # (project_name, repo, pull_id) -> (commit_hash, queue) 32 | self._running_pull_builds: dict[tuple[str, str, int], tuple[str, TaskQueue | None]] = dict() 33 | 34 | async def on_update(self, update: Update): 35 | match update: 36 | case GitHubPullRequest(): 37 | # new pull request information that may cause an abort. 38 | key = (update.project_name, update.repo, update.pull_id) 39 | 40 | if update.repo not in self.repos: 41 | # repo is not handled by this pull manager, 42 | # don't do anything. 43 | return 44 | 45 | # get the running build id for this pull request 46 | entry = self._running_pull_builds.get(key) 47 | 48 | if entry is not None: 49 | commit_hash, queue = entry 50 | 51 | else: 52 | # that pull is not running currently, so 53 | # store that it's running. 54 | # the queue is unknown, set it to None. 55 | self._running_pull_builds[key] = (update.commit_hash, None) 56 | return 57 | 58 | if commit_hash == update.commit_hash: 59 | # the same build is running currently, just ignore it 60 | pass 61 | 62 | else: 63 | # the pull request is running already, 64 | # now abort the previous build for it. 65 | 66 | if not queue: 67 | # we didn't get the "Enqueued" update for the build 68 | logging.warning("[github] wanted to abort build " 69 | "in unknown queue") 70 | 71 | else: 72 | # abort it 73 | await queue.abort_build(update.project_name, commit_hash) 74 | 75 | # and store the new build id for that pull request 76 | self._running_pull_builds[key] = (update.commit_hash, None) 77 | 78 | case QueueActions(): 79 | # catch the queue of the build actions 80 | # only if we track that build, we store the queue 81 | 82 | # select the tracked build and store the learned queue 83 | for key, (commit_hash, _) in self._running_pull_builds.items(): 84 | if update.build_id == commit_hash: 85 | self._running_pull_builds[key] = (commit_hash, update.queue) 86 | 87 | case BuildState(): 88 | # build state to remove a running pull request 89 | if update.is_completed(): 90 | for key, (commit_hash, queue) in self._running_pull_builds.items(): 91 | if update.build_id == commit_hash: 92 | # remove the build from the run list 93 | del self._running_pull_builds[key] 94 | return 95 | -------------------------------------------------------------------------------- /kevin/service/github/update.py: -------------------------------------------------------------------------------- 1 | """ 2 | Message updates sent due to GitHub stuff. 
3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | from abc import ABC, abstractmethod 8 | 9 | from ...update import GeneratedUpdate 10 | 11 | 12 | class GitHubStatusURL(ABC, GeneratedUpdate): 13 | """ Where GitHub status updates are to be sent to """ 14 | 15 | def __init__(self, destination: str, repo: str) -> None: 16 | self.destination = destination 17 | self.repo = repo 18 | 19 | @abstractmethod 20 | def target_id(self) -> str: 21 | raise NotImplementedError() 22 | 23 | 24 | class GitHubPullRequestStatusURL(GitHubStatusURL): 25 | """ Where status updates for a GitHub pull request are sent to """ 26 | def __init__(self, destination: str, repo: str, pull_id: int) -> None: 27 | super().__init__(destination, repo) 28 | self.pull_id: int = pull_id 29 | 30 | def target_id(self) -> str: 31 | return f"{self.repo}/pull/{self.pull_id}" 32 | 33 | 34 | class GitHubBranchStatusURL(GitHubStatusURL): 35 | """ Where status updates for GitHub branch update are sent to """ 36 | def __init__(self, destination: str, repo: str, branch_name: str) -> None: 37 | super().__init__(destination, repo) 38 | self.branch_name: str = branch_name 39 | 40 | def target_id(self) -> str: 41 | return f"{self.repo}/branch/{self.branch_name}" 42 | 43 | 44 | class GitHubPullRequest(GeneratedUpdate): 45 | """ Sent when a github pull request was created or updated """ 46 | 47 | def __init__(self, project_name, repo, pull_id, commit_hash): 48 | self.project_name = project_name 49 | self.repo = repo 50 | self.pull_id = pull_id 51 | self.commit_hash = commit_hash 52 | 53 | 54 | class GitHubBranchUpdate(GeneratedUpdate): 55 | """ 56 | Sent when a branch on github is pushed to. 57 | e.g. refs/heads/master, which is not a pull request. 58 | 59 | This update can be consumed e.g. by badge generators 60 | or symlink handlers. 61 | 62 | TODO: base on a generic BranchUpdate event. 63 | """ 64 | 65 | def __init__(self, project_name, repo, branch, commit_hash): 66 | self.project_name = project_name 67 | self.repo = repo 68 | self.branch = branch 69 | self.commit_hash = commit_hash 70 | 71 | 72 | class GitHubLabelUpdate(GeneratedUpdate): 73 | """ 74 | Send to perform changes to github issue/pull request labels. 75 | """ 76 | 77 | def __init__(self, project_name, repo, pull_id, issue_url, 78 | action, label): 79 | self.project_name = project_name 80 | self.repo = repo 81 | self.pull_id = pull_id 82 | self.issue_url = issue_url 83 | self.action = action 84 | self.label = label 85 | -------------------------------------------------------------------------------- /kevin/service/github/util.py: -------------------------------------------------------------------------------- 1 | import hmac 2 | from hashlib import sha1 3 | 4 | 5 | def verify_secret(blob, headers, secret): 6 | """ 7 | verify the github hmac signature with our shared secret. 8 | """ 9 | 10 | localsignature = hmac.new(secret, blob, sha1) 11 | goodsig = 'sha1=' + localsignature.hexdigest() 12 | msgsig = headers.get("X-Hub-Signature") 13 | if not msgsig: 14 | raise ValueError("message doesn't have a signature.") 15 | 16 | if hmac.compare_digest(msgsig, goodsig): 17 | return True 18 | 19 | return False 20 | -------------------------------------------------------------------------------- /kevin/service/symlink.py: -------------------------------------------------------------------------------- 1 | """ 2 | Symlink management for the build. 3 | Used to maintain branch pointers to commits. 
4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | import logging 9 | import os.path 10 | import typing 11 | from pathlib import Path 12 | 13 | from ..action import Action 14 | from ..config import CFG 15 | from ..update import BuildSource, BuildState, BuildFinished 16 | from ..util import strlazy 17 | from ..watcher import Watcher 18 | 19 | if typing.TYPE_CHECKING: 20 | from ..update import Update 21 | from ..build import Build 22 | from ..project import Project 23 | 24 | 25 | class SymlinkBranch(Action): 26 | """ 27 | Manages branch symlinks in the output folder. 28 | """ 29 | 30 | def __init__(self, cfg: dict[str, str], project: Project) -> None: 31 | super().__init__(cfg, project) 32 | 33 | target_dir_raw = cfg.get("target_dir", "branches/") 34 | self.target_dir: Path = project.storage_path / Path(target_dir_raw) 35 | # exclude project job output dir 36 | if str(self.target_dir.resolve()).startswith(str((project.storage_path / "jobs").resolve())): 37 | raise ValueError("[symlink_branch] 'target_dir' clashes with build job directory:" 38 | f"{self.target_dir}") 39 | 40 | self._only_branches: set[str] = set() 41 | for branch in cfg.get("only", "").split(","): 42 | branch = branch.strip() 43 | if branch: 44 | self._only_branches.add(branch) 45 | 46 | self._exclude_branches: set[str] = set() 47 | for branch in cfg.get("exclude", "").split(","): 48 | branch = branch.strip() 49 | if branch: 50 | self._exclude_branches.add(branch) 51 | 52 | self._aliases: dict[str, str] = dict() 53 | for branch in cfg.get("alias", "").split(","): 54 | branch = branch.strip() 55 | if branch: 56 | if ":" not in branch: 57 | raise ValueError(f"branch alias format is :, missing in {branch!r}") 58 | branch, alias = branch.split(":", maxsplit=1) 59 | self._aliases[branch] = alias 60 | 61 | if self._only_branches: 62 | self._only_branches -= self._exclude_branches 63 | 64 | if CFG.volatile: 65 | return 66 | self.target_dir.mkdir(exist_ok=True, parents=True) 67 | 68 | def source_allowed(self, update: BuildSource) -> bool: 69 | if not (update.repo_id and update.branch): 70 | return False 71 | branch_id = f"{update.repo_id}/{update.branch}" 72 | if self._only_branches: 73 | return branch_id in self._only_branches 74 | 75 | return branch_id not in self._exclude_branches 76 | 77 | def get_allowed_branches_str(self) -> str: 78 | if self._only_branches: 79 | return f"only: {', '.join(self._only_branches)}" 80 | 81 | return f"not: {', '.join(self._exclude_branches)}" 82 | 83 | def get_alias(self, branch: str) -> str: 84 | alias = self._aliases.get(branch) 85 | return alias or branch 86 | 87 | async def get_watcher(self, build: Build, completed: bool) -> Watcher | None: 88 | if completed: 89 | return None 90 | 91 | return SymlinkCreator(build, self) 92 | 93 | 94 | class SymlinkCreator(Watcher): 95 | """ 96 | Watches for git source messages so it can create symlinks 97 | that link to the commit for that branch. 
98 | """ 99 | 100 | def __init__(self, build: Build, config: SymlinkBranch): 101 | self._build = build 102 | self._cfg = config 103 | 104 | # build sources 105 | self._build_sources: list[BuildSource] = list() 106 | 107 | async def on_update(self, update: Update): 108 | match update: 109 | case BuildSource(): 110 | if self._cfg.source_allowed(update): 111 | logging.debug("[symlink_branch] registering link source %s/%s", update.repo_id, update.branch) 112 | self._build_sources.append(update) 113 | else: 114 | logging.debug("[symlink_branch] ignoring link source %s/%s due to match rules: '%s'", update.repo_id, update.branch, strlazy(lambda: self._cfg.get_allowed_branches_str())) 115 | 116 | case BuildState(): 117 | if update.is_completed(): 118 | for source in self._build_sources: 119 | self._link_source(source) 120 | 121 | case BuildFinished(): 122 | self._build.deregister_watcher(self) 123 | 124 | case _: 125 | pass 126 | 127 | def _link_source(self, source: BuildSource) -> None: 128 | """ 129 | symlink platform/repo/branch from a build source to the build directory. 130 | """ 131 | link_name = self._cfg.get_alias(f"{source.repo_id}/{source.branch}") 132 | 133 | link_path = self._cfg.target_dir / link_name 134 | if '..' in Path(link_name).parts: 135 | raise ValueError(f"[symlink_branch] branch link {link_name!r} contains '..', could be placed outside target directory") 136 | if not CFG.volatile: 137 | link_path.parent.mkdir(exist_ok=True, parents=True) 138 | 139 | tmp_branch_link = link_path.parent / f".{link_path.name}.tmp" 140 | 141 | try: 142 | # TODO python3.12 use relative_to(..., walk_up=True) 143 | build_path = Path(os.path.relpath(self._build.path.resolve(), link_path.parent)) 144 | except ValueError: 145 | # absolute path needed for filesystem boundary 146 | build_path = self._build.path.resolve() 147 | 148 | if CFG.volatile: 149 | logging.debug("[symlink_branch] would link branch %s to %s", link_path, build_path) 150 | return 151 | 152 | logging.debug("[symlink_branch] linking branch %s to %s", link_path, build_path) 153 | # allow / in branch names 154 | link_path.parent.mkdir(parents=True, exist_ok=True) 155 | tmp_branch_link.symlink_to(build_path) 156 | tmp_branch_link.rename(link_path) 157 | -------------------------------------------------------------------------------- /kevin/service_meta.py: -------------------------------------------------------------------------------- 1 | """ 2 | Supported service base definitions. 
3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | 8 | from .job import JobAction 9 | from .service import Service, github, badge, symlink 10 | 11 | 12 | def get_service(service_name: str) -> type[Service]: 13 | """ 14 | get the service class for a service name 15 | """ 16 | 17 | match service_name: 18 | case "job": 19 | return JobAction 20 | case "status_badge": 21 | return badge.StatusBadge 22 | case "symlink_branch": 23 | return symlink.SymlinkBranch 24 | case "github_webhook": 25 | return github.GitHubHook 26 | case "github_status": 27 | return github.GitHubStatus 28 | case _: 29 | raise ValueError(f"unknown service {service_name!r} requested") 30 | -------------------------------------------------------------------------------- /kevin/simulator/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SFTtech/kevin-ci/5e56edbb31785dc34315f572c23f53b9e34b130f/kevin/simulator/__init__.py -------------------------------------------------------------------------------- /kevin/simulator/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Simulates the server-side pull request api. 5 | 6 | Delivers a pull request hook to the specified url, 7 | then waits for interaction. 8 | """ 9 | 10 | import argparse 11 | import asyncio 12 | 13 | from . import github 14 | from ..util import log_setup 15 | 16 | 17 | def main(): 18 | cmd = argparse.ArgumentParser() 19 | cmd.add_argument("repo", help="clone url/path to the test repo") 20 | cmd.add_argument("--branch", "-b", help="branch of the repo to use. default: use default branch") 21 | cmd.add_argument("--commit", help="commit hash to build. default: use branch head") 22 | cmd.add_argument("project", help="project to trigger the build for") 23 | cmd.add_argument("config_file", help="config file of to-be-tested kevin") 24 | cmd.add_argument("-p", "--port", type=int, default=8423, 25 | help="port to run the simulation on") 26 | cmd.add_argument("-l", "--listen", default="127.0.0.1", 27 | help="address to listen on for requests") 28 | cmd.add_argument("--local-repo", action="store_true", 29 | help=("serve a filesystem-local repo via http. " 30 | "beware: provide the .git of that repo! 
" 31 | "`git update-server-info` is called on that!")) 32 | cmd.add_argument("--local-repo-address", default="10.0.2.2", 33 | help=("the builder machine can reach this simulator " 34 | "under the given address.")) 35 | cmd.add_argument("-d", "--debug", action="store_true", 36 | help="enable asyncio debugging") 37 | cmd.add_argument("-v", "--verbose", action="count", default=0, 38 | help="increase program verbosity") 39 | cmd.add_argument("-q", "--quiet", action="count", default=0, 40 | help="decrease program verbosity") 41 | 42 | sp = cmd.add_subparsers(dest="module", required=True) 43 | 44 | # call argparser hooks 45 | github.GitHub.argparser(sp) 46 | 47 | args = cmd.parse_args() 48 | 49 | # set up log level 50 | log_setup(args.verbose - args.quiet) 51 | 52 | service = args.service(args) 53 | 54 | try: 55 | asyncio.run(service.run(), debug=args.debug) 56 | except KeyboardInterrupt: 57 | pass 58 | 59 | print("cya!") 60 | 61 | 62 | if __name__ == "__main__": 63 | main() 64 | -------------------------------------------------------------------------------- /kevin/simulator/service.py: -------------------------------------------------------------------------------- 1 | """ 2 | Definitions for common functionality of simulated services. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import ipaddress 8 | from argparse import Namespace 9 | 10 | from ..config import Config 11 | 12 | 13 | class Service: 14 | """ 15 | Base class for a simulated service. 16 | """ 17 | 18 | def __init__(self, args: Namespace) -> None: 19 | self.cfg = Config() 20 | self.cfg.load(args.config_file) 21 | 22 | # git repo serving: 23 | self.local_repo = args.local_repo 24 | self.local_repo_address = args.local_repo_address 25 | self.repo_server: str | None = None 26 | 27 | # repo config 28 | self.repo = args.repo 29 | self.branch = args.branch # name or None 30 | self.commit = args.commit # commit hash or None 31 | self.project = args.project 32 | if self.project not in self.cfg.projects: 33 | raise ValueError("unknown project '%s', available: %s" % ( 34 | self.project, self.cfg.projects.keys() 35 | )) 36 | 37 | # simulator reachability: 38 | self.port = args.port 39 | self.listen = ipaddress.ip_address(args.listen) 40 | 41 | @classmethod 42 | def argparser(cls, subparsers): 43 | """ implement to add a service-specific argparser """ 44 | raise NotImplementedError() 45 | 46 | async def run(self): 47 | """ simulator-specific code """ 48 | raise NotImplementedError() 49 | -------------------------------------------------------------------------------- /kevin/simulator/util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility function for the simulator 3 | """ 4 | 5 | import asyncio 6 | 7 | from asyncio.subprocess import Process 8 | 9 | from typing import Callable 10 | 11 | 12 | 13 | async def get_hash(repo, branch_name: str | None = None) -> str: 14 | """ 15 | return the commit hash of the given repo at HEAD or a branch name. 
16 | """ 17 | 18 | if branch_name: 19 | ref = f"refs/heads/{branch_name}" 20 | else: 21 | ref = "HEAD" 22 | 23 | ref_hash: str | None = None 24 | 25 | def line_handler(refhash: str, refname: str, proc: Process) -> bool: 26 | nonlocal ref_hash 27 | if ref == refname: 28 | ref_hash = refhash 29 | proc.terminate() 30 | return False 31 | return True 32 | 33 | await git_ls_remote(repo, ref, line_handler) 34 | 35 | if not ref_hash: 36 | raise Exception(f"could not find {ref!r} in repo {repo!r}") 37 | return ref_hash 38 | 39 | 40 | async def get_refnames(repo: str, ref: str, only_branches: bool = False) -> list[str]: 41 | ref_names: list[str] = list() 42 | 43 | def line_handler(refhash: str, refname: str, proc: Process) -> bool: 44 | if ref == refhash and refname != "HEAD": 45 | ref_names.append(refname) 46 | return True 47 | 48 | await git_ls_remote(repo, None, line_handler) 49 | 50 | if only_branches: 51 | return [name.lstrip("refs/heads/") 52 | for name in ref_names 53 | if name.startswith("refs/heads/")] 54 | 55 | return ref_names 56 | 57 | 58 | async def git_ls_remote(repo, ref: str | None = None, 59 | line_func: Callable[[str, str, Process], bool] = lambda h, n, p: True) -> None: 60 | cmd = ["git", "ls-remote", repo] + ([ref] if ref else []) 61 | proc = await asyncio.create_subprocess_exec( 62 | *cmd, 63 | stdout=asyncio.subprocess.PIPE, 64 | ) 65 | 66 | if proc.stdout is None: 67 | raise Exception("stdout not captured") 68 | 69 | while True: 70 | data = await proc.stdout.readline() 71 | if not data: 72 | break 73 | line = data.decode('utf8').rstrip() 74 | refhash, refname = line.split() 75 | keep_looping = line_func(refhash, refname, proc) 76 | if not keep_looping: 77 | break 78 | 79 | retval = await proc.wait() 80 | 81 | if retval != 0: 82 | raise Exception(f"failed to determine {ref!r} hash in {repo!r}") 83 | 84 | 85 | async def update_server_info(repo): 86 | """ 87 | call `git update-server-info` in the given repo. 88 | """ 89 | 90 | print("updating http server info in '%s'..." % repo) 91 | proc = await asyncio.create_subprocess_shell( 92 | "cd %s && git update-server-info" % repo 93 | ) 94 | 95 | await proc.wait() 96 | -------------------------------------------------------------------------------- /kevin/task_queue.py: -------------------------------------------------------------------------------- 1 | """ 2 | Task queuing for Kevin. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import asyncio 8 | import functools 9 | import logging 10 | import traceback 11 | import typing 12 | 13 | if typing.TYPE_CHECKING: 14 | from .build import Build 15 | from .job import Job 16 | from .job_manager import JobManager 17 | 18 | 19 | class TaskQueue: 20 | """ 21 | Queue to manage pending builds and jobs. 
22 | """ 23 | 24 | def __init__(self, 25 | loop: asyncio.AbstractEventLoop, 26 | job_manager: JobManager, 27 | max_running: int, 28 | max_queued: int): 29 | 30 | # event loop 31 | self._loop = loop 32 | 33 | # job distribution 34 | self._job_manager = job_manager 35 | 36 | # builds that should be run 37 | self._build_queue: asyncio.Queue[Build] = asyncio.Queue(maxsize=max_queued) 38 | 39 | # (project_name, commit_hash) -> Build 40 | self._builds: dict[tuple[str, str], Build] = dict() 41 | 42 | # jobs that should be run 43 | self._job_queue: asyncio.Queue[Job] = asyncio.Queue(maxsize=max_queued) 44 | 45 | # running jobs 46 | # job -> job_task 47 | self._jobs: dict[Job, asyncio.Task[Job]] = dict() 48 | 49 | # was the execution of the queue cancelled 50 | self._cancelled = False 51 | 52 | # number of jobs running in parallel 53 | self._max_running: int = max_running 54 | 55 | async def run(self): 56 | try: 57 | async with asyncio.TaskGroup() as tg: 58 | tg.create_task(self.process_builds()) 59 | tg.create_task(self.process_jobs()) 60 | except asyncio.CancelledError: 61 | await self.cancel() 62 | raise 63 | 64 | async def process_builds(self) -> None: 65 | """ 66 | process items from the build queue 67 | """ 68 | while True: 69 | build = await self._build_queue.get() 70 | 71 | build_key = (build.project.name, build.commit_hash) 72 | # it's in the dict if it wasn't aborted in the meantime 73 | if build_key in self._builds: 74 | try: 75 | def remove_build(build: Build): 76 | self._builds.pop(build_key, None) 77 | 78 | # this blocks just as long as it needs to schedule jobs 79 | await build.enqueue(self, on_finish=remove_build) 80 | except Exception: 81 | logging.exception("failed to run build %s", build) 82 | 83 | async def add_build(self, build: Build): 84 | """ 85 | Add a build to be processed. 86 | Called from where a new build was created and should now be run. 87 | """ 88 | 89 | build_key = (build.project.name, build.commit_hash) 90 | 91 | if build_key not in self._builds and build.requires_run(): 92 | logging.info("[queue] adding build: [\x1b[33m%s\x1b[m] @ %s", 93 | build.commit_hash, 94 | build.clone_url) 95 | 96 | self._builds[build_key] = build 97 | 98 | # the build shall now run. 99 | # this is done by adding jobs to this queue. 100 | await self._build_queue.put(build) 101 | 102 | def remove_build(self, build: Build): 103 | """ Remove a finished build """ 104 | del self._builds[(build.project.name, build.commit_hash)] 105 | 106 | async def abort_build(self, project_name: str, commit_hash: str): 107 | """ Abort a running build by aborting all pending jobs """ 108 | 109 | build_key = (project_name, commit_hash) 110 | build = self._builds.get(build_key) 111 | 112 | if build: 113 | if not build.completed: 114 | await build.abort() 115 | del self._builds[build_key] 116 | 117 | async def add_job(self, job): 118 | """ Add a job to the queue """ 119 | 120 | if job.completed: 121 | # don't enqueue completed jobs. 122 | return 123 | 124 | try: 125 | # place the job into the pending list. 
126 | self._job_queue.put_nowait(job) 127 | 128 | except asyncio.QueueFull: 129 | await job.error("overloaded; job was dropped.") 130 | 131 | async def process_jobs(self): 132 | """ process jobs from the queue forever """ 133 | 134 | while not self._cancelled: 135 | 136 | if self._job_queue.empty(): 137 | logging.info("[queue] \x1b[32mWaiting for job...\x1b[m") 138 | 139 | # fetch new job from the queue 140 | job = await self._job_queue.get() 141 | 142 | logging.info("[queue] \x1b[32mProcessing job\x1b[m %s.%s for " 143 | "[\x1b[34m%s\x1b[m]...", 144 | job.build.project.name, 145 | job.name, 146 | job.build.commit_hash) 147 | 148 | # spawn the build job. 149 | # the job will be distributed to one of the runners. 150 | job_task = self._loop.create_task(job.run(self._job_manager)) 151 | 152 | self._jobs[job] = job_task 153 | 154 | # register the callback when the job is done 155 | job_task.add_done_callback(functools.partial( 156 | self.job_done, job=job)) 157 | 158 | # wait for jobs to complete if there are too many running 159 | # this can be done very dynamically in the future. 160 | if len(self._jobs) >= self._max_running or self._cancelled: 161 | logging.info("[queue] runlimit of %d reached, " 162 | "waiting for completion...", self._max_running) 163 | 164 | # wait until a "slot" is available, then the next job 165 | # can be processed. 166 | await asyncio.wait( 167 | self._jobs.values(), 168 | return_when=asyncio.FIRST_COMPLETED) 169 | 170 | def job_done(self, task: asyncio.Task, job: Job) -> None: 171 | """ callback for finished jobs """ 172 | logging.info("[queue] Job %s.%s finished for [\x1b[34m%s\x1b[m].", 173 | job.build.project.name, 174 | job.name, 175 | job.build.commit_hash) 176 | 177 | exc = task.exception() 178 | if exc: 179 | logging.error("[queue] job %s excepted: %s", job, "".join(traceback.format_exception(exc))) 180 | 181 | try: 182 | del self._jobs[job] 183 | except KeyError: 184 | logging.exception("\x1b[31mBUG\x1b[m: job %s not in running set", job) 185 | 186 | async def cancel(self): 187 | """ cancel all running jobs """ 188 | 189 | to_cancel = len(self._jobs) 190 | self._cancelled = True 191 | 192 | if to_cancel == 0: 193 | return 194 | 195 | logging.info("[queue] cancelling running jobs...") 196 | 197 | for job_fut in self._jobs.values(): 198 | job_fut.cancel() 199 | 200 | # wait until all jobs were cancelled 201 | results = await asyncio.gather(*self._jobs.values(), 202 | return_exceptions=True) 203 | 204 | cancels = [res for res in results if 205 | isinstance(res, asyncio.CancelledError)] 206 | 207 | logging.info("[queue] cancelled %d/%d job%s", 208 | len(cancels), 209 | to_cancel, 210 | "s" if to_cancel > 1 else "") 211 | 212 | async def cancel_job(self, job): 213 | """ cancel the given job by accessing its future """ 214 | 215 | if job not in self._jobs: 216 | logging.error("[queue] tried to cancel unknown job: %s", job) 217 | else: 218 | self._jobs[job].cancel() 219 | -------------------------------------------------------------------------------- /kevin/trigger.py: -------------------------------------------------------------------------------- 1 | """ 2 | Build trigger base class definition. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import typing 8 | 9 | from .service import Service 10 | 11 | if typing.TYPE_CHECKING: 12 | from .project import Project 13 | 14 | 15 | class Trigger(Service): 16 | """ 17 | Base class for all project build triggers. 
18 |     These can start a build by some means, either by external notification,
19 |     or by active polling.
20 |     """
21 | 
22 |     def __init__(self, cfg: dict[str, str], project: Project):
23 |         super().__init__(cfg, project)
24 | 
25 |     def get_watchers(self):
26 |         """
27 |         Return a list of watcher objects. These are attached in addition
28 |         to the watchers returned by actions; that way, e.g. a
29 |         github trigger can attach a pull request watcher.
30 |         """
31 |         return []
32 | 
33 |     def merge_cfg(self, urlhandlers):
34 |         """
35 |         Perform merge operations so that this trigger only functions as
36 |         a config for another class that is instantiated later.
37 |         E.g. the config for all the webhooks is defined multiple times
38 |         as a trigger, but the server waiting for it is only created once.
39 |         This function prepares this merging.
40 |         """
41 |         pass
-------------------------------------------------------------------------------- /kevin/util.py: --------------------------------------------------------------------------------
1 | """
2 | Various utility functions.
3 | """
4 | 
5 | from __future__ import annotations
6 | 
7 | import asyncio
8 | import logging
9 | import os
10 | import re
11 | import tempfile
12 | import typing
13 | 
14 | from pathlib import Path
15 | 
16 | if typing.TYPE_CHECKING:
17 |     from typing import Sequence
18 | 
19 | # convenience infinity.
20 | INF = float("inf")
21 | 
22 | 
23 | def log_setup(setting, default=1):
24 |     """
25 |     Perform setup for the logger.
26 |     Run this before any logging call happens.
27 | 
28 |     if setting is 0: the default is used, which is WARNING.
29 |     else: setting + default is used.
30 |     """
31 | 
32 |     levels = (logging.ERROR, logging.WARNING, logging.INFO,
33 |               logging.DEBUG, logging.NOTSET)
34 | 
35 |     factor = clamp(default + setting, 0, len(levels) - 1)
36 |     level = levels[factor]
37 | 
38 |     logging.basicConfig(level=level, format="[%(asctime)s] [%(name)s] %(message)s")
39 |     logging.error("loglevel: %s", logging.getLevelName(level))
40 |     logging.captureWarnings(True)
41 | 
42 |     logging.getLogger("asyncio").setLevel(level=logging.WARNING)
43 | 
44 | 
45 | def clamp(number, smallest, largest):
46 |     """ return number but limit it to the inclusive given value range """
47 |     return max(smallest, min(number, largest))
48 | 
49 | 
50 | # prefix to factor ** x map
51 | SIZESUFFIX_POWER = {
52 |     "": 0,
53 |     "K": 1,
54 |     "M": 2,
55 |     "G": 3,
56 |     "T": 4,
57 |     "P": 5,
58 |     "E": 6,
59 | }
60 | 
61 | 
62 | def parse_size(text):
63 |     """
64 |     parse a text like '10GB' as 10 gigabytes = 10 * 1000**3 bytes
65 |     returns size in bytes.
66 |     """
67 | 
68 |     mat = re.match(r"(\d+)\s*([KMGTPE]?)(i?)B", text)
69 |     if not mat:
70 |         raise ValueError(
71 |             "invalid size '%s', expected e.g. 10B, 42MiB or 1337KiB" % text)
72 | 
73 |     number = int(mat.group(1))
74 |     suffix = mat.group(2)
75 |     factor = 1024 if mat.group(3) else 1000
76 | 
77 |     power = SIZESUFFIX_POWER[suffix]
78 | 
79 |     return number * (factor ** power)
80 | 
81 | 
82 | def parse_time(text, allow_inf=True):
83 |     """
84 |     parse a text like '10min' as 10 * 60 s
85 |     returns time in seconds.
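   | 
   |     worked examples, derived from the suffix factors below:
   |         parse_time("10min") == 600
   |         parse_time("10m")   == 600
   |         parse_time("42h")   == 151200
   |         parse_time("inf")   == float("+inf")   # only with allow_inf=True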
86 |     """
87 | 
88 |     if allow_inf and text == "inf":
89 |         return float("+inf")
90 | 
91 |     mat = re.match(r"(\d+)\s*(min|[hsm])", text)
92 |     if not mat:
93 |         raise ValueError(
94 |             "invalid duration '%s', valid: 10min, 10m, 42h or 1337s" % text)
95 | 
96 |     number = int(mat.group(1))
97 |     suffix = mat.group(2)
98 | 
99 |     factor = {"s": 1, "min": 60, "m": 60, "h": 3600}[suffix]
100 |     return number * factor
101 | 
102 | 
103 | class SSHKnownHostFile:
104 |     """
105 |     provide a temporary known hosts file for ssh
106 |     """
107 |     def __init__(self, host, port, key):
108 |         self.host = host
109 |         self.port = int(port)
110 |         self.key = key
111 |         self.tmpfile = None
112 | 
113 |     def create(self):
114 |         """ Generate a temporary file with the key content """
115 | 
116 |         if self.key is not None:
117 |             self.tmpfile = tempfile.NamedTemporaryFile(mode='w')
118 | 
119 |             # entry in the known hosts file
120 |             key_data = "[%s]:%s %s\n" % (self.host, self.port, self.key)
121 | 
122 |             self.tmpfile.write(key_data)
123 |             self.tmpfile.file.flush()
124 | 
125 |     def remove(self):
126 |         """ Remove the generated file """
127 |         if self.key is not None:
128 |             self.tmpfile.close()
129 | 
130 |     def empty(self):
131 |         """
132 |         Return True if there is no key stored,
133 |         i.e. no host key verification is performed because the key was None
134 |         """
135 |         return self.key is None
136 | 
137 |     def get_options(self):
138 |         """ Return the ssh options to use this temporary known hosts file """
139 | 
140 |         if self.key is None:
141 |             return [
142 |                 "-o", "UserKnownHostsFile=/dev/null",
143 |                 "-o", "StrictHostKeyChecking=no",
144 |             ]
145 | 
146 |         if not self.tmpfile:
147 |             raise Exception("SSHKnownHostFile.create() not called, "
148 |                             "or SSHKnownHostFile not used via 'with'")
149 | 
150 |         return [
151 |             "-o", "UserKnownHostsFile=%s" % self.tmpfile.name,
152 |             "-o", "StrictHostKeyChecking=yes",
153 |         ]
154 | 
155 |     def __enter__(self):
156 |         self.create()
157 |         return self
158 | 
159 |     def __exit__(self, exc, value, traceback):
160 |         self.remove()
161 | 
162 | 
163 | def parse_connection_entry(name, entry, cfglocation=None, require_key=True,
164 |                            protos=("unix", "ssh")):
165 |     """
166 |     parse a connection configuration entry.
167 |     supported: unix and ssh.
168 |     """
169 | 
170 |     if cfglocation is None:
171 |         cfglocation = Path(".")
172 | 
173 |     def parse_ssh(match):
174 |         """ parse the ssh connection entry """
175 |         connection = "ssh"
176 | 
177 |         user = match.group(1)
178 | 
179 |         # (host, port)
180 |         location = (match.group(2), int(match.group(3)))
181 | 
182 |         # ssh key entry or name of key file
183 |         if match.group(4):
184 |             key_entry = match.group(5).strip()
185 | 
186 |             if key_entry.startswith("ssh-"):
187 |                 key = key_entry
188 |             else:
189 |                 # it's given as path to public key storage file
190 |                 path = Path(os.path.expanduser(key_entry))
191 | 
192 |                 if not path.is_absolute():
193 |                     path = cfglocation / path
194 | 
195 |                 with open(str(path)) as keyfile:
196 |                     key = keyfile.read().strip()
197 |         else:
198 |             if require_key:
199 |                 raise ValueError("For '%s=': please specify "
200 |                                  "ssh key or keyfile with "
201 |                                  "'... = $key or $filename'" % (name))
202 | 
203 |             # no key was given.
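   |             # with key = None, SSHKnownHostFile.get_options() will disable
   |             # StrictHostKeyChecking, so the ssh host is not verified at all.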
204 |             key = None
205 | 
206 |         return user, connection, location, key
207 | 
208 |     def parse_unix(match):
209 |         """ parse the unix connection entry """
210 |         connection = "unix"
211 | 
212 |         if match.group(1):
213 |             user = match.group(2)
214 |         else:
215 |             user = None
216 | 
217 |         location = match.group(3)
218 | 
219 |         return user, connection, location, None
220 | 
221 |     formats = {
222 |         "ssh": (("ssh://user@host:port = ssh-rsa vmfingerprint "
223 |                  "(or ~/.ssh/known_hosts)"),
224 |                 re.compile(r"ssh://(.+)@(.+):(\d+)\s*(=\s*(.*))?"),
225 |                 parse_ssh),
226 | 
227 |         "unix": ("unix://justinuser@/path/to/socket",
228 |                  re.compile(r"unix://((.+)@)?(.+)"),
229 |                  parse_unix),
230 |     }
231 | 
232 |     for proto in protos:
233 |         match = formats[proto][1].match(entry)
234 | 
235 |         if match:
236 |             return formats[proto][2](match)
237 | 
238 |     raise ValueError("you wrote:\n'%s = %s'\n"
239 |                      "-> you need to provide one of:\n %s" % (
240 |                          name, entry, "\n ".join(
241 |                              fmt[0] for fmt in formats.values()
242 |                          )))
243 | 
244 | 
245 | class AsyncWith:
246 |     """
247 |     Base class for objects that are usable with `async with` only.
248 |     """
249 | 
250 |     def __enter__(self):
251 |         raise Exception("use async with!")
252 | 
253 |     def __exit__(self, exc, value, traceback):
254 |         raise Exception("use async with!")
255 | 
256 | 
257 | class TerminateTaskGroup(Exception):
258 |     """ Exception to terminate asyncio.TaskGroup """
259 |     pass
260 | 
261 | 
262 | async def terminate_task_group(when: asyncio.Event):
263 |     """ task part of the TaskGroup which triggers termination when the event fires """
264 |     await when.wait()
265 |     raise TerminateTaskGroup()
266 | 
267 | 
268 | class strlazy:
269 |     """
270 |     usage: logging.debug("rolf %s", strlazy(lambda: do_something()))
271 |     do_something is only called when the debug message is actually printed
272 |     the callable could also simply return an f-string.
273 |     """
274 |     def __init__(self, fun):
275 |         self._fun = fun
276 |     def __str__(self):
277 |         return self._fun()
278 | 
279 | 
280 | class strflazy:
281 |     """
282 |     usage: logging.debug("rolf %s", strflazy("stuff %s", do_something()))
283 |     the %-style formatting only happens when the message is actually logged
284 |     (the arguments themselves are still evaluated eagerly).
285 |     """
286 |     def __init__(self, txt, *args):
287 |         self._txt = txt
288 |         self._args = args
289 |     def __str__(self):
290 |         return self._txt % self._args
291 | 
292 | 
293 | T = typing.TypeVar("T")
294 | def first_instance(seq: Sequence[T], base: type[T]) -> T | None:
295 |     """
296 |     given a sequence, return the first element that is an instance of `base`.
297 |     """
298 |     result: T | None = None
299 |     for item in seq:
300 |         if isinstance(item, base):
301 |             result = item
302 |             break
303 | 
304 |     return result
-------------------------------------------------------------------------------- /kevin/watchable.py: --------------------------------------------------------------------------------
1 | """
2 | This is the base for Kevin's message bus.
3 | A watchable can be watched by a watcher.
4 | """
5 | 
6 | from __future__ import annotations
7 | 
8 | import asyncio
9 | import typing
10 | 
11 | from .watcher import Watcher
12 | from .update import Update
13 | 
14 | 
15 | class Watchable:
16 |     """
17 |     Abstract watchable which can be watched by a Watcher.
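   | 
   |     A rough sketch of the pattern (`PrintWatcher` and `SomeWatchable` are
   |     illustrative names, not real kevin classes):
   | 
   |         class PrintWatcher(Watcher):
   |             async def on_update(self, update):
   |                 print("received:", update)
   | 
   |         watchable = SomeWatchable()   # any Watchable subclass
   |         await watchable.register_watcher(PrintWatcher())
   |         await watchable.send_update(some_update)   # reaches all watchers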
18 |     """
19 | 
20 |     def __init__(self) -> None:
21 |         self._watchers: set[Watcher] = set()
22 |         self._watchers_lock = asyncio.Lock()
23 | 
24 |     async def register_watcher(self, watcher: Watcher):
25 |         """
26 |         Register a watcher object,
27 |         which gets updates sent by send_update(update).
28 |         """
29 | 
30 |         if not isinstance(watcher, Watcher):
31 |             raise Exception("invalid watcher type: %s" % type(watcher))
32 | 
33 |         self._watchers.add(watcher)
34 | 
35 |         await self.on_watcher_registered(watcher)
36 | 
37 |     async def on_watcher_registered(self, watcher: Watcher):
38 |         """
39 |         Custom actions when a watcher subscribes for receiving new updates
40 |         """
41 |         pass
42 | 
43 |     def deregister_watcher(self, watcher: Watcher, missing_ok: bool = False):
44 |         """ Un-subscribe a watcher from the notification list """
45 |         if missing_ok:
46 |             self._watchers.discard(watcher)
47 |         else:
48 |             self._watchers.remove(watcher)
49 | 
50 |     def on_watcher_deregistered(self, watcher: Watcher):
51 |         """ Custom actions when a watcher unsubscribes """
52 |         pass
53 | 
54 |     async def send_update(self, update: Update,
55 |                           exclude: typing.Callable[[Watcher], bool] | None = None,
56 |                           **kwargs):
57 |         """
58 |         Send an update to all registered watchers.
59 |         exclude: optional callable that filters out subscribers;
60 |         a watcher is skipped when exclude(watcher) returns True.
61 |         """
62 | 
63 |         # for enhanced debugging experience,
64 |         # usable in the non-existent case that kevin has bugs.
65 |         #if isinstance(update, Update):
66 |         #    print(f"{self} => {type(update)}= {update.dump()}")
67 | 
68 |         await self.on_send_update(update, **kwargs)
69 | 
70 |         # copy the watcher set so handlers can add or remove watchers during iteration
71 |         for watcher in self._watchers.copy():
72 |             if exclude and exclude(watcher):
73 |                 continue
74 | 
75 |             await watcher.on_update(update)
76 | 
77 |     async def on_send_update(self, update: Update, **kwargs):
78 |         """ Called when an update is about to be sent """
79 |         pass
-------------------------------------------------------------------------------- /kevin/watcher.py: --------------------------------------------------------------------------------
1 | """
2 | Job watching.
3 | You can receive job updates with a Watcher.
4 | """
5 | 
6 | from __future__ import annotations
7 | 
8 | import typing
9 | 
10 | if typing.TYPE_CHECKING:
11 |     from .update import Update
12 | 
13 | 
14 | class Watcher:
15 |     """
16 |     Abstract event watcher. Gets notified by a Watchable.
17 | 
18 |     Once registered via some_watchable.register_watcher(...),
19 |     each update sent there is then supplied to this watcher.
20 |     """
21 | 
22 |     async def on_update(self, update: Update) -> None:
23 |         """
24 |         Process the update here.
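   | 
   |         Implementations usually dispatch on the concrete Update subclass,
   |         roughly like this (BuildFinished is a made-up example type):
   | 
   |             if isinstance(update, BuildFinished):
   |                 ...  # react to the build result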
25 | """ 26 | pass 27 | -------------------------------------------------------------------------------- /mandy/favicon-error.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SFTtech/kevin-ci/5e56edbb31785dc34315f572c23f53b9e34b130f/mandy/favicon-error.png -------------------------------------------------------------------------------- /mandy/favicon-failure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SFTtech/kevin-ci/5e56edbb31785dc34315f572c23f53b9e34b130f/mandy/favicon-failure.png -------------------------------------------------------------------------------- /mandy/favicon-running.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SFTtech/kevin-ci/5e56edbb31785dc34315f572c23f53b9e34b130f/mandy/favicon-running.png -------------------------------------------------------------------------------- /mandy/favicon-skipped.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SFTtech/kevin-ci/5e56edbb31785dc34315f572c23f53b9e34b130f/mandy/favicon-skipped.png -------------------------------------------------------------------------------- /mandy/favicon-success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SFTtech/kevin-ci/5e56edbb31785dc34315f572c23f53b9e34b130f/mandy/favicon-success.png -------------------------------------------------------------------------------- /mandy/favicon-waiting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SFTtech/kevin-ci/5e56edbb31785dc34315f572c23f53b9e34b130f/mandy/favicon-waiting.png -------------------------------------------------------------------------------- /mandy/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Kevin CI - Mandy 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 23 |
24 | 
25 | 
26 | 
-------------------------------------------------------------------------------- /mandy/jshint.config: --------------------------------------------------------------------------------
1 | {
2 |     "esversion": 6,
3 |     "browser": true,
4 |     "devel": true,
5 |     "strict": "global"
6 | }
7 | 
-------------------------------------------------------------------------------- /mandy/mandy.css: --------------------------------------------------------------------------------
1 | /* basic page layout */
2 | html, body {
3 |     margin: 0;
4 |     padding: 0;
5 | }
6 | 
7 | #sidebar {
8 |     position: fixed;
9 |     overflow-y: scroll;
10 |     height: 100%;
11 |     overflow-x: hidden;
12 |     width: 300px;
13 |     font-family: sans-serif;
14 | }
15 | 
16 | #terminal {
17 |     position: absolute;
18 |     left: 300px;
19 |     padding: 3px;
20 |     width: calc(100% - 300px - 6px);
21 |     min-height: 100%;
22 | }
23 | 
24 | /* style of sidebar elements */
25 | #sidebar a {
26 |     display: block;
27 |     padding: 5px 2px 5px 2px;
28 |     box-sizing: border-box;
29 |     background-color: white;
30 |     border-color: lightgray;
31 |     border-style: solid;
32 |     border-width: 2px 0px 0px 0px;
33 |     color: #202020;
34 | }
35 | 
36 | #sidebar a:link, a:visited {
37 |     text-decoration: none;
38 |     color: #202020;
39 | }
40 | 
41 | #sidebar a:hover, a:active {
42 | }
43 | 
44 | #sidebar .buildsources {
45 |     margin-left: 10px;
46 |     margin-top: 2px;
47 |     margin-bottom: -5px;
48 |     margin-right: -2px;
49 | }
50 | 
51 | #sidebar .buildsource {
52 |     border-width: 2px 0px 0px 2px;
53 |     border-color: lightgray;
54 | }
55 | 
56 | #sidebar .errormessage {
57 |     background-color: red;
58 |     margin-top: 10px;
59 | }
60 | 
61 | #sidebar .job {
62 | }
63 | 
64 | #sidebar .job.inactive {
65 |     opacity: 0.65;
66 | }
67 | 
68 | #sidebar .jobstates {
69 |     margin-left: 10px;
70 |     margin-top: 2px;
71 |     margin-bottom: -5px;
72 |     margin-right: -2px;
73 | }
74 | 
75 | #sidebar .jobstate {
76 |     border-width: 2px 0px 0px 2px;
77 | }
78 | 
79 | #sidebar .outputitems {
80 |     margin-left: 10px;
81 |     margin-top: 2px;
82 |     margin-bottom: -5px;
83 |     margin-right: -2px;
84 | }
85 | 
86 | #sidebar .outputitem {
87 |     background-color: lightblue;
88 |     border-width: 2px 0px 0px 2px;
89 | }
90 | 
91 | /* background colors for sidebar states */
92 | #sidebar .waiting {
93 |     background-color: gray;
94 | }
95 | 
96 | #sidebar .skipped {
97 |     background-color: gray;
98 | }
99 | 
100 | #sidebar .running {
101 |     background-color: #d8c618;
102 | }
103 | 
104 | #sidebar .success {
105 |     background-color: #00b000;
106 | }
107 | 
108 | #sidebar .failure {
109 |     background-color: red;
110 | }
111 | 
112 | #sidebar .error {
113 |     background-color: red;
114 | }
115 | 
-------------------------------------------------------------------------------- /mandy/robots.txt: --------------------------------------------------------------------------------
1 | User-agent: *
2 | Disallow: /
-------------------------------------------------------------------------------- /mandy/terminal.css: --------------------------------------------------------------------------------
1 | .terminal {
2 |     font-family: "Lucida Console", monospace;
3 |     font-size: 12pt;
4 |     color: #aaa;
5 |     background-color: black;
6 |     white-space: pre-wrap;
7 |     word-break: break-all;
8 | }
9 | 
10 | 
11 | /* for use by anchors that were inserted */
12 | .anchor {
13 |     font-family: sans-serif;
14 |     font-size: 10pt;
15 |     color: white;
16 |     background-color: grey;
17 |     border-style: solid;
18 |     border-width: 1px;
19 |     border-color: red;
20 |     visibility: hidden;
21 | }
22 | 
23 | /* the following classes are for use by individual spans in the terminal. */
24 | 
25 | .bold {
26 |     font-weight: bold;
27 |     color: #d7d7d7;
28 | }
29 | 
30 | .italic {
31 |     font-style: italic;
32 |     color: #87af5f;
33 | }
34 | 
35 | .underline {
36 |     text-decoration: underline;
37 |     color: #87afd7;
38 | }
39 | 
40 | .col0 {
41 |     color: black;
42 | }
43 | 
44 | .bgcol0 {
45 |     background-color: black;
46 | }
47 | 
48 | .col1 {
49 |     color: #cd0000;
50 | }
51 | 
52 | .bgcol1 {
53 |     background-color: #cd0000;
54 | }
55 | 
56 | .col2 {
57 |     color: #00cd00;
58 | }
59 | 
60 | .bgcol2 {
61 |     background-color: #00cd00;
62 | }
63 | 
64 | .col3 {
65 |     color: #cdcd00;
66 | }
67 | 
68 | .bgcol3 {
69 |     background-color: #cdcd00;
70 | }
71 | 
72 | .col4 {
73 |     color: #5c5cff;
74 | }
75 | 
76 | .bgcol4 {
77 |     background-color: #5c5cff;
78 | }
79 | 
80 | .col5 {
81 |     color: #cd00cd;
82 | }
83 | 
84 | .bgcol5 {
85 |     background-color: #cd00cd;
86 | }
87 | 
88 | .col6 {
89 |     color: #00cdcd;
90 | }
91 | 
92 | .bgcol6 {
93 |     background-color: #00cdcd;
94 | }
95 | 
96 | .col7 {
97 |     color: #e5e5e5;
98 | }
99 | 
100 | .bgcol7 {
101 |     background-color: #e5e5e5;
102 | }
103 | 
104 | .col0.bold {
105 |     color: #4d4d4d;
106 | }
107 | 
108 | .col1.bold {
109 |     color: red;
110 | }
111 | 
112 | .col2.bold {
113 |     color: lime;
114 | }
115 | 
116 | .col3.bold {
117 |     color: yellow;
118 | }
119 | 
120 | .col4.bold {
121 |     color: #5c5cff;
122 | }
123 | 
124 | .col5.bold {
125 |     color: fuchsia;
126 | }
127 | 
128 | .col6.bold {
129 |     color: aqua;
130 | }
131 | 
132 | .col7.bold {
133 |     color: white;
134 | }
135 | 
-------------------------------------------------------------------------------- /setup.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | from setuptools import setup
4 | 
5 | import os
6 | import glob
7 | 
8 | 
9 | setup(
10 |     name="kevin",
11 |     version="0.7",
12 |     description="Self-hostable continuous integration toolkit",
13 |     long_description=(
14 |         "Components for running a continuous integration service "
15 |         "right on your own servers.\n"
16 |         "Kevin interacts with the Internet, Justin manages the virtual machines "
17 |         "and containers, Chantal executes your build jobs.\n\n"
18 |         "It's designed to interact with a code hosting platform like GitHub "
19 |         "but can easily be extended for others.\n"
20 |         "Pull requests are built in temporary containers that are deleted "
21 |         "after execution."
22 | ), 23 | maintainer="SFT Technologies", 24 | maintainer_email="jj@stusta.net", 25 | url="https://github.com/SFTtech/kevin-ci", 26 | license='AGPL3+', 27 | packages=[ 28 | "kevin", 29 | "kevin.service", 30 | "kevin.simulator", 31 | "chantal", 32 | "chantal.controlfile", 33 | "justin", 34 | "justin.machine", 35 | ], 36 | data_files=[ 37 | ("/usr/lib/systemd/system/", [ 38 | "etc/kevin.service", 39 | "etc/justin.service", 40 | ]), 41 | ("/usr/lib/tmpfiles.d", [ 42 | "etc/tmpfiles.d/kevin.conf", 43 | ]), 44 | ("/etc/kevin", [ 45 | "etc/kevinfile.example", 46 | "etc/kevin.conf.example", 47 | "etc/justin.conf.example", 48 | ]), 49 | ("/etc/kevin/projects", [ 50 | "etc/project.conf.example", 51 | ]), 52 | ("/usr/share/webapps/mandy", 53 | glob.glob(os.path.join(os.path.dirname(__file__), "mandy/*"))), 54 | ], 55 | platforms=[ 56 | 'Linux', 57 | ], 58 | classifiers=[ 59 | ("License :: OSI Approved :: " 60 | "GNU Affero General Public License v3 or later (AGPLv3+)"), 61 | "Topic :: Software Development :: Build Tools", 62 | "Topic :: Software Development :: Testing", 63 | "Topic :: Software Development :: Quality Assurance", 64 | "Topic :: Software Development :: Version Control", 65 | "Topic :: Internet :: WWW/HTTP", 66 | "Intended Audience :: Developers", 67 | "Intended Audience :: Science/Research", 68 | "Environment :: Web Environment", 69 | "Environment :: Console", 70 | "Operating System :: POSIX :: Linux" 71 | ], 72 | ) 73 | --------------------------------------------------------------------------------