#!/usr/bin/env python3

# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
"""
This script is printing the Buildkite pipeline.yml to stdout.
This can also be used as a library to print the steps from a different pipeline
specified as a parameter to the `generate_test_pipeline`.

The pipeline is generated based on the test configuration in
`test_description.json`. The JSON file contains a list of tests to be run by
all rust-vmm components. Each test has a default timeout of 15 minutes.

Some components need to override the default configuration such that they can
access devices while running the tests (for example access to `/dev/kvm`),
access to a temporary volume, and others. Some components may also need to skip
some of the tests. As such, this script supports overriding the following
configurations through environment variables:
- `X86_LINUX_AGENT_TAGS`: overrides the tags by which the x86_64 linux agent is
selected.
- `AARCH64_LINUX_AGENT_TAGS`: overrides the tags by which the aarch64 linux
agent is selected.
- `DOCKER_PLUGIN_CONFIG`: specifies additional configuration for the docker
plugin. For available configuration, please check the
https://github.com/buildkite-plugins/docker-buildkite-plugin.
- `TESTS_TO_SKIP`: specifies a list of tests to be skipped.
- `TIMEOUTS_MIN`: overrides the timeout value for specific tests.

NOTE: The variable `TESTS_TO_SKIP` is specified as a JSON list with the names
of the tests to be skipped. The variable `TIMEOUTS_MIN` is a dictionary where
each key is the name of a test and each value is the number of minutes for the
timeout. The other variables are specified as dictionaries, where the first key
is `tests` and its value is a list of test names where the configuration should
be applied; the second key is `cfg` and its value is a dictionary with the
actual configuration.

Examples of a valid configuration:
```shell
TESTS_TO_SKIP='["commit-format"]'
DOCKER_PLUGIN_CONFIG='{
    "tests": ["coverage"],
    "cfg": {
        "devices": [ "/dev/vhost-vdpa-0" ],
        "privileged": true
    }
}'
TIMEOUTS_MIN='{"style": 30}'
```
"""

import copy
import json
import os
import pathlib
import sys

from argparse import ArgumentParser, RawTextHelpFormatter
from textwrap import dedent

# This represents the version of the rust-vmm-container used
# for running the tests.
CONTAINER_VERSION = "g825a7ab"
# The suffix suggests that the dev image with `v{N}-riscv` tag is not to be
# confused with a real `riscv64` image (it's actually a `x86_64` image with
# `qemu-system-riscv64` installed), since AWS does not yet have `riscv64`
# machines available.
CONTAINER_VERSION_RISCV = CONTAINER_VERSION + "-riscv"
# This represents the version of the Buildkite Docker plugin.
DOCKER_PLUGIN_VERSION = "v5.3.0"

X86_AGENT_TAGS = os.getenv("X86_LINUX_AGENT_TAGS")
AARCH64_AGENT_TAGS = os.getenv("AARCH64_LINUX_AGENT_TAGS")
DOCKER_PLUGIN_CONFIG = os.getenv("DOCKER_PLUGIN_CONFIG")
TESTS_TO_SKIP = os.getenv("TESTS_TO_SKIP")
TIMEOUTS_MIN = os.getenv("TIMEOUTS_MIN")
# This env allows setting the hypervisor on which the tests are running at the
# pipeline level. This will not override the hypervisor tag in case one is
# already specified in the test definition.
# Most of the repositories don't really need to run on KVM per se, but we are
# experiencing some timeouts mostly with the mshv hosts right now, and we are
# fixing the default to kvm to work around that problem.
# More details here: https://github.com/rust-vmm/community/issues/137
DEFAULT_AGENT_TAG_HYPERVISOR = os.getenv("DEFAULT_AGENT_TAG_HYPERVISOR", "kvm")

BUILDKITE_PATH = pathlib.Path(__file__).parent.resolve()


class BuildkiteStep:
    """
    This builds a Buildkite step according to a json configuration and the
    environment variables `X86_LINUX_AGENT_TAGS`, `AARCH64_LINUX_AGENT_TAGS`,
    `DOCKER_PLUGIN_CONFIG`, `TESTS_TO_SKIP` and `TIMEOUTS_MIN`.
    The output is a dictionary.
    """

    def __init__(self):
        """
        Initialize a Buildkite step with default values.
        """
        # Default values.
        # The order in which the keys are initialized is the same as the
        # order in which the keys will appear in the YAML file, because Python
        # dictionaries are ordered. For readability reasons, this order should
        # not be changed.
        self.step_config = {
            "label": None,
            "command": None,
            "retry": {"automatic": False},
            "agents": {"os": "linux"},
            "plugins": [
                {
                    f"docker#{DOCKER_PLUGIN_VERSION}": {
                        "image": f"rustvmm/dev:{CONTAINER_VERSION}",
                        "always-pull": True,
                    }
                }
            ],
            "timeout_in_minutes": 15,
        }

    def _set_platform(self, platform):
        """Set platform if given in the json input."""

        if platform:
            # We need to change `aarch64` to `arm` because of the way we are
            # setting the tags on the host.
            if platform == "aarch64":
                platform = "arm"
            self.step_config["agents"]["platform"] = f"{platform}.metal"

    def _set_hypervisor(self, hypervisor):
        """Set hypervisor if given in the json input and supported."""
        supported_hypervisors = ["kvm", "mshv"]
        if hypervisor and hypervisor in supported_hypervisors:
            self.step_config["agents"]["hypervisor"] = hypervisor

    def _set_conditional(self, conditional):
        """Set conditional (Buildkite `if` attribute) if given in the json input."""

        if conditional:
            self.step_config["if"] = conditional

    def _set_timeout_in_minutes(self, timeout):
        """Set the timeout if given in the json input."""
        if timeout:
            self.step_config["timeout_in_minutes"] = timeout

    def _set_agent_queue(self, queue):
        """Set the agent queue if provided in the json input."""
        if queue:
            self.step_config["agents"]["queue"] = queue

    def _add_docker_config(self, cfg):
        """Add configuration for the docker plugin if given in the json input."""

        if cfg:
            target = self.step_config["plugins"][0][f"docker#{DOCKER_PLUGIN_VERSION}"]
            for key, val in cfg.items():
                target[key] = val

    def _env_change_config(self, test_name, env_var, target, override=False):
        """
        Helper function to add to/override configuration of `target`
        if `env_var` is set and this test appears in its list.

        `env_var` is the raw (JSON) value of the environment variable, not
        its name. It must contain a `tests` list and a `cfg` dictionary.
        """

        if env_var:
            env_cfg = json.loads(env_var)

            tests = env_cfg.get("tests")
            assert tests, f"Configuration {env_var} is missing the `tests` key."

            cfg = env_cfg.get("cfg")
            assert cfg, f"Configuration {env_var} is missing the `cfg` key."

            if test_name in tests:
                if override:
                    target.clear()
                for key, val in cfg.items():
                    target[key] = val

    def _env_override_agent_tags(self, test_name):
        """
        Override the tags by which the linux agent is selected
        using the `X86_LINUX_AGENT_TAGS` and `AARCH64_LINUX_AGENT_TAGS`
        environment variables.
        """

        env_var = None
        platform = self.step_config["agents"].get("platform")

        # Since the platform is optional, only override the config if the
        # platform was provided.
        if platform:
            if platform == "x86_64.metal" and X86_AGENT_TAGS:
                env_var = X86_AGENT_TAGS
            if platform == "arm.metal" and AARCH64_AGENT_TAGS:
                env_var = AARCH64_AGENT_TAGS

        target = self.step_config["agents"]
        self._env_change_config(test_name, env_var, target, override=True)

    def _env_add_docker_config(self, test_name):
        """
        Specify additional configuration for the docker plugin using the
        `DOCKER_PLUGIN_CONFIG` environment variable.
        """

        target = self.step_config["plugins"][0][f"docker#{DOCKER_PLUGIN_VERSION}"]
        self._env_change_config(test_name, DOCKER_PLUGIN_CONFIG, target)

    def _env_override_timeout(self, test_name):
        """
        Override the step timeout using the `TIMEOUTS_MIN` environment
        variable (a JSON dictionary mapping test names to minutes).
        """
        if TIMEOUTS_MIN:
            timeouts_min = json.loads(TIMEOUTS_MIN)
            if test_name in timeouts_min:
                # Bug fix: the timeout must be written into the step
                # configuration dictionary (which is what gets dumped to
                # YAML); previously it was stored in an unused instance
                # attribute so the override silently never applied.
                self.step_config["timeout_in_minutes"] = timeouts_min[test_name]

    def build(self, input):
        """
        Build a Buildkite step using the `input` configuration that must
        specify some mandatory keys (`test_name`, `command`) and can also
        provide optional ones (`platform`, `hypervisor`, `docker_plugin`,
        `conditional`, `timeout_in_minutes`, `queue`).
        Further configuration from environment variables may be added.
        Returns the step as a dictionary.
        """

        test_name = input.get("test_name")
        command = input.get("command")
        platform = input.get("platform")
        hypervisor = input.get("hypervisor")
        docker = input.get("docker_plugin")
        conditional = input.get("conditional")
        timeout = input.get("timeout_in_minutes")
        queue = input.get("queue")

        # Mandatory keys.
        assert test_name, "Step is missing test name."
        platform_string = f"-{platform}" if platform else ""
        self.step_config["label"] = f"{test_name}{platform_string}"

        assert command, "Step is missing command."
        if "{target_platform}" in command:
            assert platform, "Command requires platform, but platform is missing."
            command = command.replace("{target_platform}", platform)
        # Modify command and tag name for `riscv64` CI.
        if platform == "riscv64":
            # Wrap command with '' to avoid escaping early by `ENTRYPOINT`.
            command = json.dumps(command)
            # Overwrite image tag for riscv64 platform CI.
            self.step_config["plugins"][0][f"docker#{DOCKER_PLUGIN_VERSION}"][
                "image"
            ] = f"rustvmm/dev:{CONTAINER_VERSION_RISCV}"
            # Since we are using qemu-system inside a x86_64 container, we
            # should set `platform` field to x86_64 and unset the hypervisor
            # to be passed.
            platform = "x86_64"
            hypervisor = ""
        self.step_config["command"] = command

        # Optional keys.
        self._set_platform(platform)
        self._set_hypervisor(hypervisor)
        self._set_conditional(conditional)
        self._add_docker_config(docker)
        self._set_timeout_in_minutes(timeout)
        self._set_agent_queue(queue)

        # Override/add configuration from environment variables.
        self._env_override_agent_tags(test_name)
        self._env_add_docker_config(test_name)
        self._env_override_timeout(test_name)

        # We're now adding the keys for which we don't have explicit support
        # (i.e. there is no checking/updating taking place). We are just
        # forwarding the key, values without any change.
        # We need to filter for keys that have special meaning and which we
        # don't want to re-add.
        special_keys = [
            "conditional",
            "docker_plugin",
            "platform",
            "test_name",
            "queue",
            "hypervisor",
        ]
        additional_keys = {
            k: v
            for k, v in input.items()
            if k not in self.step_config and k not in special_keys
        }
        if additional_keys:
            self.step_config.update(additional_keys)

        # Return the step configuration as a dictionary.
        return self.step_config


class BuildkiteConfig:
    """
    This builds the final Buildkite configuration from the json input
    using BuildkiteStep objects. The output is a dictionary that can
    be put into yaml format by the pyyaml package.
    """

    def __init__(self):
        # The generated configuration; populated by `build`.
        self.bk_config = None

    def build(self, input, platform_allowlist):
        """
        Build the final Buildkite configuration from the json input.
        Tests listed in `TESTS_TO_SKIP` and platforms not present in
        `platform_allowlist` are skipped. Returns the configuration as a
        dictionary with a single `steps` key.
        """

        self.bk_config = {"steps": []}
        tests = input.get("tests")
        assert tests, "Input is missing list of tests."

        for test in tests:
            platforms = test.get("platform")
            test_name = test.get("test_name")

            if TESTS_TO_SKIP:
                tests_to_skip = json.loads(TESTS_TO_SKIP)
                if test_name in tests_to_skip:
                    continue

            # The platform is optional. When it is not specified, we don't add
            # it to the step so that we can run the test in any environment.
            if not platforms:
                platforms = [None]

            for platform in platforms:
                # Filter for tests enabled in platform_allowlist.
                if platform is not None and platform not in platform_allowlist:
                    # Skip disabled platform.
                    continue

                step_input = copy.deepcopy(test)
                step_input["platform"] = platform
                if not step_input.get("hypervisor"):
                    step_input["hypervisor"] = DEFAULT_AGENT_TAG_HYPERVISOR

                step = BuildkiteStep()
                step_output = step.build(step_input)
                self.bk_config["steps"].append(step_output)

        # Return the configuration as a dictionary.
        return self.bk_config


def determine_allowlist(config_file):
    """
    Determine which platforms should be enabled for this crate.

    Reads one platform name per line from `config_file` (the `.platform`
    dotfile); blank lines are ignored. Falls back to the default platforms
    if the file is missing or unreadable.
    """

    try:
        with open(config_file, "r") as file:
            return [line.strip() for line in file if line.strip()]
    except OSError:
        # Fall back to the default platforms if anything goes wrong.
        return ["x86_64", "aarch64"]


def generate_pipeline(config_file, platform_allowlist):
    """Generate the pipeline yaml file from a json configuration file."""

    # Imported locally so the rest of this module can be used (and tested)
    # without PyYAML installed; only this function needs it.
    import yaml

    with open(config_file) as json_file:
        json_cfg = json.load(json_file)

    config = BuildkiteConfig()
    output = config.build(json_cfg, platform_allowlist)
    yaml.dump(output, sys.stdout, sort_keys=False)


if __name__ == "__main__":
    help_text = dedent(
        """
        This script supports overriding the following configurations through
        environment variables:
        - X86_LINUX_AGENT_TAGS: overrides the tags by which the x86_64 linux
        agent is selected.
        - AARCH64_LINUX_AGENT_TAGS: overrides the tags by which the aarch64
        linux agent is selected.
        - DOCKER_PLUGIN_CONFIG: specifies additional configuration for the
        docker plugin. For available configuration, please check
        https://github.com/buildkite-plugins/docker-buildkite-plugin.
        - TESTS_TO_SKIP: specifies a list of tests to be skipped.
        - TIMEOUTS_MIN: overrides the timeout value for specific tests.
        """
    )
    parser = ArgumentParser(description=help_text, formatter_class=RawTextHelpFormatter)
    # By default we're generating the rust-vmm-ci pipeline with the test
    # configuration committed to this repository.
    # This parameter is useful for generating the pipeline for repositories
    # that have custom pipelines, and it helps with keeping the container
    # version the same across pipelines.
    parser.add_argument(
        "-t",
        "--test-description",
        metavar="JSON_FILE",
        help="The path to the JSON file containing the test" " description for the CI.",
        default=f"{BUILDKITE_PATH}/test_description.json",
    )
    parser.add_argument(
        "-p",
        "--platform-allowlist",
        metavar="PLATFORM_DOT_FILE",
        help=(
            "The path to the dotfile containing platforms the crate's CI should run on.\n"
            "If the file does not exist, falls back to default `platform_allowlist` (x86_64 and aarch64).\n"
            "The dotfile contains strings of architectures to be enabled separated by\n"
            "newlines."
        ),
        default=f"{os.getcwd()}/.platform",
    )
    args = parser.parse_args()
    platform_allowlist = determine_allowlist(args.platform_allowlist)
    generate_pipeline(args.test_description, platform_allowlist)
true 68 | } 69 | }, 70 | { 71 | "test_name": "clippy", 72 | "command": "cargo clippy --workspace --bins --examples --benches --all-features --all-targets -- -D warnings -D clippy::undocumented_unsafe_blocks", 73 | "platform": [ 74 | "x86_64", 75 | "aarch64", 76 | "riscv64" 77 | ] 78 | }, 79 | { 80 | "test_name": "check-warnings", 81 | "command": "RUSTFLAGS=\"-D warnings\" cargo check --all-targets --all-features --workspace", 82 | "platform": [ 83 | "x86_64", 84 | "aarch64", 85 | "riscv64" 86 | ] 87 | }, 88 | { 89 | "test_name": "coverage", 90 | "command": "pytest $(find . -type f -name \"test_coverage.py\")", 91 | "docker_plugin": { 92 | "privileged": true 93 | }, 94 | "platform": [ 95 | "x86_64" 96 | ] 97 | }, 98 | { 99 | "test_name": "commit-format", 100 | "command": "pytest $(find . -type f -name \"test_commit_format.py\")", 101 | "docker_plugin": { 102 | "propagate-environment": true 103 | } 104 | }, 105 | { 106 | "test_name": "cargo-audit", 107 | "command": "[ -e Cargo.lock ] || cargo generate-lockfile; cargo audit -q --deny warnings", 108 | "platform": [ 109 | "x86_64" 110 | ] 111 | } 112 | ] 113 | } 114 | -------------------------------------------------------------------------------- /.github/workflows/black.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | pull_request: 4 | 5 | jobs: 6 | black: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v3 10 | 11 | - name: Install black 12 | run: pip install black 13 | 14 | - name: Run black 15 | run: black . 
--check 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | 3 | -------------------------------------------------------------------------------- /.platform: -------------------------------------------------------------------------------- 1 | x86_64 2 | aarch64 3 | riscv64 -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @andreeaflorescu @gsserge @lauralt @sameo @roypat @ShadowCurse 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-vmm-ci" 3 | version = "0.1.0" 4 | 5 | [dependencies] 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rust-vmm-ci 2 | 3 | The `rust-vmm-ci` repository contains [integration tests](#integration-tests) 4 | and [Buildkite pipeline](#buildkite-pipeline) definitions that are used for 5 | running the CI for all rust-vmm crates. 6 | 7 | CI tests are executed on the container image maintained at [rust-vmm/rust-vmm-container repo](https://github.com/rust-vmm/rust-vmm-container) with builds available on [Docker Hub](https://hub.docker.com/r/rustvmm/dev/tags). 8 | 9 | Having a centralized place for the tests is one of the enablers for keeping the 10 | same quality standard for all crates in rust-vmm. 11 | 12 | ## Getting Started with rust-vmm-ci 13 | 14 | To run the integration tests defined in the pipeline as part of the CI: 15 | 16 | 1. Add rust-vmm-ci as a git submodule to your repository 17 | 18 | ```bash 19 | # Add rust-vmm-ci as a submodule. This will point to the latest rust-vmm-ci 20 | # commit from the main branch. 
The following command will also add a 21 | # `.gitmodules` file and the `rust-vmm-ci` to the index. 22 | git submodule add https://github.com/rust-vmm/rust-vmm-ci.git 23 | # Commit the changes to your repository so that the CI can run using the 24 | # rust-vmm-ci pipeline and tests. 25 | git commit -s -m "Added rust-vmm-ci as submodule" 26 | ``` 27 | 28 | 2. Create the coverage test configuration file named 29 | `coverage_config_ARCH.json` in the root of the repository, where `ARCH` is the 30 | architecture of the machine. 31 | There are two coverage test configuration files, one per each platform. 32 | The example of the configuration file for the `x86_64` architecture can be 33 | found in 34 | [coverage_config_x86_64.json.sample](coverage_config_x86_64.json.sample), 35 | and the example of the configuration file for the `aarch64` architecture can be 36 | found in 37 | [coverage_config_aarch64.json.sample](coverage_config_aarch64.json.sample). 38 | 39 | The json must have the following fields: 40 | 41 | - `coverage_score`: The coverage of the repository. 42 | - `exclude_path`: This field is used for excluding files from the report. 43 | File paths that match the given regular expression are skipped. (for example, 44 | if multiple files are to be skipped, they must be separated with `|`). 45 | It should be used to exclude autogenerated files. If the repository does not 46 | have any autogenerated files, `exclude_path` should be an empty string. 47 | 48 | Additionally, the following optional fields are available: 49 | 50 | - `crate_features`: By default, we pass `--all-features` to cargo when collecting 51 | coverage. To have the coverage report only include a subset of features (in addition 52 | to default features, which are always enabled), specify them in this field 53 | as a comma-separated string. 54 | 55 | This file is required for the coverage integration so it needs to be added 56 | to the repository as well. 57 | 58 | 3. 
Copy one of the two provided dependabot configurations to `.github/dependabot.yml`, 59 | e.g. run `cp rust-vmm-ci/dependabot-{weekly,monthly}.yml .github/dependabot.yml`. 60 | Note that just symlinking the file does not work, as dependabot will not 61 | follow symlinks into submodules. This means that updates to these files made in 62 | rust-vmm-ci will need to be manually consumed for now. We recommend setting up 63 | weekly dependabot updates only if the crate receives multiple contributions a week, 64 | and if you expect to have the bandwidth to address weekly dependency PRs. 65 | 66 | 4. Create a new pipeline definition in Buildkite. For this step ask one of the 67 | rust-vmm Buildkite [admins](CODEOWNERS) to create one for you. The process is explained 68 | [here](https://github.com/rust-vmm/community/blob/main/docs/setup_new_repo.md#set-up-ci). 69 | 70 | 5. There is a script that autogenerates a dynamic Buildkite pipeline. Each step 71 | in the pipeline has a default timeout of 5 minutes. To run the CI using this dynamic pipeline, 72 | you need to add a step that is uploading the rust-vmm-ci pipeline: 73 | ```bash 74 | ./rust-vmm-ci/.buildkite/autogenerate_pipeline.py | buildkite-agent pipeline upload 75 | ``` 76 | This allows overriding some values and extending others through environment 77 | variables. 78 | - `X86_LINUX_AGENT_TAGS`: overrides the tags by which the x86_64 linux agent is 79 | selected; the default values are 80 | 81 | `{"os": "linux", "platform": "x86.metal"}` 82 | - `AARCH64_LINUX_AGENT_TAGS`: overrides the tags by which the aarch64 linux 83 | agent is selected. The default values are 84 | 85 | `{"os": "linux", "platform": "arm.metal"}` 86 | - `DOCKER_PLUGIN_CONFIG`: specifies additional configuration for the docker 87 | plugin. For available configuration, please check the 88 | https://github.com/buildkite-plugins/docker-buildkite-plugin. 89 | - `TESTS_TO_SKIP`: specifies a list of tests to be skipped. 
90 | - `TIMEOUTS_MIN`: overrides the timeout value for specific tests. 91 | - `DEFAULT_AGENT_TAG_HYPERVISOR`: sets the hypervisor on which all the tests in 92 | the pipeline run. By default, the selected hypervisor is KVM because the 93 | hosts running KVM at the time of this change showed better performance and 94 | experienced timeouts less often. NOTE: This will not override the hypervisor 95 | defined at the test step level. If a test already defines a hypervisor tag 96 | that will remain intact. 97 | 98 | The variable `TESTS_TO_SKIP` is specified as a JSON list with the names 99 | of the tests to be skipped. The variable `TIMEOUTS_MIN` is a dictionary where 100 | each key is the name of a test and each value is the number of minutes for the 101 | timeout. The other variables are specified as dictionaries, where the first key 102 | is `tests` and its value is a list of test names where the configuration should 103 | be applied; the second key is `cfg` and its value is a dictionary with the 104 | actual configuration. 105 | 106 | For example, we can skip the test `commit-format`, have a timeout of 30 minutes 107 | for the test `style` and extend the docker plugin specification as follows: 108 | ```shell 109 | TESTS_TO_SKIP='["commit-format"]' TIMEOUTS_MIN='{"style": 30}' DOCKER_PLUGIN_CONFIG='{ 110 | "tests": ["coverage"], 111 | "cfg": { 112 | "devices": [ "/dev/vhost-vdpa-0" ], 113 | "privileged": true 114 | } 115 | }' ./rust-vmm-ci/.buildkite/autogenerate_pipeline.py | buildkite-agent pipeline upload 116 | ``` 117 | 118 | For most use cases, overriding or extending the configuration is not necessary. We may 119 | want to do so if, for example, the platform needs a custom device that is not available 120 | on the existing test instances or if we need a specialized hypervisor. 121 | 122 | 6. Tests will be running on `x86_64` and `aarch64` platforms by default. To change 123 | this, e.g. 
to enable other experimental platforms like `riscv64`, a `.platform` 124 | file can be included in the repository root. This file documents what platforms 125 | are to be enabled for the repository. 126 | 127 | If `.platform` file is provided, it will be strictly observed. In `.platform` 128 | file, each platform are separated by newline character. Currently, we support 129 | `x86_64`, `aarch64` and `riscv64` platforms. 130 | 131 | For example, we can enable tests to be run on `riscv64` platform in addition to 132 | `x86_64` and `aarch64` by: 133 | ``` 134 | x86_64 135 | aarch64 136 | riscv64 137 | ``` 138 | 139 | 7. The code owners of the repository will have to setup a WebHook for 140 | triggering the CI on 141 | [pull request](https://developer.github.com/v3/activity/events/types/#pullrequestevent) 142 | and [push](https://developer.github.com/v3/activity/events/types/#pushevent) 143 | events. 144 | 145 | ## Buildkite Pipeline 146 | 147 | The [Buildkite](https://buildkite.com) pipeline is the definition of tests to 148 | be run as part of the CI. It includes steps for running unit tests and linters 149 | (including coding style checks), and computing the coverage. 150 | 151 | Currently the tests can run on Linux `x86_64` and `aarch64` hosts. 152 | 153 | Example of step that checks the build: 154 | 155 | ```yaml 156 | steps: 157 | - label: build-gnu-x86_64 158 | command: cargo build --release 159 | retry: 160 | automatic: false 161 | agents: 162 | os: linux 163 | platform: x86_64.metal 164 | plugins: 165 | - docker#v3.8.0: 166 | image: rustvmm/dev:v16 167 | always-pull: true 168 | timeout_in_minutes: 5 169 | ``` 170 | 171 | To see all steps in the pipeline check the output of the 172 | [.buildkite/autogenerate_pipeline.py](.buildkite/autogenerate_pipeline.py) script. 173 | 174 | ### Custom Pipeline 175 | 176 | Some crates might need to test functionality that is specific to that 177 | particular component and thus cannot be added to the common pipeline. 
178 | 179 | In this situation, the repositories need to create a JSON file with a custom 180 | test configuration. The preferred path is `.buildkite/custom-tests.json`. 181 | 182 | For example to test the build with one non-default 183 | [feature](https://doc.rust-lang.org/1.19.0/book/first-edition/conditional-compilation.html) 184 | enabled, the following configuration can be added: 185 | 186 | ```json 187 | { 188 | "tests": [ 189 | { 190 | "test_name": "build-bzimage", 191 | "command": "cargo build --release --features bzimage", 192 | "platform": [ 193 | "x86_64" 194 | ] 195 | } 196 | ] 197 | } 198 | ``` 199 | 200 | To run this custom pipeline, you need to add a step that is uploading it in Buildkite. The same 201 | script that autogenerates the main pipeline can be used with the option 202 | `-t PATH_TO_CUSTOM_CONFIGURATION`: 203 | 204 | ```bash 205 | ./rust-vmm-ci/.buildkite/autogenerate_pipeline.py -t .buildkite/custom-tests.json | buildkite-agent pipeline upload 206 | ``` 207 | 208 | ## Integration Tests 209 | 210 | In addition to the one-liner tests defined in the 211 | [Buildkite Pipeline](#buildkite-pipeline), the rust-vmm-ci also has more 212 | complex tests defined in [integration_tests](integration_tests). 213 | 214 | ### Test Profiles 215 | 216 | The integration tests support two test profiles: 217 | 218 | - **devel**: this is the recommended profile for running the integration tests 219 | on a local development machine. 220 | - **ci** (default option): this is the profile used when running the 221 | integration tests as part of the Continuous Integration (CI). 222 | 223 | The test profiles are applicable to 224 | [`pytest`](https://docs.pytest.org/en/latest/), the integration test framework 225 | used with rust-vmm-ci. Currently only the 226 | [coverage test](tests/test_coverage.py) follows this model as all the other 227 | integration tests are run using the Buildkite pipeline. 
228 | 229 | The difference between is declaring tests as passed or failed: 230 | 231 | - with the **devel** profile the coverage test passes if the current coverage 232 | is equal or higher than the upstream coverage value. In case the current 233 | coverage is higher, the coverage file is updated to the new coverage value. 234 | - with the **ci** profile the coverage test passes only if the current coverage 235 | is equal to the upstream coverage value. 236 | 237 | Further details about the coverage test can be found in the 238 | [Adaptive Coverage](#adaptive-coverage) section. 239 | 240 | ### Adaptive Coverage 241 | 242 | The line coverage is saved in [tests/coverage](tests/coverage). To update the 243 | coverage before submitting a PR, run the coverage test: 244 | 245 | ```bash 246 | CRATE="kvm-ioctls" 247 | # NOTE: This might not be the latest container version, you can check which one we're using 248 | # by looking into the .buildkite/autogenerate_pipeline.py file. 249 | LATEST=16 250 | docker run --device=/dev/kvm \ 251 | -it \ 252 | --security-opt seccomp=unconfined \ 253 | --volume $(pwd)/${CRATE}:/${CRATE} \ 254 | rustvmm/dev:v${LATEST} 255 | cd ${crate} 256 | pytest --profile=devel rust-vmm-ci/integration_tests/test_coverage.py 257 | ``` 258 | 259 | If the PR coverage is higher than the upstream coverage, the coverage file 260 | needs to be manually added to the commit before submitting the PR: 261 | 262 | ```bash 263 | git add tests/coverage 264 | ``` 265 | 266 | Failing to do so will generate a fail on the CI pipeline when publishing the 267 | PR. 268 | 269 | **NOTE:** The coverage file is only updated in the `devel` test profile. In 270 | the `ci` profile the coverage test will fail if the current coverage is higher 271 | than the coverage reported in [tests/coverage](tests/coverage). 
272 | 273 | ### Performance tests 274 | 275 | `rust-vmm-ci` includes an integration test that can run a battery of 276 | benchmarks at every pull request, comparing the results with the tip of the 277 | upstream `main` branch. The test is not included in the default Buildkite 278 | pipeline. Each crate that requires the test to be run as part of the CI must 279 | add a [custom pipeline](#custom-pipeline). 280 | 281 | An example of a pipeline that runs the test for ARM platforms and prints the 282 | results: 283 | 284 | ```yaml 285 | steps: 286 | - label: bench-aarch64 287 | command: pytest rust-vmm-ci/integration_tests/test_benchmark.py -s 288 | retry: 289 | automatic: false 290 | agents: 291 | os: linux 292 | platform: arm.metal 293 | plugins: 294 | - docker#v3.8.0: 295 | image: rustvmm/dev:v16 296 | always-pull: true 297 | ``` 298 | 299 | The test requires [`criterion`](https://github.com/bheisler/criterion.rs) 300 | benchmarks to be exported by the crate. The test expects the entry point 301 | into the performance benchmarks to be named `main`. In other words, the 302 | following configuration is expected in `Cargo.toml`: 303 | 304 | ```toml 305 | [[bench]] 306 | name = "main" 307 | ``` 308 | 309 | All benchmarks need to be collected in a main.rs file placed in `benches/`. 310 | 311 | `criterion` collects performance results by running a function for a 312 | user-configured number of iterations, timing the runs, and applying statistics. 313 | The individual benchmark tests must be added in the crate. They can be run 314 | outside the CI with: 315 | 316 | ```bash 317 | cargo bench [--all-features] OR [--features ] 318 | ``` 319 | 320 | `rust-vmm-ci` uses [`critcmp`](https://github.com/BurntSushi/critcmp) to 321 | compare the results yielded by `cargo bench --all-features` on the PR being 322 | tested with those from the tip of the upstream `main` branch. 
The test 323 | runs `cargo bench` twice, once on the current `HEAD`, then again after 324 | `git checkout origin/main`. `critcmp` takes care of the comparison, making 325 | use of `criterion`'s stable format for 326 | [output files](https://bheisler.github.io/criterion.rs/book/user_guide/csv_output.html). 327 | The results are printed to `stdout` and can be visually inspected in the 328 | pipeline output. In its present form, the test cannot fail. 329 | 330 | To run the test locally: 331 | 332 | ```bash 333 | docker run --device=/dev/kvm \ 334 | -it \ 335 | --security-opt seccomp=unconfined \ 336 | --volume $(pwd)/${CRATE}:/${CRATE} \ 337 | rustvmm/dev:v${LATEST} 338 | cd ${CRATE} 339 | pytest rust-vmm-ci/integration_tests/test_benchmark.py -s 340 | ``` 341 | 342 | Note that performance is highly dependent on the underlying platform that the 343 | tests are running on. The raw numbers obtained are likely to differ from their 344 | counterparts on a CI instance. 345 | 346 | ### Running the tests locally 347 | To run the integration tests locally, you can run the following from the crate you need to test. 348 | You can find the latest container version in the 349 | [script](.buildkite/autogenerate_pipeline.py) 350 | that autogenerates the pipeline. For example: 351 | ```bash 352 | cd ~/vm-superio 353 | CRATE="vm-superio" 354 | # NOTE: This might not be the latest container version, you can check which one we're using 355 | # by looking into the .buildkite/autogenerate_pipeline.py file. 356 | LATEST=16 357 | docker run -it \ 358 | --security-opt seccomp=unconfined \ 359 | --volume $(pwd):/${CRATE} \ 360 | --volume ~/.ssh:/root/.ssh \ 361 | rustvmm/dev:v${LATEST} 362 | cd vm-superio 363 | ./rust-vmm-ci/test_run.py 364 | ``` 365 | 366 | Known issues: 367 | - When running the `cargo-audit` test, the following error may occur: 368 | ``` 369 | test_cargo-audit (__main__.TestsContainer) ... 
error: couldn’t fetch advisory database: git operation failed: reference ‘refs/heads/main’ not found; class=Reference (4); code=NotFound (-3) 370 | ``` 371 | A fix for this is to remove `~/.cargo/advisory-db` in the container, and then rerun `test_run.py`: 372 | ``` 373 | rm -rf ~/.cargo/advisory-db 374 | ./rust-vmm-ci/test_run.py 375 | ``` 376 | -------------------------------------------------------------------------------- /coverage_config_aarch64.json.sample: -------------------------------------------------------------------------------- 1 | { 2 | "coverage_score": 90, 3 | "exclude_path": "my_autogenerated_file\\.rs|path/to/my_folder", 4 | "crate_features": "my_dummy_feature,dummy_feature2" 5 | } 6 | -------------------------------------------------------------------------------- /coverage_config_x86_64.json: -------------------------------------------------------------------------------- 1 | { 2 | "coverage_score": 100.0, 3 | "exclude_path": "", 4 | "crate_features": "" 5 | } 6 | -------------------------------------------------------------------------------- /coverage_config_x86_64.json.sample: -------------------------------------------------------------------------------- 1 | { 2 | "coverage_score": 90, 3 | "exclude_path": "my_autogenerated_file\\.rs|path/to/my_folder", 4 | "crate_features": "my_dummy_feature,dummy_feature2" 5 | } 6 | -------------------------------------------------------------------------------- /dependabot-monthly.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | 4 | # A monthly update of the rust-vmm-ci submodule 5 | - package-ecosystem: gitsubmodule 6 | directory: "/" 7 | schedule: 8 | interval: monthly 9 | open-pull-requests-limit: 1 10 | 11 | # A monthly update to rust dependencies. These will be grouped, 12 | # e.g. one PR will contains updates for all dependencies. 
13 | - package-ecosystem: cargo 14 | directory: "/" 15 | schedule: 16 | interval: monthly 17 | open-pull-requests-limit: 1 18 | # Make it also update transitive dependencies in Cargo.lock 19 | allow: 20 | - dependency-type: "all" 21 | # Group all available updates into a group called "rust-dependencies" 22 | groups: 23 | rust-dependencies: 24 | patterns: 25 | - "*" 26 | -------------------------------------------------------------------------------- /dependabot-weekly.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | 4 | # A weekly update of the rust-vmm-ci submodule 5 | - package-ecosystem: gitsubmodule 6 | directory: "/" 7 | schedule: 8 | interval: weekly 9 | day: monday 10 | open-pull-requests-limit: 1 11 | 12 | # A weekly update to rust dependencies. These will be grouped, 13 | # e.g. one PR will contains updates for all dependencies. 14 | - package-ecosystem: cargo 15 | directory: "/" 16 | schedule: 17 | interval: weekly 18 | day: monday 19 | open-pull-requests-limit: 1 20 | # Make it also update transitive dependencies in Cargo.lock 21 | allow: 22 | - dependency-type: "all" 23 | # Group all available updates into a group called "rust-dependencies" 24 | groups: 25 | rust-dependencies: 26 | patterns: 27 | - "*" 28 | -------------------------------------------------------------------------------- /integration_tests/conftest.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | # SPDX-License-Identifier: Apache-2.0 3 | import pytest 4 | 5 | 6 | PROFILE_CI = "ci" 7 | PROFILE_DEVEL = "devel" 8 | 9 | WORKSPACE = "workspace" 10 | CRATE = "crate" 11 | 12 | 13 | def pytest_addoption(parser): 14 | parser.addoption( 15 | "--profile", 16 | default=PROFILE_CI, 17 | choices=[PROFILE_CI, PROFILE_DEVEL], 18 | help="Profile for running the test: {} or {}".format(PROFILE_CI, PROFILE_DEVEL), 19 | ) 20 | parser.addoption( 21 | "--no-cleanup", 22 | action="store_true", 23 | default=False, 24 | help="Keep the coverage report in `kcov_output` directory. If this " 25 | "flag is not provided, both coverage related directories are " 26 | "removed.", 27 | ) 28 | 29 | parser.addoption( 30 | "--test-scope", 31 | default=WORKSPACE, 32 | choices=[WORKSPACE, CRATE], 33 | help="Defines the scope of running tests: {} or {}".format(WORKSPACE, CRATE), 34 | ) 35 | 36 | 37 | @pytest.fixture 38 | def profile(request): 39 | return request.config.getoption("--profile") 40 | 41 | 42 | @pytest.fixture 43 | def no_cleanup(request): 44 | return request.config.getoption("--no-cleanup") 45 | 46 | 47 | @pytest.fixture 48 | def test_scope(request): 49 | return request.config.getoption("--test-scope") 50 | 51 | 52 | # This is used for defining global variables in pytest. 53 | def pytest_configure(): 54 | # These constants are needed in tests, so this is the way that we can 55 | # export them. 56 | pytest.profile_ci = PROFILE_CI 57 | pytest.profile_devel = PROFILE_DEVEL 58 | pytest.workspace = WORKSPACE 59 | pytest.crate = CRATE 60 | -------------------------------------------------------------------------------- /integration_tests/test_benchmark.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | """ 4 | Compare benchmark results before and after a pull request. 
5 | 6 | This test works properly on the local machine only when the environment 7 | variables REMOTE and BASE_BRANCH are set. Otherwise the default values 8 | are "origin" for the remote name of the upstream repository and "main" 9 | for the name of the base branch, and this test may not work as expected. 10 | """ 11 | 12 | import os 13 | import subprocess 14 | 15 | from utils import get_repo_root_path 16 | 17 | REMOTE = os.environ.get("BUILDKITE_REPO") or os.environ.get("REMOTE") or "origin" 18 | BASE_BRANCH = ( 19 | os.environ.get("BUILDKITE_PULL_REQUEST_BASE_BRANCH") 20 | or os.environ.get("BASE_BRANCH") 21 | or "main" 22 | ) 23 | # File used for saving the results of cargo bench 24 | # when running on the PR branch. 25 | PR_BENCH_RESULTS_FILE = "pr_bench_results" 26 | # File used for saving the results of cargo bench 27 | # when running on the upstream branch. 28 | UPSTREAM_BENCH_RESULTS_FILE = "upstream_bench_results" 29 | 30 | 31 | def test_bench(): 32 | """Runs benchmarks before and after and compares the results.""" 33 | os.chdir(get_repo_root_path()) 34 | 35 | # Newer versions of git check the ownership of directories. 36 | # We need to add an exception for /workdir which is shared, so that 37 | # the git commands don't fail. 38 | config_cmd = "git config --global --add safe.directory /workdir" 39 | subprocess.run(config_cmd, shell=True, check=True) 40 | 41 | # Get numbers for current HEAD. 42 | return_code, stdout, stderr = _run_cargo_bench(PR_BENCH_RESULTS_FILE) 43 | # Even if it is the first time this test is run, the benchmark tests should 44 | # pass. For this purpose, we need to explicitly check the return code. 45 | assert return_code == 0, "stdout: {}\n stderr: {}".format(stdout, stderr) 46 | 47 | # Get numbers from upstream tip, without the changes from the current PR. 
48 | _git_checkout_upstream_branch() 49 | return_code, stdout, stderr = _run_cargo_bench(UPSTREAM_BENCH_RESULTS_FILE) 50 | 51 | # Before checking any results, let's just go back to the PR branch. 52 | # This way we make sure that the cleanup always happens even if the test 53 | # fails. 54 | _git_checkout_pr_branch() 55 | 56 | if return_code == 0: 57 | # In case this benchmark also ran successfully, we can call critcmp and 58 | # compare the results. 59 | _run_critcmp() 60 | else: 61 | # The benchmark did not run successfully, but it might be that it is 62 | # because a benchmark does not exist. In this case, we do not want to 63 | # fail the test. 64 | if "error: no bench target named `main`" in stderr: 65 | # This is a bit of a &*%^ way of checking if the benchmark does not 66 | # exist. Hopefully it will be possible to check it in another way 67 | # ...soon 68 | print("There are no benchmarks in main. No comparison can happen.") 69 | else: 70 | assert return_code == 0, "stdout: {}\n stderr: {}".format(stdout, stderr) 71 | 72 | 73 | def _run_cargo_bench(baseline): 74 | """Runs `cargo bench` and tags the baseline.""" 75 | process = subprocess.run( 76 | "cargo bench --bench main --all-features -- --noplot " 77 | "--save-baseline {}".format(baseline), 78 | shell=True, 79 | stderr=subprocess.PIPE, 80 | stdout=subprocess.PIPE, 81 | ) 82 | 83 | return ( 84 | process.returncode, 85 | process.stdout.decode("utf-8"), 86 | process.stderr.decode("utf-8"), 87 | ) 88 | 89 | 90 | def _run_critcmp(): 91 | p = subprocess.run( 92 | "critcmp {} {}".format(UPSTREAM_BENCH_RESULTS_FILE, PR_BENCH_RESULTS_FILE), 93 | shell=True, 94 | check=True, 95 | stdout=subprocess.PIPE, 96 | stderr=subprocess.PIPE, 97 | ) 98 | 99 | print(p.stdout.decode("utf-8")) 100 | print("ERRORS") 101 | print(p.stderr.decode("utf-8")) 102 | 103 | 104 | def _clean_workdir(): 105 | subprocess.run("git restore .", shell=True, check=True) 106 | subprocess.run("git clean -fd", shell=True, check=True) 107 | 108 | 109 
| def _git_checkout_upstream_branch(): 110 | subprocess.run( 111 | "git fetch {} {}".format(REMOTE, BASE_BRANCH), shell=True, check=True 112 | ) 113 | _clean_workdir() 114 | subprocess.run("git checkout FETCH_HEAD", shell=True, check=True) 115 | 116 | 117 | def _git_checkout_pr_branch(): 118 | _clean_workdir() 119 | subprocess.run( 120 | "git checkout -", 121 | shell=True, 122 | check=True, 123 | stdout=subprocess.PIPE, 124 | stderr=subprocess.PIPE, 125 | ) 126 | -------------------------------------------------------------------------------- /integration_tests/test_commit_format.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | """ 4 | Test the commit message format. 5 | 6 | This test works properly on the local machine only when the environment 7 | variables REMOTE and BASE_BRANCH are set. Otherwise the default values 8 | are "origin" for the remote name of the upstream repository and "main" 9 | for the name of the base branch, and this test may not work as expected. 10 | """ 11 | 12 | import os 13 | import subprocess 14 | 15 | from utils import get_cmd_output 16 | 17 | COMMIT_TITLE_MAX_LEN = 60 18 | COMMIT_BODY_LINE_MAX_LEN = 75 19 | REMOTE = os.environ.get("BUILDKITE_REPO") or os.environ.get("REMOTE") or "origin" 20 | BASE_BRANCH = ( 21 | os.environ.get("BUILDKITE_PULL_REQUEST_BASE_BRANCH") 22 | or os.environ.get("BASE_BRANCH") 23 | or "main" 24 | ) 25 | 26 | 27 | def test_commit_format(): 28 | """ 29 | Checks commit message format for the current PR's commits. 30 | 31 | Checks if commit messages do not have exceedingly long titles (a maximum 60 32 | characters for the title) and if commits are signed. 33 | """ 34 | # Newer versions of git check the ownership of directories. 35 | # We need to add an exception for /workdir which is shared, so that 36 | # the git commands don't fail. 
37 | config_cmd = "git config --global --add safe.directory /workdir" 38 | subprocess.run(config_cmd, shell=True, check=True) 39 | # Fetch the upstream repository. 40 | fetch_base_cmd = "git fetch {} {}".format(REMOTE, BASE_BRANCH) 41 | try: 42 | subprocess.run(fetch_base_cmd, shell=True, check=True) 43 | except subprocess.CalledProcessError: 44 | raise NameError( 45 | "The name of the base branch or remote is invalid. " 46 | "See test documentation for more details." 47 | ) from None 48 | # Get hashes of PR's commits in their abbreviated form for 49 | # a prettier printing. 50 | shas_cmd = "git log --no-merges --pretty=%h --no-decorate " "FETCH_HEAD..HEAD" 51 | shas = get_cmd_output(shas_cmd) 52 | 53 | for sha in shas.split(): 54 | # Do not enforce the commit rules when the committer is dependabot. 55 | author_cmd = "git show -s --format='%ae' " + sha 56 | author = get_cmd_output(author_cmd) 57 | if "dependabot" in author: 58 | continue 59 | message_cmd = "git show --pretty=format:%B -s " + sha 60 | message = get_cmd_output(message_cmd) 61 | message_lines = message.split("\n") 62 | assert len(message_lines) >= 3, ( 63 | "The commit '{}' should contain at least 3 lines: title, " 64 | "blank line and a sign-off one.".format(sha) 65 | ) 66 | title = message_lines[0] 67 | assert message_lines[1] == "", ( 68 | "For commit '{}', title is divided into multiple lines. " 69 | "Please keep it one line long and make sure you add a blank " 70 | "line between title and description.".format(sha) 71 | ) 72 | assert len(title) <= COMMIT_TITLE_MAX_LEN, ( 73 | "For commit '{}', title exceeds {} chars. " 74 | "Please keep it shorter.".format(sha, COMMIT_TITLE_MAX_LEN) 75 | ) 76 | 77 | found_signed_off = False 78 | 79 | for line in message_lines[2:]: 80 | if line.startswith("Signed-off-by: "): 81 | found_signed_off = True 82 | break 83 | 84 | assert found_signed_off, ( 85 | "Commit '{}' is not signed. 
" 86 | "Please run 'git commit -s --amend' " 87 | "on it.".format(sha) 88 | ) 89 | -------------------------------------------------------------------------------- /integration_tests/test_coverage.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | """Test the coverage and update the threshold when coverage is increased.""" 4 | 5 | import json 6 | import os 7 | import re 8 | import shutil 9 | import subprocess 10 | import platform 11 | import pytest 12 | 13 | from utils import get_repo_root_path 14 | 15 | 16 | def get_coverage_config_path(): 17 | machine = platform.machine() 18 | target_file = f"coverage_config_{machine}.json" 19 | # We use a breadth-first search to guarantee that the config file 20 | # belongs to the crate that is being tested. Otherwise we might end 21 | # up wrongfully using the config file in the rust-vmm-ci submodule. 22 | # os.walkdir() offers a depth-first search and couldn't be used here. 23 | dirs = [os.getcwd()] 24 | while len(dirs): 25 | nextDirs = [] 26 | for dir in dirs: 27 | for file in os.listdir(dir): 28 | file_path = os.path.join(dir, file) 29 | if os.path.isdir(file_path): 30 | nextDirs.append(file_path) 31 | elif file == target_file: 32 | return file_path 33 | dirs = nextDirs 34 | 35 | 36 | REPO_ROOT_PATH = get_repo_root_path() 37 | COVERAGE_CONFIG_PATH = get_coverage_config_path() 38 | 39 | 40 | def _read_test_config(): 41 | """ 42 | Reads the config of the coverage for the repository being tested. 43 | 44 | Returns a JSON object with the configuration. 
45 | """ 46 | coverage_config = {} 47 | with open(COVERAGE_CONFIG_PATH) as config_file: 48 | coverage_config = json.load(config_file) 49 | 50 | assert "coverage_score" in coverage_config 51 | assert "exclude_path" in coverage_config 52 | 53 | if "crate_features" in coverage_config: 54 | assert ( 55 | " " not in coverage_config["crate_features"] 56 | ), "spaces are not allowed in crate_features value" 57 | 58 | return coverage_config 59 | 60 | 61 | def _write_coverage_config(coverage_config): 62 | """Updates the coverage config file as per `coverage_config`""" 63 | with open(COVERAGE_CONFIG_PATH, "w") as outfile: 64 | json.dump(coverage_config, outfile) 65 | 66 | 67 | def _get_current_coverage(coverage_config, no_cleanup, test_scope): 68 | """Helper function that returns the coverage computed with llvm-cov.""" 69 | # By default the build output for kcov and unit tests are both in the debug 70 | # directory. This causes some linker errors that I haven't investigated. 71 | # Error: error: linking with `cc` failed: exit code: 1 72 | # An easy fix is to have separate build directories for kcov & unit tests. 73 | cov_build_dir = os.path.join(REPO_ROOT_PATH, "cov_build") 74 | 75 | # Remove kcov output and build directory to be sure we are always working 76 | # on a clean environment. 
77 | shutil.rmtree(cov_build_dir, ignore_errors=True) 78 | 79 | llvm_cov_command = ( 80 | f"CARGO_TARGET_DIR={cov_build_dir} cargo llvm-cov test --json --summary-only" 81 | ) 82 | 83 | additional_exclude_path = coverage_config["exclude_path"] 84 | if additional_exclude_path: 85 | llvm_cov_command += f' --ignore-filename-regex "{additional_exclude_path}"' 86 | 87 | if test_scope == pytest.workspace: 88 | llvm_cov_command += " --workspace " 89 | 90 | crate_features = coverage_config.get("crate_features") 91 | if crate_features: 92 | llvm_cov_command += " --features=" + crate_features 93 | if crate_features is None: 94 | llvm_cov_command += " --all-features" 95 | 96 | # Pytest closes stdin by default, but some tests might need it to be open. 97 | # In the future, should the need arise, we can feed custom data to stdin. 98 | result = subprocess.run( 99 | llvm_cov_command, shell=True, check=True, input=b"", stdout=subprocess.PIPE 100 | ) 101 | 102 | summary = json.loads(result.stdout) 103 | coverage = summary["data"][0]["totals"]["lines"]["percent"] 104 | 105 | shutil.rmtree(cov_build_dir, ignore_errors=True) 106 | 107 | return coverage 108 | 109 | 110 | def test_coverage(profile, no_cleanup, test_scope): 111 | MAX_DELTA = 0.5 112 | 113 | coverage_config = _read_test_config() 114 | current_coverage = _get_current_coverage(coverage_config, no_cleanup, test_scope) 115 | previous_coverage = coverage_config["coverage_score"] 116 | diff = current_coverage - previous_coverage 117 | upper = previous_coverage + MAX_DELTA 118 | arch = platform.machine() 119 | 120 | msg = ( 121 | f"Current code coverage ({current_coverage:.2f}%) deviates by {diff:.2f}% from the previous code coverage {previous_coverage:.2f}%." 122 | f"Current code coverage must be within the range {previous_coverage:.2f}%..{upper:.2f}%." 123 | f"Please update the coverage in `coverage_config_{arch}.json`." 
124 | ) 125 | 126 | if abs(diff) > MAX_DELTA: 127 | if previous_coverage < current_coverage: 128 | if profile == pytest.profile_ci: 129 | # In the CI Profile we expect the coverage to be manually updated. 130 | raise ValueError(msg) 131 | elif profile == pytest.profile_devel: 132 | coverage_config["coverage_score"] = current_coverage 133 | _write_coverage_config(coverage_config) 134 | else: 135 | # This should never happen because pytest should only accept 136 | # the valid test profiles specified with `choices` in 137 | # `pytest_addoption`. 138 | raise RuntimeError("Invalid test profile.") 139 | elif previous_coverage > current_coverage: 140 | raise ValueError(msg) 141 | -------------------------------------------------------------------------------- /integration_tests/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | import os 5 | import subprocess 6 | 7 | 8 | def get_repo_root_path(): 9 | """Terrible hack to get the root path of the repository.""" 10 | integration_tests_path = os.path.dirname(os.path.realpath(__file__)) 11 | rust_vmm_ci_path = os.path.dirname(integration_tests_path) 12 | 13 | return os.path.dirname(rust_vmm_ci_path) 14 | 15 | 16 | def get_cmd_output(cmd): 17 | """Returns stdout content of `cmd` command.""" 18 | cmd_out = subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE) 19 | stdout = cmd_out.stdout.decode("utf-8") 20 | return stdout 21 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub fn add(a: u32, b: u32) -> u32 { 2 | a + b 3 | } 4 | 5 | #[cfg(test)] 6 | mod tests { 7 | #[test] 8 | fn test_add() { 9 | assert_eq!(super::add(1, 2), 3) 10 | } 11 | } 12 | 
# ------------------------------------------------------------------------------
# /test_run.py:
# ------------------------------------------------------------------------------
#!/usr/bin/env python3

# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
"""Run all the rust-vmm-ci tests locally by turning the JSON test
description into dynamically generated `unittest` test cases."""
import json
import subprocess
import platform
import pathlib
import unittest

from argparse import ArgumentParser, RawTextHelpFormatter
from textwrap import dedent

# Directory that contains this script; used to locate the test description.
PARENT_DIR = pathlib.Path(__file__).parent.resolve()


class TestsContainer(unittest.TestCase):
    """Empty container class; test methods are attached dynamically below."""

    pass


def make_test_function(command):
    """Return a unittest-style test method that runs `command` in a shell.

    `check=True` turns a non-zero exit code into a test failure.
    """

    def test(self):
        subprocess.run(command, shell=True, check=True)

    return test


def retrieve_test_list(config_file=f"{PARENT_DIR}/.buildkite/test_description.json"):
    """Load and return the JSON test description from `config_file`.

    Fix: the original called `jsonFile.close()` inside the `with` block,
    which is redundant — the context manager already closes the file.
    """
    with open(config_file) as config_handle:
        return json.load(config_handle)


if __name__ == "__main__":
    help_text = dedent(
        """
        This script allows running all the tests at once on the local machine.
        The tests "test_benchmark.py" and "test_commit_format.py" work properly
        on the local machine only when the environment variables REMOTE and
        BASE_BRANCH are set. Otherwise the default values are "origin" for the
        remote name of the upstream repository and "main" for the name of the
        base branch, and these tests may not work as expected.
        """
    )
    parser = ArgumentParser(description=help_text, formatter_class=RawTextHelpFormatter)
    parser.parse_args()

    test_config = retrieve_test_list()
    for test in test_config["tests"]:
        command = test["command"]
        # Substitute the placeholder with the local machine architecture
        # (e.g. x86_64 or aarch64) before generating the test method.
        command = command.replace("{target_platform}", platform.machine())
        test_func = make_test_function(command)
        setattr(TestsContainer, "test_{}".format(test["test_name"]), test_func)

    unittest.main(verbosity=2)
# ------------------------------------------------------------------------------