├── .cargo └── config.toml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── agent ├── Cargo.toml ├── smf │ ├── agent.xml │ └── start.sh ├── src │ ├── control │ │ ├── mod.rs │ │ ├── protocol.rs │ │ └── server.rs │ ├── download.rs │ ├── exec.rs │ ├── main.rs │ ├── shadow.rs │ ├── uadmin.rs │ └── upload.rs └── systemd │ └── agent.service ├── bin ├── Cargo.toml └── src │ ├── config.rs │ └── main.rs ├── bunyan ├── Cargo.toml └── src │ └── lib.rs ├── client ├── Cargo.toml ├── openapi.json └── src │ ├── events.rs │ ├── ext.rs │ ├── lib.rs │ └── progenitor_client.rs ├── common ├── Cargo.toml └── src │ └── lib.rs ├── database ├── Cargo.toml └── src │ └── lib.rs ├── download ├── Cargo.toml └── src │ ├── kinds │ ├── file.rs │ ├── mod.rs │ ├── s3.rs │ └── url.rs │ ├── lib.rs │ └── stopwatch.rs ├── factory ├── aws │ ├── Cargo.toml │ ├── scripts │ │ └── user_data.sh │ └── src │ │ ├── aws.rs │ │ ├── config.rs │ │ └── main.rs ├── lab │ ├── Cargo.toml │ ├── schema.sql │ ├── scripts │ │ ├── debug.ipxe │ │ ├── hold.ipxe │ │ ├── postboot.sh │ │ └── regular.ipxe │ └── src │ │ ├── config.rs │ │ ├── db │ │ ├── mod.rs │ │ └── tables │ │ │ ├── instance.rs │ │ │ ├── instance_event.rs │ │ │ └── mod.rs │ │ ├── host.rs │ │ ├── main.rs │ │ ├── minder.rs │ │ ├── pty.rs │ │ └── worker.rs └── propolis │ ├── Cargo.toml │ ├── schema.sql │ ├── scripts │ ├── propolis.sh │ ├── serial.sh │ └── user_data.sh │ ├── smf │ ├── propolis.xml │ ├── serial.xml │ └── site.xml │ └── src │ ├── config.rs │ ├── db │ ├── mod.rs │ └── tables │ │ ├── instance.rs │ │ └── mod.rs │ ├── factory.rs │ ├── main.rs │ ├── net.rs │ ├── nocloud.rs │ ├── propolis.rs │ ├── serial.rs │ ├── svc.rs │ ├── ucred.rs │ ├── vm.rs │ └── zones.rs ├── github ├── client │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── database │ ├── Cargo.toml │ ├── schema.sql │ └── src │ │ ├── lib.rs │ │ └── tables │ │ ├── check_run.rs │ │ ├── check_suite.rs │ │ ├── delivery.rs │ │ ├── install.rs │ │ ├── mod.rs │ │ ├── repository.rs │ │ └── user.rs ├── dbtool │ ├── Cargo.toml │ └── src │ │ └── main.rs ├── ghtool │ ├── Cargo.toml │ └── src │ │ ├── config.rs │ │ └── main.rs ├── hooktypes │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── server │ ├── Cargo.toml │ ├── src │ │ ├── config.rs │ │ ├── http.rs │ │ ├── main.rs │ │ ├── templates.rs │ │ └── variety │ │ │ ├── basic.rs │ │ │ ├── control.rs │ │ │ └── mod.rs │ └── www │ │ ├── bunyan.css │ │ └── bunyan.js ├── testdata │ ├── Cargo.toml │ └── src │ │ └── lib.rs └── tools │ └── make_create_url.sh ├── jobsh ├── Cargo.toml └── src │ ├── jobfile.rs │ ├── lib.rs │ └── variety │ ├── basic.rs │ └── mod.rs ├── rustfmt.toml ├── server ├── Cargo.toml ├── schema.sql └── src │ ├── api │ ├── admin.rs │ ├── factory.rs │ ├── mod.rs │ ├── public.rs │ ├── user.rs │ └── worker.rs │ ├── archive │ ├── files.rs │ ├── jobs.rs │ └── mod.rs │ ├── chunks.rs │ ├── config.rs │ ├── db │ ├── mod.rs │ └── tables │ │ ├── factory.rs │ │ ├── job.rs │ │ ├── job_depend.rs │ │ ├── job_event.rs │ │ ├── job_file.rs │ │ ├── job_input.rs │ │ ├── job_output.rs │ │ ├── job_output_rule.rs │ │ ├── job_store.rs │ │ ├── job_tag.rs │ │ ├── job_time.rs │ │ ├── mod.rs │ │ ├── published_file.rs │ │ ├── target.rs │ │ ├── task.rs │ │ ├── user.rs │ │ ├── user_privilege.rs │ │ ├── worker.rs │ │ └── worker_event.rs │ ├── files.rs │ ├── jobs.rs │ ├── main.rs │ └── workers.rs ├── sse ├── Cargo.toml └── src │ └── lib.rs ├── types ├── Cargo.toml └── src │ ├── config.rs │ ├── lib.rs │ └── metadata.rs ├── variety └── basic │ ├── scripts │ ├── github_clone.sh │ ├── 
github_token.sh │ ├── rustup.sh │ └── setup.sh │ └── www │ ├── live.js │ └── style.css ├── www ├── buildomat.png ├── buildomat_wide.png ├── error.html ├── favicon.ico ├── index.html └── notfound.html └── xtask ├── Cargo.toml ├── scripts └── build_linux_agent.sh └── src └── main.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [alias] 2 | xtask = "run --quiet --package xtask --" 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /config.toml 3 | /data.sqlite3 4 | /cache 5 | /etc 6 | /var 7 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "agent", 4 | "bin", 5 | "bunyan", 6 | "client", 7 | "common", 8 | "database", 9 | "download", 10 | "factory/aws", 11 | "factory/lab", 12 | "factory/propolis", 13 | "github/client", 14 | "github/database", 15 | "github/dbtool", 16 | "github/ghtool", 17 | "github/hooktypes", 18 | "github/server", 19 | "github/testdata", 20 | "jobsh", 21 | "server", 22 | "sse", 23 | "types", 24 | "xtask", 25 | ] 26 | resolver = "2" 27 | 28 | [workspace.dependencies] 29 | ansi-to-html = { version = "0.2", features = [ "lazy-init" ] } 30 | anyhow = "1" 31 | aws-config = "1" 32 | aws-credential-types = "1" 33 | aws-sdk-ec2 = "1" 34 | aws-sdk-s3 = "1" 35 | aws-types = "1" 36 | base64 = "0.22" 37 | bytes = "1.1" 38 | chrono = { version = "0.4", features = [ "serde" ] } 39 | dirs-next = "2" 40 | dropshot = { git = "https://github.com/oxidecomputer/dropshot" } 41 | futures = "0.3" 42 | futures-core = "0.3" 43 | getopts = "0.2" 44 | glob = "0.3" 45 | hiercmd = { git = "https://github.com/jclulow/hiercmd" } 46 | hmac-sha256 = "1" 47 | html-escape = "0.2" 48 | http = "1" 49 | http-body-util = "0.1" 50 | http-range = "0.1" 51 | hyper = "1" 52 | ipnet = "2.8" 53 | jmclib = { git = "https://github.com/jclulow/rust-jmclib", features = ["sqlite"] } 54 | libc = "0.2.113" 55 | new_mime_guess = "4" 56 | octorust = { git = "https://github.com/oxidecomputer/third-party-api-clients", branch = "jclulow" } 57 | pem = "3" 58 | percent-encoding = "2.1" 59 | progenitor = { version = "0.9.1" } 60 | progenitor-client = { version = "0.9.1" } 61 | rand = "0.9" 62 | regex = "1" 63 | reqwest = { version = "0.12", features = [ "json", "stream" ] } 64 | rust-toolchain-file = "0.1" 65 | rusty_ulid = "2" 66 | schemars = { version = "0.8", features = [ "chrono" ] } 67 | sea-query = { version = "0.32", default-features = false, features = [ "derive", "attr", "backend-sqlite" ] } 68 | sea-query-rusqlite = "0.7" 69 | semver = "1" 70 | serde = { version = "1", features = [ "derive" ] } 71 | serde_json = "1" 72 | serde_repr = "0.1" 73 | serde_urlencoded = "0.7" 74 | serde_with = "3" 75 | serde_yaml = "0.9" 76 | sigpipe = "0.1" 77 | slog = { version = "2.7", features = [ "release_max_level_debug" ] } 78 | slog-bunyan = "2.4" 79 | slog-term = "2.7" 80 | smf = { git = "https://github.com/illumos/smf-rs.git" } 81 | strip-ansi-escapes = "0.2" 82 | strum = { version = "0.27", features = [ "derive" ] } 83 | tempfile = "3.3" 84 | thiserror = "2" 85 | tlvc = { git = "https://github.com/oxidecomputer/tlvc", version = "0.3.1" } 86 | tlvc-text = { git = "https://github.com/oxidecomputer/tlvc", version = "0.3.0" } 87 | tokio = { version = "1", features = [ "full" ] } 88 
| tokio-stream = "0.1" 89 | tokio-util = { version = "0.7", features = [ "io" ] } 90 | toml = "0.8" 91 | usdt = "0.5" 92 | zone = { version = "0.3", features = [ "async" ], default-features = false } 93 | -------------------------------------------------------------------------------- /agent/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-agent" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [features] 8 | default = ['vendored-openssl'] 9 | vendored-openssl = ['openssl/vendored'] 10 | 11 | [dependencies] 12 | buildomat-common = { path = "../common" } 13 | buildomat-client = { path = "../client" } 14 | buildomat-types = { path = "../types" } 15 | 16 | anyhow = { workspace = true } 17 | bytes = { workspace = true } 18 | chrono = { workspace = true } 19 | futures = { workspace = true } 20 | glob = { workspace = true } 21 | hiercmd = { workspace = true } 22 | ipnet = { workspace = true } 23 | libc = { workspace = true } 24 | rusty_ulid = { workspace = true } 25 | serde = { workspace = true } 26 | serde_json = { workspace = true } 27 | slog = { workspace = true } 28 | tokio = { workspace = true } 29 | # 30 | # I believe it is necessary to pull this in here, so that we can demand the 31 | # static linking of the vendored OpenSSL. We don't use it directly, but the 32 | # same version will then be used by reqwest. 33 | # 34 | openssl = { version = "0.10", optional = true } 35 | -------------------------------------------------------------------------------- /agent/smf/agent.xml: -------------------------------------------------------------------------------- [SMF manifest XML missing from listing] -------------------------------------------------------------------------------- /agent/smf/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Until illumos bug 13511 (svc.startd should terminate orphaned contracts for 5 | # wait model services) is fixed, we run the agent under a noorphan contract of 6 | # our own. The agent's most critical job is running processes on behalf of 7 | # user submitted jobs; we want those grandchild processes to be terminated if 8 | # for some reason the agent exits unexpectedly. 9 | # 10 | exec /usr/bin/ctrun \ 11 | -l child \ 12 | -o noorphan,regent \ 13 | \ 14 | /opt/buildomat/lib/agent run 15 | -------------------------------------------------------------------------------- /agent/src/download.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use std::path::PathBuf; 6 | 7 | use tokio::sync::mpsc; 8 | 9 | #[derive(Debug)] 10 | pub enum Activity { 11 | Downloading(PathBuf), 12 | Downloaded(PathBuf), 13 | Complete, 14 | } 15 | 16 | pub(crate) fn download( 17 | cw: super::ClientWrap, 18 | inputs: Vec, 19 | inputdir: PathBuf, 20 | ) -> mpsc::Receiver<Activity> { 21 | let (tx, rx) = mpsc::channel::<Activity>(64); 22 | 23 | tokio::spawn(async move { 24 | for i in inputs.iter() { 25 | let mut path = inputdir.clone(); 26 | path.push(&i.name); 27 | 28 | /* 29 | * Try our best to create any parent directories that are required 30 | * for names that include slashes.
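 * The make_dirs_for() helper lives at the agent crate root (main.rs),
 * outside this listing; note that failure is tolerated below with .ok(),
 * since a download into a missing directory will then fail with a more
 * specific error of its own.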
31 | */ 32 | super::make_dirs_for(&path).ok(); 33 | 34 | tx.send(Activity::Downloading(path.clone())).await.unwrap(); 35 | 36 | cw.input(&i.id, &path).await; 37 | 38 | tx.send(Activity::Downloaded(path.clone())).await.unwrap(); 39 | } 40 | 41 | tx.send(Activity::Complete).await.unwrap(); 42 | }); 43 | 44 | rx 45 | } 46 | -------------------------------------------------------------------------------- /agent/src/shadow.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::{ 6 | io::{Read, Write}, 7 | path::Path, 8 | }; 9 | 10 | use anyhow::{bail, Result}; 11 | 12 | #[derive(Clone, PartialEq)] 13 | pub struct ShadowFile { 14 | entries: Vec>, 15 | } 16 | 17 | impl ShadowFile { 18 | pub fn load>(path: P) -> Result { 19 | let mut f = std::fs::File::open(path.as_ref())?; 20 | let mut data = String::new(); 21 | f.read_to_string(&mut data)?; 22 | 23 | let entries = data 24 | .lines() 25 | .enumerate() 26 | .map(|(i, l)| { 27 | let fields = 28 | l.split(':').map(str::to_string).collect::>(); 29 | if fields.len() != 9 { 30 | bail!("invalid shadow line {}: {:?}", i, fields); 31 | } 32 | Ok(fields) 33 | }) 34 | .collect::>>()?; 35 | 36 | Ok(ShadowFile { entries }) 37 | } 38 | 39 | pub fn password_set(&mut self, user: &str, password: &str) -> Result<()> { 40 | /* 41 | * First, make sure the username appears exactly once in the shadow 42 | * file. 43 | */ 44 | let mc = self.entries.iter().filter(|e| e[0] == user).count(); 45 | if mc != 1 { 46 | bail!("found {} matches for user {} in shadow file", mc, user); 47 | } 48 | 49 | self.entries.iter_mut().for_each(|e| { 50 | if e[0] == user { 51 | e[1] = password.to_string(); 52 | } 53 | }); 54 | Ok(()) 55 | } 56 | 57 | pub fn write>(&self, path: P) -> Result<()> { 58 | let mut f = std::fs::OpenOptions::new() 59 | .create(false) 60 | .truncate(true) 61 | .write(true) 62 | .open(path.as_ref())?; 63 | 64 | let mut data = self 65 | .entries 66 | .iter() 67 | .map(|e| e.join(":")) 68 | .collect::>() 69 | .join("\n"); 70 | data.push('\n'); 71 | 72 | f.write_all(data.as_bytes())?; 73 | f.flush()?; 74 | Ok(()) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /agent/src/uadmin.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2021 Oxide Computer Company 3 | */ 4 | 5 | #![allow(dead_code)] 6 | 7 | mod c { 8 | pub const A_REBOOT: i32 = 1; 9 | pub const A_SHUTDOWN: i32 = 2; 10 | pub const A_DUMP: i32 = 5; 11 | 12 | pub const AD_HALT: i32 = 0; 13 | pub const AD_BOOT: i32 = 1; 14 | pub const AD_POWEROFF: i32 = 6; 15 | 16 | extern "C" { 17 | pub fn uadmin(cmd: i32, fcn: i32, mdep: usize) -> i32; 18 | } 19 | } 20 | 21 | pub enum Action { 22 | Reboot(Next), 23 | Shutdown(Next), 24 | Dump(Next), 25 | } 26 | 27 | impl Action { 28 | fn cmd(&self) -> i32 { 29 | match self { 30 | Action::Reboot(_) => c::A_REBOOT, 31 | Action::Shutdown(_) => c::A_SHUTDOWN, 32 | Action::Dump(_) => c::A_DUMP, 33 | } 34 | } 35 | 36 | fn fcn(&self) -> i32 { 37 | match self { 38 | Action::Reboot(n) | Action::Shutdown(n) | Action::Dump(n) => { 39 | match n { 40 | Next::Halt => c::AD_HALT, 41 | Next::Boot => c::AD_BOOT, 42 | Next::PowerOff => c::AD_POWEROFF, 43 | } 44 | } 45 | } 46 | } 47 | } 48 | 49 | pub enum Next { 50 | Halt, 51 | Boot, 52 | PowerOff, 53 | } 54 | 55 | pub fn uadmin(action: Action) -> std::io::Result<()> { 56 | let cmd = action.cmd(); 57 | let fcn = action.fcn(); 58 | 59 | if 
unsafe { c::uadmin(cmd, fcn, 0) } == -1 { 60 | Err(std::io::Error::last_os_error()) 61 | } else { 62 | Ok(()) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /agent/systemd/agent.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=buildomat agent 3 | 4 | [Service] 5 | Type=simple 6 | ExecStart=/opt/buildomat/lib/agent run 7 | Restart=always 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | -------------------------------------------------------------------------------- /bin/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [dependencies] 8 | buildomat-common = { path = "../common" } 9 | buildomat-client = { path = "../client" } 10 | 11 | anyhow = { workspace = true } 12 | bytes = { workspace = true } 13 | chrono = { workspace = true } 14 | dirs-next = { workspace = true } 15 | futures = { workspace = true } 16 | hiercmd = { workspace = true } 17 | rusty_ulid = { workspace = true } 18 | serde = { workspace = true } 19 | serde_json = { workspace = true } 20 | sigpipe = { workspace = true } 21 | tokio = { workspace = true } 22 | toml = { workspace = true } 23 | -------------------------------------------------------------------------------- /bin/src/config.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::collections::HashMap; 6 | use std::io::Read; 7 | use std::path::Path; 8 | 9 | use anyhow::{anyhow, bail, Context, Result}; 10 | use serde::Deserialize; 11 | 12 | #[derive(Deserialize, Clone)] 13 | pub struct Config { 14 | pub default_profile: Option, 15 | pub profile: HashMap, 16 | } 17 | 18 | #[derive(Deserialize, Clone)] 19 | pub struct Profile { 20 | pub name: Option, 21 | pub url: String, 22 | pub secret: String, 23 | pub admin_token: Option, 24 | } 25 | 26 | fn env(n: &str) -> Option { 27 | std::env::var(n).map(Some).unwrap_or(None) 28 | } 29 | 30 | impl Profile { 31 | fn from_env() -> Option { 32 | let url = env("INPUT_URL"); 33 | let secret = env("INPUT_SECRET"); 34 | let admin_token = env("INPUT_ADMIN_TOKEN"); 35 | 36 | match (url, secret) { 37 | (Some(url), Some(secret)) => { 38 | Some(Profile { name: None, url, secret, admin_token }) 39 | } 40 | _ => None, 41 | } 42 | } 43 | 44 | fn apply_env(&mut self) { 45 | if let Some(url) = env("INPUT_URL") { 46 | self.url = url; 47 | } 48 | if let Some(secret) = env("INPUT_SECRET") { 49 | self.secret = secret; 50 | } 51 | if let Some(admin_token) = env("INPUT_ADMIN_TOKEN") { 52 | self.admin_token = Some(admin_token); 53 | } 54 | } 55 | } 56 | 57 | fn read_file(p: &Path) -> Result { 58 | let mut f = std::fs::File::open(p)?; 59 | let mut s = String::new(); 60 | f.read_to_string(&mut s)?; 61 | Ok(toml::from_str(&s)?) 62 | } 63 | 64 | pub fn load(profile_name: Option<&str>) -> Result { 65 | /* 66 | * First, try to use the environment. If we have a complete profile in the 67 | * environment we don't need to look at the file system at all. 68 | */ 69 | if let Some(p) = Profile::from_env() { 70 | return Ok(p); 71 | } 72 | 73 | /* 74 | * Next, locate our configuration file. 
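 * Via dirs_next::config_dir() this is typically
 * $HOME/.config/buildomat/config.toml (platform-dependent), holding one
 * or more profiles; an illustrative file, with all values invented:
 *
 *   default_profile = "prod"
 *
 *   [profile.prod]
 *   url = "https://buildomat.example.com"
 *   secret = "..."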
75 | */ 76 | let mut path = dirs_next::config_dir() 77 | .ok_or_else(|| anyhow!("could not find config directory"))?; 78 | path.push("buildomat"); 79 | path.push("config.toml"); 80 | 81 | let c: Config = 82 | read_file(&path).with_context(|| anyhow!("reading file {:?}", path))?; 83 | 84 | let env_profile = env("BUILDOMAT_PROFILE"); 85 | 86 | let (profile_name, src) = if let Some(profile) = profile_name { 87 | (profile, "-p argument") 88 | } else if let Some(profile) = env_profile.as_deref() { 89 | (profile, "BUILDOMAT_PROFILE environment variable") 90 | } else if let Some(profile) = c.default_profile.as_deref() { 91 | (profile, "\"default_profile\" in config.toml") 92 | } else { 93 | ("default", "fallback default") 94 | }; 95 | 96 | if let Some(profile) = c.profile.get(profile_name) { 97 | let mut profile = profile.clone(); 98 | profile.name = Some(profile_name.to_string()); 99 | profile.apply_env(); 100 | Ok(profile) 101 | } else { 102 | bail!( 103 | "profile \"{}\" (from {}) not found in configuration file {:?}", 104 | profile_name, 105 | src, 106 | path 107 | ); 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /bunyan/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-bunyan" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | bytes = { workspace = true } 13 | chrono = { workspace = true } 14 | serde = { workspace = true } 15 | serde_json = { workspace = true } 16 | serde_repr = { workspace = true } 17 | serde_with = { workspace = true } 18 | -------------------------------------------------------------------------------- /client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-client" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | buildomat-types = { path = "../types" } 12 | 13 | anyhow = { workspace = true } 14 | chrono = { workspace = true } 15 | futures = { workspace = true } 16 | progenitor = { workspace = true } 17 | progenitor-client = { workspace = true } 18 | reqwest = { workspace = true } 19 | rusty_ulid = { workspace = true } 20 | serde = { workspace = true } 21 | serde_json = { workspace = true } 22 | tokio = { workspace = true } 23 | -------------------------------------------------------------------------------- /client/src/events.rs: -------------------------------------------------------------------------------- 1 | use futures::TryStreamExt; 2 | 3 | #[derive(Debug)] 4 | #[allow(dead_code)] 5 | enum ServerEventLine { 6 | Event(String), 7 | Data(String), 8 | Id(String), 9 | Retry(String), 10 | } 11 | 12 | #[derive(Debug)] 13 | pub struct ServerEventRecord(Vec); 14 | 15 | impl ServerEventRecord { 16 | pub fn data(&self) -> String { 17 | self.0 18 | .iter() 19 | .filter_map(|ev| match ev { 20 | ServerEventLine::Data(da) => Some(da.to_string()), 21 | _ => None, 22 | }) 23 | .collect::>() 24 | .join("\n") 25 | } 26 | 27 | pub fn event(&self) -> String { 28 | self.0 29 | .iter() 30 | .filter_map(|ev| match ev { 31 | ServerEventLine::Event(ev) => Some(ev.to_string()), 32 | _ => None, 33 | }) 34 | .next() 35 | .unwrap_or_else(String::new) 36 | } 37 | } 38 | 39 | fn process_line(l: &str) -> Option { 40 | if l.starts_with(':') { 41 | /* 42 | * Treat this as a comment. 
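 * Per the WHATWG event-stream format, any line beginning with a colon
 * (e.g. a ": keep-alive" heartbeat) carries no field and is discarded.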
43 | */ 44 | return None; 45 | } 46 | 47 | let (name, value) = if let Some((name, value)) = l.split_once(':') { 48 | (name, value) 49 | } else { 50 | (l, "") 51 | }; 52 | 53 | /* 54 | * According to the whatwg specification, we should discard a single space 55 | * after the colon if there was one. 56 | */ 57 | let value = value.strip_prefix(' ').unwrap_or(value); 58 | 59 | Some(match name.trim() { 60 | "event" => ServerEventLine::Event(value.to_string()), 61 | "data" => ServerEventLine::Data(value.to_string()), 62 | "id" => ServerEventLine::Id(value.to_string()), 63 | "retry" => ServerEventLine::Retry(value.to_string()), 64 | _ => return None, 65 | }) 66 | } 67 | 68 | pub fn attach( 69 | rv: progenitor::progenitor_client::ResponseValue< 70 | progenitor::progenitor_client::ByteStream, 71 | >, 72 | ) -> tokio::sync::mpsc::Receiver> 73 | { 74 | let (tx, rx) = tokio::sync::mpsc::channel(128); 75 | 76 | tokio::task::spawn(async move { 77 | let mut stream = rv.into_inner_stream(); 78 | 79 | let mut line = Vec::new(); 80 | let mut fields = Vec::new(); 81 | loop { 82 | let buf = match stream.try_next().await { 83 | Ok(Some(buf)) => buf, 84 | Ok(None) => { 85 | /* 86 | * We have reached the end of the stream. 87 | */ 88 | if !fields.is_empty() { 89 | tx.send(Ok(ServerEventRecord(std::mem::take( 90 | &mut fields, 91 | )))) 92 | .await 93 | .ok(); 94 | } 95 | return; 96 | } 97 | Err(e) => { 98 | tx.send(Err(e.to_string())).await.ok(); 99 | return; 100 | } 101 | }; 102 | 103 | for b in buf { 104 | if b == b'\n' { 105 | if line.is_empty() { 106 | /* 107 | * This is the end of a record. 108 | */ 109 | if fields.is_empty() { 110 | continue; 111 | } 112 | 113 | if tx 114 | .send(Ok(ServerEventRecord(std::mem::take( 115 | &mut fields, 116 | )))) 117 | .await 118 | .is_err() 119 | { 120 | return; 121 | } 122 | continue; 123 | } 124 | 125 | /* 126 | * Process line. 
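 * Parsed fields accumulate until a blank line flushes them as a single
 * record; e.g. the bytes "event: job\ndata: {}\n\n" arrive on the channel
 * as one ServerEventRecord whose event() is "job" and data() is "{}".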
127 | */ 128 | let ls = match std::str::from_utf8(&line) { 129 | Ok(ls) => ls, 130 | Err(e) => { 131 | tx.send(Err(e.to_string())).await.ok(); 132 | return; 133 | } 134 | }; 135 | 136 | if let Some(f) = process_line(ls) { 137 | fields.push(f); 138 | } 139 | line.clear(); 140 | continue; 141 | } else { 142 | line.push(b); 143 | } 144 | } 145 | } 146 | }); 147 | 148 | rx 149 | } 150 | -------------------------------------------------------------------------------- /client/src/ext.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use anyhow::Result; 6 | use rusty_ulid::Ulid; 7 | use std::{str::FromStr, time::Duration}; 8 | 9 | pub trait ClientJobExt { 10 | fn duration(&self, from: &str, until: &str) -> Option; 11 | } 12 | 13 | impl ClientJobExt for crate::types::Job { 14 | fn duration(&self, from: &str, until: &str) -> Option { 15 | let from = self.times.get(from)?; 16 | let until = self.times.get(until)?; 17 | 18 | if let Ok(dur) = until.signed_duration_since(*from).to_std() { 19 | if dur.is_zero() { 20 | None 21 | } else { 22 | Some(dur) 23 | } 24 | } else { 25 | None 26 | } 27 | } 28 | } 29 | 30 | pub trait ClientIdExt { 31 | fn id(&self) -> Result; 32 | } 33 | 34 | impl ClientIdExt for crate::types::Worker { 35 | fn id(&self) -> Result { 36 | to_ulid(&self.id) 37 | } 38 | } 39 | 40 | impl ClientIdExt for crate::types::Job { 41 | fn id(&self) -> Result { 42 | to_ulid(&self.id) 43 | } 44 | } 45 | 46 | impl ClientIdExt for crate::types::JobListEntry { 47 | fn id(&self) -> Result { 48 | to_ulid(&self.id) 49 | } 50 | } 51 | 52 | fn to_ulid(id: &str) -> Result { 53 | Ok(Ulid::from_str(id)?) 54 | } 55 | -------------------------------------------------------------------------------- /common/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-common" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | chrono = { workspace = true } 13 | new_mime_guess = { workspace = true } 14 | rand = { workspace = true } 15 | regex = { workspace = true } 16 | rusty_ulid = { workspace = true } 17 | serde = { workspace = true } 18 | serde_json = { workspace = true } 19 | slog = { workspace = true } 20 | slog-bunyan = { workspace = true } 21 | slog-term = { workspace = true } 22 | tokio = { workspace = true } 23 | toml = { workspace = true } 24 | -------------------------------------------------------------------------------- /database/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-database" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | chrono = { workspace = true } 13 | jmclib = { workspace = true } 14 | sea-query = { workspace = true } 15 | sea-query-rusqlite = { workspace = true } 16 | serde = { workspace = true } 17 | serde_json = { workspace = true } 18 | slog = { workspace = true } 19 | thiserror = { workspace = true } 20 | usdt = { workspace = true } 21 | -------------------------------------------------------------------------------- /download/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-download" 3 | version = "0.0.0" 4 | edition = "2021" 5 
| license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | aws-sdk-s3 = { workspace = true } 13 | bytes = { workspace = true } 14 | dropshot = { workspace = true } 15 | futures = { workspace = true } 16 | http-body-util = { workspace = true } 17 | http-range = { workspace = true } 18 | hyper = { workspace = true } 19 | reqwest = { workspace = true } 20 | slog = { workspace = true } 21 | tokio = { workspace = true } 22 | tokio-stream = { workspace = true } 23 | -------------------------------------------------------------------------------- /download/src/kinds/file.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | use std::fs::File; 8 | use std::os::unix::fs::FileExt; 9 | 10 | use anyhow::Result; 11 | use bytes::BytesMut; 12 | use dropshot::Body; 13 | use hyper::{body::Frame, Response}; 14 | use slog::{o, Logger}; 15 | use tokio::sync::mpsc; 16 | 17 | /* 18 | * Four lots of 256KB is 1MB, which seems like enough total buffering (in the 19 | * channel) per connection for now: 20 | */ 21 | const BUF_SIZE: usize = 256 * 1024; 22 | const BUF_COUNT: usize = 4; 23 | 24 | /** 25 | * Produce a download response for a given local file. 26 | * 27 | * If "head_only" is true, it will be a HEAD response without a body; otherwise, 28 | * a GET response with the file contents. If a range is provided, it will be 29 | * assessed against the actual file size and a partial (206) response will be 30 | * returned, for both GET and HEAD. 31 | */ 32 | pub async fn stream_from_file( 33 | log: &Logger, 34 | info: String, 35 | f: File, 36 | range: Option, 37 | head_only: bool, 38 | ) -> Result> { 39 | let log = log.new(o!("download" => "file")); 40 | let md = f.metadata()?; 41 | assert!(md.is_file()); 42 | let file_size = md.len(); 43 | 44 | let (tx, rx) = mpsc::channel(BUF_COUNT); 45 | 46 | let (start_at, want_bytes, crange) = if let Some(range) = range { 47 | match range.single_range(file_size) { 48 | Some(sr) => { 49 | if head_only { 50 | return make_head_response(Some(sr), file_size, None); 51 | } 52 | 53 | (sr.start(), sr.content_length(), Some(sr)) 54 | } 55 | None => { 56 | /* 57 | * Return a HTTP 416 Range Not Satisfiable to the client. 58 | */ 59 | return Ok(bad_range_response(file_size)); 60 | } 61 | } 62 | } else { 63 | /* 64 | * If this is not a range request, just return the whole file. 65 | */ 66 | if head_only { 67 | return make_head_response(None, file_size, None); 68 | } 69 | 70 | (0, file_size, None) 71 | }; 72 | 73 | let mut sw = Stopwatch::start(info, start_at, want_bytes); 74 | tokio::task::spawn(async move { 75 | let mut read_bytes = 0; 76 | 77 | loop { 78 | assert!(read_bytes <= want_bytes); 79 | if read_bytes == want_bytes { 80 | /* 81 | * We've seen enough! 82 | */ 83 | sw.complete(&log); 84 | return; 85 | } 86 | 87 | /* 88 | * Determine how many bytes we need to read this time, and from 89 | * where we need to begin reading: 90 | */ 91 | let remaining = want_bytes.checked_sub(read_bytes).unwrap(); 92 | let read_size = remaining.min(BUF_SIZE.try_into().unwrap()); 93 | let pos = start_at.checked_add(read_bytes).unwrap(); 94 | 95 | /* 96 | * Allocate the buffer we need, then set the length to the target 97 | * read size. At this point, the buffer will still be 98 | * uninitialised. 
99 | * 100 | * We allocate the full buffer size every time to avoid creating 101 | * fragmentation with shorter reads at the end of files. 102 | */ 103 | assert!(read_size > 0); 104 | let mut buf = BytesMut::with_capacity(BUF_SIZE); 105 | unsafe { buf.set_len(read_size.try_into().unwrap()) }; 106 | 107 | match tokio::task::block_in_place(|| f.read_at(&mut buf, pos)) { 108 | Ok(0) => { 109 | /* 110 | * We are careful to issue reads only when we need at least 111 | * one byte of data, so hitting EOF is unexpected. 112 | */ 113 | let e = sw.fail(&log, "unexpected end of file"); 114 | tx.send(e).await.ok(); 115 | return; 116 | } 117 | Ok(sz) => { 118 | /* 119 | * Trim the buffer to span only what we actually read. The 120 | * readable portion of the buffer is now safely initialised. 121 | */ 122 | unsafe { buf.set_len(sz) }; 123 | 124 | read_bytes = 125 | read_bytes.checked_add(sz.try_into().unwrap()).unwrap(); 126 | 127 | /* 128 | * Pass the read bytes onto the client. 129 | */ 130 | let buf = buf.freeze(); 131 | sw.add_bytes(buf.len()); 132 | if tx.send(Ok(Frame::data(buf))).await.is_err() { 133 | sw.fail(&log, "interrupted on client side").ok(); 134 | return; 135 | } 136 | } 137 | Err(e) => { 138 | let e = sw.fail(&log, &format!("local file error: {e}")); 139 | 140 | /* 141 | * Push the error to the client side stream, but ignore a 142 | * second failure on the client side. 143 | */ 144 | tx.send(e).await.ok(); 145 | return; 146 | } 147 | } 148 | } 149 | }); 150 | 151 | if let Some(crange) = &crange { 152 | assert_eq!(crange.content_length(), want_bytes); 153 | } 154 | make_get_response(crange, file_size, None, rx) 155 | } 156 | -------------------------------------------------------------------------------- /download/src/kinds/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | mod sublude { 6 | pub(crate) use crate::stopwatch::Stopwatch; 7 | pub(crate) use crate::PotentialRange; 8 | pub(crate) use crate::{ 9 | bad_range_response, make_get_response, make_head_response, 10 | }; 11 | } 12 | 13 | pub mod file; 14 | pub mod s3; 15 | pub mod url; 16 | -------------------------------------------------------------------------------- /download/src/lib.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use anyhow::Result; 6 | use bytes::Bytes; 7 | 8 | use dropshot::Body; 9 | use http_body_util::StreamBody; 10 | use hyper::{ 11 | body::Frame, 12 | header::{ACCEPT_RANGES, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE}, 13 | Response, StatusCode, 14 | }; 15 | 16 | use tokio::sync::mpsc; 17 | 18 | mod kinds; 19 | mod stopwatch; 20 | 21 | pub use kinds::file::stream_from_file; 22 | pub use kinds::s3::{stream_from_s3, unruin_content_length}; 23 | pub use kinds::url::stream_from_url; 24 | 25 | fn bad_range_response(file_size: u64) -> Response { 26 | hyper::Response::builder() 27 | .status(StatusCode::RANGE_NOT_SATISFIABLE) 28 | .header(ACCEPT_RANGES, "bytes") 29 | .header(CONTENT_RANGE, format!("bytes */{file_size}")) 30 | .body(Body::wrap(http_body_util::Empty::new())) 31 | .unwrap() 32 | } 33 | 34 | /** 35 | * Generate a GET response, optionally for a HTTP range request. The total 36 | * file length should be provided, whether or not the expected Content-Length 37 | * for a range request is shorter. 
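 * With no range this produces a plain 200 whose Content-Length is the
 * full file length; with a range it produces a 206 carrying Content-Range,
 * via make_response_common() below.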
38 | */ 39 | fn make_get_response( 40 | crange: Option, 41 | file_length: u64, 42 | content_type: Option<&str>, 43 | rx: mpsc::Receiver, E>>, 44 | ) -> Result> 45 | where 46 | E: Into> 47 | + Send 48 | + Sync 49 | + 'static, 50 | { 51 | Ok(make_response_common(crange, file_length, content_type)?.body( 52 | Body::wrap(StreamBody::new( 53 | tokio_stream::wrappers::ReceiverStream::new(rx), 54 | )), 55 | )?) 56 | } 57 | 58 | /** 59 | * Generate a HEAD response, optionally for a HTTP range request. The total 60 | * file length should be provided, whether or not the expected Content-Length 61 | * for a range request is shorter. 62 | */ 63 | fn make_head_response( 64 | crange: Option, 65 | file_length: u64, 66 | content_type: Option<&str>, 67 | ) -> Result> { 68 | Ok(make_response_common(crange, file_length, content_type)? 69 | .body(Body::wrap(http_body_util::Empty::new()))?) 70 | } 71 | 72 | fn make_response_common( 73 | crange: Option, 74 | file_length: u64, 75 | content_type: Option<&str>, 76 | ) -> Result { 77 | let mut res = Response::builder(); 78 | res = res.header(ACCEPT_RANGES, "bytes"); 79 | res = res.header( 80 | CONTENT_TYPE, 81 | content_type.unwrap_or("application/octet-stream"), 82 | ); 83 | 84 | if let Some(crange) = crange { 85 | res = res.header(CONTENT_LENGTH, crange.content_length().to_string()); 86 | res = res.header(CONTENT_RANGE, crange.to_content_range()); 87 | res = res.status(StatusCode::PARTIAL_CONTENT); 88 | } else { 89 | res = res.header(CONTENT_LENGTH, file_length.to_string()); 90 | res = res.status(StatusCode::OK); 91 | } 92 | 93 | Ok(res) 94 | } 95 | 96 | pub struct PotentialRange(Vec); 97 | 98 | impl PotentialRange { 99 | pub fn single_range(&self, len: u64) -> Option { 100 | match http_range::HttpRange::parse_bytes(&self.0, len) { 101 | Ok(ranges) => { 102 | if ranges.len() != 1 || ranges[0].length < 1 { 103 | /* 104 | * Right now, we don't want to deal with encoding a 105 | * response that has multiple ranges. 106 | */ 107 | None 108 | } else { 109 | Some(SingleRange(ranges[0], len)) 110 | } 111 | } 112 | Err(_) => None, 113 | } 114 | } 115 | } 116 | 117 | pub struct SingleRange(http_range::HttpRange, u64); 118 | 119 | impl SingleRange { 120 | /** 121 | * Return the first byte in this range for use in inclusive ranges. 122 | */ 123 | pub fn start(&self) -> u64 { 124 | self.0.start 125 | } 126 | 127 | /** 128 | * Return the last byte in this range for use in inclusive ranges. 129 | */ 130 | pub fn end(&self) -> u64 { 131 | assert!(self.0.length > 0); 132 | 133 | self.0.start.checked_add(self.0.length).unwrap().checked_sub(1).unwrap() 134 | } 135 | 136 | /** 137 | * Generate the Content-Range header for inclusion in a HTTP 206 partial 138 | * content response using this range. 139 | */ 140 | pub fn to_content_range(&self) -> String { 141 | format!("bytes {}-{}/{}", self.0.start, self.end(), self.1) 142 | } 143 | 144 | /** 145 | * Generate a Range header for inclusion in another HTTP request; e.g., 146 | * to a backend object store. 
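 * For instance, a single range spanning a whole 100-byte object renders
 * here as "bytes=0-99", and as "bytes 0-99/100" in to_content_range()
 * above.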
147 | */ 148 | pub fn to_range(&self) -> String { 149 | format!("bytes={}-{}", self.0.start, self.end()) 150 | } 151 | 152 | pub fn content_length(&self) -> u64 { 153 | assert!(self.0.length > 0); 154 | 155 | self.0.length 156 | } 157 | } 158 | 159 | pub trait RequestContextEx { 160 | fn range(&self) -> Option; 161 | } 162 | 163 | impl RequestContextEx for dropshot::RequestContext> 164 | where 165 | T: Send + Sync + 'static, 166 | { 167 | /** 168 | * If there is a Range header, return it for processing during response 169 | * generation. 170 | */ 171 | fn range(&self) -> Option { 172 | self.request 173 | .headers() 174 | .get(hyper::header::RANGE) 175 | .map(|hv| PotentialRange(hv.as_bytes().to_vec())) 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /download/src/stopwatch.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::time::{Duration, Instant}; 6 | 7 | use anyhow::{anyhow, Result}; 8 | use bytes::Bytes; 9 | use hyper::body::Frame; 10 | use slog::{error, info, Logger}; 11 | 12 | pub struct Stopwatch { 13 | start: Instant, 14 | info: String, 15 | offset: u64, 16 | bytes_expected: u64, 17 | bytes_transferred: u64, 18 | } 19 | 20 | impl Stopwatch { 21 | pub fn start(info: String, offset: u64, bytes_expected: u64) -> Stopwatch { 22 | Stopwatch { 23 | start: Instant::now(), 24 | info, 25 | offset, 26 | bytes_expected, 27 | bytes_transferred: 0, 28 | } 29 | } 30 | 31 | pub fn add_bytes(&mut self, bytes: usize) { 32 | self.bytes_transferred = 33 | self.bytes_transferred.saturating_add(bytes.try_into().unwrap()); 34 | } 35 | 36 | fn complete_common(&self) -> (Duration, f64) { 37 | let dur = Instant::now().saturating_duration_since(self.start); 38 | let rate_mb = (self.bytes_transferred as f64 / dur.as_secs_f64()) 39 | / (1024.0 * 1024.0); 40 | 41 | (dur, rate_mb) 42 | } 43 | 44 | pub fn complete(self, log: &Logger) { 45 | let (dur, rate_mb) = self.complete_common(); 46 | 47 | info!(log, "download complete: {}", self.info; 48 | "offset" => self.offset, 49 | "bytes_transferred" => self.bytes_transferred, 50 | "rate_mb" => rate_mb, 51 | "msec" => dur.as_millis(), 52 | ); 53 | } 54 | 55 | pub fn fail(self, log: &Logger, how: &str) -> Result> { 56 | let (dur, rate_mb) = self.complete_common(); 57 | let msg = format!("download failed: {}: {}", self.info, how); 58 | 59 | error!(log, "{}", msg; 60 | "offset" => self.offset, 61 | "bytes_expected" => self.bytes_expected, 62 | "bytes_transferred" => self.bytes_transferred, 63 | "rate_mb" => rate_mb, 64 | "msec" => dur.as_millis(), 65 | ); 66 | 67 | Err(anyhow!(msg)) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /factory/aws/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-factory-aws" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [dependencies] 8 | buildomat-client = { path = "../../client" } 9 | buildomat-common = { path = "../../common" } 10 | buildomat-types = { path = "../../types" } 11 | 12 | anyhow = { workspace = true } 13 | aws-config = { workspace = true } 14 | aws-sdk-ec2 = { workspace = true } 15 | aws-types = { workspace = true } 16 | base64 = { workspace = true } 17 | getopts = { workspace = true } 18 | rusty_ulid = { workspace = true } 19 | serde = { workspace = true } 20 | slog = { workspace = true } 21 | slog-term = { 
workspace = true } 22 | tokio = { workspace = true } 23 | toml = { workspace = true } 24 | usdt = { workspace = true } 25 | -------------------------------------------------------------------------------- /factory/aws/scripts/user_data.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o pipefail 5 | 6 | function os_release { 7 | if [[ ! -f /etc/os-release ]]; then 8 | printf '\n' 9 | else 10 | local r=$( ( . /etc/os-release ; eval "echo \$$1" ) ) 11 | printf '%s\n' "${r// /+}" 12 | fi 13 | } 14 | 15 | # 16 | # Give the server some hints as to what OS we're running so that it can give us 17 | # the most appropriate agent binary: 18 | # 19 | q="?kernel=$(uname -s)" 20 | q+="&proc=$(uname -p)" 21 | q+="&mach=$(uname -m)" 22 | q+="&plat=$(uname -i)" 23 | q+="&id=$(os_release ID)" 24 | q+="&id_like=$(os_release ID_LIKE)" 25 | q+="&version_id=$(os_release VERSION_ID)" 26 | 27 | while :; do 28 | rm -f /var/tmp/agent 29 | rm -f /var/tmp/agent.gz 30 | 31 | # 32 | # First, try the gzip-compressed agent URL: 33 | # 34 | if curl -sSf -o /var/tmp/agent.gz '%URL%/file/agent.gz'"$q"; then 35 | if ! gunzip < /var/tmp/agent.gz > /var/tmp/agent; then 36 | sleep 1 37 | continue 38 | fi 39 | # 40 | # If that doesn't work, fall back to the old uncompressed URL: 41 | # 42 | elif ! curl -sSf -o /var/tmp/agent '%URL%/file/agent'"$q"; then 43 | sleep 1 44 | continue 45 | fi 46 | 47 | chmod +rx /var/tmp/agent 48 | if ! /var/tmp/agent install -N '%NODENAME%' '%URL%' '%STRAP%'; then 49 | sleep 1 50 | continue 51 | fi 52 | break 53 | done 54 | 55 | exit 0 56 | -------------------------------------------------------------------------------- /factory/aws/src/config.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::collections::HashMap; 6 | 7 | use buildomat_types::config::ConfigFileDiag; 8 | use serde::Deserialize; 9 | 10 | #[derive(Deserialize, Debug)] 11 | #[serde(deny_unknown_fields)] 12 | pub(crate) struct ConfigFile { 13 | pub aws: ConfigFileAws, 14 | pub general: ConfigFileGeneral, 15 | pub factory: ConfigFileFactory, 16 | pub target: HashMap, 17 | #[serde(default)] 18 | pub diag: ConfigFileDiag, 19 | } 20 | 21 | #[derive(Deserialize, Debug)] 22 | #[serde(deny_unknown_fields)] 23 | pub(crate) struct ConfigFileGeneral { 24 | pub baseurl: String, 25 | } 26 | 27 | #[derive(Deserialize, Debug)] 28 | #[serde(deny_unknown_fields)] 29 | pub(crate) struct ConfigFileFactory { 30 | pub token: String, 31 | } 32 | 33 | #[derive(Deserialize, Debug)] 34 | #[serde(deny_unknown_fields)] 35 | pub(crate) struct ConfigFileAwsTarget { 36 | pub instance_type: String, 37 | pub root_size_gb: i32, 38 | pub ami: String, 39 | #[serde(default)] 40 | pub diag: ConfigFileDiag, 41 | } 42 | 43 | #[derive(Deserialize, Debug)] 44 | #[serde(deny_unknown_fields)] 45 | pub(crate) struct ConfigFileAws { 46 | pub access_key_id: String, 47 | pub secret_access_key: String, 48 | pub region: String, 49 | pub vpc: String, 50 | pub subnet: String, 51 | pub tag: String, 52 | pub key: String, 53 | pub security_group: String, 54 | pub limit_total: usize, 55 | } 56 | 57 | impl ConfigFileAws { 58 | pub fn tagkey_worker(&self) -> String { 59 | format!("{}-worker_id", self.tag) 60 | } 61 | 62 | pub fn tagkey_lease(&self) -> String { 63 | format!("{}-lease_id", self.tag) 64 | } 65 | } 66 | 
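Taken together, the structs above (each marked deny_unknown_fields) accept a factory configuration file of the following shape. This is a sketch only: every value is invented, and it assumes ConfigFile is in scope, as it is when main.rs below loads the real file via read_toml():

    // Sketch: invented values exercising the ConfigFile deserialization
    // defined above (inside a fn returning anyhow::Result).
    let cfg: ConfigFile = toml::from_str(r#"
        [general]
        baseurl = "https://buildomat.example.com"

        [factory]
        token = "invented-token"

        [aws]
        access_key_id = "invented"
        secret_access_key = "invented"
        region = "us-east-1"
        vpc = "vpc-00000000"
        subnet = "subnet-00000000"
        tag = "buildomat"
        key = "invented-keypair"
        security_group = "sg-00000000"
        limit_total = 10

        [target.example]
        instance_type = "t3.large"
        root_size_gb = 64
        ami = "ami-00000000"
    "#)?;
    assert_eq!(cfg.aws.tagkey_worker(), "buildomat-worker_id");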
-------------------------------------------------------------------------------- /factory/aws/src/main.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::sync::Arc; 6 | 7 | use anyhow::{bail, Context, Result}; 8 | use buildomat_common::*; 9 | use buildomat_types::metadata; 10 | use getopts::Options; 11 | use slog::Logger; 12 | 13 | mod aws; 14 | mod config; 15 | use config::ConfigFile; 16 | 17 | mod types { 18 | use rusty_ulid::Ulid; 19 | use std::str::FromStr; 20 | 21 | macro_rules! ulid_new_type { 22 | ($name:ident, $prefix:literal) => { 23 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] 24 | #[repr(transparent)] 25 | pub struct $name(Ulid); 26 | 27 | impl FromStr for $name { 28 | type Err = anyhow::Error; 29 | 30 | fn from_str(s: &str) -> Result { 31 | Ok($name(Ulid::from_str(s)?)) 32 | } 33 | } 34 | 35 | impl std::fmt::Display for $name { 36 | fn fmt( 37 | &self, 38 | f: &mut std::fmt::Formatter<'_>, 39 | ) -> std::fmt::Result { 40 | self.0.fmt(f) 41 | } 42 | } 43 | 44 | impl std::fmt::Debug for $name { 45 | fn fmt( 46 | &self, 47 | f: &mut std::fmt::Formatter<'_>, 48 | ) -> std::fmt::Result { 49 | format_args!("{}:{self}", $prefix).fmt(f) 50 | } 51 | } 52 | }; 53 | } 54 | 55 | ulid_new_type!(LeaseId, "lease"); 56 | ulid_new_type!(WorkerId, "worker"); 57 | } 58 | 59 | struct Central { 60 | log: Logger, 61 | config: config::ConfigFile, 62 | client: buildomat_client::Client, 63 | targets: Vec, 64 | } 65 | 66 | impl Central { 67 | fn metadata( 68 | &self, 69 | t: &config::ConfigFileAwsTarget, 70 | ) -> Result { 71 | /* 72 | * Allow the per-target diagnostic configuration to override the base 73 | * diagnostic configuration. 74 | */ 75 | Ok(self.config.diag.apply_overrides(&t.diag).build()?) 76 | } 77 | } 78 | 79 | #[tokio::main] 80 | async fn main() -> Result<()> { 81 | usdt::register_probes().unwrap(); 82 | 83 | let mut opts = Options::new(); 84 | 85 | opts.optopt("f", "", "configuration file", "CONFIG"); 86 | 87 | let p = match opts.parse(std::env::args().skip(1)) { 88 | Ok(p) => p, 89 | Err(e) => { 90 | eprintln!("ERROR: usage: {}", e); 91 | eprintln!(" {}", opts.usage("usage")); 92 | std::process::exit(1); 93 | } 94 | }; 95 | 96 | let log = make_log("factory-aws"); 97 | let config: ConfigFile = if let Some(f) = p.opt_str("f").as_deref() { 98 | read_toml(f)? 99 | } else { 100 | bail!("must specify configuration file (-f)"); 101 | }; 102 | let targets = config.target.keys().map(String::to_string).collect(); 103 | let client = buildomat_client::ClientBuilder::new(&config.general.baseurl) 104 | .bearer_token(&config.factory.token) 105 | .build()?; 106 | 107 | let c = Arc::new(Central { log, config, client, targets }); 108 | 109 | let t_aws = tokio::task::spawn(async move { 110 | aws::aws_worker(c).await.context("AWS worker task failure") 111 | }); 112 | 113 | tokio::select! 
{ 114 | _ = t_aws => bail!("AWS worker task stopped early"), 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /factory/lab/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-factory-lab" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [dependencies] 8 | buildomat-common = { path = "../../common" } 9 | buildomat-database = { path = "../../database" } 10 | buildomat-download = { path = "../../download" } 11 | buildomat-client = { path = "../../client" } 12 | buildomat-types = { path = "../../types" } 13 | 14 | anyhow = { workspace = true } 15 | chrono = { workspace = true } 16 | dropshot = { workspace = true } 17 | getopts = { workspace = true } 18 | hyper = { workspace = true } 19 | libc = { workspace = true } 20 | rand = { workspace = true } 21 | schemars = { workspace = true } 22 | sea-query = { workspace = true } 23 | semver = { workspace = true } 24 | serde = { workspace = true } 25 | slog = { workspace = true } 26 | slog-term = { workspace = true } 27 | strum = { workspace = true } 28 | tokio = { workspace = true } 29 | toml = { workspace = true } 30 | usdt = { workspace = true } 31 | -------------------------------------------------------------------------------- /factory/lab/schema.sql: -------------------------------------------------------------------------------- 1 | -- v 1 2 | CREATE TABLE instance ( 3 | nodename TEXT NOT NULL, 4 | seq INTEGER NOT NULL, 5 | worker TEXT NOT NULL, 6 | target TEXT NOT NULL, 7 | state TEXT NOT NULL, 8 | key TEXT NOT NULL, 9 | bootstrap TEXT NOT NULL, 10 | flushed INTEGER NOT NULL, 11 | 12 | PRIMARY KEY (nodename, seq) 13 | ) 14 | 15 | -- v 2 16 | CREATE TABLE instance_event ( 17 | nodename TEXT NOT NULL, 18 | instance INTEGER NOT NULL, 19 | seq INTEGER NOT NULL, 20 | stream TEXT NOT NULL, 21 | payload TEXT NOT NULL, 22 | uploaded INTEGER NOT NULL, 23 | time TEXT NOT NULL, 24 | 25 | PRIMARY KEY (nodename, instance, seq) 26 | ); 27 | -------------------------------------------------------------------------------- /factory/lab/scripts/debug.ipxe: -------------------------------------------------------------------------------- 1 | #!ipxe 2 | 3 | set node %HOST% 4 | set console %CONSOLE% 5 | set server %BASEURL% 6 | set osroot ${server}/os/${node} 7 | 8 | kernel \ 9 | --name /platform/i86pc/kernel/amd64/unix \ 10 | ${osroot}/platform/i86pc/kernel/amd64/unix \ 11 | -B console=${console} \ 12 | -B ${console}-mode="115200,8,n,1,-" \ 13 | %BOOTARGS% \ 14 | || goto fail 15 | 16 | module \ 17 | --name /platform/i86pc/amd64/boot_archive \ 18 | ${osroot}/platform/i86pc/amd64/boot_archive \ 19 | type=rootfs name=ramdisk \ 20 | || goto fail 21 | 22 | module \ 23 | --name /platform/i86pc/amd64/boot_archive.hash \ 24 | ${osroot}/platform/i86pc/amd64/boot_archive.hash \ 25 | type=hash name=ramdisk \ 26 | || goto fail 27 | 28 | module \ 29 | --name /postboot.sh \ 30 | ${server}/postboot/${node} \ 31 | type=file name=postboot.sh \ 32 | || goto fail 33 | 34 | # 35 | # Emit our boot marker string so that we can mark the point in the IPMI SOL 36 | # stream that is relevant to the task at hand. 37 | # 38 | # We emit the string many times here on the basis that BMCs often have 39 | # unreliable rubbish serial facilities, and the odd character here and there is 40 | # known to go missing; Claude E. Shannon defend us. 
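# (The %MARKER%, %HOST%, %CONSOLE%, and %BASEURL% tokens are placeholders
# that the factory appears to substitute before serving this script.)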
41 | # 42 | echo %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% 43 | boot || goto fail 44 | 45 | :fail 46 | echo iPXE failure; retrying... 47 | sleep 5 48 | chain ${server}/ipxe/${node} || goto fail 49 | -------------------------------------------------------------------------------- /factory/lab/scripts/hold.ipxe: -------------------------------------------------------------------------------- 1 | #!ipxe 2 | 3 | set node %HOST% 4 | set server %BASEURL% 5 | 6 | :top 7 | # 8 | # Emit our hold marker string so that we can detect when the machine has come 9 | # up to iPXE and is looping waiting for instructions. 10 | # 11 | # We emit the string many times here on the basis that BMCs often have 12 | # unreliable rubbish serial facilities, and the odd character here and there is 13 | # known to go missing; Claude E. Shannon defend us. 14 | # 15 | echo %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% 16 | sleep 5 17 | chain --replace ${server}/ipxe/${node} || goto top 18 | -------------------------------------------------------------------------------- /factory/lab/scripts/postboot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o pipefail 5 | set -o xtrace 6 | 7 | # 8 | # /var/tmp in the ramdisk will obviously fill up very quickly, so replace it 9 | # with a tmpfs: 10 | # 11 | /usr/sbin/mount -F tmpfs -O swap /var/tmp 12 | 13 | # 14 | # /opt/buildomat will contain the agent, so, another tmpfs! 15 | # 16 | mkdir -p /opt/buildomat 17 | /usr/sbin/mount -F tmpfs -O swap /opt/buildomat 18 | chmod 0755 /opt/buildomat 19 | 20 | # 21 | # Set machine name. 22 | # 23 | echo '%HOST%' >/etc/nodename 24 | hostname '%HOST%' 25 | 26 | # 27 | # Set up IP networking: 28 | # 29 | /sbin/ipadm create-if igb0 30 | /sbin/ipadm create-addr -T dhcp -1 igb0/dhcp 31 | /usr/sbin/svcadm restart svc:/network/service:default 32 | 33 | # 34 | # Create a ramdisk-backed ZFS pool to hold work. Give it the name rpool so 35 | # that existing tools that create, say, "rpool/work" will get roughly what they 36 | # expect. 37 | # 38 | /usr/sbin/ramdiskadm -a rpool 8g 39 | /sbin/zpool create -O compression=on rpool /dev/ramdisk/rpool 40 | /sbin/zfs create -o mountpoint=/home rpool/home 41 | 42 | # 43 | # Contact the factory to signal that we've booted to this point. 44 | # 45 | while :; do 46 | if curl -X POST -sSf "%BASEURL%/signal/%HOST%?key=%KEY%"; then 47 | break 48 | fi 49 | 50 | sleep 1 51 | done 52 | 53 | # 54 | # Defer buildomat agent installation until after multi-user startup: 55 | # 56 | rm -f /var/tmp/buildomat-install.sh 57 | cat >/var/tmp/buildomat-install.sh <<'EOF' 58 | #!/bin/bash 59 | 60 | while :; do 61 | date -uR 62 | df -h 63 | 64 | rm -f /var/tmp/agent 65 | if ! curl -sSf -o /var/tmp/agent '%COREURL%/file/agent'; then 66 | sleep 1 67 | continue 68 | fi 69 | 70 | chmod +rx /var/tmp/agent 71 | if ! 
/var/tmp/agent install '%COREURL%' '%STRAP%'; then 72 | sleep 1 73 | continue 74 | fi 75 | 76 | break 77 | done 78 | EOF 79 | chmod +x /var/tmp/buildomat-install.sh 80 | 81 | rm -f /var/svc/manifest/site/buildomat-install.xml 82 | cat >/var/svc/manifest/site/buildomat-install.xml <<'EOF' [SMF manifest XML missing from listing] EOF 118 | 119 | svcadm restart svc:/system/manifest-import:default 120 | 121 | echo postboot complete 122 | -------------------------------------------------------------------------------- /factory/lab/scripts/regular.ipxe: -------------------------------------------------------------------------------- 1 | #!ipxe 2 | 3 | set node %HOST% 4 | set console %CONSOLE% 5 | set server %BASEURL% 6 | set osroot ${server}/os/${node} 7 | 8 | kernel \ 9 | --name /platform/i86pc/kernel/amd64/unix \ 10 | ${osroot}/platform/i86pc/kernel/amd64/unix \ 11 | -B console=${console} \ 12 | -B ${console}-mode="115200,8,n,1,-" \ 13 | || goto fail 14 | 15 | module \ 16 | --name /platform/i86pc/amd64/boot_archive \ 17 | ${osroot}/platform/i86pc/amd64/boot_archive \ 18 | type=rootfs name=ramdisk \ 19 | || goto fail 20 | 21 | module \ 22 | --name /platform/i86pc/amd64/boot_archive.hash \ 23 | ${osroot}/platform/i86pc/amd64/boot_archive.hash \ 24 | type=hash name=ramdisk \ 25 | || goto fail 26 | 27 | module \ 28 | --name /postboot.sh \ 29 | ${server}/postboot/${node} \ 30 | type=file name=postboot.sh \ 31 | || goto fail 32 | 33 | # 34 | # Emit our boot marker string so that we can mark the point in the IPMI SOL 35 | # stream that is relevant to the task at hand. 36 | # 37 | # We emit the string many times here on the basis that BMCs often have 38 | # unreliable rubbish serial facilities, and the odd character here and there is 39 | # known to go missing; Claude E. Shannon defend us. 40 | # 41 | echo %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% %MARKER% 42 | boot || goto fail 43 | 44 | :fail 45 | echo iPXE failure; retrying...
46 | sleep 5 47 | chain ${server}/ipxe/${node} || goto fail 48 | -------------------------------------------------------------------------------- /factory/lab/src/config.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use std::collections::HashMap; 6 | 7 | use serde::Deserialize; 8 | 9 | #[derive(Deserialize, Debug, Clone)] 10 | pub(crate) struct ConfigFile { 11 | pub general: ConfigFileGeneral, 12 | pub factory: ConfigFileFactory, 13 | pub target: HashMap, 14 | pub host: HashMap, 15 | } 16 | 17 | #[derive(Deserialize, Debug, Clone)] 18 | pub(crate) struct ConfigFileGeneral { 19 | pub baseurl: String, 20 | } 21 | 22 | #[derive(Deserialize, Debug, Clone)] 23 | pub(crate) struct ConfigFileFactory { 24 | pub token: String, 25 | } 26 | 27 | #[derive(Deserialize, Debug, Clone)] 28 | pub(crate) struct ConfigFileTarget { 29 | pub nodename: Option, 30 | #[serde(default)] 31 | pub nodenames: Vec, 32 | pub os_dir: String, 33 | } 34 | 35 | impl ConfigFileTarget { 36 | pub fn runs_on_node(&self, nodename: &str) -> bool { 37 | if let Some(n) = self.nodename.as_deref() { 38 | if n == nodename { 39 | return true; 40 | } 41 | } 42 | for n in self.nodenames.iter() { 43 | if n.as_str() == nodename { 44 | return true; 45 | } 46 | } 47 | false 48 | } 49 | 50 | pub fn nodenames(&self) -> Vec { 51 | let mut out = Vec::new(); 52 | if let Some(n) = self.nodename.as_deref() { 53 | out.push(n.to_string()); 54 | } 55 | for n in self.nodenames.iter() { 56 | out.push(n.to_string()); 57 | } 58 | out 59 | } 60 | } 61 | 62 | #[derive(Deserialize, Debug, Clone)] 63 | pub(crate) struct ConfigFileExtraIps { 64 | pub cidr: String, 65 | pub first: String, 66 | pub count: u32, 67 | } 68 | 69 | #[derive(Deserialize, Debug, Clone)] 70 | pub(crate) struct ConfigFileHost { 71 | pub ip: String, 72 | pub gateway: Option, 73 | pub console: String, 74 | pub lab_baseurl: String, 75 | pub nodename: String, 76 | pub lom_ip: String, 77 | pub lom_username: String, 78 | pub lom_password: String, 79 | pub debug_os_dir: Option, 80 | pub debug_os_postboot_sh: Option, 81 | pub debug_boot_args: Option, 82 | pub extra_ips: Option, 83 | } 84 | -------------------------------------------------------------------------------- /factory/lab/src/db/tables/instance_event.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct InstanceEvent { 10 | pub nodename: String, 11 | pub instance: InstanceSeq, 12 | pub seq: EventSeq, 13 | pub stream: String, 14 | pub payload: String, 15 | pub uploaded: bool, 16 | pub time: IsoDate, 17 | } 18 | 19 | impl FromRow for InstanceEvent { 20 | fn columns() -> Vec { 21 | [ 22 | InstanceEventDef::Nodename, 23 | InstanceEventDef::Instance, 24 | InstanceEventDef::Seq, 25 | InstanceEventDef::Stream, 26 | InstanceEventDef::Payload, 27 | InstanceEventDef::Uploaded, 28 | InstanceEventDef::Time, 29 | ] 30 | .into_iter() 31 | .map(|col| { 32 | ColumnRef::TableColumn( 33 | SeaRc::new(InstanceEventDef::Table), 34 | SeaRc::new(col), 35 | ) 36 | }) 37 | .collect() 38 | } 39 | 40 | fn from_row(row: &Row) -> rusqlite::Result { 41 | Ok(InstanceEvent { 42 | nodename: row.get(0)?, 43 | instance: row.get(1)?, 44 | seq: row.get(2)?, 45 | stream: row.get(3)?, 46 | payload: row.get(4)?, 47 | uploaded: row.get(5)?, 48 | time: row.get(6)?, 49 | }) 
52 | 53 | impl InstanceEvent { 54 | pub fn insert(&self) -> InsertStatement { 55 | Query::insert() 56 | .into_table(InstanceEventDef::Table) 57 | .columns(Self::bare_columns()) 58 | .values_panic([ 59 | self.nodename.clone().into(), 60 | self.instance.into(), 61 | self.seq.into(), 62 | self.stream.clone().into(), 63 | self.payload.clone().into(), 64 | self.uploaded.into(), 65 | self.time.into(), 66 | ]) 67 | .to_owned() 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /factory/lab/src/db/tables/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | mod sublude { 6 | pub use std::str::FromStr; 7 | 8 | pub use crate::db::types::*; 9 | pub use buildomat_database::{rusqlite, sqlite_sql_enum, FromRow}; 10 | pub use rusqlite::Row; 11 | pub use sea_query::{ 12 | enum_def, ColumnRef, Expr, Iden, InsertStatement, Order, Query, SeaRc, 13 | SelectStatement, 14 | }; 15 | } 16 | 17 | mod instance; 18 | mod instance_event; 19 | 20 | pub use instance::*; 21 | pub use instance_event::*; 22 | -------------------------------------------------------------------------------- /factory/lab/src/pty.rs: -------------------------------------------------------------------------------- 1 | use std::{os::unix::prelude::*, ptr}; 2 | 3 | use anyhow::{bail, Result}; 4 | use libc::c_int; 5 | 6 | pub struct Pty { 7 | manager: Option<c_int>, 8 | subsidiary: Option<c_int>, 9 | } 10 | 11 | impl Pty { 12 | pub fn new() -> Result<Pty> { 13 | let mut m: c_int = -1; 14 | let mut s: c_int = -1; 15 | 16 | let r = unsafe { 17 | libc::openpty( 18 | &mut m, 19 | &mut s, 20 | ptr::null_mut(), 21 | ptr::null(), 22 | ptr::null(), 23 | ) 24 | }; 25 | if r != 0 { 26 | let ose = std::io::Error::last_os_error(); 27 | bail!("openpty: {:?}", ose); 28 | } 29 | 30 | Ok(Pty { manager: Some(m), subsidiary: Some(s) }) 31 | } 32 | 33 | pub fn manager(&mut self) -> std::fs::File { 34 | let fd = self.manager.take().unwrap(); 35 | unsafe { std::fs::File::from_raw_fd(fd) } 36 | } 37 | 38 | pub fn subsidiary(&mut self) -> i32 { 39 | self.subsidiary.unwrap() 40 | } 41 | 42 | pub fn close_subsidiary(&mut self) { 43 | let fd = self.subsidiary.take().unwrap(); 44 | assert_eq!(unsafe { libc::close(fd) }, 0); 45 | } 46 | } 47 | 48 | impl Drop for Pty { 49 | fn drop(&mut self) { 50 | if let Some(fd) = self.manager.take() { 51 | assert_eq!(unsafe { libc::close(fd) }, 0); 52 | } 53 | if let Some(fd) = self.subsidiary.take() { 54 | assert_eq!(unsafe { libc::close(fd) }, 0); 55 | } 56 | } 57 | } 58 |
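A hypothetical usage sketch for the Pty wrapper above (not from the repo): hand the subsidiary descriptor to a child process, close the parent's copy, then drain the manager side until EOF. The child-process plumbing is elided.

    use std::io::Read;

    fn drain_pty_output() -> anyhow::Result<String> {
        let mut pty = Pty::new()?;
        let _sub_fd = pty.subsidiary(); // would be dup'd onto a child's stdio here
        let mut mgr = pty.manager();    // take() means Drop will not close it twice
        pty.close_subsidiary();         // parent must close its copy or EOF never arrives
        let mut out = String::new();
        mgr.read_to_string(&mut out)?;
        Ok(out)
    }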
-------------------------------------------------------------------------------- /factory/propolis/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-factory-propolis" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [dependencies] 8 | buildomat-common = { path = "../../common" } 9 | buildomat-database = { path = "../../database" } 10 | buildomat-client = { path = "../../client" } 11 | buildomat-types = { path = "../../types" } 12 | 13 | anyhow = { workspace = true } 14 | chrono = { workspace = true } 15 | getopts = { workspace = true } 16 | libc = { workspace = true } 17 | rand = { workspace = true } 18 | rusty_ulid = { workspace = true } 19 | schemars = { workspace = true } 20 | sea-query = { workspace = true } 21 | serde = { workspace = true } 22 | serde_json = { workspace = true } 23 | serde_yaml = { workspace = true } 24 | slog = { workspace = true } 25 | slog-term = { workspace = true } 26 | smf = { workspace = true } 27 | strum = { workspace = true } 28 | tokio = { workspace = true } 29 | toml = { workspace = true } 30 | usdt = { workspace = true } 31 | zone = { workspace = true } 32 | -------------------------------------------------------------------------------- /factory/propolis/schema.sql: -------------------------------------------------------------------------------- 1 | -- v 1 2 | CREATE TABLE instance ( 3 | nodename TEXT NOT NULL, 4 | seq INTEGER NOT NULL, 5 | worker TEXT NOT NULL, 6 | lease TEXT NOT NULL, 7 | target TEXT NOT NULL, 8 | state TEXT NOT NULL, 9 | bootstrap TEXT NOT NULL, 10 | slot INTEGER NOT NULL, 11 | 12 | PRIMARY KEY (nodename, seq) 13 | ) 14 | 15 | -- v 2 16 | CREATE INDEX instance_active ON instance (state) 17 | WHERE state <> 'destroyed'; 18 | -------------------------------------------------------------------------------- /factory/propolis/scripts/propolis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o pipefail 4 | set -o xtrace 5 | 6 | . /lib/svc/share/smf_include.sh 7 | 8 | if ! cd /vm; then 9 | exit "$SMF_EXIT_ERR_FATAL" 10 | fi 11 | 12 | # 13 | # The virtual machine is configured to exit on reboot or shutdown. We will 14 | # write the exit code into a marker file and then disable ourselves. 15 | # 16 | /software/propolis-standalone config.toml 17 | rc=$? 18 | 19 | printf '%d\n' "$rc" > /vm/exit_code.txt 20 | /usr/sbin/svcadm disable "$SMF_FMRI" 21 | sleep 5 22 | -------------------------------------------------------------------------------- /factory/propolis/scripts/serial.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o pipefail 4 | set -o xtrace 5 | 6 | . /lib/svc/share/smf_include.sh 7 | 8 | while :; do 9 | # 10 | # Just pass serial data as-is from propolis to the factory socket. 11 | # This is somewhat gross, but also didn't require writing any software 12 | # just at the moment. 13 | # 14 | nc -v -d -D -U /vm/ttya </dev/null \ 15 | > /dev/null 16 | sleep 0.1 17 | done 18 |
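propolis.sh above leaves the guest's exit status in /vm/exit_code.txt before disabling its own SMF service. A hypothetical sketch of how the supervising side could interpret that marker (the real polling logic lives in vm.rs, which is not part of this listing):

    use std::path::Path;

    fn vm_exit_code(vmdir: &Path) -> anyhow::Result<Option<i32>> {
        let marker = vmdir.join("exit_code.txt");
        if !marker.exists() {
            return Ok(None); // propolis has not exited yet
        }
        // The script writes a single decimal exit code followed by a newline.
        Ok(Some(std::fs::read_to_string(&marker)?.trim().parse()?))
    }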
-------------------------------------------------------------------------------- /factory/propolis/scripts/user_data.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o pipefail 5 | 6 | function os_release { 7 | if [[ ! -f /etc/os-release ]]; then 8 | printf '\n' 9 | else 10 | local r=$( ( . /etc/os-release ; eval "echo \$$1" ) ) 11 | printf '%s\n' "${r// /+}" 12 | fi 13 | } 14 | 15 | # 16 | # Give the server some hints as to what OS we're running so that it can give us 17 | # the most appropriate agent binary: 18 | # 19 | q="?kernel=$(uname -s)" 20 | q+="&proc=$(uname -p)" 21 | q+="&mach=$(uname -m)" 22 | q+="&plat=$(uname -i)" 23 | q+="&id=$(os_release ID)" 24 | q+="&id_like=$(os_release ID_LIKE)" 25 | q+="&version_id=$(os_release VERSION_ID)" 26 | 27 | if [[ $(uname -s) == SunOS ]]; then 28 | # 29 | # The Internet at the office does not have consistently fantastic 30 | # peering, so make sure we're putting our best foot forward with TCP: 31 | # 32 | ipadm set-prop -p send_buf=512000 tcp || true 33 | ipadm set-prop -p recv_buf=512000 tcp || true 34 | ipadm set-prop -p congestion_control=cubic tcp || true 35 | fi 36 | 37 | while :; do 38 | rm -f /var/tmp/agent 39 | rm -f /var/tmp/agent.gz 40 | 41 | # 42 | # First, try the gzip-compressed agent URL: 43 | # 44 | if curl -sSf -o /var/tmp/agent.gz '%URL%/file/agent.gz'"$q"; then 45 | if ! gunzip < /var/tmp/agent.gz > /var/tmp/agent; then 46 | sleep 1 47 | continue 48 | fi 49 | # 50 | # If that doesn't work, fall back to the old uncompressed URL: 51 | # 52 | elif ! curl -sSf -o /var/tmp/agent '%URL%/file/agent'"$q"; then 53 | sleep 1 54 | continue 55 | fi 56 | 57 | chmod +rx /var/tmp/agent 58 | if ! /var/tmp/agent install '%URL%' '%STRAP%'; then 59 | sleep 1 60 | continue 61 | fi 62 | break 63 | done 64 | 65 | exit 0 66 | -------------------------------------------------------------------------------- /factory/propolis/smf/propolis.xml: -------------------------------------------------------------------------------- [SMF service manifest elided: the XML markup (35 lines) was stripped during text extraction, leaving only bare line numbers.] -------------------------------------------------------------------------------- /factory/propolis/smf/serial.xml: -------------------------------------------------------------------------------- [SMF service manifest elided: the XML markup (35 lines) was stripped during text extraction, leaving only bare line numbers.] -------------------------------------------------------------------------------- /factory/propolis/smf/site.xml: -------------------------------------------------------------------------------- [SMF site profile elided: the XML markup (51 lines) was stripped during text extraction, leaving only bare line numbers.]
-------------------------------------------------------------------------------- /factory/propolis/src/config.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use std::{collections::HashMap, net::Ipv4Addr, path::PathBuf}; 6 | 7 | use anyhow::{bail, Result}; 8 | use buildomat_types::config::ConfigFileDiag; 9 | use serde::Deserialize; 10 | 11 | use crate::db::types::InstanceId; 12 | 13 | #[derive(Deserialize, Debug, Clone)] 14 | #[serde(deny_unknown_fields)] 15 | pub(crate) struct ConfigFile { 16 | pub general: ConfigFileGeneral, 17 | pub factory: ConfigFileFactory, 18 | pub template: ConfigFileSlotTemplate, 19 | #[serde(default)] 20 | pub target: HashMap<String, ConfigFileTarget>, 21 | pub slots: u32, 22 | pub software_dir: String, 23 | pub nodename: String, 24 | 25 | #[serde(default)] 26 | pub diag: ConfigFileDiag, 27 | } 28 | 29 | #[derive(Deserialize, Debug, Clone)] 30 | #[serde(deny_unknown_fields)] 31 | pub(crate) struct ConfigFileGeneral { 32 | pub baseurl: String, 33 | } 34 | 35 | #[derive(Deserialize, Debug, Clone)] 36 | #[serde(deny_unknown_fields)] 37 | pub(crate) struct ConfigFileFactory { 38 | pub token: String, 39 | } 40 | 41 | #[derive(Deserialize, Debug, Clone)] 42 | #[serde(deny_unknown_fields)] 43 | pub(crate) struct ConfigFileSlotTemplate { 44 | zoneroot_parent_dir: String, 45 | disk_zvol_parent: String, 46 | 47 | vnic_prefix: String, 48 | vlan_id: Option<u16>, 49 | physical: String, 50 | 51 | base_ip: Ipv4Addr, 52 | base_ip_prefix: u32, 53 | gateway: Ipv4Addr, 54 | 55 | ncpus: u32, 56 | ram_mb: u32, 57 | disk_gb: u64, 58 | } 59 | 60 | #[derive(Deserialize, Debug, Clone)] 61 | #[serde(deny_unknown_fields)] 62 | pub(crate) struct ConfigFileTarget { 63 | pub image_path: Option<String>, 64 | pub image_zvol: Option<String>, 65 | 66 | #[serde(default)] 67 | pub diag: ConfigFileDiag, 68 | } 69 | 70 | impl ConfigFileTarget { 71 | pub fn source(&self) -> Result<ImageSource> { 72 | if self.image_zvol.is_some() && self.image_path.is_some() { 73 | bail!("a target may not specify both a zvol and an image path"); 74 | } 75 | 76 | Ok(if let Some(zvol) = self.image_zvol.as_deref() { 77 | ImageSource::Zvol(zvol.to_string()) 78 | } else if let Some(path) = self.image_path.as_deref() { 79 | ImageSource::File(PathBuf::from(path)) 80 | } else { 81 | bail!("a target must specify either a zvol or an image path"); 82 | }) 83 | } 84 | } 85 | 86 | pub enum ImageSource { 87 | File(PathBuf), 88 | Zvol(String), 89 | } 90 | 91 | impl ConfigFile { 92 | pub fn for_instance_in_slot( 93 | &self, 94 | slot: u32, 95 | id: &InstanceId, 96 | ) -> Result<InstanceInSlot> { 97 | Ok(InstanceInSlot { config: self, slot, id: id.clone() }) 98 | } 99 | 100 | pub fn socketdir(&self) -> Result<PathBuf> { 101 | let dir = 102 | PathBuf::from(&self.template.zoneroot_parent_dir).join("zonesock"); 103 | 104 | if dir.is_symlink() || (dir.exists() && !dir.is_dir()) { 105 | bail!("{dir:?} exists but is not a directory"); 106 | } 107 | 108 | Ok(dir) 109 | } 110 | 111 | pub fn softwaredir(&self) -> Result<PathBuf> { 112 | let dir = PathBuf::from(&self.software_dir); 113 | 114 | if dir.is_symlink() 115 | || !dir.is_dir() 116 | || !dir.join("propolis-standalone").is_file() 117 | || !dir.join("OVMF_CODE.fd").is_file() 118 | { 119 | bail!( 120 | "software directory {dir:?} should contain propolis and a ROM" 121 | ); 122 | } 123 | 124 | Ok(dir) 125 | } 126 | } 127 | 128 | pub(crate) struct InstanceInSlot<'a> { 129 | config: &'a ConfigFile, 130 | slot: u32, 131 | id: InstanceId, 132 | } 133 | 134 | impl InstanceInSlot<'_> { 135 | pub fn zonepath(&self) -> String { 136 | format!( 137 | "{}/{}", 138 | self.config.template.zoneroot_parent_dir, 139 | self.id.flat_id() 140 | ) 141 | } 142 | 143 | pub fn zvol_name(&self) -> String { 144 | format!( 145 | "{}/{}", 146 | self.config.template.disk_zvol_parent, 147 | self.id.flat_id() 148 | ) 149 | } 150 | 151 | pub fn vnic(&self) -> String { 152 | format!("{}{}", self.config.template.vnic_prefix, self.slot) 153 | } 154 | 155 | pub fn physical(&self) -> &str { 156 | &self.config.template.physical 157 | } 158 | 159 | pub fn vlan(&self) -> Option<u16> { 160 | self.config.template.vlan_id 161 | } 162 | 163 | pub fn ip(&self) -> Ipv4Addr { 164 | let base = u32::from_be_bytes(self.config.template.base_ip.octets()); 165 | let ip = base.checked_add(self.slot).unwrap(); 166 | let bytes = ip.to_be_bytes(); 167 | Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]) 168 | } 169 |
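ip() above assigns each slot an address by adding the slot number to the integer form of base_ip. A worked example with invented values — base_ip = 192.168.1.32 and slot = 3 yields 192.168.1.35 — written as a standalone test:

    #[test]
    fn slot_ip_arithmetic() {
        // Same arithmetic as ip(): big-endian integer form of the base, plus slot.
        let base =
            u32::from_be_bytes(std::net::Ipv4Addr::new(192, 168, 1, 32).octets());
        let ip = std::net::Ipv4Addr::from(base.checked_add(3).unwrap().to_be_bytes());
        assert_eq!(ip.to_string(), "192.168.1.35");
    }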
170 | pub fn cidr(&self) -> u32 { 171 | self.config.template.base_ip_prefix 172 | } 173 | 174 | pub fn addr(&self) -> String { 175 | format!("{}/{}", self.ip(), self.cidr()) 176 | } 177 | 178 | pub fn gateway(&self) -> Ipv4Addr { 179 | self.config.template.gateway 180 | } 181 | 182 | pub fn ncpus(&self) -> u32 { 183 | self.config.template.ncpus 184 | } 185 | 186 | pub fn ram_mb(&self) -> u32 { 187 | self.config.template.ram_mb 188 | } 189 | 190 | pub fn disk_bytes(&self) -> libc::c_long { 191 | self.config 192 | .template 193 | .disk_gb 194 | .checked_mul(1024 * 1024 * 1024) 195 | .unwrap() 196 | .try_into() 197 | .unwrap() 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /factory/propolis/src/db/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use std::{collections::HashSet, path::Path}; 6 | 7 | use anyhow::Result; 8 | use buildomat_database::{conflict, DBResult, FromRow, Sqlite}; 9 | use sea_query::{Expr, Order, Query}; 10 | #[allow(unused_imports)] 11 | use slog::{debug, error, info, warn, Logger}; 12 | 13 | mod tables; 14 | 15 | pub mod types { 16 | use buildomat_database::{rusqlite, sqlite_integer_new_type}; 17 | 18 | sqlite_integer_new_type!(InstanceSeq, u64, BigUnsigned); 19 | 20 | pub use super::tables::{InstanceId, InstanceState}; 21 | } 22 | 23 | pub use tables::*; 24 | use types::InstanceSeq; 25 | 26 | pub struct Database { 27 | #[allow(unused)] 28 | log: Logger, 29 | sql: Sqlite, 30 | } 31 | 32 | pub struct CreateInstance { 33 | pub worker: String, 34 | pub lease: String, 35 | pub target: String, 36 | pub bootstrap: String, 37 | pub slot: u32, 38 | } 39 | 40 | impl Database { 41 | pub fn new<P: AsRef<Path>>( 42 | log: Logger, 43 | path: P, 44 | cache_kb: Option<u32>, 45 | ) -> Result<Database> { 46 | let sql = Sqlite::setup( 47 | log.clone(), 48 | path, 49 | include_str!("../../schema.sql"), 50 | cache_kb, 51 | )?; 52 | 53 | Ok(Database { log, sql }) 54 | } 55 | 56 | pub fn instance_get(&self, id: &InstanceId) -> DBResult<Option<Instance>> { 57 | self.sql.tx(|h| h.get_row_opt(Instance::find(id))) 58 | } 59 | 60 | pub fn instance_new_state( 61 | &self, 62 | id: &InstanceId, 63 | state: InstanceState, 64 | ) -> DBResult<()> { 65 | self.sql.tx_immediate(|h| { 66 | /* 67 | * Get the existing state of this instance: 68 | */ 69 | let i: Instance = h.get_row(Instance::find(id))?; 70 | 71 | if i.state == state { 72 | return Ok(()); 73 | } 74 | 75 | let valid_source_states: &[InstanceState] = match state { 76 | InstanceState::Unconfigured => &[], 77 | InstanceState::Configured => &[InstanceState::Unconfigured], 78 | InstanceState::Installed => &[InstanceState::Configured], 79 | InstanceState::ZoneOnline => &[InstanceState::Installed], 80 | InstanceState::Destroying => &[ 81 | InstanceState::Unconfigured, 82 | InstanceState::Configured, 83 | InstanceState::Installed, 84 | InstanceState::ZoneOnline, 85 | ], 86 | InstanceState::Destroyed => &[InstanceState::Destroying], 87 | }; 88 | 89 | if !valid_source_states.contains(&i.state) { 90 | conflict!( 91 | "instance {id} cannot move from state {} to {state}", 92 | i.state, 93 | ); 94 | } 95 | 96 | let uc = h.exec_update( 97 | Query::update() 98 | .table(InstanceDef::Table) 99 | .and_where( 100 | Expr::col(InstanceDef::Nodename).eq(id.nodename()), 101 | ) 102 | .and_where(Expr::col(InstanceDef::Seq).eq(id.seq())) 103 | .value(InstanceDef::State, state) 104 | .to_owned(), 105 | )?; 106 | assert_eq!(uc, 1); 107 | Ok(()) 108 | }) 109 | } 110 |
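The valid_source_states table above encodes a linear lifecycle with a catch-all path into Destroying. For illustration only, the same machine as a standalone predicate (not from the repo; note that same-state "transitions" are treated as no-ops by the code above, before this table is consulted):

    fn may_transition(from: InstanceState, to: InstanceState) -> bool {
        use InstanceState::*;
        match to {
            Unconfigured => false, // initial state only; nothing moves back into it
            Configured => matches!(from, Unconfigured),
            Installed => matches!(from, Configured),
            ZoneOnline => matches!(from, Installed),
            // Any live state may begin teardown:
            Destroying => !matches!(from, Destroying | Destroyed),
            Destroyed => matches!(from, Destroying),
        }
    }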
111 | pub fn instances_active(&self) -> DBResult<Vec<Instance>> { 112 | self.sql.tx(|h| { 113 | h.get_rows( 114 | Query::select() 115 | .from(InstanceDef::Table) 116 | .columns(Instance::columns()) 117 | .and_where( 118 | Expr::col(InstanceDef::State) 119 | .ne(InstanceState::Destroyed), 120 | ) 121 | .order_by(InstanceDef::Nodename, Order::Asc) 122 | .order_by(InstanceDef::Seq, Order::Asc) 123 | .to_owned(), 124 | ) 125 | }) 126 | } 127 | 128 | pub fn instance_create( 129 | &self, 130 | nodename: &str, 131 | create: CreateInstance, 132 | ) -> DBResult<InstanceId> { 133 | self.sql.tx_immediate(|h| { 134 | /* 135 | * Find the next available instance sequence number, starting at 1 136 | * if there have not been any instances before. 137 | */ 138 | let seq: Option<InstanceSeq> = h.get_row( 139 | Query::select() 140 | .from(InstanceDef::Table) 141 | .expr(Expr::col(InstanceDef::Seq).max()) 142 | .and_where(Expr::col(InstanceDef::Nodename).eq(nodename)) 143 | .to_owned(), 144 | )?; 145 | let seq = seq.unwrap_or(InstanceSeq(0)).0.checked_add(1).unwrap(); 146 | 147 | let i = Instance::new(nodename, seq, create); 148 | 149 | let ic = h.exec_insert(i.insert())?; 150 | assert_eq!(ic, 1); 151 | 152 | Ok(i.id()) 153 | }) 154 | } 155 | 156 | pub fn slots_active(&self) -> DBResult<HashSet<u32>> { 157 | self.sql.tx(|h| { 158 | Ok(h.get_rows::<u32>( 159 | Query::select() 160 | .from(InstanceDef::Table) 161 | .column(InstanceDef::Slot) 162 | .and_where( 163 | Expr::col(InstanceDef::State) 164 | .ne(InstanceState::Destroyed), 165 | ) 166 | .to_owned(), 167 | )? 168 | .into_iter() 169 | .collect()) 170 | }) 171 | } 172 | } 173 | -------------------------------------------------------------------------------- /factory/propolis/src/db/tables/instance.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | use anyhow::{anyhow, bail, Result}; 7 | 8 | sqlite_sql_enum!(InstanceState => { 9 | Unconfigured, 10 | Configured, 11 | Installed, 12 | ZoneOnline, 13 | Destroying, 14 | Destroyed, 15 | }); 16 | 17 | #[derive(Clone, PartialEq, Eq, Hash)] 18 | pub struct InstanceId(String, u64); 19 | 20 | impl FromStr for InstanceId { 21 | type Err = anyhow::Error; 22 | 23 | fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { 24 | if let Some((node, seq)) = s.split_once('/') { 25 | let seq: u64 = seq 26 | .parse() 27 | .map_err(|_| anyhow!("invalid instance ID {s:?}"))?; 28 | 29 | Ok(InstanceId(node.into(), seq)) 30 | } else { 31 | bail!("invalid instance ID {s:?}"); 32 | } 33 | } 34 | } 35 | 36 | impl std::fmt::Display for InstanceId { 37 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 38 | write!(f, "{}/{}", self.0, self.1) 39 | } 40 | } 41 | 42 | impl InstanceId { 43 | pub fn nodename(&self) -> &str { 44 | &self.0 45 | } 46 | 47 | pub fn seq(&self) -> u64 { 48 | self.1 49 | } 50 | 51 | pub fn zonename(&self) -> String { 52 | format!("bmat-{:08x}", self.1) 53 | } 54 | 55 | pub fn local_hostname(&self) -> String { 56 | format!("bmat-{}", self.flat_id()) 57 | } 58 | 59 | pub fn flat_id(&self) -> String { 60 | format!("{}-{:08x}", self.0, self.1) 61 | } 62 | 63 | pub fn from_zonename(nodename: &str, zonename: &str) -> Result<Self> { 64 | let Some(seq) = zonename.strip_prefix("bmat-") else { 65 | bail!("invalid zonename {zonename:?}"); 66 | }; 67 | 68 | match u64::from_str_radix(seq, 16) { 69 | Ok(seq) => Ok(InstanceId(nodename.into(), seq)), 70 | Err(e) => bail!("invalid zonename: {zonename:?}: {e}"), 71 | } 72 | } 73 | } 74 |
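A round-trip sketch for the identifier formats above (node name invented): the textual form is nodename/seq, the zone name encodes only the sequence number in hex, and flat_id() combines both.

    #[test]
    fn instance_id_round_trip() {
        let id: InstanceId = "host01/7".parse().unwrap();
        assert_eq!((id.nodename(), id.seq()), ("host01", 7));
        assert_eq!(id.zonename(), "bmat-00000007");
        assert_eq!(id.flat_id(), "host01-00000007");
        // from_zonename() recovers the same identifier from the hex form:
        let back = InstanceId::from_zonename("host01", "bmat-00000007").unwrap();
        assert_eq!(back.to_string(), "host01/7");
    }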
#[enum_def(prefix = "", suffix = "Def")] 77 | pub struct Instance { 78 | pub nodename: String, 79 | pub seq: InstanceSeq, 80 | pub worker: String, 81 | pub lease: String, 82 | pub target: String, 83 | pub state: InstanceState, 84 | pub bootstrap: String, 85 | pub slot: u32, 86 | } 87 | 88 | impl FromRow for Instance { 89 | fn columns() -> Vec { 90 | [ 91 | InstanceDef::Nodename, 92 | InstanceDef::Seq, 93 | InstanceDef::Worker, 94 | InstanceDef::Lease, 95 | InstanceDef::Target, 96 | InstanceDef::State, 97 | InstanceDef::Bootstrap, 98 | InstanceDef::Slot, 99 | ] 100 | .into_iter() 101 | .map(|col| { 102 | ColumnRef::TableColumn( 103 | SeaRc::new(InstanceDef::Table), 104 | SeaRc::new(col), 105 | ) 106 | }) 107 | .collect() 108 | } 109 | 110 | fn from_row(row: &Row) -> rusqlite::Result { 111 | Ok(Instance { 112 | nodename: row.get(0)?, 113 | seq: row.get(1)?, 114 | worker: row.get(2)?, 115 | lease: row.get(3)?, 116 | target: row.get(4)?, 117 | state: row.get(5)?, 118 | bootstrap: row.get(6)?, 119 | slot: row.get(7)?, 120 | }) 121 | } 122 | } 123 | 124 | impl Instance { 125 | pub fn find(id: &InstanceId) -> SelectStatement { 126 | Query::select() 127 | .from(InstanceDef::Table) 128 | .columns(Instance::columns()) 129 | .and_where(Expr::col(InstanceDef::Nodename).eq(&id.0)) 130 | .and_where(Expr::col(InstanceDef::Seq).eq(id.1)) 131 | .to_owned() 132 | } 133 | 134 | pub fn insert(&self) -> InsertStatement { 135 | Query::insert() 136 | .into_table(InstanceDef::Table) 137 | .columns(Self::bare_columns()) 138 | .values_panic([ 139 | self.nodename.clone().into(), 140 | self.seq.into(), 141 | self.worker.clone().into(), 142 | self.lease.clone().into(), 143 | self.target.clone().into(), 144 | self.state.into(), 145 | self.bootstrap.clone().into(), 146 | self.slot.into(), 147 | ]) 148 | .to_owned() 149 | } 150 | 151 | pub fn id(&self) -> InstanceId { 152 | InstanceId(self.nodename.clone(), self.seq.0) 153 | } 154 | 155 | pub fn new( 156 | nodename: &str, 157 | seq: u64, 158 | ci: crate::db::CreateInstance, 159 | ) -> Instance { 160 | assert!(seq > 0); 161 | 162 | Instance { 163 | nodename: nodename.to_string(), 164 | seq: InstanceSeq(seq), 165 | state: InstanceState::Unconfigured, 166 | worker: ci.worker, 167 | lease: ci.lease, 168 | target: ci.target, 169 | bootstrap: ci.bootstrap, 170 | slot: ci.slot, 171 | } 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /factory/propolis/src/db/tables/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | mod sublude { 6 | pub use std::str::FromStr; 7 | 8 | pub use crate::db::types::*; 9 | pub use buildomat_database::{rusqlite, sqlite_sql_enum, FromRow}; 10 | pub use rusqlite::Row; 11 | pub use sea_query::{ 12 | enum_def, ColumnRef, Expr, Iden, InsertStatement, Query, SeaRc, 13 | SelectStatement, 14 | }; 15 | } 16 | 17 | mod instance; 18 | 19 | pub use instance::*; 20 | -------------------------------------------------------------------------------- /factory/propolis/src/main.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::{sync::Arc, time::Duration}; 6 | 7 | use anyhow::{bail, Context, Result}; 8 | use buildomat_common::*; 9 | use buildomat_types::metadata; 10 | use getopts::Options; 11 | use slog::{info, o, Logger}; 12 | 13 | mod config; 14 | mod db; 15 | mod factory; 16 | mod net; 17 | mod nocloud; 
18 | mod propolis; 19 | mod serial; 20 | mod svc; 21 | mod ucred; 22 | mod vm; 23 | mod zones; 24 | 25 | struct Central { 26 | log: Logger, 27 | client: buildomat_client::Client, 28 | config: config::ConfigFile, 29 | db: db::Database, 30 | serial: serial::Serial, 31 | } 32 | 33 | impl Central { 34 | fn metadata( 35 | &self, 36 | t: &config::ConfigFileTarget, 37 | ) -> Result<metadata::FactoryMetadata> { 38 | /* 39 | * Allow the per-target diagnostic configuration to override the base 40 | * diagnostic configuration. 41 | */ 42 | Ok(self.config.diag.apply_overrides(&t.diag).build()?) 43 | } 44 | } 45 | 46 | /* 47 | * This factory runs on large AMD machines (e.g., 100+ SMT threads) and thus 48 | * ends up with far too many worker threads by default. 49 | */ 50 | #[tokio::main(worker_threads = 4)] 51 | async fn main() -> Result<()> { 52 | usdt::register_probes().unwrap(); 53 | 54 | let mut opts = Options::new(); 55 | 56 | opts.optopt("f", "", "configuration file", "CONFIG"); 57 | opts.optopt("d", "", "database file", "FILE"); 58 | 59 | let p = match opts.parse(std::env::args().skip(1)) { 60 | Ok(p) => p, 61 | Err(e) => { 62 | eprintln!("ERROR: usage: {}", e); 63 | eprintln!(" {}", opts.usage("usage")); 64 | std::process::exit(1); 65 | } 66 | }; 67 | 68 | let log = make_log("factory-propolis"); 69 | 70 | let config: config::ConfigFile = if let Some(f) = p.opt_str("f").as_deref() 71 | { 72 | read_toml(f)? 73 | } else { 74 | bail!("must specify configuration file (-f)"); 75 | }; 76 | 77 | let db = if let Some(p) = p.opt_str("d") { 78 | db::Database::new(log.clone(), p, None)? 79 | } else { 80 | bail!("must specify database file (-d)"); 81 | }; 82 | 83 | let sockdir = config.socketdir()?; 84 | if !sockdir.exists() { 85 | info!(log, "creating socket directory at {sockdir:?}"); 86 | std::fs::create_dir(&sockdir)?; 87 | } 88 | let serial = 89 | serial::Serial::new(log.new(o!("component" => "serial")), sockdir)?; 90 | 91 | let client = buildomat_client::ClientBuilder::new(&config.general.baseurl) 92 | .bearer_token(&config.factory.token) 93 | .build()?; 94 | 95 | /* 96 | * Install a custom panic hook that will try to exit the process after a 97 | * short delay. This is unfortunate, but I am not sure how else to avoid a 98 | * panicked worker thread leaving the process stuck without some of its 99 | * functionality. 100 | */ 101 | let orig_hook = std::panic::take_hook(); 102 | std::panic::set_hook(Box::new(move |info| { 103 | orig_hook(info); 104 | eprintln!("FATAL: THREAD PANIC DETECTED; EXITING IN 5 SECONDS..."); 105 | std::thread::spawn(move || { 106 | std::thread::sleep(Duration::from_secs(5)); 107 | std::process::exit(101); 108 | }); 109 | })); 110 | 111 | let c0 = Arc::new(Central { log, config, client, db, serial }); 112 | 113 | let c = Arc::clone(&c0); 114 | let t_vm = tokio::task::spawn(async move { 115 | vm::vm_worker(c).await.context("VM worker task failure") 116 | }); 117 | 118 | let c = Arc::clone(&c0); 119 | let t_factory = tokio::task::spawn(async move { 120 | factory::factory_task(c).await.context("factory task failure") 121 | }); 122 | 123 | tokio::select!
{ 124 | _ = t_vm => bail!("VM worker task stopped early"), 125 | _ = t_factory => bail!("factory task stopped early"), 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /factory/propolis/src/net.rs: -------------------------------------------------------------------------------- 1 | use std::process::Command; 2 | 3 | use anyhow::{anyhow, bail, Result}; 4 | use buildomat_common::*; 5 | 6 | const DLADM: &str = "/usr/sbin/dladm"; 7 | 8 | struct Terms { 9 | terms: Vec<String>, 10 | buf: Option<String>, 11 | } 12 | 13 | impl Terms { 14 | fn append(&mut self, c: char) { 15 | if self.buf.is_none() { 16 | self.buf = Some(String::new()); 17 | } 18 | self.buf.as_mut().unwrap().push(c); 19 | } 20 | 21 | fn commit(&mut self) { 22 | if let Some(val) = &self.buf { 23 | self.terms.push(val.to_string()); 24 | } 25 | self.buf = None; 26 | } 27 | 28 | fn result(&self) -> Vec<String> { 29 | self.terms.to_owned() 30 | } 31 | 32 | fn new() -> Terms { 33 | Terms { terms: Vec::new(), buf: Some(String::new()) } 34 | } 35 | } 36 | 37 | fn parse_net_adm(stdout: Vec<u8>) -> Result<Vec<Vec<String>>> { 38 | let stdout = String::from_utf8(stdout)?; 39 | let mut out = Vec::new(); 40 | 41 | for l in stdout.lines() { 42 | let mut terms = Terms::new(); 43 | let mut escape = false; 44 | 45 | for c in l.chars() { 46 | if escape { 47 | terms.append(c); 48 | escape = false; 49 | } else if c == '\\' { 50 | escape = true; 51 | } else if c == ':' { 52 | terms.commit(); 53 | } else { 54 | terms.append(c); 55 | } 56 | } 57 | terms.commit(); 58 | 59 | out.push(terms.result()); 60 | } 61 | 62 | Ok(out) 63 | } 64 | 65 | fn mac_sanitise(input: &str) -> String { 66 | let mac = input.split(':').fold(String::new(), |mut buf, octet| { 67 | if !buf.is_empty() { 68 | /* 69 | * Put the separating colon back between octets: 70 | */ 71 | buf.push(':'); 72 | } 73 | 74 | assert!(octet.len() == 1 || octet.len() == 2); 75 | if octet.len() < 2 { 76 | /* 77 | * Use a leading zero to pad any single-digit octets: 78 | */ 79 | buf.push('0'); 80 | } 81 | buf.push_str(octet); 82 | 83 | buf 84 | }); 85 | 86 | assert_eq!(mac.len(), 17); 87 | mac 88 | } 89 | 90 | pub struct Vnic { 91 | #[allow(unused)] 92 | pub name: String, 93 | pub physical: String, 94 | pub mac: String, 95 | pub vlan: Option<u16>, 96 | } 97 | 98 | pub fn dladm_vnic_get(vnic: &str) -> Result<Option<Vnic>> { 99 | let output = Command::new(DLADM) 100 | .env_clear() 101 | .arg("show-vnic") 102 | .arg("-p") 103 | .arg("-o") 104 | .arg("link,over,macaddress,vid") 105 | .arg(vnic) 106 | .output()?; 107 | 108 | if !output.status.success() { 109 | let e = String::from_utf8_lossy(&output.stderr); 110 | if e.contains("invalid vnic name") && e.contains("object not found") { 111 | return Ok(None); 112 | } 113 | 114 | bail!("dladm failed: {}", output.info()); 115 | } 116 | 117 | let vnics = parse_net_adm(output.stdout)?
118 | .into_iter() 119 | .map(|l| { 120 | Ok(Vnic { 121 | name: l[0].to_string(), 122 | physical: l[1].to_string(), 123 | mac: mac_sanitise(&l[2]), 124 | vlan: { 125 | let vid = l[3] 126 | .parse() 127 | .map_err(|e| anyhow!("parsing VLAN ID: {e}"))?; 128 | 129 | if vid == 0 { 130 | None 131 | } else { 132 | Some(vid) 133 | } 134 | }, 135 | }) 136 | }) 137 | .collect::<Result<Vec<_>>>()?; 138 | 139 | if vnics.len() > 1 { 140 | bail!("found more than 1 vnic?!"); 141 | } 142 | 143 | Ok(vnics.into_iter().next()) 144 | } 145 | 146 | pub fn dladm_delete_vnic(vnic: &str) -> Result<()> { 147 | let output = Command::new(DLADM) 148 | .env_clear() 149 | .arg("delete-vnic") 150 | .arg(vnic) 151 | .output()?; 152 | 153 | if !output.status.success() { 154 | bail!("dladm delete-vnic failed: {}", output.info()); 155 | } 156 | 157 | Ok(()) 158 | } 159 | 160 | pub fn dladm_create_vnic( 161 | vnic: &str, 162 | over: &str, 163 | vlan: Option<u16>, 164 | ) -> Result<()> { 165 | let mut cmd = Command::new(DLADM); 166 | cmd.env_clear(); 167 | cmd.arg("create-vnic"); 168 | cmd.arg("-t"); 169 | cmd.arg("-l").arg(over); 170 | if let Some(vlan) = vlan { 171 | cmd.arg("-v").arg(vlan.to_string()); 172 | } 173 | cmd.arg(vnic); 174 | let output = cmd.output()?; 175 | 176 | if !output.status.success() { 177 | bail!("dladm create-vnic failed: {}", output.info()); 178 | } 179 | 180 | Ok(()) 181 | } 182 |
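A worked example of the input format consumed above — dladm -p output is colon-delimited, with literal colons in the MAC column escaped by backslashes (values invented):

    #[test]
    fn parse_escaped_colons() {
        // One parseable row: link:over:macaddress:vid, MAC colons escaped.
        let out = b"vnic7:igb0:2\\:8\\:20\\:ab\\:cd\\:ef:0\n".to_vec();
        let rows = parse_net_adm(out).unwrap();
        let row: Vec<String> =
            ["vnic7", "igb0", "2:8:20:ab:cd:ef", "0"].map(String::from).to_vec();
        assert_eq!(rows, vec![row]);
        // mac_sanitise() then zero-pads the single-digit octets:
        assert_eq!(mac_sanitise("2:8:20:ab:cd:ef"), "02:08:20:ab:cd:ef");
    }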
-------------------------------------------------------------------------------- /factory/propolis/src/nocloud.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use serde::Serialize; 3 | 4 | #[derive(Debug, Serialize)] 5 | pub struct Network { 6 | pub version: u32, 7 | pub config: Vec<Config>, 8 | } 9 | 10 | #[derive(Debug, Serialize)] 11 | pub struct Config { 12 | #[serde(rename = "type")] 13 | pub type_: String, 14 | pub name: String, 15 | pub mac_address: String, 16 | pub subnets: Vec<Subnet>, 17 | } 18 | 19 | #[derive(Debug, Serialize)] 20 | pub struct Subnet { 21 | #[serde(rename = "type")] 22 | pub type_: String, 23 | pub address: String, 24 | pub gateway: String, 25 | pub dns_nameservers: Vec<String>, 26 | } 27 | 28 | impl Network { 29 | pub fn to_yaml(&self) -> Result<String> { 30 | Ok(serde_yaml::to_string(self)?) 31 | } 32 | } 33 | 34 | #[derive(Debug, Serialize)] 35 | #[serde(rename_all = "kebab-case")] 36 | pub struct MetaData { 37 | pub instance_id: String, 38 | pub local_hostname: String, 39 | } 40 | 41 | impl MetaData { 42 | pub fn to_json(&self) -> Result<String> { 43 | let mut out = serde_json::to_string_pretty(self)?; 44 | out.push('\n'); 45 | Ok(out) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /factory/propolis/src/propolis.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use anyhow::Result; 4 | use serde::Serialize; 5 | 6 | #[derive(Debug, Serialize)] 7 | pub struct Config { 8 | pub main: Main, 9 | pub block_dev: HashMap<String, BlockDev>, 10 | pub dev: HashMap<String, Dev>, 11 | pub cloudinit: CloudInit, 12 | } 13 | 14 | #[derive(Debug, Serialize)] 15 | pub struct Main { 16 | pub name: String, 17 | pub bootrom: String, 18 | pub cpus: u32, 19 | pub memory: u32, 20 | 21 | pub exit_on_halt: u8, 22 | pub exit_on_reboot: u8, 23 | 24 | pub use_reservoir: bool, 25 | } 26 | 27 | #[derive(Debug, Serialize)] 28 | pub struct BlockDev { 29 | #[serde(rename = "type")] 30 | pub type_: String, 31 | pub path: Option<String>, 32 | } 33 | 34 | #[derive(Debug, Serialize)] 35 | pub struct Dev { 36 | pub driver: String, 37 | #[serde(rename = "pci-path")] 38 | pub pci_path: String, 39 | pub block_dev: Option<String>, 40 | pub vnic: Option<String>, 41 | } 42 | 43 | #[derive(Debug, Serialize)] 44 | #[serde(rename_all = "kebab-case")] 45 | pub struct CloudInit { 46 | pub user_data: Option<String>, 47 | pub meta_data: Option<String>, 48 | pub network_config: Option<String>, 49 | } 50 | 51 | impl Config { 52 | pub fn to_toml(&self) -> Result<String> { 53 | Ok(toml::to_string_pretty(self)?) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /factory/propolis/src/ucred.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use std::os::fd::AsRawFd; 6 | 7 | use anyhow::{bail, Result}; 8 | use libc::{getpeerucred, ucred_free, ucred_getzoneid, ucred_t, zoneid_t}; 9 | 10 | pub trait PeerUCred: AsRawFd { 11 | fn peer_ucred(&self) -> Result<UCred> { 12 | let fd = self.as_raw_fd(); 13 | 14 | let mut uc: *mut ucred_t = std::ptr::null_mut(); 15 | if unsafe { getpeerucred(fd, &mut uc) } != 0 { 16 | let e = std::io::Error::last_os_error(); 17 | bail!("getpeerucred failure: {e}"); 18 | } 19 | assert!(!uc.is_null()); 20 | 21 | Ok(UCred { uc }) 22 | } 23 | } 24 | 25 | impl PeerUCred for tokio::net::UnixStream {} 26 | 27 | pub struct UCred { 28 | uc: *mut ucred_t, 29 | } 30 | 31 | impl Drop for UCred { 32 | fn drop(&mut self) { 33 | assert!(!self.uc.is_null()); 34 | 35 | unsafe { ucred_free(self.uc) }; 36 | } 37 | } 38 | 39 | impl UCred { 40 | pub fn zoneid(&self) -> Option<zoneid_t> { 41 | let zoneid = unsafe { ucred_getzoneid(self.uc) }; 42 | if zoneid < 0 { 43 | None 44 | } else { 45 | Some(zoneid) 46 | } 47 | } 48 | } 49 |
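A hypothetical sketch of what the PeerUCred trait above enables — checking that a connection on one of the factory's per-zone sockets really came from the zone we expect (the expected zone ID would come from zone_name_to_id() in zones.rs, just below):

    fn is_from_zone(
        sock: &tokio::net::UnixStream, // PeerUCred must be in scope for this call
        expected: zoneid_t,
    ) -> anyhow::Result<bool> {
        // ucred_getzoneid() can fail to produce a zone ID; treat that as a mismatch.
        Ok(sock.peer_ucred()?.zoneid() == Some(expected))
    }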
-------------------------------------------------------------------------------- /factory/propolis/src/zones.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::ffi::CString; 6 | 7 | use anyhow::{bail, Result}; 8 | pub use libc::zoneid_t; 9 | 10 | #[link(name = "c")] 11 | extern "C" { 12 | fn getzoneidbyname(name: *const libc::c_char) -> zoneid_t; 13 | } 14 | 15 | pub fn zone_name_to_id(name: &str) -> Result<Option<zoneid_t>> { 16 | let cs = CString::new(name)?; 17 | 18 | let id = unsafe { getzoneidbyname(cs.as_ptr()) }; 19 | if id < 0 { 20 | let e = unsafe { *libc::___errno() }; 21 | if e == libc::EINVAL { 22 | /* 23 | * According to the documentation, this actually means the zone does 24 | * not exist on the system. 25 | */ 26 | return Ok(None); 27 | } 28 | 29 | let e = std::io::Error::from_raw_os_error(e); 30 | bail!("getzoneidbyname({name}): {e}"); 31 | } 32 | 33 | Ok(Some(id)) 34 | } 35 | 36 | pub fn zone_exists(name: &str) -> Result<bool> { 37 | Ok(zone_name_to_id(name)?.is_some()) 38 | } 39 | -------------------------------------------------------------------------------- /github/client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-github-client" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | octorust = { workspace = true } 13 | reqwest = { workspace = true } 14 | -------------------------------------------------------------------------------- /github/client/src/lib.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | const USER_AGENT: &str = "buildomat-github-integration/0"; 6 | const GITHUB_API_URL: &str = "https://api.github.com"; 7 | 8 | use std::time::Duration; 9 | 10 | use anyhow::Result; 11 | pub use octorust::auth::JWTCredentials; 12 | use octorust::auth::{Credentials, InstallationTokenGenerator}; 13 | pub use octorust::types; 14 | pub use octorust::Client; 15 | 16 | fn mk_reqwest_client() -> Result<reqwest::Client> { 17 | Ok(reqwest::ClientBuilder::new() 18 | .timeout(Duration::from_secs(45)) 19 | .tcp_keepalive(Duration::from_secs(45)) 20 | .connect_timeout(Duration::from_secs(30)) 21 | .build()?)
22 | } 23 | 24 | pub fn app_client(jwt: JWTCredentials) -> Result<Client> { 25 | Ok(Client::custom( 26 | GITHUB_API_URL, 27 | USER_AGENT, 28 | Credentials::JWT(jwt), 29 | mk_reqwest_client()?, 30 | )) 31 | } 32 | 33 | pub fn install_client(jwt: JWTCredentials, install_id: i64) -> Result<Client> { 34 | let iat = InstallationTokenGenerator::new(install_id.try_into()?, jwt); 35 | 36 | Ok(Client::custom( 37 | GITHUB_API_URL, 38 | USER_AGENT, 39 | Credentials::InstallationToken(iat), 40 | mk_reqwest_client()?, 41 | )) 42 | } 43 | -------------------------------------------------------------------------------- /github/database/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-github-database" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | buildomat-common = { path = "../../common" } 12 | buildomat-database = { path = "../../database" } 13 | buildomat-github-hooktypes = { path = "../hooktypes" } 14 | buildomat-jobsh = { path = "../../jobsh" } 15 | 16 | anyhow = { workspace = true } 17 | chrono = { workspace = true } 18 | rand = { workspace = true } 19 | rusty_ulid = { workspace = true } 20 | sea-query = { workspace = true } 21 | serde = { workspace = true } 22 | serde_json = { workspace = true } 23 | slog = { workspace = true } 24 | strum = { workspace = true } 25 | toml = { workspace = true } 26 | -------------------------------------------------------------------------------- /github/database/schema.sql: -------------------------------------------------------------------------------- 1 | -- v 1 2 | CREATE TABLE delivery ( 3 | seq INTEGER PRIMARY KEY, 4 | uuid TEXT NOT NULL UNIQUE, 5 | event TEXT NOT NULL, 6 | headers TEXT NOT NULL, 7 | payload TEXT NOT NULL, 8 | recvtime TEXT NOT NULL, 9 | ack INTEGER 10 | ); 11 | 12 | -- v 2 13 | CREATE TABLE repository ( 14 | id INTEGER PRIMARY KEY, 15 | owner TEXT NOT NULL, 16 | name TEXT NOT NULL 17 | ); 18 | 19 | -- v 3 20 | CREATE TABLE check_suite ( 21 | id TEXT PRIMARY KEY, 22 | repo INTEGER NOT NULL, 23 | install INTEGER NOT NULL, 24 | github_id INTEGER NOT NULL, 25 | head_sha TEXT NOT NULL, 26 | head_branch TEXT, 27 | state TEXT NOT NULL, 28 | plan TEXT, 29 | plan_sha TEXT, 30 | url_key TEXT NOT NULL, 31 | 32 | UNIQUE (repo, github_id) 33 | ); 34 | 35 | -- v 4 36 | CREATE TABLE check_run ( 37 | id TEXT PRIMARY KEY, 38 | check_suite TEXT NOT NULL REFERENCES check_suite (id) 39 | ON UPDATE RESTRICT 40 | ON DELETE RESTRICT, 41 | name TEXT NOT NULL, 42 | variety TEXT NOT NULL, 43 | content TEXT, 44 | config TEXT, 45 | private TEXT, 46 | active INTEGER NOT NULL, 47 | flushed INTEGER NOT NULL, 48 | github_id INTEGER 49 | ); 50 | 51 | -- v 5 52 | CREATE INDEX delivery_ack ON delivery (ack); 53 | 54 | -- v 6 55 | CREATE TABLE install ( 56 | id INTEGER PRIMARY KEY, 57 | owner INTEGER NOT NULL 58 | ); 59 | 60 | -- v 7 61 | CREATE TABLE user ( 62 | id INTEGER PRIMARY KEY, 63 | login TEXT NOT NULL, 64 | usertype TEXT NOT NULL, 65 | name TEXT, 66 | email TEXT 67 | ); 68 | 69 | -- v 8 70 | ALTER TABLE check_suite ADD COLUMN 71 | pr_by INTEGER; 72 | 73 | -- v 9 74 | ALTER TABLE check_suite ADD COLUMN 75 | requested_by INTEGER; 76 | 77 | -- v 10 78 | ALTER TABLE check_suite ADD COLUMN 79 | approved_by INTEGER; 80 | 81 | -- v 11 82 | CREATE INDEX repository_owner_name ON repository (owner, name); 83 | 84 | -- v 12 85 | ALTER TABLE check_run ADD COLUMN 86 | dependencies TEXT; 87 | 88 | -- v 13 89 | CREATE INDEX check_run_check_suite ON check_run (check_suite); 90 |
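Stepping back to the github/client crate above: a hypothetical sketch of choosing between its two constructors. The installation ID is invented, and building the JWTCredentials from the App's signing key is elided.

    use buildomat_github_client::{app_client, install_client, Client, JWTCredentials};

    fn client_for(jwt: JWTCredentials, install_id: Option<i64>) -> anyhow::Result<Client> {
        Ok(match install_id {
            // Installation-scoped: wraps the JWT in an InstallationTokenGenerator,
            // so the client transparently mints installation tokens as needed.
            Some(id) => install_client(jwt, id)?,
            // App-scoped: authenticates directly with the signed JWT.
            None => app_client(jwt)?,
        })
    }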
-------------------------------------------------------------------------------- /github/database/src/tables/check_suite.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use buildomat_jobsh::jobfile; 6 | 7 | use super::check_run::{CheckRunVariety, JobFileDepend}; 8 | use super::sublude::*; 9 | 10 | sqlite_sql_enum!(CheckSuiteState => { 11 | Created, 12 | Parked, 13 | Planned, 14 | Running, 15 | Complete, 16 | Retired, 17 | }); 18 | 19 | impl CheckSuiteState { 20 | pub fn is_parked(&self) -> bool { 21 | matches!(self, CheckSuiteState::Parked) 22 | } 23 | } 24 | 25 | sqlite_json_new_type!(JsonPlan, Plan); 26 | 27 | #[derive(Debug, Clone, Serialize, Deserialize)] 28 | pub struct Plan { 29 | pub jobfiles: Vec<JobFile>, 30 | } 31 | 32 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 33 | pub struct JobFile { 34 | pub path: String, 35 | pub name: String, 36 | pub variety: CheckRunVariety, 37 | pub config: serde_json::Value, 38 | pub content: String, 39 | #[serde(default)] 40 | pub dependencies: HashMap<String, JobFileDepend>, 41 | } 42 | 43 | impl From<jobfile::JobFile> for JobFile { 44 | fn from(value: jobfile::JobFile) -> Self { 45 | let jobfile::JobFile { 46 | path, 47 | name, 48 | variety, 49 | config, 50 | content, 51 | dependencies, 52 | } = value; 53 | 54 | Self { 55 | path, 56 | name, 57 | variety: match variety { 58 | jobfile::Variety::Basic => CheckRunVariety::Basic, 59 | }, 60 | config, 61 | content, 62 | dependencies: dependencies 63 | .into_iter() 64 | .map(|(a, b)| (a.into(), b.into())) 65 | .collect(), 66 | } 67 | } 68 | } 69 | 70 | #[derive(Debug, Clone)] 71 | #[enum_def(prefix = "", suffix = "Def")] 72 | pub struct CheckSuite { 73 | pub id: CheckSuiteId, 74 | pub repo: i64, 75 | pub install: i64, 76 | pub github_id: i64, 77 | pub head_sha: String, 78 | pub head_branch: Option<String>, 79 | pub state: CheckSuiteState, 80 | pub plan: Option<JsonPlan>, 81 | pub plan_sha: Option<String>, 82 | pub url_key: String, 83 | pub pr_by: Option<i64>, 84 | pub requested_by: Option<i64>, 85 | pub approved_by: Option<i64>, 86 | } 87 | 88 | impl FromRow for CheckSuite { 89 | fn columns() -> Vec<ColumnRef> { 90 | [ 91 | CheckSuiteDef::Id, 92 | CheckSuiteDef::Repo, 93 | CheckSuiteDef::Install, 94 | CheckSuiteDef::GithubId, 95 | CheckSuiteDef::HeadSha, 96 | CheckSuiteDef::HeadBranch, 97 | CheckSuiteDef::State, 98 | CheckSuiteDef::Plan, 99 | CheckSuiteDef::PlanSha, 100 | CheckSuiteDef::UrlKey, 101 | CheckSuiteDef::PrBy, 102 | CheckSuiteDef::RequestedBy, 103 | CheckSuiteDef::ApprovedBy, 104 | ] 105 | .into_iter() 106 | .map(|col| { 107 | ColumnRef::TableColumn( 108 | SeaRc::new(CheckSuiteDef::Table), 109 | SeaRc::new(col), 110 | ) 111 | }) 112 | .collect() 113 | } 114 | 115 | fn from_row(row: &Row) -> rusqlite::Result<CheckSuite> { 116 | Ok(CheckSuite { 117 | id: row.get(0)?, 118 | repo: row.get(1)?, 119 | install: row.get(2)?, 120 | github_id: row.get(3)?, 121 | head_sha: row.get(4)?, 122 | head_branch: row.get(5)?, 123 | state: row.get(6)?, 124 | plan: row.get(7)?, 125 | plan_sha: row.get(8)?, 126 | url_key: row.get(9)?, 127 | pr_by: row.get(10)?, 128 | requested_by: row.get(11)?, 129 | approved_by: row.get(12)?, 130 | }) 131 | } 132 | } 133 | 134 | impl CheckSuite { 135 | pub fn find(id: CheckSuiteId) -> SelectStatement { 136 | Query::select() 137 | .from(CheckSuiteDef::Table) 138 | .columns(CheckSuite::columns()) 139 | .and_where(Expr::col(CheckSuiteDef::Id).eq(id)) 140 | .to_owned() 141 | } 142 | 143 | pub fn find_by_github_id(repo: i64,
github_id: i64) -> SelectStatement { 144 | Query::select() 145 | .from(CheckSuiteDef::Table) 146 | .columns(CheckSuite::columns()) 147 | .and_where(Expr::col(CheckSuiteDef::Repo).eq(repo)) 148 | .and_where(Expr::col(CheckSuiteDef::GithubId).eq(github_id)) 149 | .to_owned() 150 | } 151 | 152 | pub fn insert(&self) -> InsertStatement { 153 | Query::insert() 154 | .into_table(CheckSuiteDef::Table) 155 | .columns(Self::bare_columns()) 156 | .values_panic([ 157 | self.id.into(), 158 | self.repo.into(), 159 | self.install.into(), 160 | self.github_id.into(), 161 | self.head_sha.clone().into(), 162 | self.head_branch.clone().into(), 163 | self.state.into(), 164 | self.plan.clone().into(), 165 | self.plan_sha.clone().into(), 166 | self.url_key.clone().into(), 167 | self.pr_by.into(), 168 | self.requested_by.into(), 169 | self.approved_by.into(), 170 | ]) 171 | .to_owned() 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /github/database/src/tables/delivery.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct Delivery { 10 | pub seq: DeliverySeq, 11 | pub uuid: String, 12 | pub event: String, 13 | pub headers: Dictionary, 14 | pub payload: JsonValue, 15 | pub recvtime: IsoDate, 16 | pub ack: Option<i64>, 17 | } 18 | 19 | impl FromRow for Delivery { 20 | fn columns() -> Vec<ColumnRef> { 21 | [ 22 | DeliveryDef::Seq, 23 | DeliveryDef::Uuid, 24 | DeliveryDef::Event, 25 | DeliveryDef::Headers, 26 | DeliveryDef::Payload, 27 | DeliveryDef::Recvtime, 28 | DeliveryDef::Ack, 29 | ] 30 | .into_iter() 31 | .map(|col| { 32 | ColumnRef::TableColumn( 33 | SeaRc::new(DeliveryDef::Table), 34 | SeaRc::new(col), 35 | ) 36 | }) 37 | .collect() 38 | } 39 | 40 | fn from_row(row: &Row) -> rusqlite::Result<Delivery> { 41 | Ok(Delivery { 42 | seq: row.get(0)?, 43 | uuid: row.get(1)?, 44 | event: row.get(2)?, 45 | headers: row.get(3)?, 46 | payload: row.get(4)?, 47 | recvtime: row.get(5)?, 48 | ack: row.get(6)?, 49 | }) 50 | } 51 | } 52 | 53 | impl Delivery { 54 | pub fn find(seq: DeliverySeq) -> SelectStatement { 55 | Query::select() 56 | .from(DeliveryDef::Table) 57 | .columns(Delivery::columns()) 58 | .and_where(Expr::col(DeliveryDef::Seq).eq(seq)) 59 | .to_owned() 60 | } 61 | 62 | pub fn insert(&self) -> InsertStatement { 63 | Query::insert() 64 | .into_table(DeliveryDef::Table) 65 | .columns(Self::bare_columns()) 66 | .values_panic([ 67 | self.seq.into(), 68 | self.uuid.clone().into(), 69 | self.event.clone().into(), 70 | self.headers.clone().into(), 71 | self.payload.clone().into(), 72 | self.recvtime.into(), 73 | self.ack.into(), 74 | ]) 75 | .to_owned() 76 | } 77 | 78 | pub fn recvtime_day_prefix(&self) -> String { 79 | self.recvtime.0.format("%Y-%m-%d").to_string() 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /github/database/src/tables/install.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct Install { 10 | pub id: i64, 11 | pub owner: i64, 12 | } 13 | 14 | impl FromRow for Install { 15 | fn columns() -> Vec<ColumnRef> { 16 | [InstallDef::Id, InstallDef::Owner] 17 | .into_iter() 18 | .map(|col| { 19 |
ColumnRef::TableColumn( 20 | SeaRc::new(InstallDef::Table), 21 | SeaRc::new(col), 22 | ) 23 | }) 24 | .collect() 25 | } 26 | 27 | fn from_row(row: &Row) -> rusqlite::Result<Install> { 28 | Ok(Install { id: row.get(0)?, owner: row.get(1)? }) 29 | } 30 | } 31 | 32 | impl Install { 33 | pub fn find(id: i64) -> SelectStatement { 34 | Query::select() 35 | .from(InstallDef::Table) 36 | .columns(Install::columns()) 37 | .and_where(Expr::col(InstallDef::Id).eq(id)) 38 | .to_owned() 39 | } 40 | 41 | pub fn insert(&self) -> InsertStatement { 42 | Query::insert() 43 | .into_table(InstallDef::Table) 44 | .columns(Self::bare_columns()) 45 | .values_panic([self.id.into(), self.owner.into()]) 46 | .to_owned() 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /github/database/src/tables/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | mod sublude { 6 | pub use std::collections::HashMap; 7 | pub use std::str::FromStr; 8 | 9 | pub use crate::itypes::*; 10 | #[allow(unused_imports)] 11 | pub use anyhow::{anyhow, bail, Result}; 12 | #[allow(unused_imports)] 13 | pub use buildomat_common::*; 14 | pub use buildomat_database::{ 15 | rusqlite, sqlite_json_new_type, sqlite_sql_enum, FromRow, 16 | }; 17 | pub use buildomat_github_hooktypes as hooktypes; 18 | pub use rusqlite::Row; 19 | pub use sea_query::{ 20 | enum_def, ColumnRef, Expr, Iden, InsertStatement, Query, SeaRc, 21 | SelectStatement, 22 | }; 23 | pub use serde::{Deserialize, Serialize}; 24 | } 25 | 26 | mod check_run; 27 | mod check_suite; 28 | mod delivery; 29 | mod install; 30 | mod repository; 31 | mod user; 32 | 33 | pub use check_run::*; 34 | pub use check_suite::*; 35 | pub use delivery::*; 36 | pub use install::*; 37 | pub use repository::*; 38 | pub use user::*; 39 | -------------------------------------------------------------------------------- /github/database/src/tables/repository.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct Repository { 10 | pub id: i64, 11 | pub owner: String, 12 | pub name: String, 13 | } 14 | 15 | impl FromRow for Repository { 16 | fn columns() -> Vec<ColumnRef> { 17 | [RepositoryDef::Id, RepositoryDef::Owner, RepositoryDef::Name] 18 | .into_iter() 19 | .map(|col| { 20 | ColumnRef::TableColumn( 21 | SeaRc::new(RepositoryDef::Table), 22 | SeaRc::new(col), 23 | ) 24 | }) 25 | .collect() 26 | } 27 | 28 | fn from_row(row: &Row) -> rusqlite::Result<Repository> { 29 | Ok(Repository { 30 | id: row.get(0)?, 31 | owner: row.get(1)?, 32 | name: row.get(2)?, 33 | }) 34 | } 35 | } 36 | 37 | impl Repository { 38 | pub fn find(id: i64) -> SelectStatement { 39 | Query::select() 40 | .from(RepositoryDef::Table) 41 | .columns(Repository::columns()) 42 | .and_where(Expr::col(RepositoryDef::Id).eq(id)) 43 | .to_owned() 44 | } 45 | 46 | pub fn insert(&self) -> InsertStatement { 47 | Query::insert() 48 | .into_table(RepositoryDef::Table) 49 | .columns(Self::bare_columns()) 50 | .values_panic([ 51 | self.id.into(), 52 | self.owner.clone().into(), 53 | self.name.clone().into(), 54 | ]) 55 | .to_owned() 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /github/database/src/tables/user.rs:
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | sqlite_sql_enum!(UserType => { 8 | User, 9 | Bot, 10 | #[strum(serialize = "org")] 11 | Organisation, 12 | }); 13 | 14 | impl UserType { 15 | pub fn is_org(&self) -> bool { 16 | matches!(self, UserType::Organisation) 17 | } 18 | 19 | pub fn from_github_str(ut: &str) -> Result<UserType> { 20 | Ok(match ut { 21 | "User" => UserType::User, 22 | "Bot" => UserType::Bot, 23 | "Organization" => UserType::Organisation, 24 | x => bail!("invalid user type from GitHub: {:?}", x), 25 | }) 26 | } 27 | 28 | pub fn from_github(ut: hooktypes::UserType) -> Self { 29 | match ut { 30 | hooktypes::UserType::User => UserType::User, 31 | hooktypes::UserType::Bot => UserType::Bot, 32 | hooktypes::UserType::Organization => UserType::Organisation, 33 | } 34 | } 35 | } 36 | 37 | #[derive(Debug, Clone)] 38 | #[enum_def(prefix = "", suffix = "Def")] 39 | pub struct User { 40 | pub id: i64, 41 | pub login: String, 42 | pub usertype: UserType, 43 | pub name: Option<String>, 44 | pub email: Option<String>, 45 | } 46 | 47 | impl FromRow for User { 48 | fn columns() -> Vec<ColumnRef> { 49 | [ 50 | UserDef::Id, 51 | UserDef::Login, 52 | UserDef::Usertype, 53 | UserDef::Name, 54 | UserDef::Email, 55 | ] 56 | .into_iter() 57 | .map(|col| { 58 | ColumnRef::TableColumn(SeaRc::new(UserDef::Table), SeaRc::new(col)) 59 | }) 60 | .collect() 61 | } 62 | 63 | fn from_row(row: &Row) -> rusqlite::Result<User> { 64 | Ok(User { 65 | id: row.get(0)?, 66 | login: row.get(1)?, 67 | usertype: row.get(2)?, 68 | name: row.get(3)?, 69 | email: row.get(4)?, 70 | }) 71 | } 72 | } 73 | 74 | impl User { 75 | pub fn find(id: i64) -> SelectStatement { 76 | Query::select() 77 | .from(UserDef::Table) 78 | .columns(User::columns()) 79 | .and_where(Expr::col(UserDef::Id).eq(id)) 80 | .to_owned() 81 | } 82 | 83 | pub fn insert(&self) -> InsertStatement { 84 | Query::insert() 85 | .into_table(UserDef::Table) 86 | .columns(Self::bare_columns()) 87 | .values_panic([ 88 | self.id.into(), 89 | self.login.clone().into(), 90 | self.usertype.into(), 91 | self.name.clone().into(), 92 | self.email.clone().into(), 93 | ]) 94 | .to_owned() 95 | } 96 | } 97 | 98 | /* 99 | * These tests attempt to ensure that the concrete representation of the enum 100 | * does not change, as that would make the database unusable.
101 | */ 102 | #[cfg(test)] 103 | mod test { 104 | use super::UserType; 105 | use std::str::FromStr; 106 | 107 | const USER_TYPES: &'static [(&'static str, UserType)] = &[ 108 | ("user", UserType::User), 109 | ("bot", UserType::Bot), 110 | ("org", UserType::Organisation), 111 | ]; 112 | 113 | #[test] 114 | fn user_type_forward() { 115 | for (s, e) in USER_TYPES { 116 | assert_eq!(*s, e.to_string()); 117 | } 118 | } 119 | 120 | #[test] 121 | fn user_type_backward() { 122 | for (s, e) in USER_TYPES { 123 | assert_eq!(UserType::from_str(s).unwrap(), *e); 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /github/dbtool/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-github-dbtool" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [dependencies] 8 | buildomat-github-database = { path = "../database" } 9 | buildomat-github-hooktypes = { path = "../hooktypes" } 10 | 11 | anyhow = { workspace = true } 12 | chrono = { workspace = true } 13 | hiercmd = { workspace = true } 14 | serde = { workspace = true } 15 | serde_json = { workspace = true } 16 | tokio = { workspace = true } 17 | -------------------------------------------------------------------------------- /github/ghtool/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-github-ghtool" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [features] 8 | default = ['vendored-openssl'] 9 | vendored-openssl = ['openssl/vendored'] 10 | 11 | [dependencies] 12 | buildomat-common = { path = "../../common" } 13 | buildomat-github-client = { path = "../client" } 14 | 15 | anyhow = { workspace = true } 16 | base64 = { workspace = true } 17 | chrono = { workspace = true } 18 | hiercmd = { workspace = true } 19 | hyper = { workspace = true } 20 | pem = { workspace = true } 21 | reqwest = { workspace = true } 22 | serde = { workspace = true } 23 | serde_json = { workspace = true } 24 | slog = { workspace = true } 25 | tokio = { workspace = true } 26 | toml = { workspace = true } 27 | 28 | # 29 | # I believe it is necessary to pull this in here, so that we can demand the 30 | # static linking of the vendored OpenSSL. We don't use it directly, but the 31 | # same version will then be used by reqwest. 
32 | # 33 | openssl = { version = "0.10", optional = true } 34 | -------------------------------------------------------------------------------- /github/ghtool/src/config.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::fs::OpenOptions; 6 | use std::io::Read; 7 | use std::path::Path; 8 | 9 | use anyhow::Result; 10 | use buildomat_common::*; 11 | use serde::Deserialize; 12 | 13 | #[derive(Deserialize)] 14 | pub struct Config { 15 | pub id: u64, 16 | #[allow(unused)] 17 | pub secret: String, 18 | } 19 | 20 | pub fn load_bytes<P: AsRef<Path>>(p: P) -> Result<Vec<u8>> { 21 | let p = p.as_ref(); 22 | let mut f = OpenOptions::new().read(true).open(p)?; 23 | let mut d = Vec::new(); 24 | f.read_to_end(&mut d)?; 25 | Ok(d) 26 | } 27 | 28 | pub fn load_config<P: AsRef<Path>>(p: P) -> Result<Config> { 29 | read_toml(p) 30 | } 31 | -------------------------------------------------------------------------------- /github/hooktypes/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-github-hooktypes" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | serde = { workspace = true } 13 | -------------------------------------------------------------------------------- /github/hooktypes/src/lib.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 Oxide Computer Company 3 | */ 4 | 5 | use anyhow::{bail, Result}; 6 | use serde::Deserialize; 7 | 8 | #[derive(Deserialize, Debug)] 9 | pub struct Payload { 10 | #[serde(default)] 11 | pub action: String, 12 | pub sender: User, 13 | pub repository: Option<Repository>, 14 | pub installation: Option<Installation>, 15 | pub check_suite: Option<CheckSuite>, 16 | pub check_run: Option<CheckRun>, 17 | pull_request: Option<PullRequest>, 18 | pub requested_action: Option<RequestedAction>, 19 | } 20 | 21 | impl Payload { 22 | pub fn pull_request(&self) -> Result<&PullRequest> { 23 | let mut problems = Vec::new(); 24 | 25 | if let Some(pr) = self.pull_request.as_ref() { 26 | /* 27 | * Unfortunately, GitHub has been known to occasionally omit the 28 | * repository object in web hooks that get delivered to us. If that 29 | * occurs, just drop the delivery.
30 | */ 31 | if pr.base.repo.is_none() { 32 | problems.push("missing pull_request.base.repo"); 33 | } 34 | if pr.head.repo.is_none() { 35 | problems.push("missing pull_request.head.repo"); 36 | } 37 | 38 | if problems.is_empty() { 39 | return Ok(pr); 40 | } 41 | } else { 42 | problems.push("missing pull request information"); 43 | } 44 | 45 | bail!("{}", problems.join(", ")); 46 | } 47 | } 48 | 49 | #[derive(Deserialize, Debug)] 50 | pub struct User { 51 | pub login: String, 52 | pub id: i64, 53 | pub node_id: String, 54 | pub name: Option<String>, 55 | pub email: Option<String>, 56 | #[serde(rename = "type")] 57 | pub type_: UserType, 58 | pub site_admin: bool, 59 | } 60 | 61 | #[derive(Deserialize, Debug)] 62 | pub struct App { 63 | pub id: i64, 64 | pub name: String, 65 | pub slug: String, 66 | pub description: Option<String>, 67 | } 68 | 69 | #[derive(Deserialize, Debug, Clone, Copy)] 70 | pub enum UserType { 71 | Bot, 72 | User, 73 | Organization, 74 | } 75 | 76 | #[derive(Deserialize, Debug)] 77 | pub struct Repository { 78 | pub id: i64, 79 | pub node_id: String, 80 | pub name: String, 81 | pub owner: Owner, 82 | } 83 | 84 | #[derive(Deserialize, Debug)] 85 | pub struct CheckSuite { 86 | pub id: i64, 87 | pub node_id: String, 88 | pub head_branch: Option<String>, 89 | pub head_sha: String, 90 | pub status: CheckSuiteStatus, 91 | pub app: App, 92 | } 93 | 94 | #[derive(Deserialize, Debug)] 95 | pub struct CheckRun { 96 | pub id: i64, 97 | pub node_id: String, 98 | pub head_sha: String, 99 | pub external_id: String, 100 | pub status: CheckRunStatus, 101 | pub conclusion: Option<CheckRunConclusion>, 102 | } 103 | 104 | #[derive(Deserialize, Debug)] 105 | pub struct PullRequest { 106 | pub id: i64, 107 | pub number: i64, 108 | pub title: String, 109 | pub node_id: String, 110 | pub state: PullRequestState, 111 | pub head: PullRequestCommit, 112 | pub base: PullRequestCommit, 113 | } 114 | 115 | #[derive(Deserialize, Debug)] 116 | pub struct PullRequestCommit { 117 | pub label: String, 118 | #[serde(rename = "ref")] 119 | pub ref_: String, 120 | pub sha: String, 121 | pub user: User, 122 | pub repo: Option<Repository>, 123 | } 124 | 125 | impl PullRequest { 126 | pub fn is_open(&self) -> bool { 127 | matches!(self.state, PullRequestState::Open) 128 | } 129 | } 130 | 131 | impl PullRequestCommit { 132 | pub fn repo(&self) -> &Repository { 133 | /* 134 | * This is validated in the pull_request() routine on the Payload.
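* A typical caller would therefore pair the two accessors, along these
* lines (sketch):
*
*   let pr = payload.pull_request()?;
*   let base = pr.base.repo();
*   let head = pr.head.repo();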
135 | */ 136 | self.repo.as_ref().unwrap() 137 | } 138 | } 139 | 140 | #[derive(Deserialize, Debug)] 141 | #[serde(rename_all = "snake_case")] 142 | pub enum PullRequestState { 143 | Open, 144 | Closed, 145 | } 146 | 147 | #[derive(Deserialize, Debug)] 148 | #[serde(rename_all = "snake_case")] 149 | pub enum CheckSuiteStatus { 150 | Requested, 151 | InProgress, 152 | Completed, 153 | Queued, 154 | /* 155 | * This status is not documented, and does not appear in the schema, 156 | * but GitHub Actions (of course) seems to generate it sometimes: 157 | */ 158 | Pending, 159 | } 160 | 161 | #[derive(Deserialize, Debug)] 162 | #[serde(rename_all = "snake_case")] 163 | pub enum CheckRunStatus { 164 | Queued, 165 | InProgress, 166 | Completed, 167 | /* 168 | * This status is not documented, and does not appear in the schema, 169 | * but GitHub Actions (of course) seems to generate it sometimes: 170 | */ 171 | Pending, 172 | } 173 | 174 | #[derive(Deserialize, Debug)] 175 | #[serde(rename_all = "snake_case")] 176 | pub enum CheckRunConclusion { 177 | Success, 178 | Failure, 179 | Neutral, 180 | Cancelled, 181 | TimedOut, 182 | ActionRequired, 183 | Stale, 184 | Skipped, 185 | } 186 | 187 | #[derive(Deserialize, Debug)] 188 | pub struct Installation { 189 | pub id: i64, 190 | pub node_id: Option<String>, 191 | pub account: Option<User>, 192 | } 193 | 194 | #[derive(Deserialize, Debug)] 195 | pub struct Owner { 196 | pub id: i64, 197 | pub node_id: String, 198 | pub login: String, 199 | } 200 | 201 | #[derive(Deserialize, Debug)] 202 | pub struct RequestedAction { 203 | pub identifier: String, 204 | } 205 | -------------------------------------------------------------------------------- /github/server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-github-server" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [dependencies] 8 | buildomat-bunyan = { path = "../../bunyan" } 9 | buildomat-client = { path = "../../client" } 10 | buildomat-common = { path = "../../common" } 11 | buildomat-download = { path = "../../download" } 12 | buildomat-github-client = { path = "../client" } 13 | buildomat-github-database = { path = "../database" } 14 | buildomat-github-hooktypes = { path = "../hooktypes" } 15 | buildomat-jobsh = { path = "../../jobsh" } 16 | buildomat-sse = { path = "../../sse" } 17 | 18 | ansi-to-html = { workspace = true } 19 | anyhow = { workspace = true } 20 | base64 = { workspace = true } 21 | chrono = { workspace = true } 22 | dropshot = { workspace = true } 23 | futures = { workspace = true } 24 | hmac-sha256 = { workspace = true } 25 | html-escape = { workspace = true } 26 | http-body-util = { workspace = true } 27 | hyper = { workspace = true } 28 | pem = { workspace = true } 29 | reqwest = { workspace = true } 30 | rust-toolchain-file = { workspace = true } 31 | rusty_ulid = { workspace = true } 32 | schemars = { workspace = true } 33 | serde = { workspace = true } 34 | serde_json = { workspace = true } 35 | slog = { workspace = true } 36 | strip-ansi-escapes = { workspace = true } 37 | tempfile = { workspace = true } 38 | thiserror = { workspace = true } 39 | tokio = { workspace = true } 40 | tokio-util = { workspace = true } 41 | toml = { workspace = true } 42 | usdt = { workspace = true } 43 | 44 | [dev-dependencies] 45 | buildomat-github-testdata = { path = "../testdata" } 46 | -------------------------------------------------------------------------------- /github/server/src/config.rs:
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::fs::OpenOptions; 6 | use std::io::Read; 7 | use std::path::Path; 8 | 9 | use anyhow::Result; 10 | use buildomat_common::*; 11 | use serde::Deserialize; 12 | 13 | #[derive(Deserialize)] 14 | pub struct Sqlite { 15 | #[serde(default)] 16 | pub cache_kb: Option<u32>, 17 | } 18 | 19 | #[derive(Deserialize)] 20 | pub struct Buildomat { 21 | pub token: String, 22 | pub url: String, 23 | } 24 | 25 | #[derive(Deserialize)] 26 | pub struct Config { 27 | pub id: u64, 28 | #[allow(unused)] 29 | pub secret: String, 30 | pub webhook_secret: String, 31 | pub base_url: String, 32 | pub confroot: String, 33 | pub buildomat: Buildomat, 34 | pub allow_owners: Vec<String>, 35 | pub sqlite: Sqlite, 36 | } 37 | 38 | pub fn load_bytes<P: AsRef<Path>>(p: P) -> Result<Vec<u8>> { 39 | let p = p.as_ref(); 40 | let mut f = OpenOptions::new().read(true).open(p)?; 41 | let mut d = Vec::new(); 42 | f.read_to_end(&mut d)?; 43 | Ok(d) 44 | } 45 | 46 | pub fn load_config<P: AsRef<Path>>(p: P) -> Result<Config> { 47 | read_toml(p) 48 | } 49 | -------------------------------------------------------------------------------- /github/server/src/templates.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use std::path::PathBuf; 6 | use std::result::Result as SResult; 7 | 8 | use anyhow::Result; 9 | use dropshot::HttpError; 10 | use slog::{error, Logger}; 11 | 12 | pub struct Templates { 13 | log: Logger, 14 | dir: Option<PathBuf>, 15 | } 16 | 17 | impl Templates { 18 | pub fn new(log: Logger) -> Result<Templates> { 19 | /* 20 | * We deploy this program in "/opt/buildomat/lib" and the templates, if 21 | * present, are alongside in "/opt/buildomat/share".
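* For example, with the server binary installed at
* "/opt/buildomat/lib/server" (name hypothetical), templates resolve
* under "/opt/buildomat/share"; in any other layout, such as a build
* tree, we fall back to the copies compiled in below.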
22 | */ 23 | let dir = { 24 | let dir = std::env::current_exe()?; 25 | if let Some(lib) = dir.parent() { 26 | if lib.file_name() == Some(std::ffi::OsStr::new("lib")) { 27 | Some(lib.parent().unwrap().join("share")) 28 | } else { 29 | None 30 | } 31 | } else { 32 | None 33 | } 34 | }; 35 | 36 | Ok(Self { log, dir }) 37 | } 38 | 39 | pub fn load(&self, name: &str) -> SResult<String, HttpError> { 40 | let log = &self.log; 41 | 42 | if let Some(dir) = &self.dir { 43 | let file = dir.join(name); 44 | match std::fs::read_to_string(&file) { 45 | Ok(data) => return Ok(data), 46 | Err(e) if e.kind() == std::io::ErrorKind::NotFound => (), 47 | Err(e) => { 48 | error!(log, "opening template {name:?}: {e}"); 49 | } 50 | } 51 | } 52 | 53 | match name { 54 | "variety/basic/www/style.css" => { 55 | Ok(include_str!("../../../variety/basic/www/style.css").into()) 56 | } 57 | _ => Err(HttpError::for_internal_error(format!( 58 | "could not locate template {name:?}" 59 | ))), 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /github/server/src/variety/control.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use std::collections::BTreeMap; 6 | use std::sync::Arc; 7 | 8 | use crate::App; 9 | use anyhow::Result; 10 | use buildomat_common::*; 11 | use buildomat_github_database::types::*; 12 | use serde::{Deserialize, Serialize}; 13 | 14 | pub const CONTROL_RUN_NAME: &str = "*control"; 15 | 16 | #[derive(Debug, Serialize, Deserialize, Default)] 17 | pub struct ControlPrivate { 18 | pub error: Option<String>, 19 | #[serde(default)] 20 | pub complete: bool, 21 | #[serde(default)] 22 | pub no_plans: bool, 23 | #[serde(default)] 24 | pub need_auth: bool, 25 | } 26 | 27 | pub(crate) async fn details( 28 | app: &Arc<App>, 29 | cs: &CheckSuite, 30 | _cr: &CheckRun, 31 | _local_time: bool, 32 | ) -> Result<String> { 33 | let mut out = String::new(); 34 | let db = &app.db; 35 | 36 | let repo = db.load_repository(cs.repo); 37 | 38 | /* 39 | * List details about all check runs for this check suite. Sort the runs 40 | * into groups by run name, as they appear on GitHub. 41 | */ 42 | let mut runs: BTreeMap<String, Vec<CheckRun>> = Default::default(); 43 | for run in db.list_check_runs_for_suite(cs.id)? { 44 | let set = runs.entry(run.name.to_string()).or_default(); 45 | set.push(run); 46 | } 47 | 48 | /* 49 | * Within a set, sort by run ID (which, as the IDs are ULIDs, should also be 50 | * creation order): 51 | */ 52 | for set in runs.values_mut() { 53 | set.sort_by(|a, b| a.id.cmp(&b.id)); 54 | } 55 | 56 | /* 57 | * Render a list of sets of runs: 58 | */ 59 | out += "

Check Runs:\n"; 60 | out += "     \n"; 61 | for (set, runs) in &runs { 62 | out += &format!("  • check run \"{set}\"\n"); 63 | 64 | out += "       \n"; 65 | for run in runs { 66 | out += "     • "; 67 | if run.active { 68 | out += "<b>"; 69 | } 70 | 71 | let when = run 72 | .id 73 | .datetime() 74 | .to_rfc3339_opts(chrono::SecondsFormat::Secs, true); 75 | let age = run.id.age().render(); 76 | 77 | out += &format!( 78 | "{when} <a href=\"{}\">run {}</a> ", 79 | app.make_details_url(cs, run), 80 | run.id, 81 | ); 82 | 83 | if run.active { 84 | out += "</b> "; 85 | } 86 | 87 | out += &format!("[created {age} ago] "); 88 | 89 | if let Some(id) = &run.github_id { 90 | let id = if let Ok(repo) = &repo { 91 | let url = format!( 92 | "https://github.com/{}/{}/runs/{}", 93 | repo.owner, repo.name, id, 94 | ); 95 | format!("<a href=\"{url}\">{id}</a>") 96 | } else { 97 | id.to_string() 98 | }; 99 | out += &format!("(GitHub ID {id}) "); 100 | } 101 | 102 | if run.active { 103 | out += " [latest instance]"; 104 | } 105 | out += "\n"; 106 | } 107 | out += "     \n"; 108 | out += "     \n"; 109 | out += "     \n"; 110 | } 111 | out += "
\n"; 112 | 113 | Ok(out) 114 | } 115 | -------------------------------------------------------------------------------- /github/server/src/variety/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2021 Oxide Computer Company 3 | */ 4 | 5 | pub mod basic; 6 | pub mod control; 7 | -------------------------------------------------------------------------------- /github/server/www/bunyan.css: -------------------------------------------------------------------------------- 1 | .bunyan-trace { 2 | background-color: #96f5fa; 3 | } 4 | 5 | .bunyan-debug { 6 | background-color: #adc2ff; 7 | } 8 | 9 | .bunyan-info { 10 | background-color: #adffb0; 11 | } 12 | 13 | .bunyan-warn { 14 | background-color: #ffecad; 15 | } 16 | 17 | .bunyan-error { 18 | background-color: #ffb5ad; 19 | } 20 | 21 | .bunyan-fatal { 22 | background-color: #ffadf1; 23 | } 24 | 25 | .bunyan-other { 26 | background-color: #ffffff; 27 | } 28 | -------------------------------------------------------------------------------- /github/server/www/bunyan.js: -------------------------------------------------------------------------------- 1 | function selectMaxLevel(select) { 2 | let minIdx = select.selectedIndex; 3 | let options = select.options; 4 | 5 | console.info("setting minimum shown level to" + options[minIdx]); 6 | 7 | for (let i = 0; i < options.length; i++) { 8 | let display = i >= minIdx ? 'table-row' : 'none'; 9 | let levelClass = options[i].value; 10 | 11 | console.info("setting \'." + levelClass + "\' to \'display: " + 12 | display + "\'"); 13 | 14 | let logs = document.getElementsByClassName(levelClass); 15 | for (let log of logs) { 16 | log.style.display = display; 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /github/testdata/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-github-testdata" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | buildomat-common = { path = "../../common" } 12 | buildomat-github-database = { path = "../database" } 13 | 14 | anyhow = { workspace = true } 15 | serde = { workspace = true } 16 | serde_json = { workspace = true } 17 | toml = { workspace = true } 18 | -------------------------------------------------------------------------------- /github/tools/make_create_url.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Regrettably there is no simple file- or API-based mechanism for configuring a 5 | # GitHub App. One can arrange for a page with a form that will POST something 6 | # to GitHub, but that requires a server for a callback URL, which is remarkably 7 | # tedious for something that should be a simple PUT or POST with appropriate 8 | # credentials. 9 | # 10 | # The alternative is to specify a set of query parameters for the regular 11 | # manual creation page, which will pre-populate some (but of course, not all!) 12 | # of the form. The operator is then expected to copy and paste additional 13 | # things like secrets and private keys. In keeping with this distasteful 14 | # second option, we will produce the required URL, distastefully, via this 15 | # shell script. 16 | # 17 | 18 | if [[ -n $1 ]]; then 19 | # 20 | # The URL is different if you want to create the App in an 21 | # Organisation. 
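# For example, "make_create_url.sh my-org" (owner name hypothetical)
# produces a URL under https://github.com/organizations/my-org/, while
# running the script with no argument targets the personal account form.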
22 | # 23 | url="https://github.com/organizations/$1/settings/apps/new?" 24 | else 25 | url='https://github.com/settings/apps/new?' 26 | fi 27 | 28 | first=yes 29 | function add { 30 | local name=$1 31 | shift 32 | local value="$*" 33 | 34 | # 35 | # Escape! 36 | # 37 | value="${value// /+}" 38 | 39 | if [[ $first == no ]]; then 40 | url+='&' 41 | fi 42 | first=no 43 | 44 | url+="$name=$value" 45 | } 46 | 47 | add 'name' 'buildomat' 48 | add 'description' 'a software build labour saving device' 49 | add 'url' 'https://buildomat.eng.oxide.computer' 50 | add 'public' 'true' 51 | add 'webhook_url' 'https://buildomat.eng.oxide.computer/wg/0/webhook' 52 | add 'events[]' 'check_run' 53 | add 'events[]' 'check_suite' 54 | add 'events[]' 'create' 55 | add 'events[]' 'delete' 56 | add 'events[]' 'public' 57 | add 'events[]' 'pull_request' 58 | add 'events[]' 'push' 59 | add 'events[]' 'repository' 60 | 61 | # 62 | # Permissions: 63 | # 64 | add 'checks' 'write' 65 | add 'contents' 'read' 66 | add 'metadata' 'read' 67 | add 'pull_requests' 'read' 68 | add 'members' 'read' 69 | 70 | printf '%s\n' "$url" 71 | -------------------------------------------------------------------------------- /jobsh/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-jobsh" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [dependencies] 8 | buildomat-client = { path = "../client" } 9 | buildomat-common = { path = "../common" } 10 | buildomat-sse = { path = "../sse" } 11 | 12 | ansi-to-html = { workspace = true } 13 | anyhow = { workspace = true } 14 | base64 = { workspace = true } 15 | chrono = { workspace = true } 16 | dropshot = { workspace = true } 17 | futures = { workspace = true } 18 | html-escape = { workspace = true } 19 | hyper = { workspace = true } 20 | reqwest = { workspace = true } 21 | rust-toolchain-file = { workspace = true } 22 | rusty_ulid = { workspace = true } 23 | schemars = { workspace = true } 24 | serde = { workspace = true } 25 | serde_json = { workspace = true } 26 | slog = { workspace = true } 27 | thiserror = { workspace = true } 28 | tokio = { workspace = true } 29 | tokio-util = { workspace = true } 30 | toml = { workspace = true } 31 | -------------------------------------------------------------------------------- /jobsh/src/variety/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | pub mod basic; 6 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 80 2 | edition = "2021" 3 | use_small_heuristics = "Max" 4 | 5 | # 6 | # We do not wish to reformat the generated client code, but of course this 7 | # directive (uncontroversial though it would surely seem) is not available in 8 | # stable rustfmt: 9 | # 10 | # ignore = [ 11 | # "/openapi" 12 | # ] 13 | -------------------------------------------------------------------------------- /server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-server" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [dependencies] 8 | buildomat-common = { path = "../common" } 9 | buildomat-database = { path = "../database" } 10 | buildomat-download = { path = "../download" } 11 | buildomat-sse = { path = "../sse" } 12 
| buildomat-types = { path = "../types" } 13 | 14 | anyhow = { workspace = true } 15 | aws-config = { workspace = true } 16 | aws-credential-types = { workspace = true } 17 | aws-sdk-s3 = { workspace = true } 18 | aws-types = { workspace = true } 19 | bytes = { workspace = true } 20 | chrono = { workspace = true } 21 | dropshot = { workspace = true } 22 | futures = { workspace = true } 23 | getopts = { workspace = true } 24 | http-range = { workspace = true } 25 | hyper = { workspace = true } 26 | rusty_ulid = { workspace = true } 27 | schemars = { workspace = true } 28 | sea-query = { workspace = true } 29 | semver = { workspace = true } 30 | serde = { workspace = true } 31 | serde_json = { workspace = true } 32 | slog = { workspace = true } 33 | tempfile = { workspace = true } 34 | tlvc = { workspace = true } 35 | tlvc-text = { workspace = true } 36 | tokio = { workspace = true } 37 | tokio-stream = { workspace = true } 38 | usdt = { workspace = true } 39 | -------------------------------------------------------------------------------- /server/src/api/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 Oxide Computer Company 3 | */ 4 | 5 | mod prelude { 6 | pub(crate) use crate::{db, unauth_response, Central, MakeInternalError}; 7 | pub use anyhow::Result; 8 | pub use buildomat_download::{PotentialRange, RequestContextEx}; 9 | pub use buildomat_sse::{HeaderMapEx, ServerSentEvents}; 10 | pub use buildomat_types::metadata; 11 | pub use chrono::prelude::*; 12 | pub use dropshot::{ 13 | endpoint, Body, ClientErrorStatusCode, HttpError, HttpResponseCreated, 14 | HttpResponseDeleted, HttpResponseOk, HttpResponseUpdatedNoContent, 15 | PaginationParams, Path as TypedPath, Query as TypedQuery, 16 | RequestContext, ResultsPage, TypedBody, UntypedBody, WhichPage, 17 | }; 18 | pub use futures::TryStreamExt; 19 | pub use hyper::Response; 20 | pub use rusty_ulid::Ulid; 21 | pub use schemars::JsonSchema; 22 | pub use serde::{Deserialize, Serialize}; 23 | pub use slog::{error, info, o, warn, Logger}; 24 | pub use std::collections::HashMap; 25 | pub use std::str::FromStr; 26 | pub use std::sync::Arc; 27 | 28 | pub type DSResult<T> = std::result::Result<T, HttpError>; 29 | } 30 | 31 | pub mod admin; 32 | pub mod factory; 33 | pub mod public; 34 | pub mod user; 35 | pub mod worker; 36 | 37 | pub(crate) use prelude::DSResult; -------------------------------------------------------------------------------- /server/src/api/public.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use super::prelude::*; 6 | 7 | #[derive(Deserialize, JsonSchema)] 8 | pub(crate) struct PublicFilePath { 9 | username: String, 10 | series: String, 11 | version: String, 12 | name: String, 13 | } 14 | 15 | #[endpoint { 16 | method = GET, 17 | path = "/0/public/file/{username}/{series}/{version}/{name}", 18 | }] 19 | pub(crate) async fn public_file_download( 20 | rqctx: RequestContext<Arc<Central>>, 21 | path: TypedPath<PublicFilePath>, 22 | ) -> DSResult<Response<Body>> { 23 | public_file_common( 24 | &rqctx.log, 25 | &path.into_inner(), 26 | rqctx.context(), 27 | rqctx.range(), 28 | false, 29 | ) 30 | .await 31 | } 32 |
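/*
 * For example, a published artefact might be fetched with (names
 * hypothetical):
 *
 *   GET /0/public/file/alice/nightly/1.0.0/build.tar.gz
 *
 * The HEAD variant below returns the same metadata without the body.
 */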
33 | #[endpoint { 34 | method = HEAD, 35 | path = "/0/public/file/{username}/{series}/{version}/{name}", 36 | }] 37 | pub(crate) async fn public_file_head( 38 | rqctx: RequestContext<Arc<Central>>, 39 | path: TypedPath<PublicFilePath>, 40 | ) -> DSResult<Response<Body>> { 41 | public_file_common( 42 | &rqctx.log, 43 | &path.into_inner(), 44 | rqctx.context(), 45 | rqctx.range(), 46 | true, 47 | ) 48 | .await 49 | } 50 | 51 | pub(crate) async fn public_file_common( 52 | log: &Logger, 53 | p: &PublicFilePath, 54 | c: &Central, 55 | pr: Option<PotentialRange>, 56 | head_only: bool, 57 | ) -> DSResult<Response<Body>> { 58 | /* 59 | * Load the user from the database. 60 | */ 61 | let u = if let Some(au) = c.db.user_by_name(&p.username).or_500()? { 62 | au.id 63 | } else { 64 | return Err(HttpError::for_client_error( 65 | None, 66 | ClientErrorStatusCode::NOT_FOUND, 67 | "published file not found".into(), 68 | )); 69 | }; 70 | 71 | let pf = if let Some(pf) = 72 | c.db.published_file_by_name(u, &p.series, &p.version, &p.name) 73 | .or_500()? 74 | { 75 | pf 76 | } else { 77 | return Err(HttpError::for_client_error( 78 | None, 79 | ClientErrorStatusCode::NOT_FOUND, 80 | "published file not found".into(), 81 | )); 82 | }; 83 | 84 | let info = format!( 85 | "published file: user {} series {} version {} name {}", 86 | u, pf.series, pf.version, pf.name 87 | ); 88 | c.file_response(log, info, pf.job, pf.file, pr, head_only).await 89 | } 90 | -------------------------------------------------------------------------------- /server/src/archive/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod files; 2 | pub(crate) mod jobs; -------------------------------------------------------------------------------- /server/src/chunks.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use std::str::FromStr; 6 | use std::sync::Arc; 7 | use std::time::Duration; 8 | 9 | use anyhow::Result; 10 | #[allow(unused_imports)] 11 | use slog::{error, info, warn, Logger}; 12 | 13 | use super::Central; 14 | 15 | async fn chunk_cleanup_one(log: &Logger, c: &Central) -> Result<()> { 16 | /* 17 | * Get a list of chunk directories. Each directory will be the ID of a job. 18 | * If the job is complete, we can remove the entire directory. 19 | */ 20 | let mut dir = std::fs::read_dir(c.chunk_dir()?)?; 21 |
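/*
 * (read_dir() yields io::Result items, so next() produces an
 * Option<Result<DirEntry>>; transpose() flips that into a
 * Result<Option<DirEntry>>, letting ? propagate errors while the loop
 * still terminates cleanly on None.)
 */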
22 | while let Some(ent) = dir.next().transpose()? { 23 | if !ent.file_type()?.is_dir() { 24 | error!( 25 | log, 26 | "unexpected item in chunk dir: {:?}: {:?}", 27 | ent.path(), 28 | ent 29 | ); 30 | continue; 31 | } 32 | 33 | let id = if let Ok(name) = ent.file_name().into_string() { 34 | match rusty_ulid::Ulid::from_str(name.as_str()) { 35 | Ok(id) => super::db::JobId(id), 36 | Err(e) => { 37 | error!( 38 | log, 39 | "chunk directory not ulid: {:?}: {:?}", 40 | ent.path(), 41 | e 42 | ); 43 | continue; 44 | } 45 | } 46 | } else { 47 | error!(log, "unexpected item in chunk dir: {:?}", ent.path()); 48 | continue; 49 | }; 50 | 51 | match c.db.job_opt(id) { 52 | Ok(Some(job)) => { 53 | if !job.complete { 54 | continue; 55 | } 56 | 57 | info!(log, "job {} is complete; removing {:?}", id, ent.path()); 58 | } 59 | Ok(None) => { 60 | warn!(log, "job {} not found; removing {:?}", id, ent.path()); 61 | } 62 | Err(e) => { 63 | error!(log, "could not look up job {}: {:?}", id, e); 64 | continue; 65 | } 66 | } 67 | 68 | std::fs::remove_dir_all(ent.path())?; 69 | } 70 | 71 | Ok(()) 72 | } 73 | 74 | pub(crate) async fn chunk_cleanup(log: Logger, c: Arc<Central>) -> Result<()> { 75 | let delay = Duration::from_secs(73); 76 | info!(log, "start chunk cleanup task"); 77 | 78 | loop { 79 | if let Err(e) = chunk_cleanup_one(&log, &c).await { 80 | error!(log, "chunk cleanup task error: {:?}", e); 81 | } 82 | 83 | tokio::time::sleep(delay).await; 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /server/src/config.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 Oxide Computer Company 3 | */ 4 | 5 | use std::path::Path; 6 | 7 | use anyhow::Result; 8 | use buildomat_common::*; 9 | use serde::Deserialize; 10 | #[allow(unused_imports)] 11 | use slog::{error, info, o, warn, Logger}; 12 | 13 | #[derive(Deserialize, Debug)] 14 | #[serde(deny_unknown_fields)] 15 | pub struct ConfigFile { 16 | pub admin: ConfigFileAdmin, 17 | #[allow(dead_code)] 18 | pub general: ConfigFileGeneral, 19 | pub storage: ConfigFileStorage, 20 | pub sqlite: ConfigFileSqlite, 21 | pub job: ConfigFileJob, 22 | #[serde(default)] 23 | pub file: ConfigFileFile, 24 | } 25 | 26 | #[derive(Deserialize, Debug)] 27 | #[serde(deny_unknown_fields)] 28 | pub struct ConfigFileGeneral { 29 | #[allow(dead_code)] 30 | pub baseurl: String, 31 | } 32 | 33 | #[derive(Deserialize, Debug)] 34 | #[serde(deny_unknown_fields)] 35 | pub struct ConfigFileJob { 36 | pub max_runtime: u64, 37 | #[serde(default = "default_max_size_per_file_mb")] 38 | pub max_size_per_file_mb: u64, 39 | #[serde(default)] 40 | pub auto_archive: bool, 41 | #[serde(default)] 42 | pub auto_purge: bool, 43 | #[serde(default = "default_purge_delay_msec")] 44 | pub purge_delay_msec: u64, 45 | } 46 | 47 | impl ConfigFileJob { 48 | pub fn max_bytes_per_output(&self) -> u64 { 49 | self.max_size_per_file_mb.saturating_mul(1024 * 1024) 50 | } 51 | 52 | pub fn max_bytes_per_input(&self) -> u64 { 53 | self.max_size_per_file_mb.saturating_mul(1024 * 1024) 54 | } 55 | } 56 | 57 | fn default_max_size_per_file_mb() -> u64 { 58 | /* 59 | * By default, allow 1GB files to be uploaded: 60 | */ 61 | 1024 62 | } 63 | 64 | fn default_purge_delay_msec() -> u64 { 65 | /* 66 | * By default, wait half a second after a successful purge before purging 67 | * another job. When there are a lot of jobs to purge, this can help to 68 | * keep the system responsive to active jobs.
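*
* A deployment that wants more aggressive cleanup might set, in the
* [job] section of the server configuration (values illustrative):
*
*   auto_purge = true
*   purge_delay_msec = 100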
69 | */ 70 | 500 71 | } 72 | 73 | #[derive(Deserialize, Debug, Default)] 74 | #[serde(deny_unknown_fields)] 75 | pub struct ConfigFileFile { 76 | pub auto_archive: Option<bool>, 77 | } 78 | 79 | impl ConfigFileFile { 80 | pub fn auto_archive(&self) -> bool { 81 | self.auto_archive.unwrap_or(true) 82 | } 83 | } 84 | 85 | #[derive(Deserialize, Debug)] 86 | #[serde(deny_unknown_fields)] 87 | pub struct ConfigFileSqlite { 88 | #[serde(default)] 89 | pub cache_kb: Option<u32>, 90 | } 91 | 92 | #[derive(Deserialize, Debug)] 93 | #[serde(deny_unknown_fields)] 94 | pub struct ConfigFileAdmin { 95 | pub token: String, 96 | /** 97 | * Should we hold off on new VM creation by default at startup? 98 | */ 99 | pub hold: bool, 100 | } 101 | 102 | #[derive(Deserialize, Debug)] 103 | #[serde(deny_unknown_fields)] 104 | pub struct ConfigFileStorage { 105 | pub access_key_id: String, 106 | pub secret_access_key: String, 107 | pub bucket: String, 108 | pub prefix: String, 109 | pub region: String, 110 | } 111 | 112 | impl ConfigFileStorage { 113 | pub fn creds(&self) -> aws_credential_types::Credentials { 114 | aws_credential_types::Credentials::new( 115 | &self.access_key_id, 116 | &self.secret_access_key, 117 | None, 118 | None, 119 | "buildomat", 120 | ) 121 | } 122 | 123 | pub fn region(&self) -> aws_types::region::Region { 124 | aws_types::region::Region::new(self.region.to_string()) 125 | } 126 | } 127 | 128 | pub fn load<P: AsRef<Path>>(path: P) -> Result<ConfigFile> { 129 | read_toml(path.as_ref()) 130 | } 131 | -------------------------------------------------------------------------------- /server/src/db/tables/factory.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct Factory { 10 | pub id: FactoryId, 11 | pub name: String, 12 | pub token: String, 13 | pub lastping: Option<IsoDate>, 14 | pub enable: bool, 15 | 16 | /** 17 | * For debugging purposes, it can be helpful to configure a particular 18 | * factory so that each worker it creates is marked held. Worker creation 19 | * will proceed as it normally would, but a job will not be assigned until 20 | * the hold is explicitly released by the operator, allowing the operator to 21 | * inspect or modify the worker environment prior to the start of job 22 | * execution.
23 | */ 24 | pub hold_workers: bool, 25 | } 26 | 27 | impl FromRow for Factory { 28 | fn columns() -> Vec<ColumnRef> { 29 | [ 30 | FactoryDef::Id, 31 | FactoryDef::Name, 32 | FactoryDef::Token, 33 | FactoryDef::Lastping, 34 | FactoryDef::Enable, 35 | FactoryDef::HoldWorkers, 36 | ] 37 | .into_iter() 38 | .map(|col| { 39 | ColumnRef::TableColumn( 40 | SeaRc::new(FactoryDef::Table), 41 | SeaRc::new(col), 42 | ) 43 | }) 44 | .collect() 45 | } 46 | 47 | fn from_row(row: &Row) -> rusqlite::Result<Factory> { 48 | Ok(Factory { 49 | id: row.get(0)?, 50 | name: row.get(1)?, 51 | token: row.get(2)?, 52 | lastping: row.get(3)?, 53 | enable: row.get(4)?, 54 | hold_workers: row.get(5)?, 55 | }) 56 | } 57 | } 58 | 59 | impl Factory { 60 | pub fn find(id: FactoryId) -> SelectStatement { 61 | Query::select() 62 | .from(FactoryDef::Table) 63 | .columns(Factory::columns()) 64 | .and_where(Expr::col(FactoryDef::Id).eq(id)) 65 | .to_owned() 66 | } 67 | 68 | pub fn insert(&self) -> InsertStatement { 69 | Query::insert() 70 | .into_table(FactoryDef::Table) 71 | .columns(Self::bare_columns()) 72 | .values_panic([ 73 | self.id.into(), 74 | self.name.clone().into(), 75 | self.token.clone().into(), 76 | self.lastping.into(), 77 | self.enable.into(), 78 | self.hold_workers.into(), 79 | ]) 80 | .to_owned() 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /server/src/db/tables/job.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Clone, Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | #[allow(unused)] 10 | pub struct Job { 11 | pub id: JobId, 12 | pub owner: UserId, 13 | pub name: String, 14 | /** 15 | * The original target name specified by the user, prior to resolution. 16 | */ 17 | pub target: String, 18 | pub complete: bool, 19 | pub failed: bool, 20 | pub worker: Option<WorkerId>, 21 | pub waiting: bool, 22 | /** 23 | * The resolved target ID. This field should be accessed through the 24 | * target() method to account for old records where this ID was optional. 25 | */ 26 | pub target_id: Option<TargetId>, 27 | pub cancelled: bool, 28 | /** 29 | * When was this job successfully uploaded to the object store? 30 | */ 31 | pub time_archived: Option<IsoDate>, 32 | /** 33 | * When were the live records for this job removed from the database?
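* (Compare time_archived above: the server's auto_archive and
* auto_purge configuration settings drive these two steps.)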
34 | */ 35 | pub time_purged: Option<IsoDate>, 36 | } 37 | 38 | impl FromRow for Job { 39 | fn columns() -> Vec<ColumnRef> { 40 | [ 41 | JobDef::Id, 42 | JobDef::Owner, 43 | JobDef::Name, 44 | JobDef::Target, 45 | JobDef::Complete, 46 | JobDef::Failed, 47 | JobDef::Worker, 48 | JobDef::Waiting, 49 | JobDef::TargetId, 50 | JobDef::Cancelled, 51 | JobDef::TimeArchived, 52 | JobDef::TimePurged, 53 | ] 54 | .into_iter() 55 | .map(|col| { 56 | ColumnRef::TableColumn(SeaRc::new(JobDef::Table), SeaRc::new(col)) 57 | }) 58 | .collect() 59 | } 60 | 61 | fn from_row(row: &Row) -> rusqlite::Result<Job> { 62 | Ok(Job { 63 | id: row.get(0)?, 64 | owner: row.get(1)?, 65 | name: row.get(2)?, 66 | target: row.get(3)?, 67 | complete: row.get(4)?, 68 | failed: row.get(5)?, 69 | worker: row.get(6)?, 70 | waiting: row.get(7)?, 71 | target_id: row.get(8)?, 72 | cancelled: row.get(9)?, 73 | time_archived: row.get(10)?, 74 | time_purged: row.get(11)?, 75 | }) 76 | } 77 | } 78 | 79 | impl Job { 80 | pub fn find(id: JobId) -> SelectStatement { 81 | Query::select() 82 | .from(JobDef::Table) 83 | .columns(Job::columns()) 84 | .and_where(Expr::col(JobDef::Id).eq(id)) 85 | .to_owned() 86 | } 87 | 88 | pub fn insert(&self) -> InsertStatement { 89 | Query::insert() 90 | .into_table(JobDef::Table) 91 | .columns(Self::bare_columns()) 92 | .values_panic([ 93 | self.id.into(), 94 | self.owner.into(), 95 | self.name.clone().into(), 96 | self.target.clone().into(), 97 | self.complete.into(), 98 | self.failed.into(), 99 | self.worker.into(), 100 | self.waiting.into(), 101 | self.target_id.into(), 102 | self.cancelled.into(), 103 | self.time_archived.into(), 104 | self.time_purged.into(), 105 | ]) 106 | .to_owned() 107 | } 108 | 109 | #[allow(dead_code)] 110 | pub fn time_submit(&self) -> DateTime<Utc> { 111 | self.id.datetime() 112 | } 113 | 114 | pub fn target(&self) -> TargetId { 115 | self.target_id.unwrap_or_else(|| { 116 | /* 117 | * XXX No new records should be created without a resolved target 118 | * ID, but old records might not have had one.
This is the ID of 119 | * the canned "default" target: 120 | */ 121 | TargetId::from_str("00E82MSW0000000000000TT000").unwrap() 122 | }) 123 | } 124 | 125 | pub fn is_archived(&self) -> bool { 126 | self.time_archived.is_some() 127 | } 128 | 129 | pub fn is_purged(&self) -> bool { 130 | self.time_purged.is_some() 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /server/src/db/tables/job_depend.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct JobDepend { 10 | pub job: JobId, 11 | pub name: String, 12 | pub prior_job: JobId, 13 | pub copy_outputs: bool, 14 | pub on_failed: bool, 15 | pub on_completed: bool, 16 | pub satisfied: bool, 17 | } 18 | 19 | impl FromRow for JobDepend { 20 | fn columns() -> Vec<ColumnRef> { 21 | [ 22 | JobDependDef::Job, 23 | JobDependDef::Name, 24 | JobDependDef::PriorJob, 25 | JobDependDef::CopyOutputs, 26 | JobDependDef::OnFailed, 27 | JobDependDef::OnCompleted, 28 | JobDependDef::Satisfied, 29 | ] 30 | .into_iter() 31 | .map(|col| { 32 | ColumnRef::TableColumn( 33 | SeaRc::new(JobDependDef::Table), 34 | SeaRc::new(col), 35 | ) 36 | }) 37 | .collect() 38 | } 39 | 40 | fn from_row(row: &Row) -> rusqlite::Result<JobDepend> { 41 | Ok(JobDepend { 42 | job: row.get(0)?, 43 | name: row.get(1)?, 44 | prior_job: row.get(2)?, 45 | copy_outputs: row.get(3)?, 46 | on_failed: row.get(4)?, 47 | on_completed: row.get(5)?, 48 | satisfied: row.get(6)?, 49 | }) 50 | } 51 | } 52 | 53 | impl JobDepend { 54 | pub fn find(job: JobId, name: &str) -> SelectStatement { 55 | Query::select() 56 | .from(JobDependDef::Table) 57 | .columns(JobDepend::columns()) 58 | .and_where(Expr::col(JobDependDef::Job).eq(job)) 59 | .and_where(Expr::col(JobDependDef::Name).eq(name)) 60 | .to_owned() 61 | } 62 | 63 | pub fn insert(&self) -> InsertStatement { 64 | Query::insert() 65 | .into_table(JobDependDef::Table) 66 | .columns(Self::bare_columns()) 67 | .values_panic([ 68 | self.job.into(), 69 | self.name.clone().into(), 70 | self.prior_job.into(), 71 | self.copy_outputs.into(), 72 | self.on_failed.into(), 73 | self.on_completed.into(), 74 | self.satisfied.into(), 75 | ]) 76 | .to_owned() 77 | } 78 | 79 | pub fn from_create(cd: &CreateDepend, job: JobId) -> JobDepend { 80 | JobDepend { 81 | job, 82 | name: cd.name.to_string(), 83 | prior_job: cd.prior_job, 84 | copy_outputs: cd.copy_outputs, 85 | on_failed: cd.on_failed, 86 | on_completed: cd.on_completed, 87 | satisfied: false, 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /server/src/db/tables/job_event.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct JobEvent { 10 | pub job: JobId, 11 | pub task: Option<u32>, 12 | pub seq: u32, 13 | pub stream: String, 14 | /** 15 | * The time at which the core API server received or generated this event. 16 | */ 17 | pub time: IsoDate, 18 | pub payload: String, 19 | /** 20 | * If this event was received from a remote system, such as a worker, this 21 | * time reflects the time on that remote system when the event was 22 | * generated.
Due to issues with NTP, etc, it might not align exactly with 23 | * the time field. 24 | */ 25 | pub time_remote: Option<IsoDate>, 26 | } 27 | 28 | impl FromRow for JobEvent { 29 | fn columns() -> Vec<ColumnRef> { 30 | [ 31 | JobEventDef::Job, 32 | JobEventDef::Task, 33 | JobEventDef::Seq, 34 | JobEventDef::Stream, 35 | JobEventDef::Time, 36 | JobEventDef::Payload, 37 | JobEventDef::TimeRemote, 38 | ] 39 | .into_iter() 40 | .map(|col| { 41 | ColumnRef::TableColumn( 42 | SeaRc::new(JobEventDef::Table), 43 | SeaRc::new(col), 44 | ) 45 | }) 46 | .collect() 47 | } 48 | 49 | fn from_row(row: &Row) -> rusqlite::Result<JobEvent> { 50 | Ok(JobEvent { 51 | job: row.get(0)?, 52 | task: row.get(1)?, 53 | seq: row.get(2)?, 54 | stream: row.get(3)?, 55 | time: row.get(4)?, 56 | payload: row.get(5)?, 57 | time_remote: row.get(6)?, 58 | }) 59 | } 60 | } 61 | 62 | impl JobEvent { 63 | pub fn insert(&self) -> InsertStatement { 64 | Query::insert() 65 | .into_table(JobEventDef::Table) 66 | .columns(Self::bare_columns()) 67 | .values_panic([ 68 | self.job.into(), 69 | self.task.into(), 70 | self.seq.into(), 71 | self.stream.clone().into(), 72 | self.time.into(), 73 | self.payload.clone().into(), 74 | self.time_remote.into(), 75 | ]) 76 | .to_owned() 77 | } 78 | 79 | pub fn age(&self) -> Duration { 80 | self.time.age() 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /server/src/db/tables/job_file.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct JobFile { 10 | pub job: JobId, 11 | pub id: JobFileId, 12 | pub size: DataSize, 13 | /** 14 | * When was this file successfully uploaded to the object store? 15 | */ 16 | pub time_archived: Option<IsoDate>, 17 | } 18 | 19 | impl FromRow for JobFile { 20 | fn columns() -> Vec<ColumnRef> { 21 | [ 22 | JobFileDef::Job, 23 | JobFileDef::Id, 24 | JobFileDef::Size, 25 | JobFileDef::TimeArchived, 26 | ] 27 | .into_iter() 28 | .map(|col| { 29 | ColumnRef::TableColumn( 30 | SeaRc::new(JobFileDef::Table), 31 | SeaRc::new(col), 32 | ) 33 | }) 34 | .collect() 35 | } 36 | 37 | fn from_row(row: &Row) -> rusqlite::Result<JobFile> { 38 | Ok(JobFile { 39 | job: row.get(0)?, 40 | id: row.get(1)?, 41 | size: row.get(2)?, 42 | time_archived: row.get(3)?, 43 | }) 44 | } 45 | } 46 | 47 | impl JobFile { 48 | pub fn find(job: JobId, file: JobFileId) -> SelectStatement { 49 | Query::select() 50 | .from(JobFileDef::Table) 51 | .columns(JobFile::columns()) 52 | .and_where(Expr::col(JobFileDef::Job).eq(job)) 53 | .and_where(Expr::col(JobFileDef::Id).eq(file)) 54 | .to_owned() 55 | } 56 | 57 | pub fn insert(&self) -> InsertStatement { 58 | Query::insert() 59 | .into_table(JobFileDef::Table) 60 | .columns(Self::bare_columns()) 61 | .values_panic([ 62 | self.job.into(), 63 | self.id.into(), 64 | self.size.into(), 65 | self.time_archived.into(), 66 | ]) 67 | .to_owned() 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /server/src/db/tables/job_input.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct JobInput { 10 | pub job: JobId, 11 | pub name: String, 12 | pub id: Option<JobFileId>, 13 | /** 14 | * Files are identified by a (job ID, file ID) tuple.
In the case of an 15 | * output that is copied to another job as an input, this field contains the 16 | * job ID of the job which actually holds the file. If this is not set, the 17 | * file is an input that was uploaded directly by the user creating the job 18 | * and is stored with the job that owns the input record. 19 | */ 20 | pub other_job: Option<JobId>, 21 | } 22 | 23 | impl FromRow for JobInput { 24 | fn columns() -> Vec<ColumnRef> { 25 | [ 26 | JobInputDef::Job, 27 | JobInputDef::Name, 28 | JobInputDef::Id, 29 | JobInputDef::OtherJob, 30 | ] 31 | .into_iter() 32 | .map(|col| { 33 | ColumnRef::TableColumn( 34 | SeaRc::new(JobInputDef::Table), 35 | SeaRc::new(col), 36 | ) 37 | }) 38 | .collect() 39 | } 40 | 41 | fn from_row(row: &Row) -> rusqlite::Result<JobInput> { 42 | Ok(JobInput { 43 | job: row.get(0)?, 44 | name: row.get(1)?, 45 | id: row.get(2)?, 46 | other_job: row.get(3)?, 47 | }) 48 | } 49 | } 50 | 51 | impl JobInput { 52 | pub fn insert(&self) -> InsertStatement { 53 | Query::insert() 54 | .into_table(JobInputDef::Table) 55 | .columns(Self::bare_columns()) 56 | .values_panic([ 57 | self.job.into(), 58 | self.name.clone().into(), 59 | self.id.into(), 60 | self.other_job.into(), 61 | ]) 62 | .to_owned() 63 | } 64 | 65 | pub fn upsert(&self) -> InsertStatement { 66 | self.insert() 67 | .on_conflict(OnConflict::new().do_nothing().to_owned()) 68 | .to_owned() 69 | } 70 | 71 | pub fn from_create(name: &str, job: JobId) -> JobInput { 72 | JobInput { job, name: name.to_string(), id: None, other_job: None } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /server/src/db/tables/job_output.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct JobOutput { 10 | pub job: JobId, 11 | pub path: String, 12 | pub id: JobFileId, 13 | } 14 | 15 | impl FromRow for JobOutput { 16 | fn columns() -> Vec<ColumnRef> { 17 | [JobOutputDef::Job, JobOutputDef::Path, JobOutputDef::Id] 18 | .into_iter() 19 | .map(|col| { 20 | ColumnRef::TableColumn( 21 | SeaRc::new(JobOutputDef::Table), 22 | SeaRc::new(col), 23 | ) 24 | }) 25 | .collect() 26 | } 27 | 28 | fn from_row(row: &Row) -> rusqlite::Result<JobOutput> { 29 | Ok(JobOutput { job: row.get(0)?, path: row.get(1)?, id: row.get(2)? }) 30 | } 31 | } 32 |
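/*
 * These builders pair with the sea-query-rusqlite glue used by this
 * workspace; a caller might execute the find() query below roughly like
 * so (sketch, assuming a rusqlite Connection named "conn"):
 *
 *   let (sql, values) =
 *       JobOutput::find(job, file).build_rusqlite(SqliteQueryBuilder);
 *   let out =
 *       conn.query_row(&sql, &*values.as_params(), JobOutput::from_row)?;
 */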
33 | impl JobOutput { 34 | pub fn find(job: JobId, file: JobFileId) -> SelectStatement { 35 | Query::select() 36 | .from(JobOutputDef::Table) 37 | .columns(JobOutput::columns()) 38 | .and_where(Expr::col(JobOutputDef::Job).eq(job)) 39 | .and_where(Expr::col(JobOutputDef::Id).eq(file)) 40 | .to_owned() 41 | } 42 | 43 | pub fn insert(&self) -> InsertStatement { 44 | Query::insert() 45 | .into_table(JobOutputDef::Table) 46 | .columns(Self::bare_columns()) 47 | .values_panic([ 48 | self.job.into(), 49 | self.path.clone().into(), 50 | self.id.into(), 51 | ]) 52 | .to_owned() 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /server/src/db/tables/job_output_rule.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct JobOutputRule { 10 | pub job: JobId, 11 | pub seq: u32, 12 | pub rule: String, 13 | pub ignore: bool, 14 | pub size_change_ok: bool, 15 | pub require_match: bool, 16 | } 17 | 18 | impl FromRow for JobOutputRule { 19 | fn columns() -> Vec<ColumnRef> { 20 | [ 21 | JobOutputRuleDef::Job, 22 | JobOutputRuleDef::Seq, 23 | JobOutputRuleDef::Rule, 24 | JobOutputRuleDef::Ignore, 25 | JobOutputRuleDef::SizeChangeOk, 26 | JobOutputRuleDef::RequireMatch, 27 | ] 28 | .into_iter() 29 | .map(|col| { 30 | ColumnRef::TableColumn( 31 | SeaRc::new(JobOutputRuleDef::Table), 32 | SeaRc::new(col), 33 | ) 34 | }) 35 | .collect() 36 | } 37 | 38 | fn from_row(row: &Row) -> rusqlite::Result<JobOutputRule> { 39 | Ok(JobOutputRule { 40 | job: row.get(0)?, 41 | seq: row.get(1)?, 42 | rule: row.get(2)?, 43 | ignore: row.get(3)?, 44 | size_change_ok: row.get(4)?, 45 | require_match: row.get(5)?, 46 | }) 47 | } 48 | } 49 | 50 | impl JobOutputRule { 51 | pub fn insert(&self) -> InsertStatement { 52 | Query::insert() 53 | .into_table(JobOutputRuleDef::Table) 54 | .columns(Self::bare_columns()) 55 | .values_panic([ 56 | self.job.into(), 57 | self.seq.into(), 58 | self.rule.clone().into(), 59 | self.ignore.into(), 60 | self.size_change_ok.into(), 61 | self.require_match.into(), 62 | ]) 63 | .to_owned() 64 | } 65 | 66 | pub fn from_create( 67 | cd: &CreateOutputRule, 68 | job: JobId, 69 | seq: usize, 70 | ) -> JobOutputRule { 71 | JobOutputRule { 72 | job, 73 | seq: seq.try_into().unwrap(), 74 | rule: cd.rule.to_string(), 75 | ignore: cd.ignore, 76 | size_change_ok: cd.size_change_ok, 77 | require_match: cd.require_match, 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /server/src/db/tables/job_store.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct JobStore { 10 | pub job: JobId, 11 | pub name: String, 12 | pub value: String, 13 | pub secret: bool, 14 | pub source: String, 15 | pub time_update: IsoDate, 16 | } 17 | 18 | impl FromRow for JobStore { 19 | fn columns() -> Vec<ColumnRef> { 20 | [ 21 | JobStoreDef::Job, 22 | JobStoreDef::Name, 23 | JobStoreDef::Value, 24 | JobStoreDef::Secret, 25 | JobStoreDef::Source, 26 | JobStoreDef::TimeUpdate, 27 | ] 28 | .into_iter() 29 | .map(|col| { 30 | ColumnRef::TableColumn( 31 | SeaRc::new(JobStoreDef::Table), 32 | SeaRc::new(col), 33 | ) 34 | }) 35 | .collect() 36 | } 37 |
38 | fn from_row(row: &Row) -> rusqlite::Result<JobStore> { 39 | Ok(JobStore { 40 | job: row.get(0)?, 41 | name: row.get(1)?, 42 | value: row.get(2)?, 43 | secret: row.get(3)?, 44 | source: row.get(4)?, 45 | time_update: row.get(5)?, 46 | }) 47 | } 48 | } 49 | 50 | impl JobStore { 51 | pub fn find(job: JobId, name: &str) -> SelectStatement { 52 | Query::select() 53 | .from(JobStoreDef::Table) 54 | .columns(JobStore::columns()) 55 | .and_where(Expr::col(JobStoreDef::Job).eq(job)) 56 | .and_where(Expr::col(JobStoreDef::Name).eq(name)) 57 | .to_owned() 58 | } 59 | 60 | pub fn insert(&self) -> InsertStatement { 61 | Query::insert() 62 | .into_table(JobStoreDef::Table) 63 | .columns(Self::bare_columns()) 64 | .values_panic([ 65 | self.job.into(), 66 | self.name.clone().into(), 67 | self.value.clone().into(), 68 | self.secret.into(), 69 | self.source.clone().into(), 70 | self.time_update.into(), 71 | ]) 72 | .to_owned() 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /server/src/db/tables/job_tag.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | #[allow(unused)] 10 | pub struct JobTag { 11 | pub job: JobId, 12 | pub name: String, 13 | pub value: String, 14 | } 15 | 16 | impl FromRow for JobTag { 17 | fn columns() -> Vec<ColumnRef> { 18 | [JobTagDef::Job, JobTagDef::Name, JobTagDef::Value] 19 | .into_iter() 20 | .map(|col| { 21 | ColumnRef::TableColumn( 22 | SeaRc::new(JobTagDef::Table), 23 | SeaRc::new(col), 24 | ) 25 | }) 26 | .collect() 27 | } 28 | 29 | fn from_row(row: &Row) -> rusqlite::Result<JobTag> { 30 | Ok(JobTag { job: row.get(0)?, name: row.get(1)?, value: row.get(2)? }) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /server/src/db/tables/job_time.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct JobTime { 10 | pub job: JobId, 11 | pub name: String, 12 | pub time: IsoDate, 13 | } 14 | 15 | impl FromRow for JobTime { 16 | fn columns() -> Vec<ColumnRef> { 17 | [JobTimeDef::Job, JobTimeDef::Name, JobTimeDef::Time] 18 | .into_iter() 19 | .map(|col| { 20 | ColumnRef::TableColumn( 21 | SeaRc::new(JobTimeDef::Table), 22 | SeaRc::new(col), 23 | ) 24 | }) 25 | .collect() 26 | } 27 | 28 | fn from_row(row: &Row) -> rusqlite::Result<JobTime> { 29 | Ok(JobTime { job: row.get(0)?, name: row.get(1)?, time: row.get(2)? }) 30 | } 31 | } 32 |
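/*
 * (The upsert() below leans on SQLite's ON CONFLICT DO NOTHING: writing
 * the same named time twice leaves the originally recorded value in
 * place.)
 */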
33 | impl JobTime { 34 | pub fn find(job: JobId, name: &str) -> SelectStatement { 35 | Query::select() 36 | .from(JobTimeDef::Table) 37 | .columns(JobTime::columns()) 38 | .and_where(Expr::col(JobTimeDef::Job).eq(job)) 39 | .and_where(Expr::col(JobTimeDef::Name).eq(name)) 40 | .to_owned() 41 | } 42 | 43 | pub fn upsert(&self) -> InsertStatement { 44 | Query::insert() 45 | .into_table(JobTimeDef::Table) 46 | .columns(Self::bare_columns()) 47 | .values_panic([ 48 | self.job.into(), 49 | self.name.clone().into(), 50 | self.time.into(), 51 | ]) 52 | .on_conflict(OnConflict::new().do_nothing().to_owned()) 53 | .to_owned() 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /server/src/db/tables/published_file.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct PublishedFile { 10 | pub owner: UserId, 11 | pub series: String, 12 | pub version: String, 13 | pub name: String, 14 | pub job: JobId, 15 | pub file: JobFileId, 16 | } 17 | 18 | impl FromRow for PublishedFile { 19 | fn columns() -> Vec<ColumnRef> { 20 | [ 21 | PublishedFileDef::Owner, 22 | PublishedFileDef::Series, 23 | PublishedFileDef::Version, 24 | PublishedFileDef::Name, 25 | PublishedFileDef::Job, 26 | PublishedFileDef::File, 27 | ] 28 | .into_iter() 29 | .map(|col| { 30 | ColumnRef::TableColumn( 31 | SeaRc::new(PublishedFileDef::Table), 32 | SeaRc::new(col), 33 | ) 34 | }) 35 | .collect() 36 | } 37 | 38 | fn from_row(row: &Row) -> rusqlite::Result<PublishedFile> { 39 | Ok(PublishedFile { 40 | owner: row.get(0)?, 41 | series: row.get(1)?, 42 | version: row.get(2)?, 43 | name: row.get(3)?, 44 | job: row.get(4)?, 45 | file: row.get(5)?, 46 | }) 47 | } 48 | } 49 | 50 | impl PublishedFile { 51 | pub fn find( 52 | owner: UserId, 53 | series: &str, 54 | version: &str, 55 | name: &str, 56 | ) -> SelectStatement { 57 | Query::select() 58 | .from(PublishedFileDef::Table) 59 | .columns(PublishedFile::columns()) 60 | .and_where(Expr::col(PublishedFileDef::Owner).eq(owner)) 61 | .and_where(Expr::col(PublishedFileDef::Series).eq(series)) 62 | .and_where(Expr::col(PublishedFileDef::Version).eq(version)) 63 | .and_where(Expr::col(PublishedFileDef::Name).eq(name)) 64 | .to_owned() 65 | } 66 | 67 | pub fn insert(&self) -> InsertStatement { 68 | Query::insert() 69 | .into_table(PublishedFileDef::Table) 70 | .columns(Self::bare_columns()) 71 | .values_panic([ 72 | self.owner.into(), 73 | self.series.clone().into(), 74 | self.version.clone().into(), 75 | self.name.clone().into(), 76 | self.job.into(), 77 | self.file.into(), 78 | ]) 79 | .to_owned() 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /server/src/db/tables/target.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct Target { 10 | pub id: TargetId, 11 | pub name: String, 12 | pub desc: String, 13 | pub redirect: Option<TargetId>, 14 | pub privilege: Option<String>, 15 | } 16 | 17 | impl FromRow for Target { 18 | fn columns() -> Vec<ColumnRef> { 19 | [ 20 | TargetDef::Id, 21 | TargetDef::Name, 22 | TargetDef::Desc, 23 | TargetDef::Redirect, 24 | TargetDef::Privilege, 25 | ] 26 | .into_iter() 27 | .map(|col| { 28 |
ColumnRef::TableColumn( 29 | SeaRc::new(TargetDef::Table), 30 | SeaRc::new(col), 31 | ) 32 | }) 33 | .collect() 34 | } 35 | 36 | fn from_row(row: &Row) -> rusqlite::Result<Target> { 37 | Ok(Target { 38 | id: row.get(0)?, 39 | name: row.get(1)?, 40 | desc: row.get(2)?, 41 | redirect: row.get(3)?, 42 | privilege: row.get(4)?, 43 | }) 44 | } 45 | } 46 | 47 | impl Target { 48 | pub fn find(id: TargetId) -> SelectStatement { 49 | Query::select() 50 | .from(TargetDef::Table) 51 | .columns(Target::columns()) 52 | .and_where(Expr::col(TargetDef::Id).eq(id)) 53 | .to_owned() 54 | } 55 | 56 | pub fn find_by_name(name: &str) -> SelectStatement { 57 | Query::select() 58 | .from(TargetDef::Table) 59 | .columns(Target::columns()) 60 | .and_where(Expr::col(TargetDef::Name).eq(name)) 61 | .to_owned() 62 | } 63 | 64 | pub fn insert(&self) -> InsertStatement { 65 | Query::insert() 66 | .into_table(TargetDef::Table) 67 | .columns(Self::bare_columns()) 68 | .values_panic([ 69 | self.id.into(), 70 | self.name.clone().into(), 71 | self.desc.clone().into(), 72 | self.redirect.into(), 73 | self.privilege.clone().into(), 74 | ]) 75 | .to_owned() 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /server/src/db/tables/task.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct Task { 10 | pub job: JobId, 11 | pub seq: u32, 12 | pub name: String, 13 | pub script: String, 14 | pub env_clear: bool, 15 | pub env: Dictionary, 16 | pub user_id: Option<UnixUid>, 17 | pub group_id: Option<UnixGid>, 18 | pub workdir: Option<String>, 19 | pub complete: bool, 20 | pub failed: bool, 21 | } 22 | 23 | impl FromRow for Task { 24 | fn columns() -> Vec<ColumnRef> { 25 | [ 26 | TaskDef::Job, 27 | TaskDef::Seq, 28 | TaskDef::Name, 29 | TaskDef::Script, 30 | TaskDef::EnvClear, 31 | TaskDef::Env, 32 | TaskDef::UserId, 33 | TaskDef::GroupId, 34 | TaskDef::Workdir, 35 | TaskDef::Complete, 36 | TaskDef::Failed, 37 | ] 38 | .into_iter() 39 | .map(|col| { 40 | ColumnRef::TableColumn(SeaRc::new(TaskDef::Table), SeaRc::new(col)) 41 | }) 42 | .collect() 43 | } 44 | 45 | fn from_row(row: &Row) -> rusqlite::Result<Task> { 46 | Ok(Task { 47 | job: row.get(0)?, 48 | seq: row.get(1)?, 49 | name: row.get(2)?, 50 | script: row.get(3)?, 51 | env_clear: row.get(4)?, 52 | env: row.get(5)?, 53 | user_id: row.get(6)?, 54 | group_id: row.get(7)?, 55 | workdir: row.get(8)?, 56 | complete: row.get(9)?, 57 | failed: row.get(10)?, 58 | }) 59 | } 60 | } 61 | 62 | impl Task { 63 | pub fn find(job: JobId, seq: u32) -> SelectStatement { 64 | Query::select() 65 | .from(TaskDef::Table) 66 | .columns(Task::columns()) 67 | .and_where(Expr::col(TaskDef::Job).eq(job)) 68 | .and_where(Expr::col(TaskDef::Seq).eq(seq)) 69 | .to_owned() 70 | } 71 | 72 | pub fn insert(&self) -> InsertStatement { 73 | Query::insert() 74 | .into_table(TaskDef::Table) 75 | .columns(Self::bare_columns()) 76 | .values_panic([ 77 | self.job.into(), 78 | self.seq.into(), 79 | self.name.clone().into(), 80 | self.script.clone().into(), 81 | self.env_clear.into(), 82 | self.env.clone().into(), 83 | self.user_id.into(), 84 | self.group_id.into(), 85 | self.workdir.clone().into(), 86 | self.complete.into(), 87 | self.failed.into(), 88 | ]) 89 | .to_owned() 90 | } 91 | 92 | pub fn from_create(ct: &CreateTask, job: JobId, seq: usize) -> Task { 93 | Task { 94 | job, 95 | seq: seq.try_into().unwrap(), 96 |
-------------------------------------------------------------------------------- /server/src/db/tables/user.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct User { 10 | pub id: UserId, 11 | pub name: String, 12 | pub token: String, 13 | pub time_create: IsoDate, 14 | } 15 | 16 | impl FromRow for User { 17 | fn columns() -> Vec<ColumnRef> { 18 | [UserDef::Id, UserDef::Name, UserDef::Token, UserDef::TimeCreate] 19 | .into_iter() 20 | .map(|col| { 21 | ColumnRef::TableColumn( 22 | SeaRc::new(UserDef::Table), 23 | SeaRc::new(col), 24 | ) 25 | }) 26 | .collect() 27 | } 28 | 29 | fn from_row(row: &Row) -> rusqlite::Result<User> { 30 | Ok(User { 31 | id: row.get(0)?, 32 | name: row.get(1)?, 33 | token: row.get(2)?, 34 | time_create: row.get(3)?, 35 | }) 36 | } 37 | } 38 | 39 | impl User { 40 | pub fn find(id: UserId) -> SelectStatement { 41 | Query::select() 42 | .from(UserDef::Table) 43 | .columns(User::columns()) 44 | .and_where(Expr::col(UserDef::Id).eq(id)) 45 | .to_owned() 46 | } 47 | 48 | pub fn insert(&self) -> InsertStatement { 49 | Query::insert() 50 | .into_table(UserDef::Table) 51 | .columns(Self::bare_columns()) 52 | .values_panic([ 53 | self.id.into(), 54 | self.name.clone().into(), 55 | self.token.clone().into(), 56 | self.time_create.into(), 57 | ]) 58 | .to_owned() 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /server/src/db/tables/user_privilege.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct UserPrivilege { 10 | pub user: UserId, 11 | pub privilege: String, 12 | } 13 | 14 | impl UserPrivilege { 15 | pub fn upsert(&self) -> InsertStatement { 16 | Query::insert() 17 | .into_table(UserPrivilegeDef::Table) 18 | .columns([UserPrivilegeDef::User, UserPrivilegeDef::Privilege]) 19 | .values_panic([self.user.into(), self.privilege.clone().into()]) 20 | .on_conflict(OnConflict::new().do_nothing().to_owned()) 21 | .to_owned() 22 | } 23 | } 24 |
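/*
 * Editorial note, not a file from this repository: with sea-query's SQLite
 * backend, the upsert above should render to SQL roughly like this (quoting
 * written from memory, not generated output):
 *
 *   INSERT INTO "user_privilege" ("user", "privilege") VALUES (?, ?)
 *   ON CONFLICT DO NOTHING
 *
 * so granting a privilege a user already holds is a harmless no-op rather
 * than a UNIQUE constraint failure.
 */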
-------------------------------------------------------------------------------- /server/src/db/tables/worker_event.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use super::sublude::*; 6 | 7 | #[derive(Debug, Clone)] 8 | #[enum_def(prefix = "", suffix = "Def")] 9 | pub struct WorkerEvent { 10 | pub worker: WorkerId, 11 | pub seq: u32, 12 | pub stream: String, 13 | /** 14 | * The time at which the core API server received or generated this event. 15 | */ 16 | pub time: IsoDate, 17 | /** 18 | * This field reflects the time on the remote system when the event was 19 | * generated. Due to issues with NTP, etc., it might not align exactly with 20 | * the time field. 21 | */ 22 | pub time_remote: Option<IsoDate>, 23 | pub payload: String, 24 | } 25 | 26 | impl FromRow for WorkerEvent { 27 | fn columns() -> Vec<ColumnRef> { 28 | [ 29 | WorkerEventDef::Worker, 30 | WorkerEventDef::Seq, 31 | WorkerEventDef::Stream, 32 | WorkerEventDef::Time, 33 | WorkerEventDef::TimeRemote, 34 | WorkerEventDef::Payload, 35 | ] 36 | .into_iter() 37 | .map(|col| { 38 | ColumnRef::TableColumn( 39 | SeaRc::new(WorkerEventDef::Table), 40 | SeaRc::new(col), 41 | ) 42 | }) 43 | .collect() 44 | } 45 | 46 | fn from_row(row: &Row) -> rusqlite::Result<WorkerEvent> { 47 | Ok(WorkerEvent { 48 | worker: row.get(0)?, 49 | seq: row.get(1)?, 50 | stream: row.get(2)?, 51 | time: row.get(3)?, 52 | time_remote: row.get(4)?, 53 | payload: row.get(5)?, 54 | }) 55 | } 56 | } 57 | 58 | impl WorkerEvent { 59 | pub fn insert(&self) -> InsertStatement { 60 | Query::insert() 61 | .into_table(WorkerEventDef::Table) 62 | .columns(Self::bare_columns()) 63 | .values_panic([ 64 | self.worker.into(), 65 | self.seq.into(), 66 | self.stream.clone().into(), 67 | self.time.into(), 68 | self.time_remote.into(), 69 | self.payload.clone().into(), 70 | ]) 71 | .to_owned() 72 | } 73 | 74 | #[allow(unused)] 75 | pub fn age(&self) -> Duration { 76 | self.time.age() 77 | } 78 | } 79 |
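/*
 * Editorial sketch, not a file from this repository: recording both
 * timestamps makes it possible to estimate agent clock skew per event. This
 * assumes IsoDate is a tuple struct wrapping a chrono DateTime<Utc> (its
 * definition lives elsewhere in this workspace); remote_skew() is a
 * hypothetical helper, not part of the real WorkerEvent API.
 */
impl WorkerEvent {
    /* A positive result means the remote clock was behind the server's. */
    #[allow(unused)]
    pub fn remote_skew(&self) -> Option<chrono::Duration> {
        self.time_remote.map(|remote| self.time.0 - remote.0)
    }
}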
-------------------------------------------------------------------------------- /server/src/workers.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2021 Oxide Computer Company 3 | */ 4 | 5 | use chrono::prelude::*; 6 | use std::time::Duration; 7 | use std::{sync::Arc, time::Instant}; 8 | 9 | use anyhow::Result; 10 | #[allow(unused_imports)] 11 | use slog::{error, info, warn, Logger}; 12 | 13 | use super::Central; 14 | 15 | const TEN_MINUTES: Duration = Duration::from_secs(10 * 60); 16 | 17 | async fn worker_liveness_one(log: &Logger, c: &Central) -> Result<()> { 18 | for w in c.db.workers_without_pings(TEN_MINUTES)? { 19 | assert!(!w.deleted); 20 | assert!(!w.recycle); 21 | assert!(!w.is_held()); 22 | 23 | let ping = w.lastping.unwrap().age().as_secs(); 24 | warn!(log, "worker stopped responding to pings!"; 25 | "id" => w.id.to_string(), 26 | "lastping" => ping); 27 | 28 | /* 29 | * Record in the database that the worker has failed. This routine will 30 | * take care of reporting failure in any assigned jobs, marking the 31 | * worker as held, etc. 32 | */ 33 | let failed_jobs = 34 | c.db.worker_mark_failed(w.id, "agent became unresponsive")?; 35 | if !failed_jobs.is_empty() { 36 | let jobs = failed_jobs 37 | .into_iter() 38 | .map(|j| j.to_string()) 39 | .collect::<Vec<_>>() 40 | .join(", "); 41 | warn!(log, "worker {} failing caused job {jobs} to fail", w.id); 42 | } 43 | } 44 | 45 | Ok(()) 46 | } 47 | 48 | async fn worker_cleanup_one(log: &Logger, c: &Central) -> Result<()> { 49 | /* 50 | * We want to set a rough timeout for job execution to prevent things 51 | * getting hung and running forever. 52 | */ 53 | for w in c.db.workers_active()? { 54 | assert!(!w.deleted); 55 | 56 | if w.recycle { 57 | continue; 58 | } 59 | 60 | if w.is_held() { 61 | /* 62 | * If the worker is marked on hold, leave it and any related jobs 63 | * alone. 64 | */ 65 | continue; 66 | } 67 | 68 | let jobs = c.db.worker_jobs(w.id)?; 69 | if jobs.is_empty() { 70 | /* 71 | * Idle workers should be assigned relatively promptly. If a worker 72 | * has bootstrapped but not been assigned for some time, tear it 73 | * down. 74 | */ 75 | if w.agent_ok() && w.age() > Duration::from_secs(30 * 60) { 76 | info!(log, "recycling surplus worker {} after 30m idle", w.id); 77 | c.db.worker_recycle(w.id)?; 78 | } 79 | continue; 80 | } 81 | 82 | for j in jobs { 83 | if j.failed || j.complete { 84 | /* 85 | * This will get cleaned up in the usual way. 86 | */ 87 | continue; 88 | } 89 | 90 | /* 91 | * Determine when we assigned this job to a worker by looking at the 92 | * timestamp on the first control event. 93 | */ 94 | let control = 95 | c.db.job_events(j.id, 0, 10_000)? 96 | .iter() 97 | .find(|jev| jev.stream == "control") 98 | .cloned(); 99 | if let Some(control) = control { 100 | if control.age().as_secs() > c.config.job.max_runtime { 101 | warn!( 102 | log, 103 | "job {} duration {} exceeds {} seconds; \ 104 | recycling worker {}", 105 | j.id, 106 | control.age().as_secs(), 107 | c.config.job.max_runtime, 108 | w.id, 109 | ); 110 | c.db.job_append_event( 111 | j.id, 112 | None, 113 | "control", 114 | Utc::now(), 115 | None, 116 | &format!( 117 | "job duration {} exceeds {} seconds; aborting", 118 | control.age().as_secs(), 119 | c.config.job.max_runtime, 120 | ), 121 | )?; 122 | c.db.worker_recycle(w.id)?; 123 | } 124 | } 125 | } 126 | } 127 | 128 | Ok(()) 129 | } 130 | 131 | pub(crate) async fn worker_cleanup(log: Logger, c: Arc<Central>) -> Result<()> { 132 | let start = Instant::now(); 133 | let mut liveness_checks = false; 134 | 135 | let delay = Duration::from_secs(47); 136 | info!(log, "start worker cleanup task"); 137 | 138 | loop { 139 | if let Err(e) = worker_cleanup_one(&log, &c).await { 140 | error!(log, "worker cleanup task error: {:?}", e); 141 | } 142 | 143 | if !liveness_checks { 144 | /* 145 | * The liveness worker will compare worker ping times against a 146 | * threshold. The ping times are stored in the database, but if the 147 | * server is down for a measurable period agents might not have had 148 | * a chance to phone in again and update them immediately after the 149 | * server comes back. Only check liveness once the server has been 150 | * up long enough to know the difference: 151 | */ 152 | if Instant::now().saturating_duration_since(start) > TEN_MINUTES { 153 | info!(log, "starting worker liveness checks"); 154 | liveness_checks = true; 155 | } 156 | } else { 157 | if let Err(e) = worker_liveness_one(&log, &c).await { 158 | error!(log, "worker liveness task error: {:?}", e); 159 | } 160 | } 161 | 162 | tokio::time::sleep(delay).await; 163 | } 164 | } 165 |
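/*
 * Editorial sketch, not a file from this repository: worker_cleanup() loops
 * forever, so the server presumably spawns it as a background tokio task at
 * startup, along these lines (the function, bindings, and log decoration
 * are illustrative assumptions):
 */
fn start_worker_cleanup(log: &slog::Logger, central: &std::sync::Arc<Central>) {
    let log0 = log.new(slog::o!("task" => "worker_cleanup"));
    let c0 = std::sync::Arc::clone(central);
    tokio::task::spawn(async move {
        /* The task only returns if something has gone badly wrong: */
        let res = worker_cleanup(log0.clone(), c0).await;
        slog::error!(log0, "worker cleanup task exited: {:?}", res);
    });
}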
-------------------------------------------------------------------------------- /sse/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-sse" 3 | version = "0.0.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | anyhow = { workspace = true } 8 | bytes = { workspace = true } 9 | dropshot = { workspace = true } 10 | http = { workspace = true } 11 | http-body-util = { workspace = true } 12 | hyper = { workspace = true } 13 | slog = { workspace = true } 14 | tokio = { workspace = true } 15 | tokio-stream = { workspace = true } 16 | -------------------------------------------------------------------------------- /types/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildomat-types" 3 | version = "0.0.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | schemars = { workspace = true } 13 | serde = { workspace = true } 14 | serde_json = { workspace = true } 15 | -------------------------------------------------------------------------------- /types/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod config; 2 | pub mod metadata; 3 | -------------------------------------------------------------------------------- /types/src/metadata.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | use schemars::JsonSchema; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | #[derive(Serialize, Deserialize, JsonSchema, Clone, Debug, Eq, PartialEq)] 9 | pub struct FactoryMetadataV1 { 10 | #[serde(default)] 11 | pub addresses: Vec<FactoryAddresses>, 12 | pub root_password_hash: Option<String>, 13 | pub root_authorized_keys: Option<String>, 14 | pub dump_to_rpool: Option<u32>, 15 | pub pre_job_diagnostic_script: Option<String>, 16 | pub post_job_diagnostic_script: Option<String>, 17 | pub rpool_disable_sync: Option<bool>, 18 | } 19 | 20 | #[derive(Serialize, Deserialize, JsonSchema, Clone, Debug, Eq, PartialEq)] 21 | pub struct FactoryAddresses { 22 | pub name: String, 23 | pub cidr: String, 24 | pub first: String, 25 | pub count: u32, 26 | pub routed: bool, 27 | pub gateway: Option<String>, 28 | } 29 | 30 | #[derive(Serialize, Deserialize, JsonSchema, Clone, Debug, Eq, PartialEq)] 31 | #[serde(tag = "v")] 32 | pub enum FactoryMetadata { 33 | #[serde(rename = "1")] 34 | V1(FactoryMetadataV1), 35 | } 36 | 37 | impl FactoryMetadata { 38 | pub fn root_password_hash(&self) -> Option<&str> { 39 | match self { 40 | FactoryMetadata::V1(md) => md.root_password_hash.as_deref(), 41 | } 42 | } 43 | 44 | pub fn root_authorized_keys(&self) -> Option<&str> { 45 | match self { 46 | FactoryMetadata::V1(md) => md.root_authorized_keys.as_deref(), 47 | } 48 | } 49 | 50 | pub fn addresses(&self) -> &[FactoryAddresses] { 51 | match self { 52 | FactoryMetadata::V1(md) => md.addresses.as_ref(), 53 | } 54 | } 55 | 56 | /** 57 | * Return the size in megabytes of the dump device to create, if one should 58 | * be created. 59 | */ 60 | pub fn dump_to_rpool(&self) -> Option<u32> { 61 | match self { 62 | FactoryMetadata::V1(md) => md.dump_to_rpool, 63 | } 64 | } 65 | 66 | pub fn pre_job_diagnostic_script(&self) -> Option<&str> { 67 | match self { 68 | FactoryMetadata::V1(md) => md.pre_job_diagnostic_script.as_deref(), 69 | } 70 | } 71 | 72 | pub fn post_job_diagnostic_script(&self) -> Option<&str> { 73 | match self { 74 | FactoryMetadata::V1(md) => md.post_job_diagnostic_script.as_deref(), 75 | } 76 | } 77 | 78 | /** 79 | * Should the agent set "sync=disabled" on the root ZFS pool? We expect 80 | * this to be useful in most cases, so we default to yes if the factory 81 | * configuration does not override. 82 | */ 83 | pub fn rpool_disable_sync(&self) -> bool { 84 | match self { 85 | FactoryMetadata::V1(md) => md.rpool_disable_sync.unwrap_or(true), 86 | } 87 | } 88 | } 89 |
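/*
 * Editorial note, not a file from this repository: because FactoryMetadata
 * is internally tagged with #[serde(tag = "v")], a V1 document carries the
 * version discriminator alongside the V1 fields in a single object. For
 * example (field values illustrative), this JSON deserialises to
 * FactoryMetadata::V1, with the omitted Option fields defaulting to None:
 *
 *   {
 *     "v": "1",
 *     "addresses": [],
 *     "rpool_disable_sync": false
 *   }
 */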
-------------------------------------------------------------------------------- /variety/basic/scripts/github_clone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o pipefail 5 | set -o xtrace 6 | 7 | mkdir -p "/work/$GITHUB_REPOSITORY" 8 | git clone "https://github.com/$GITHUB_REPOSITORY" "/work/$GITHUB_REPOSITORY" 9 | cd "/work/$GITHUB_REPOSITORY" 10 | git fetch origin "$GITHUB_SHA" 11 | if [[ -n $GITHUB_BRANCH ]]; then 12 | current=$(git branch --show-current) 13 | if [[ $current != $GITHUB_BRANCH ]]; then 14 | git branch -f "$GITHUB_BRANCH" "$GITHUB_SHA" 15 | git checkout -f "$GITHUB_BRANCH" 16 | fi 17 | fi 18 | git reset --hard "$GITHUB_SHA" 19 | -------------------------------------------------------------------------------- /variety/basic/scripts/github_token.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o pipefail 5 | 6 | GITHUB_TOKEN=$(bmat store get GITHUB_TOKEN) 7 | 8 | cat >$HOME/.netrc <<EOF 9 | machine github.com 10 | login x-access-token 11 | password $GITHUB_TOKEN 12 | EOF 13 | [...] -------------------------------------------------------------------------------- /variety/basic/scripts/setup.sh: -------------------------------------------------------------------------------- [...] >&2 43 | exit 1 44 | done 45 | 46 | exit 0 47 | fi 48 | 49 | case "$kern" in 50 | SunOS) 51 | groupadd -g "$build_uid" "$build_user" 52 | useradd -u "$build_uid" -g "$build_user" -d /home/build -s /bin/bash \ 53 | -c "$build_user" -P 'Primary Administrator' "$build_user" 54 | 55 | zfs create -o mountpoint="$work_dir" rpool/work 56 | 57 | # 58 | # Some illumos images use autofs by default for /home, which is not 59 | # what we want here. 60 | # 61 | if home_fs=$(awk '$2 == "/home" { print $3 }' /etc/mnttab) && 62 | [[ "$home_fs" == autofs ]]; then 63 | sed -i -e '/^\/home/d' /etc/auto_master 64 | automount -v 65 | fi 66 | ;; 67 | Linux) 68 | # 69 | # The stock Ubuntu images we're using in AWS are often missing 70 | # some basic conveniences: 71 | # 72 | apt-get -y update 73 | apt-get -y install sysvbanner build-essential 74 | 75 | groupadd -g "$build_uid" "$build_user" 76 | useradd -u "$build_uid" -g "$build_user" -d /home/build -s /bin/bash \ 77 | -c "$build_user" "$build_user" 78 | 79 | # 80 | # Simulate pfexec and the 'Primary Administrator' role with sudo: 81 | # 82 | echo "$build_user ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers.d/build 83 | chmod 0440 /etc/sudoers.d/build 84 | cat >/bin/pfexec <<-'EOF' 85 | #!/bin/bash 86 | exec /bin/sudo -- "$@" 87 | EOF 88 | chmod 0755 /bin/pfexec 89 | 90 | # 91 | # Simulate ptime to some extent: 92 | # 93 | cat >/bin/ptime <<-'EOF' 94 | #!/bin/bash 95 | verbose=no 96 | while getopts m c; do 97 | case "$c" in 98 | m) 99 | verbose=yes 100 | ;; 101 | ?) 102 | printf 'Usage: %s [-m] command args...\n' "$0" >&2 103 | exit 1 104 | esac 105 | done 106 | shift "$(( OPTIND - 1 ))" 107 | args=() 108 | if [[ $verbose == yes ]]; then 109 | args+=( '-v' ) 110 | fi 111 | exec /usr/bin/time "${args[@]}" "$@" 112 | EOF 113 | chmod 0755 /bin/ptime 114 | 115 | # 116 | # Ubuntu 18.04 had a genuine pre-war separate /bin directory! 117 | # 118 | if [[ ! -L /bin ]]; then 119 | for prog in ptime pfexec; do 120 | ln -s "../../bin/$prog" "/usr/bin/$prog" 121 | done 122 | fi 123 | 124 | mkdir -p "$work_dir" 125 | ;; 126 | *) 127 | printf 'ERROR: unknown OS: %s\n' "$kern" >&2 128 | exit 1 129 | ;; 130 | esac 131 | 132 | mkdir -p /home/build 133 | chown "$build_user":"$build_user" /home/build "$work_dir" 134 | chmod 0700 /home/build "$work_dir" 135 |
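#
# Editorial note, not a file from this repository: with the shims installed
# above, job scripts can use the same invocations on both operating systems,
# e.g. (commands illustrative):
#
#   pfexec id      # native pfexec on illumos; the sudo wrapper on Linux
#   ptime -m make  # native ptime on illumos; /usr/bin/time -v on Linux
#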
-------------------------------------------------------------------------------- /variety/basic/www/style.css: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Oxide Computer Company 3 | */ 4 | 5 | /* 6 | * The "ansi-to-html" crate uses CSS variables when emitting text that uses the 7 | * classic ANSI colour palette. Adjust the default colours to be a little 8 | * darker for more contrast against "s_stdout" and "s_stderr" backgrounds, 9 | * which are both quite light. 10 | */ 11 | :root { 12 | --ansi-black: #000000; 13 | --ansi-red: #b0000f; 14 | --ansi-green: #007000; 15 | --ansi-yellow: #808000; 16 | --ansi-blue: #2b5388; 17 | --ansi-magenta: #75507b; 18 | --ansi-cyan: #046062; 19 | --ansi-white: #ffffff; 20 | 21 | --ansi-bright-black: #000000; 22 | --ansi-bright-red: #b20f00; 23 | --ansi-bright-green: #557000; 24 | --ansi-bright-yellow: #b44405; 25 | --ansi-bright-blue: #5f55df; 26 | --ansi-bright-magenta: #bf2c90; 27 | --ansi-bright-cyan: #30a0a0; 28 | --ansi-bright-white: #ffffff; 29 | } 30 | 31 | table.table_output { 32 | border: none; 33 | } 34 | 35 | td { 36 | vertical-align: top; 37 | } 38 | 39 | td.num { 40 | text-align: right; 41 | } 42 | 43 | tr.s_stdout { 44 | background-color: #ffffff; 45 | } 46 | 47 | tr.s_stderr { 48 | background-color: #f3f3f3; 49 | } 50 | 51 | tr.s_task { 52 | background-color: #add8e6; 53 | } 54 | 55 | tr.s_worker { 56 | background-color: #fafad2; 57 | } 58 | 59 | tr.s_control { 60 | background-color: #90ee90; 61 | } 62 | 63 | tr.s_console { 64 | background-color: #e7d1ff; 65 | } 66 | 67 | tr.s_bgtask { 68 | background-color: #f79d65; 69 | } 70 | 71 | tr.s_default { 72 | background-color: #dddddd; 73 | } 74 | 75 | span.header { 76 | white-space: pre; 77 | font-family: monospace; 78 | font-weight: bold; 79 | } 80 | 81 | span.field { 82 | white-space: pre; 83 | font-family: monospace; 84 | } 85 | 86 | span.payload { 87 | white-space: pre-wrap; 88 | white-space: -moz-pre-wrap !important; 89 | font-family: monospace; 90 | } 91 | 92 | a.numlink { 93 | white-space: pre; 94 | font-family: monospace; 95 | text-decoration: none; 96 | color: #111111; 97 | } 98 | 99 | div.more { 100 | position: fixed; 101 | z-index: 1; 102 | bottom: 3rem; 103 | right: 3rem; 104 | 105 | padding: 1.6rem; 106 | 107 | border: 0.3rem solid black; 108 | 109 | background-color: #a4ff91; 110 | color: #000000; 111 | 112 | /* 113 | * Change the mouse cursor to the pointer, like the browser would do for a 114 | * clickable link: 115 | */ 116 | cursor: pointer; 117 | } 118 | 119 | span.more_arrow { 120 | font-size: 1.5rem; 121 | font-weight: bold; 122 | } 123 | -------------------------------------------------------------------------------- /www/buildomat.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxidecomputer/buildomat/853772fb5cae05d7bbe8a4459c03fe1637cdda95/www/buildomat.png -------------------------------------------------------------------------------- /www/buildomat_wide.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxidecomputer/buildomat/853772fb5cae05d7bbe8a4459c03fe1637cdda95/www/buildomat_wide.png -------------------------------------------------------------------------------- /www/error.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 23 | 24 | 25 |
26 | buildomat hammer logo 27 |
28 | 29 |
30 | There was an unexpected error while processing your request. 31 |
32 | 33 | 34 | 35 |
36 | We apologise for the inconvenience.
37 |
38 | 39 | 40 | -------------------------------------------------------------------------------- /www/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxidecomputer/buildomat/853772fb5cae05d7bbe8a4459c03fe1637cdda95/www/favicon.ico -------------------------------------------------------------------------------- /www/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 15 | 16 | 17 |
18 | buildomat hammer logo 19 |
20 | 21 | 22 | -------------------------------------------------------------------------------- /www/notfound.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 23 | 24 | 25 |
26 | buildomat hammer logo 27 |
28 | 29 |
30 | I'm sorry, I could not get that thing! 31 |
32 | 33 |
34 | We apologise for the inconvenience.
35 |
36 | 37 | 38 | -------------------------------------------------------------------------------- /xtask/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xtask" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | anyhow = { workspace = true } 8 | tempfile = { workspace = true } 9 | -------------------------------------------------------------------------------- /xtask/scripts/build_linux_agent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # This job script is run inside a buildomat ephemeral VM. 5 | # 6 | if [[ -z $BUILDOMAT_JOB_ID ]]; then 7 | printf 'ERROR: this is supposed to be run under buildomat.\n' >&2 8 | exit 1 9 | fi 10 | 11 | set -o errexit 12 | set -o pipefail 13 | set -o xtrace 14 | 15 | # 16 | # Install basic build tools: 17 | # 18 | apt-get -y update 19 | apt-get -y install build-essential pkg-config 20 | 21 | # 22 | # Install a stable Rust toolchain: 23 | # 24 | RUSTUP_INIT_SKIP_PATH_CHECK=yes \ 25 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | bash -s - \ 26 | --default-toolchain stable \ 27 | --profile minimal \ 28 | --no-modify-path \ 29 | -y -q 30 | 31 | . "$HOME/.cargo/env" 32 | 33 | mkdir -p /work 34 | mkdir -p /out 35 | 36 | cd /work 37 | 38 | # 39 | # Unpack the templates and scripts we included when kicking off the job: 40 | # 41 | cpio -idv < '/input/src.cpio' 42 | 43 | cargo build --features vendored-openssl --release --locked -p buildomat-agent 44 | 45 | # 46 | # Copy rather than moving, because we're on the same file system and gzip 47 | # complains about a link count issue otherwise: 48 | # 49 | cp target/release/buildomat-agent /out/buildomat-agent-linux 50 | chmod 0755 /out/buildomat-agent-linux 51 | 52 | sha256sum /out/buildomat-agent-linux \ 53 | >/out/buildomat-agent-linux.sha256.txt 54 | gzip /out/buildomat-agent-linux 55 | sha256sum /out/buildomat-agent-linux.gz \ 56 | >/out/buildomat-agent-linux.gz.sha256.txt 57 | 58 | find '/out' -type f -ls 59 | --------------------------------------------------------------------------------