├── .dockerignore ├── .cargo └── config.toml ├── contrib ├── etc │ ├── sysconfig │ │ └── lockc │ └── lockc │ │ └── lockc.toml └── systemd │ └── lockc.service.in ├── lockc-ebpf ├── rust-toolchain.toml ├── .cargo │ └── config.toml ├── .vscode │ └── settings.json ├── .vim │ └── coc-settings.json ├── Cargo.toml ├── src │ ├── policy.rs │ ├── maps.rs │ ├── proc.rs │ └── main.rs ├── Cargo.lock └── LICENSE ├── Cargo.toml ├── .vim └── coc-settings.json ├── .vscode └── settings.json ├── .github ├── dependabot.yml └── workflows │ ├── rust.yml │ └── container-image.yml ├── lockctl ├── Cargo.toml └── src │ └── main.rs ├── lockc-common ├── Cargo.toml └── src │ └── lib.rs ├── examples └── kubernetes │ ├── nginx-should-fail.yaml │ ├── nginx-should-succeed.yaml │ ├── namespaces.yaml │ ├── deployments-should-succeed.yaml │ └── deployments-should-fail.yaml ├── .gitignore ├── xtask ├── Cargo.toml └── src │ ├── codegen.rs │ ├── main.rs │ ├── build_ebpf.rs │ ├── bintar.rs │ ├── run.rs │ └── install.rs ├── lockc ├── src │ ├── communication.rs │ ├── sysutils.rs │ ├── load.rs │ ├── maps.rs │ ├── main.rs │ └── runc.rs └── Cargo.toml ├── Dockerfile ├── README.md ├── Vagrantfile ├── LICENSE └── .clang-format /.dockerignore: -------------------------------------------------------------------------------- 1 | .gitignore -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [alias] 2 | xtask = "run --package xtask --" -------------------------------------------------------------------------------- /contrib/etc/sysconfig/lockc: -------------------------------------------------------------------------------- 1 | KUBECONFIG=/etc/rancher/k3s/k3s.yaml 2 | -------------------------------------------------------------------------------- /lockc-ebpf/rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel="nightly" 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["lockc", "lockctl", "lockc-common", "xtask"] 3 | -------------------------------------------------------------------------------- /.vim/coc-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "rust-analyzer.linkedProjects": ["Cargo.toml", "lockc-ebpf/Cargo.toml"] 3 | } 4 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "rust-analyzer.linkedProjects": ["Cargo.toml", "lockc-ebpf/Cargo.toml"] 3 | } 4 | -------------------------------------------------------------------------------- /lockc-ebpf/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target-dir = "../target" 3 | target = "bpfel-unknown-none" 4 | 5 | [unstable] 6 | build-std = ["core"] -------------------------------------------------------------------------------- /lockc-ebpf/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "rust-analyzer.cargo.target": "bpfel-unknown-none", 3 | "rust-analyzer.checkOnSave.allTargets": false, 4 | } -------------------------------------------------------------------------------- 
/lockc-ebpf/.vim/coc-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "rust-analyzer.cargo.target": "bpfel-unknown-none", 3 | "rust-analyzer.checkOnSave.allTargets": false 4 | } 5 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: / 5 | schedule: 6 | interval: daily 7 | commit-message: 8 | prefix: '' 9 | labels: [] 10 | -------------------------------------------------------------------------------- /lockctl/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lockctl" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | anyhow = "1.0" 8 | aya = "0.11" 9 | clap = "4.1" 10 | cli-table = "0.4" 11 | lockc-common = { path = "../lockc-common", features = ["cli", "user"] } 12 | procfs = "0.15" 13 | -------------------------------------------------------------------------------- /contrib/systemd/lockc.service.in: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=lockc daemon 3 | After=network-online.target 4 | 5 | [Service] 6 | Type=simple 7 | Restart=always 8 | RestartSec=1 9 | EnvironmentFile=-/etc/sysconfig/lockc 10 | ExecStart={{ bindir }}/lockc 11 | StandardOutput=journal 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /lockc-common/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lockc-common" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [features] 7 | default = [] 8 | cli = [ "clap" ] 9 | user = [ "aya", "thiserror" ] 10 | 11 | [dependencies] 12 | aya = { version = "0.11", optional = true } 13 | clap = { version = "4.1", optional = true } 14 | thiserror = { version = "1.0", optional = true } 15 | 16 | [lib] 17 | path = "src/lib.rs" -------------------------------------------------------------------------------- /examples/kubernetes/nginx-should-fail.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-bad-deployment 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx-bad 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx-bad 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:1.14.2 18 | ports: 19 | - containerPort: 80 20 | -------------------------------------------------------------------------------- /examples/kubernetes/nginx-should-succeed.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: bitnami/nginx:1.20.2 18 | ports: 19 | - containerPort: 8080 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### https://raw.github.com/github/gitignore/master/Rust.gitignore 2 | 3 | # Generated by Cargo 4 | # will have compiled files and 
executables 5 | debug/ 6 | target/ 7 | 8 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 9 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 10 | Cargo.lock 11 | 12 | # These are backup files generated by rustfmt 13 | **/*.rs.bk 14 | 15 | # Vagrant 16 | .vagrant/ 17 | -------------------------------------------------------------------------------- /xtask/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xtask" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | structopt = {version = "0.3", default-features = false } 10 | aya-tool = { git = "https://github.com/aya-rs/aya", branch = "main" } 11 | anyhow = "1" 12 | flate2 = "1.0" 13 | fs_extra = "1.2" 14 | scopeguard = "1.1" 15 | serde = { version = "1.0", features = ["derive"] } 16 | sudo = "0.6" 17 | tar = "0.4" 18 | tempfile = "3.3" 19 | tera = "1.15" 20 | thiserror = "1.0" 21 | -------------------------------------------------------------------------------- /xtask/src/codegen.rs: -------------------------------------------------------------------------------- 1 | use std::{fs::File, io::Write, path::PathBuf}; 2 | 3 | use aya_tool::generate::InputFile; 4 | 5 | pub fn generate() -> Result<(), anyhow::Error> { 6 | let dir = PathBuf::from("lockc-ebpf/src"); 7 | let names: Vec<&str> = vec!["cred", "file", "sock", "sock_common", "task_struct"]; 8 | let bindings = aya_tool::generate( 9 | InputFile::Btf(PathBuf::from("/sys/kernel/btf/vmlinux")), 10 | &names, 11 | &[], 12 | )?; 13 | // Write the bindings to the $OUT_DIR/bindings.rs file. 14 | let mut out = File::create(dir.join("vmlinux.rs"))?; 15 | write!(out, "{}", bindings)?; 16 | Ok(()) 17 | } 18 | -------------------------------------------------------------------------------- /lockc-ebpf/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lockc-ebpf" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | aya-bpf = { git = "https://github.com/aya-rs/aya", branch = "main" } 8 | aya-log-ebpf = { git = "https://github.com/aya-rs/aya", branch = "main" } 9 | lockc-common = { path = "../lockc-common" } 10 | 11 | [[bin]] 12 | name = "lockc" 13 | path = "src/main.rs" 14 | 15 | [profile.dev] 16 | opt-level = 3 17 | debug = false 18 | debug-assertions = false 19 | overflow-checks = false 20 | lto = true 21 | panic = "abort" 22 | incremental = false 23 | codegen-units = 1 24 | rpath = false 25 | 26 | [profile.release] 27 | lto = true 28 | panic = "abort" 29 | codegen-units = 1 30 | 31 | [workspace] 32 | members = [] 33 | -------------------------------------------------------------------------------- /lockc/src/communication.rs: -------------------------------------------------------------------------------- 1 | use tokio::sync::oneshot; 2 | 3 | use lockc_common::ContainerPolicyLevel; 4 | 5 | use crate::maps::MapOperationError; 6 | 7 | /// Set of commands that the other tokio threads can use to request eBPF map 8 | /// operations. 
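///
/// A typical request/response round trip could look like the sketch below. It
/// assumes some `tokio::sync::mpsc::Sender<EbpfCommand>` handle (called
/// `ebpf_tx` here) owned by the caller, plus a `container_id` and `pid` in
/// scope; none of those are defined in this module.
///
/// ```ignore
/// // Hypothetical usage sketch, not part of this module.
/// let (responder_tx, responder_rx) = oneshot::channel();
/// ebpf_tx
///     .send(EbpfCommand::AddContainer {
///         container_id: container_id.clone(),
///         pid,
///         policy_level: ContainerPolicyLevel::Baseline,
///         responder_tx,
///     })
///     .await?;
/// responder_rx.await??;
/// ```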
9 | #[derive(Debug)] 10 | pub enum EbpfCommand { 11 | AddContainer { 12 | container_id: String, 13 | pid: i32, 14 | policy_level: ContainerPolicyLevel, 15 | responder_tx: oneshot::Sender>, 16 | }, 17 | DeleteContainer { 18 | container_id: String, 19 | responder_tx: oneshot::Sender>, 20 | }, 21 | AddProcess { 22 | container_id: String, 23 | pid: i32, 24 | responder_tx: oneshot::Sender>, 25 | }, 26 | } 27 | -------------------------------------------------------------------------------- /xtask/src/main.rs: -------------------------------------------------------------------------------- 1 | mod bintar; 2 | mod build_ebpf; 3 | mod codegen; 4 | mod install; 5 | mod run; 6 | 7 | use std::process::exit; 8 | 9 | use structopt::StructOpt; 10 | #[derive(StructOpt)] 11 | pub struct Options { 12 | #[structopt(subcommand)] 13 | command: Command, 14 | } 15 | 16 | #[derive(StructOpt)] 17 | enum Command { 18 | Bintar(bintar::Options), 19 | BuildEbpf(build_ebpf::Options), 20 | Install(install::Options), 21 | Run(run::Options), 22 | Codegen, 23 | } 24 | 25 | fn main() { 26 | let opts = Options::from_args(); 27 | 28 | use Command::*; 29 | let ret = match opts.command { 30 | Bintar(opts) => bintar::BinTar::new(opts).do_bin_tar(), 31 | BuildEbpf(opts) => build_ebpf::build_ebpf(opts), 32 | Install(opts) => install::Installer::new(opts).do_install(), 33 | Run(opts) => run::run(opts), 34 | Codegen => codegen::generate(), 35 | }; 36 | 37 | if let Err(e) = ret { 38 | eprintln!("{:#}", e); 39 | exit(1); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /lockc-ebpf/src/policy.rs: -------------------------------------------------------------------------------- 1 | use aya_bpf::helpers::bpf_get_current_pid_tgid; 2 | 3 | use lockc_common::{ContainerID, ContainerPolicyLevel}; 4 | 5 | use crate::maps::*; 6 | 7 | /// Finds the policy level for the current LSM hook. 8 | /// 9 | /// If the current process (which triggered the LSM hook) is in a container, 10 | /// returns the container ID and policy level of the container. 11 | /// 12 | /// If the current process is not in a container, returns `None`. 13 | #[inline(always)] 14 | pub(crate) fn get_container_and_policy_level( 15 | ) -> Result<(Option, ContainerPolicyLevel), i32> { 16 | let pid = bpf_get_current_pid_tgid() as u32; 17 | let process_o = unsafe { PROCESSES.get(&(pid as i32)) }; 18 | match process_o { 19 | Some(process) => { 20 | let container_o = unsafe { CONTAINERS.get(&process.container_id) }; 21 | match container_o { 22 | Some(container) => Ok((Some(process.container_id), container.policy_level)), 23 | None => Err(-2), 24 | } 25 | } 26 | None => Ok((None, ContainerPolicyLevel::NotFound)), 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /lockc-ebpf/src/maps.rs: -------------------------------------------------------------------------------- 1 | use aya_bpf::{ 2 | macros::map, 3 | maps::{HashMap, PerCpuArray}, 4 | }; 5 | 6 | use lockc_common::{Container, ContainerID, MountType, Path, Process, PID_MAX_LIMIT}; 7 | 8 | /// BPF map containing the info about a policy which should be enforced on the 9 | /// given container. 10 | #[map] 11 | pub(crate) static mut CONTAINERS: HashMap = 12 | HashMap::pinned(PID_MAX_LIMIT, 0); 13 | 14 | /// BPF map which maps the PID to a container it belongs to. The value of this 15 | /// map, which represents the container, is a key of `containers` BPF map, so 16 | /// it can be used immediately for lookups in `containers` map. 
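/// (See `get_container_and_policy_level` in `policy.rs` above for the
/// two-step lookup that chains this map with the `CONTAINERS` map.)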
17 | #[map] 18 | pub(crate) static mut PROCESSES: HashMap = HashMap::pinned(PID_MAX_LIMIT, 0); 19 | 20 | #[map] 21 | pub(crate) static mut CONTAINER_INITIAL_SETUID: HashMap = 22 | HashMap::with_max_entries(PID_MAX_LIMIT, 0); 23 | 24 | #[map] 25 | pub(crate) static mut MOUNT_TYPE_BUF: PerCpuArray = PerCpuArray::with_max_entries(1, 0); 26 | 27 | #[map] 28 | pub(crate) static mut PATH_BUF: PerCpuArray = PerCpuArray::with_max_entries(1, 0); 29 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rustlang/rust:nightly-bullseye as builder 2 | 3 | RUN apt-get update \ 4 | && apt-get install -y software-properties-common \ 5 | && wget https://apt.llvm.org/llvm-snapshot.gpg.key \ 6 | && apt-key add llvm-snapshot.gpg.key \ 7 | && rm -f llvm-snapshot.gpg.key \ 8 | && add-apt-repository "deb http://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-14 main" \ 9 | && apt-get update \ 10 | && apt-get install -y \ 11 | libssl-dev \ 12 | llvm-14-dev \ 13 | musl \ 14 | musl-dev \ 15 | musl-tools \ 16 | pkg-config 17 | RUN rustup component add rust-src 18 | RUN rustup target add x86_64-unknown-linux-musl 19 | RUN cargo install bpf-linker 20 | COPY . /src 21 | WORKDIR /src 22 | RUN --mount=type=cache,target=/.root/cargo/registry \ 23 | --mount=type=cache,target=/src/target \ 24 | cargo xtask build-ebpf --release \ 25 | && cargo build --release --target=x86_64-unknown-linux-musl \ 26 | && cp /src/target/x86_64-unknown-linux-musl/release/lockc /usr/sbin 27 | 28 | FROM alpine:3.15 29 | # runc links those libraries dynamically 30 | RUN apk update \ 31 | && apk add libseccomp \ 32 | libselinux 33 | COPY --from=builder /usr/sbin/lockc /usr/sbin/ 34 | ENTRYPOINT [ "/usr/sbin/lockc" ] 35 | -------------------------------------------------------------------------------- /lockc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lockc" 3 | version = "0.1.0" 4 | edition = "2021" 5 | publish = false 6 | 7 | [dependencies] 8 | aya = { version = "0.11", features = ["async_tokio"] } 9 | aya-log = "0.1" 10 | bytes = "1.1" 11 | lockc-common = { path = "../lockc-common", features=["cli", "user"] } 12 | anyhow = "1.0.42" 13 | clap = { version = "4.1", features = ["env"] } 14 | config = "0.13" 15 | fanotify-rs = { git = "https://github.com/vadorovsky/fanotify-rs", branch = "fix-pid-type" } 16 | kube = { version = "0.71", features = ["runtime", "derive"] } 17 | k8s-openapi = { version = "0.14", features = ["v1_23"] } 18 | libc = "0.2.102" 19 | log = "0.4" 20 | nix = "0.24" 21 | openssl-sys = { version = "0.9", features = ["vendored"] } 22 | procfs = "0.12" 23 | regex = { version = "1.5", default-features = false, features = ["perf", "std"] } 24 | scopeguard = "1.1" 25 | serde = "1.0" 26 | serde_json = "1.0" 27 | thiserror = "1.0" 28 | tokio = { version = "1.18", features = ["macros", "rt", "rt-multi-thread", "net", "signal"] } 29 | tracing = "0.1" 30 | tracing-core = "0.1" 31 | tracing-log = "0.1" 32 | tracing-subscriber = { version = "0.3", features = ["json"] } 33 | walkdir = "2.3" 34 | 35 | [dev-dependencies] 36 | tempfile = "3.3" 37 | 38 | [[bin]] 39 | name = "lockc" 40 | path = "src/main.rs" 41 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | 
branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Install rustc nightly 15 | uses: actions-rs/toolchain@v1 16 | with: 17 | toolchain: nightly 18 | components: rustfmt, clippy, rust-src 19 | - name: Install bpf-linker 20 | run: cargo install bpf-linker 21 | - name: Build eBPF 22 | run: cargo xtask build-ebpf 23 | - name: Build 24 | run: cargo build 25 | env: 26 | DOCKER_BUILDKIT: 1 27 | COMPOSE_DOCKER_CLI_BUILD: 1 28 | - name: Test 29 | run: cargo test 30 | env: 31 | DOCKER_BUILDKIT: 1 32 | COMPOSE_DOCKER_CLI_BUILD: 1 33 | - name: Lint 34 | run: cargo clippy -- -D warnings 35 | env: 36 | DOCKER_BUILDKIT: 1 37 | COMPOSE_DOCKER_CLI_BUILD: 1 38 | - name: Install udeps 39 | run: cargo +nightly install cargo-udeps 40 | - name: Check unused dependencies 41 | run: cargo +nightly udeps --all-targets 42 | env: 43 | DOCKER_BUILDKIT: 1 44 | COMPOSE_DOCKER_CLI_BUILD: 1 45 | -------------------------------------------------------------------------------- /examples/kubernetes/namespaces.yaml: -------------------------------------------------------------------------------- 1 | # Namespaces with pod-security labels which are supported both by lockc and 2 | # pod-security-admission. 3 | --- 4 | apiVersion: v1 5 | kind: Namespace 6 | metadata: 7 | name: restricted 8 | labels: 9 | pod-security.kubernetes.io/enforce: restricted 10 | pod-security.kubernetes.io/enforce-version: v1.22 11 | pod-security.kubernetes.io/audit: restricted 12 | pod-security.kubernetes.io/audit-version: v1.22 13 | pod-security.kubernetes.io/warn: restricted 14 | pod-security.kubernetes.io/warn-version: v1.22 15 | --- 16 | apiVersion: v1 17 | kind: Namespace 18 | metadata: 19 | name: baseline 20 | labels: 21 | pod-security.kubernetes.io/enforce: baseline 22 | pod-security.kubernetes.io/enforce-version: v1.22 23 | pod-security.kubernetes.io/audit: baseline 24 | pod-security.kubernetes.io/audit-version: v1.22 25 | pod-security.kubernetes.io/warn: baseline 26 | pod-security.kubernetes-io/warn-version: v1.22 27 | --- 28 | apiVersion: v1 29 | kind: Namespace 30 | metadata: 31 | name: privileged 32 | labels: 33 | pod-security.kubernetes.io/enforce: privileged 34 | pod-security.kubernetes.io/enforce-version: v1.22 35 | pod-security.kubernetes.io/audit: privileged 36 | pod-security.kubernetes.io/audit-version: v1.22 37 | pod-security.kubernetes.io/warn: privileged 38 | pod-security.kubernetes-io/warn-version: v1.22 39 | -------------------------------------------------------------------------------- /lockc/src/sysutils.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::File, 3 | io::{self, prelude::*}, 4 | path::Path, 5 | }; 6 | 7 | #[derive(thiserror::Error, Debug)] 8 | pub enum CheckBpfLsmError { 9 | #[error("regex compilation error")] 10 | Regex(#[from] regex::Error), 11 | 12 | #[error("I/O error")] 13 | IO(#[from] io::Error), 14 | 15 | #[error("BPF LSM is not enabled")] 16 | BpfLsmDisabled, 17 | } 18 | 19 | /// Checks whether BPF LSM is enabled in the system. 
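///
/// A typical call might look like this (a sketch; `/sys/kernel/security/lsm`
/// is the usual location of the kernel's active LSM list, and the path is a
/// parameter so that tests can point it at a temporary file):
///
/// ```ignore
/// check_bpf_lsm_enabled("/sys/kernel/security/lsm")?;
/// ```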
20 | pub fn check_bpf_lsm_enabled>(sys_lsm_path: P) -> Result<(), CheckBpfLsmError> { 21 | let rx = regex::Regex::new(r"bpf")?; 22 | let mut file = File::open(sys_lsm_path)?; 23 | let mut content = String::new(); 24 | 25 | file.read_to_string(&mut content)?; 26 | 27 | match rx.is_match(&content) { 28 | true => Ok(()), 29 | false => Err(CheckBpfLsmError::BpfLsmDisabled), 30 | } 31 | } 32 | 33 | #[cfg(test)] 34 | mod tests { 35 | use super::*; 36 | 37 | use tempfile::tempdir; 38 | 39 | #[test] 40 | fn check_bpf_lsm_enabled_when_correct() { 41 | let dir = tempdir().unwrap(); 42 | let sys_lsm_path = dir.path().join("lsm"); 43 | let mut f = File::create(&sys_lsm_path).unwrap(); 44 | f.write_all(b"lockdown,capability,bpf").unwrap(); 45 | assert!(check_bpf_lsm_enabled(&sys_lsm_path).is_ok()); 46 | } 47 | 48 | #[test] 49 | fn check_bpf_lsm_enabled_should_return_error() { 50 | let dir = tempdir().unwrap(); 51 | let sys_lsm_path = dir.path().join("lsm"); 52 | let mut f = File::create(&sys_lsm_path).unwrap(); 53 | f.write_all(b"lockdown,capability,selinux").unwrap(); 54 | let res = check_bpf_lsm_enabled(&sys_lsm_path); 55 | assert!(res.is_err()); 56 | assert!(matches!(res.unwrap_err(), CheckBpfLsmError::BpfLsmDisabled)); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /xtask/src/build_ebpf.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | use std::process::Command; 3 | 4 | use structopt::StructOpt; 5 | 6 | #[derive(Debug, Copy, Clone)] 7 | pub enum Architecture { 8 | BpfEl, 9 | BpfEb, 10 | } 11 | 12 | impl std::str::FromStr for Architecture { 13 | type Err = String; 14 | 15 | fn from_str(s: &str) -> Result { 16 | Ok(match s { 17 | "bpfel-unknown-none" => Architecture::BpfEl, 18 | "bpfeb-unknown-none" => Architecture::BpfEb, 19 | _ => return Err("invalid target".to_owned()), 20 | }) 21 | } 22 | } 23 | 24 | impl std::fmt::Display for Architecture { 25 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 26 | f.write_str(match self { 27 | Architecture::BpfEl => "bpfel-unknown-none", 28 | Architecture::BpfEb => "bpfeb-unknown-none", 29 | }) 30 | } 31 | } 32 | 33 | #[derive(StructOpt)] 34 | pub struct Options { 35 | /// Set the endianness of the BPF target 36 | #[structopt(default_value = "bpfel-unknown-none", long)] 37 | pub target: Architecture, 38 | /// Build the release target 39 | #[structopt(long)] 40 | pub release: bool, 41 | } 42 | 43 | pub fn build_ebpf(opts: Options) -> Result<(), anyhow::Error> { 44 | let dir = PathBuf::from("lockc-ebpf"); 45 | let target = format!("--target={}", opts.target); 46 | let mut args = vec![ 47 | "+nightly", 48 | "build", 49 | "--verbose", 50 | target.as_str(), 51 | "-Z", 52 | "build-std=core", 53 | ]; 54 | if opts.release { 55 | args.push("--release") 56 | } 57 | let status = Command::new("cargo") 58 | .current_dir(&dir) 59 | .args(&args) 60 | .status() 61 | .expect("failed to build bpf program"); 62 | assert!(status.success()); 63 | Ok(()) 64 | } 65 | -------------------------------------------------------------------------------- /xtask/src/bintar.rs: -------------------------------------------------------------------------------- 1 | use std::{fs::File, path::Path}; 2 | 3 | use anyhow::Result; 4 | use flate2::{write::GzEncoder, Compression}; 5 | use scopeguard::guard; 6 | use structopt::StructOpt; 7 | use tempfile::tempdir; 8 | 9 | use crate::install; 10 | 11 | #[derive(StructOpt)] 12 | pub struct Options { 13 | 
#[structopt(default_value = "debug", long)] 14 | profile: String, 15 | 16 | #[structopt(default_value = "usr/local", long)] 17 | prefix: String, 18 | 19 | // Directories which belong under prefix. 20 | #[structopt(default_value = "bin", long)] 21 | bindir: String, 22 | #[structopt(default_value = "etc", long)] 23 | sysconfdir: String, 24 | #[structopt(default_value = "lib/systemd/system", long)] 25 | unitdir: String, 26 | } 27 | 28 | pub struct BinTar { 29 | opts: Options, 30 | } 31 | 32 | impl BinTar { 33 | pub fn new(opts: Options) -> BinTar { 34 | BinTar { opts } 35 | } 36 | 37 | pub fn do_bin_tar(&self) -> Result<()> { 38 | let dir = guard(tempdir()?, |d| { 39 | // Ensure the dir is deleted. 40 | d.close().unwrap(); 41 | }); 42 | install::Installer::new(install::Options { 43 | profile: self.opts.profile.clone(), 44 | destdir: dir.path().to_string_lossy().to_string(), 45 | prefix: self.opts.prefix.clone(), 46 | bindir: self.opts.bindir.clone(), 47 | sysconfdir: self.opts.sysconfdir.clone(), 48 | unitdir: self.opts.unitdir.clone(), 49 | }) 50 | .do_install()?; 51 | 52 | let tar_gz_path = Path::new("target") 53 | .join(&self.opts.profile) 54 | .join("lockc.tar.gz"); 55 | let tar_gz = File::create(&tar_gz_path)?; 56 | let enc = GzEncoder::new(tar_gz, Compression::default()); 57 | let mut tar = tar::Builder::new(enc); 58 | tar.append_dir_all("", dir.path())?; 59 | 60 | println!("Tarball created: {}", tar_gz_path.display()); 61 | Ok(()) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /.github/workflows/container-image.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - main 5 | tags: 6 | - "v*" 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | name: build container image 12 | 13 | jobs: 14 | build: 15 | name: Build container image 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Set up QEMU 21 | uses: docker/setup-qemu-action@v1 22 | - name: Set up Docker Buildx 23 | uses: docker/setup-buildx-action@v1 24 | - name: Login to GitHub Container Registry 25 | uses: docker/login-action@v1 26 | with: 27 | registry: ghcr.io 28 | username: ${{ github.repository_owner }} 29 | password: ${{ secrets.GITHUB_TOKEN }} 30 | - name: Build container image 31 | if: ${{ github.event_name == 'pull_request' }} 32 | uses: docker/build-push-action@v2 33 | with: 34 | context: 35 | file: ./Dockerfile 36 | platforms: linux/amd64 37 | push: false 38 | tags: | 39 | ghcr.io/lockc-project/lockc:latest 40 | - name: Build and push development container image 41 | if: ${{ startsWith(github.ref, 'refs/heads/') }} 42 | uses: docker/build-push-action@v2 43 | with: 44 | context: . 45 | file: ./Dockerfile 46 | platforms: linux/amd64 47 | push: true 48 | tags: | 49 | ghcr.io/lockc-project/lockc:latest 50 | - name: Retrieve tag name 51 | if: ${{ startsWith(github.ref, 'refs/tags/') }} 52 | run: | 53 | echo TAG_NAME=$(echo $GITHUB_REF | sed -e "s|refs/tags/||") >> $GITHUB_ENV 54 | - name: Build and push tagged container image 55 | if: ${{ startsWith(github.ref, 'refs/tags/') }} 56 | uses: docker/build-push-action@v2 57 | with: 58 | context: . 
59 | file: ./Dockerfile 60 | platforms: linux/amd64 61 | push: true 62 | tags: | 63 | ghcr.io/lockc-project/lockc:${{ env.TAG_NAME }} 64 | -------------------------------------------------------------------------------- /xtask/src/run.rs: -------------------------------------------------------------------------------- 1 | use std::{os::unix::process::CommandExt, process::Command}; 2 | 3 | use anyhow::Context as _; 4 | use structopt::StructOpt; 5 | 6 | use crate::build_ebpf::{build_ebpf, Architecture, Options as BuildOptions}; 7 | 8 | #[derive(StructOpt)] 9 | pub struct Options { 10 | /// Set the endianness of the BPF target 11 | #[structopt(default_value = "bpfel-unknown-none", long)] 12 | pub bpf_target: Architecture, 13 | /// Build and run the release target 14 | #[structopt(long)] 15 | pub release: bool, 16 | /// The command used to wrap your application 17 | #[structopt(short, long, default_value = "sudo -E")] 18 | pub runner: String, 19 | /// Arguments to pass to your application 20 | #[structopt(name = "args", last = true)] 21 | pub run_args: Vec, 22 | } 23 | 24 | /// Build the project 25 | fn build(opts: &Options) -> Result<(), anyhow::Error> { 26 | let mut args = vec!["build"]; 27 | if opts.release { 28 | args.push("--release") 29 | } 30 | let status = Command::new("cargo") 31 | .args(&args) 32 | .status() 33 | .expect("failed to build userspace"); 34 | assert!(status.success()); 35 | Ok(()) 36 | } 37 | 38 | /// Build and run the project 39 | pub fn run(opts: Options) -> Result<(), anyhow::Error> { 40 | // build our ebpf program followed by our application 41 | build_ebpf(BuildOptions { 42 | target: opts.bpf_target, 43 | release: opts.release, 44 | }) 45 | .context("Error while building eBPF program")?; 46 | build(&opts).context("Error while building userspace application")?; 47 | 48 | // profile we are building (release or debug) 49 | let profile = if opts.release { "release" } else { "debug" }; 50 | let bin_path = format!("target/{}/lockc", profile); 51 | 52 | // arguments to pass to the application 53 | let mut run_args: Vec<_> = opts.run_args.iter().map(String::as_str).collect(); 54 | 55 | // configure args 56 | let mut args: Vec<_> = opts.runner.trim().split_terminator(' ').collect(); 57 | args.push(bin_path.as_str()); 58 | args.append(&mut run_args); 59 | 60 | // spawn the command 61 | let err = Command::new(args.first().expect("No first argument")) 62 | .args(args.iter().skip(1)) 63 | .exec(); 64 | 65 | // we shouldn't get here unless the command failed to spawn 66 | Err(anyhow::Error::from(err).context(format!("Failed to run `{}`", args.join(" ")))) 67 | } 68 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![lockc](https://raw.githubusercontent.com/lockc-project/assets/main/logo-horizontal-lockc.png) 2 | 3 | [![Crate](https://img.shields.io/crates/v/lockc)](https://crates.io/crates/lockc) 4 | [![Book](https://img.shields.io/website?url=https%3A%2F%2Flockc-project.github.io%2Flockc%2F)](https://lockc-project.github.io/lockc/) 5 | [![Discord](https://img.shields.io/discord/874314181191565453?label=discord&logo=discord)](https://discord.gg/799cmsYB4q) 6 | [![Docs](https://docs.rs/lockc/badge.svg)](https://docs.rs/lockc/) 7 | [![Build Status](https://github.com/lockc-project/lockc/actions/workflows/rust.yml/badge.svg)](https://github.com/lockc-project/lockc/actions/workflows/rust.yml) 8 | 9 | **lockc** is open source sofware for providing MAC 
(Mandatory Access Control) 10 | type of security audit for container workloads. 11 | 12 | The main reason why **lockc** exists is that **containers do not contain**. 13 | Containers are not as secure and isolated as VMs. By default, they expose 14 | a lot of information about the host OS and provide ways to "break out" of the 15 | container. **lockc** aims to provide more isolation to containers and make them 16 | more secure. 17 | 18 | The [Containers do not contain](https://lockc-project.github.io/book/containers-do-not-contain.html) 19 | documentation section explains what we mean by that phrase and what kind of 20 | behavior we want to restrict with **lockc**. 21 | 22 | The main technology behind lockc is [eBPF](https://ebpf.io/) - to be more 23 | precise, its ability to attach to [LSM hooks](https://docs.kernel.org/bpf/prog_lsm.html). A minimal sketch of such a program is shown at the end of this README. 24 | 25 | Please note that lockc is currently an experimental project, not meant for 26 | production environments and without any official binaries or packages to use - 27 | currently the only way to use it is to build it from source. 28 | 29 | See [the full documentation here](https://lockc-project.github.io/). 30 | And [the code documentation here](https://docs.rs/lockc/). 31 | 32 | If you need help or want to talk with contributors, please come chat with us 33 | in the `#lockc` channel on the [Rust Cloud Native Discord server](https://discord.gg/799cmsYB4q). 34 | 35 | **lockc's** userspace part is licensed under the [Apache License, version 2.0](https://github.com/lockc-project/lockc/blob/main/LICENSE). 36 | 37 | eBPF programs inside the [lockc-ebpf directory](https://github.com/lockc-project/lockc/tree/main/lockc-ebpf) 38 | are licensed under the [GNU General Public License, version 2](https://github.com/lockc-project/lockc/blob/main/lockc-ebpf/LICENSE).
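To make the LSM hook angle mentioned above concrete, below is a minimal, hypothetical sketch of an eBPF LSM program written with aya, in the style of the programs in `lockc-ebpf/src/`. The hook name `file_open` matches one of the hooks attached in `lockc/src/load.rs`, but the body is only a placeholder that allows every operation - it is not lockc's actual policy logic, and the exact attribute syntax depends on the aya-bpf revision in use.

```rust
// Fragment of an eBPF crate such as lockc-ebpf (such crates are built with
// #![no_std]/#![no_main]); shown only to illustrate the shape of an
// LSM-attached program.
use aya_bpf::{macros::lsm, programs::LsmContext};

#[lsm(name = "file_open")]
pub fn file_open(ctx: LsmContext) -> i32 {
    match try_file_open(ctx) {
        Ok(ret) => ret,
        Err(ret) => ret,
    }
}

fn try_file_open(_ctx: LsmContext) -> Result<i32, i32> {
    // Returning 0 allows the operation; a negative errno value denies it.
    Ok(0)
}
```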
39 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | CPUS = (ENV['LOCKC_VAGRANT_CPUS'] || 4).to_i 2 | MEMORY = (ENV['LOCKC_VAGRANT_MEMORY'] || 8192).to_i 3 | 4 | Vagrant.configure("2") do |config| 5 | config.vm.box = "generic/ubuntu2204" 6 | 7 | config.vm.synced_folder ".", "/vagrant", type: "rsync", 8 | rsync__exclude: "target/" 9 | 10 | config.vm.provider "virtualbox" do |v| 11 | v.cpus = CPUS 12 | v.memory = MEMORY 13 | v.customize ["modifyvm", :id, "--audio", "none"] 14 | end 15 | config.vm.provider "libvirt" do |libvirt| 16 | libvirt.cpus = CPUS 17 | libvirt.memory = MEMORY 18 | end 19 | 20 | config.vm.provision "shell", inline: <<-SHELL 21 | #!/usr/bin/env bash 22 | set -eux 23 | wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - 24 | add-apt-repository 'deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main' 25 | apt-get update 26 | apt-get upgrade -y 27 | apt-get dist-upgrade -y 28 | apt-get install -y \ 29 | build-essential \ 30 | docker.io \ 31 | clang-15 \ 32 | linux-tools-generic \ 33 | lld-15 34 | usermod -aG docker vagrant 35 | sed -i 's/GRUB_CMDLINE_LINUX=\"\"/GRUB_CMDLINE_LINUX=\"lsm=lockdown,yama,bpf\"/' /etc/default/grub 36 | update-grub 37 | SHELL 38 | config.vm.provision :reload 39 | config.vm.provision "shell", privileged: false, inline: <<-SHELL 40 | #!/usr/bin/env bash 41 | set -eux 42 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 43 | source $HOME/.cargo/env 44 | rustup toolchain install nightly --component rust-src 45 | cargo install bpf-linker 46 | cargo install bindgen-cli 47 | cargo install --git https://github.com/aya-rs/aya -- aya-tool 48 | 49 | pushd /vagrant 50 | cargo xtask build-ebpf 51 | cargo build 52 | cargo xtask install 53 | popd 54 | SHELL 55 | config.vm.provision "shell", inline: <<-SHELL 56 | systemctl enable --now lockc 57 | SHELL 58 | if ENV['LOCKC_VAGRANT_K8S'] == 'true' 59 | config.vm.define "server" do |server| 60 | server.vm.network "private_network", ip: "192.168.33.10" 61 | server.vm.provision "shell", inline: <<-SHELL 62 | curl -sfL https://get.k3s.io | K3S_TOKEN=mynodetoken sh - 63 | SHELL 64 | end 65 | end 66 | 67 | # TODO(vadorovsky): Enble agent when we deploy lockc with helm. 68 | # config.vm.define "agent" do |agent| 69 | # agent.vm.network "private_network", ip: "192.168.33.11" 70 | # agent.vm.provision "shell", inline: <<-SHELL 71 | # curl -sfL https://get.k3s.io | K3S_URL=https://192.168.33.10:6443 K3S_TOKEN=mynodetoken sh - 72 | # SHELL 73 | # end 74 | end -------------------------------------------------------------------------------- /examples/kubernetes/deployments-should-succeed.yaml: -------------------------------------------------------------------------------- 1 | # Deployments which should run successfully as they should not violate policy 2 | # levels of their namespaces. 
3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | name: nginx-default-success 8 | namespace: default 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: nginx-default-success 13 | replicas: 1 14 | template: 15 | metadata: 16 | labels: 17 | app: nginx-default-success 18 | spec: 19 | containers: 20 | - name: nginx 21 | image: bitnami/nginx:1.21.4 22 | ports: 23 | - containerPort: 80 24 | --- 25 | apiVersion: apps/v1 26 | kind: Deployment 27 | metadata: 28 | name: nginx-restricted-success 29 | namespace: restricted 30 | spec: 31 | selector: 32 | matchLabels: 33 | app: nginx-restricted-success 34 | replicas: 1 35 | template: 36 | metadata: 37 | labels: 38 | app: nginx-restricted-success 39 | spec: 40 | securityContext: 41 | runAsUser: 101 42 | seccompProfile: 43 | type: "RuntimeDefault" 44 | containers: 45 | - name: nginx 46 | image: bitnami/nginx:1.21.4 47 | ports: 48 | - containerPort: 80 49 | securityContext: 50 | allowPrivilegeEscalation: false 51 | runAsNonRoot: true 52 | capabilities: 53 | drop: ["ALL"] 54 | --- 55 | apiVersion: apps/v1 56 | kind: Deployment 57 | metadata: 58 | name: nginx-baseline-success 59 | namespace: baseline 60 | spec: 61 | selector: 62 | matchLabels: 63 | app: nginx-baseline-success 64 | replicas: 1 65 | template: 66 | metadata: 67 | labels: 68 | app: nginx-baseline-success 69 | spec: 70 | containers: 71 | - name: nginx 72 | image: bitnami/nginx:1.21.4 73 | ports: 74 | - containerPort: 80 75 | --- 76 | apiVersion: apps/v1 77 | kind: Deployment 78 | metadata: 79 | name: bpf-privileged-success 80 | namespace: privileged 81 | spec: 82 | selector: 83 | matchLabels: 84 | app: bpf-privileged-success 85 | replicas: 1 86 | template: 87 | metadata: 88 | labels: 89 | app: bpf-privileged-success 90 | spec: 91 | containers: 92 | - name: bpf 93 | image: busybox:latest 94 | command: ["sleep"] 95 | args: ["inf"] 96 | volumeMounts: 97 | - name: bpffs 98 | mountPath: /sys/fs/bpf 99 | volumes: 100 | - name: bpffs 101 | hostPath: 102 | path: /sys/fs/bpf 103 | type: Directory 104 | -------------------------------------------------------------------------------- /examples/kubernetes/deployments-should-fail.yaml: -------------------------------------------------------------------------------- 1 | # Deployments which should fail to run as they violate policy levels of their 2 | # namespaces. 
3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | name: nginx-restricted-fail 8 | namespace: restricted 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: nginx-restricted-fail 13 | replicas: 1 14 | template: 15 | metadata: 16 | labels: 17 | app: nginx-restricted-fail 18 | spec: 19 | containers: 20 | - name: nginx 21 | image: bitnami/nginx:1.21.4 22 | ports: 23 | - containerPort: 80 24 | volumeMounts: 25 | - name: data 26 | mountPath: /var/data/www 27 | volumes: 28 | - name: data 29 | hostPath: 30 | path: /var/data/www 31 | type: Directory 32 | --- 33 | apiVersion: apps/v1 34 | kind: Deployment 35 | metadata: 36 | name: bpf-default-fail 37 | namespace: default 38 | spec: 39 | selector: 40 | matchLabels: 41 | app: bpf-default-fail 42 | replicas: 1 43 | template: 44 | metadata: 45 | labels: 46 | app: bpf-default-fail 47 | spec: 48 | containers: 49 | - name: bpf 50 | image: busybox:latest 51 | command: ["sleep"] 52 | args: ["inf"] 53 | volumeMounts: 54 | - name: bpffs 55 | mountPath: /sys/fs/bpf 56 | volumes: 57 | - name: bpffs 58 | hostPath: 59 | path: /sys/fs/bpf 60 | type: Directory 61 | --- 62 | apiVersion: apps/v1 63 | kind: Deployment 64 | metadata: 65 | name: bpf-restricted-fail 66 | namespace: restricted 67 | spec: 68 | selector: 69 | matchLabels: 70 | app: bpf-restricted-fail 71 | replicas: 1 72 | template: 73 | metadata: 74 | labels: 75 | app: bpf-restricted-fail 76 | spec: 77 | containers: 78 | - name: bpf 79 | image: busybox:latest 80 | command: ["sleep"] 81 | args: ["inf"] 82 | volumeMounts: 83 | - name: bpffs 84 | mountPath: /sys/fs/bpf 85 | volumes: 86 | - name: bpffs 87 | hostPath: 88 | path: /sys/fs/bpf 89 | type: Directory 90 | --- 91 | apiVersion: apps/v1 92 | kind: Deployment 93 | metadata: 94 | name: bpf-baseline-fail 95 | namespace: baseline 96 | spec: 97 | selector: 98 | matchLabels: 99 | app: bpf-baseline-fail 100 | replicas: 1 101 | template: 102 | metadata: 103 | labels: 104 | app: bpf-baseline-fail 105 | spec: 106 | containers: 107 | - name: bpf 108 | image: busybox:latest 109 | command: ["sleep"] 110 | args: ["inf"] 111 | volumeMounts: 112 | - name: bpffs 113 | mountPath: /sys/fs/bpf 114 | volumes: 115 | - name: bpffs 116 | hostPath: 117 | path: /sys/fs/bpf 118 | type: Directory 119 | -------------------------------------------------------------------------------- /lockc-ebpf/src/proc.rs: -------------------------------------------------------------------------------- 1 | use aya_bpf::{macros::btf_tracepoint, programs::BtfTracePointContext}; 2 | use aya_log_ebpf::debug; 3 | 4 | use lockc_common::Process; 5 | 6 | use crate::{maps::*, vmlinux::task_struct}; 7 | 8 | /// Monitors all new tasks/functions created in the system and checks whether 9 | /// it's a child of some already containerized process (either the container 10 | /// runtime or any of its children) 11 | /// In any other case, it does not do anything. 12 | /// 13 | /// # Arguments 14 | /// 15 | /// * `ppid` - PID of the parent task 16 | /// * `child` - PID of the new task 17 | #[inline] 18 | fn handle_new_process(ctx: BtfTracePointContext, ppid: i32, pid: i32) -> Result { 19 | // Check if parent process is containerized (already registeed in BPF map). 20 | // If not, don't do anything. 21 | if let Some(parent) = unsafe { PROCESSES.get(&ppid) } { 22 | // Check if child process is already registered. If yes, don't do 23 | // anything. 24 | let child_lookup = unsafe { PROCESSES.get(&pid) }; 25 | if child_lookup.is_some() { 26 | return Ok(0); 27 | } 28 | 29 | // Register a new process. 
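        // The child inherits the parent's container ID, so every process
        // spawned inside the container stays attributed to that container.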
30 | let container_id = parent.container_id; 31 | debug!( 32 | &ctx, 33 | "new containerized process: pid: {}, container_id: {}", 34 | pid, 35 | unsafe { container_id.as_str() } 36 | ); 37 | let child = Process { container_id }; 38 | unsafe { PROCESSES.insert(&pid, &child, 0).map_err(|e| e as i32)? }; 39 | } 40 | 41 | Ok(0) 42 | } 43 | 44 | /// Tracepoint program triggered by forking a process. 45 | /// 46 | /// It's used to find a potential new runc process. 47 | #[btf_tracepoint(name = "sched_process_fork")] 48 | pub fn sched_process_fork(ctx: BtfTracePointContext) -> i32 { 49 | match { try_sched_process_fork(ctx) } { 50 | Ok(ret) => ret, 51 | Err(ret) => ret, 52 | } 53 | } 54 | 55 | fn try_sched_process_fork(ctx: BtfTracePointContext) -> Result { 56 | let parent_task: *const task_struct = unsafe { ctx.arg(0) }; 57 | let child_task: *const task_struct = unsafe { ctx.arg(1) }; 58 | 59 | let ppid = unsafe { (*parent_task).pid }; 60 | let pid = unsafe { (*child_task).pid }; 61 | 62 | handle_new_process(ctx, ppid, pid) 63 | } 64 | 65 | /// Tracepoint program triggered by running a new proccess with a binary 66 | /// executable. 67 | /// 68 | /// It's used to find a potential new runc process. 69 | #[btf_tracepoint(name = "sched_process_exec")] 70 | pub fn sched_process_exec(ctx: BtfTracePointContext) -> i32 { 71 | match { try_sched_process_exec(ctx) } { 72 | Ok(ret) => ret, 73 | Err(ret) => ret, 74 | } 75 | } 76 | 77 | fn try_sched_process_exec(ctx: BtfTracePointContext) -> Result { 78 | let task: *const task_struct = unsafe { ctx.arg(0) }; 79 | 80 | let ppid = unsafe { (*(*task).parent).pid }; 81 | let pid = unsafe { (*task).pid }; 82 | 83 | handle_new_process(ctx, ppid, pid) 84 | } 85 | 86 | /// Tracepoint program triggered by a process exiting. 87 | /// 88 | /// It's used to check if any process registered by lockc is exiting and 89 | /// information about it can be removed. 90 | #[btf_tracepoint(name = "sched_process_exit")] 91 | pub fn sched_process_exit(ctx: BtfTracePointContext) -> i32 { 92 | match { try_sched_process_exit(ctx) } { 93 | Ok(ret) => ret, 94 | Err(ret) => ret, 95 | } 96 | } 97 | 98 | fn try_sched_process_exit(ctx: BtfTracePointContext) -> Result { 99 | let task: *const task_struct = unsafe { ctx.arg(0) }; 100 | 101 | let pid = unsafe { (*task).pid }; 102 | 103 | unsafe { PROCESSES.remove(&pid).map_err(|e| e as i32)? }; 104 | 105 | Ok(0) 106 | } 107 | -------------------------------------------------------------------------------- /lockc-ebpf/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "aya-bpf" 7 | version = "0.1.0" 8 | source = "git+https://github.com/aya-rs/aya?branch=main#113e3ef0183acc69202fa6587643449f793cfff8" 9 | dependencies = [ 10 | "aya-bpf-bindings", 11 | "aya-bpf-cty", 12 | "aya-bpf-macros", 13 | "rustversion", 14 | ] 15 | 16 | [[package]] 17 | name = "aya-bpf-bindings" 18 | version = "0.1.0" 19 | source = "git+https://github.com/aya-rs/aya?branch=main#113e3ef0183acc69202fa6587643449f793cfff8" 20 | dependencies = [ 21 | "aya-bpf-cty", 22 | ] 23 | 24 | [[package]] 25 | name = "aya-bpf-cty" 26 | version = "0.2.1" 27 | source = "git+https://github.com/aya-rs/aya?branch=main#113e3ef0183acc69202fa6587643449f793cfff8" 28 | 29 | [[package]] 30 | name = "aya-bpf-macros" 31 | version = "0.1.0" 32 | source = "git+https://github.com/aya-rs/aya?branch=main#113e3ef0183acc69202fa6587643449f793cfff8" 33 | dependencies = [ 34 | "proc-macro2", 35 | "quote", 36 | "syn", 37 | ] 38 | 39 | [[package]] 40 | name = "aya-log-common" 41 | version = "0.1.13" 42 | source = "git+https://github.com/aya-rs/aya?branch=main#113e3ef0183acc69202fa6587643449f793cfff8" 43 | dependencies = [ 44 | "num_enum", 45 | ] 46 | 47 | [[package]] 48 | name = "aya-log-ebpf" 49 | version = "0.1.0" 50 | source = "git+https://github.com/aya-rs/aya?branch=main#113e3ef0183acc69202fa6587643449f793cfff8" 51 | dependencies = [ 52 | "aya-bpf", 53 | "aya-log-common", 54 | "aya-log-ebpf-macros", 55 | ] 56 | 57 | [[package]] 58 | name = "aya-log-ebpf-macros" 59 | version = "0.1.0" 60 | source = "git+https://github.com/aya-rs/aya?branch=main#113e3ef0183acc69202fa6587643449f793cfff8" 61 | dependencies = [ 62 | "aya-log-common", 63 | "aya-log-parser", 64 | "proc-macro2", 65 | "quote", 66 | "syn", 67 | ] 68 | 69 | [[package]] 70 | name = "aya-log-parser" 71 | version = "0.1.11-dev.0" 72 | source = "git+https://github.com/aya-rs/aya?branch=main#113e3ef0183acc69202fa6587643449f793cfff8" 73 | dependencies = [ 74 | "aya-log-common", 75 | ] 76 | 77 | [[package]] 78 | name = "lockc-common" 79 | version = "0.1.0" 80 | 81 | [[package]] 82 | name = "lockc-ebpf" 83 | version = "0.1.0" 84 | dependencies = [ 85 | "aya-bpf", 86 | "aya-log-ebpf", 87 | "lockc-common", 88 | ] 89 | 90 | [[package]] 91 | name = "num_enum" 92 | version = "0.5.11" 93 | source = "registry+https://github.com/rust-lang/crates.io-index" 94 | checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" 95 | dependencies = [ 96 | "num_enum_derive", 97 | ] 98 | 99 | [[package]] 100 | name = "num_enum_derive" 101 | version = "0.5.11" 102 | source = "registry+https://github.com/rust-lang/crates.io-index" 103 | checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" 104 | dependencies = [ 105 | "proc-macro2", 106 | "quote", 107 | "syn", 108 | ] 109 | 110 | [[package]] 111 | name = "proc-macro2" 112 | version = "1.0.52" 113 | source = "registry+https://github.com/rust-lang/crates.io-index" 114 | checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" 115 | dependencies = [ 116 | "unicode-ident", 117 | ] 118 | 119 | [[package]] 120 | name = "quote" 121 | version = "1.0.25" 122 | source = "registry+https://github.com/rust-lang/crates.io-index" 123 | checksum = "5308e8208729c3e1504a6cfad0d5daacc4614c9a2e65d1ea312a34b5cb00fe84" 124 | dependencies = [ 125 | "proc-macro2", 126 | ] 127 | 128 | [[package]] 129 | name = "rustversion" 130 | version = "1.0.12" 131 | source = "registry+https://github.com/rust-lang/crates.io-index" 132 | checksum = 
"4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" 133 | 134 | [[package]] 135 | name = "syn" 136 | version = "1.0.109" 137 | source = "registry+https://github.com/rust-lang/crates.io-index" 138 | checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" 139 | dependencies = [ 140 | "proc-macro2", 141 | "quote", 142 | "unicode-ident", 143 | ] 144 | 145 | [[package]] 146 | name = "unicode-ident" 147 | version = "1.0.8" 148 | source = "registry+https://github.com/rust-lang/crates.io-index" 149 | checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" 150 | -------------------------------------------------------------------------------- /lockc-common/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "user"), no_std)] 2 | 3 | /// Max configurable PID limit (for x86_64, for the other architectures it's 4 | /// less or equal). 5 | // TODO(vadorovsky): I need to teach aya to be able to resize maps before they 6 | // are loaded into the kernel. So far aya doesn't differentiate between open() 7 | // and load(), it opens the ELF object and loads it immediately in one step. 8 | // I need to change it. 9 | // After that, we will be able to set the limit again up to the upper possible 10 | // limit. And resize according to the max PID limit in sysctl. 11 | // Before it's done - let's stick to the default value to not use too much RAM. 12 | // pub const PID_MAX_LIMIT: u32 = 4194304; 13 | pub const PID_MAX_LIMIT: u32 = 32768; 14 | 15 | pub const MOUNT_TYPE_LEN: usize = 5; 16 | 17 | pub const PATH_LEN: usize = 64; 18 | 19 | const CONTAINER_ID_LEN: usize = 64; 20 | 21 | #[cfg_attr(feature = "user", derive(Debug))] 22 | #[cfg_attr(feature = "cli", derive(clap::ValueEnum))] 23 | #[derive(Copy, Clone)] 24 | #[repr(C)] 25 | pub enum ContainerPolicyLevel { 26 | NotFound = -1, 27 | 28 | Lockc, 29 | 30 | // Policy levels. 31 | Restricted, 32 | Offline, 33 | Baseline, 34 | Privileged, 35 | } 36 | 37 | #[cfg(feature = "user")] 38 | impl std::fmt::Display for ContainerPolicyLevel { 39 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 40 | match self { 41 | ContainerPolicyLevel::NotFound => write!(f, "not found"), 42 | ContainerPolicyLevel::Lockc => write!(f, "lockc"), 43 | ContainerPolicyLevel::Restricted => write!(f, "restricted"), 44 | ContainerPolicyLevel::Offline => write!(f, "offline"), 45 | ContainerPolicyLevel::Baseline => write!(f, "baseline"), 46 | ContainerPolicyLevel::Privileged => write!(f, "privileged"), 47 | } 48 | } 49 | } 50 | 51 | #[derive(Copy, Clone)] 52 | #[repr(C)] 53 | pub struct ContainerID { 54 | pub id: [u8; CONTAINER_ID_LEN], 55 | } 56 | 57 | #[cfg(feature = "user")] 58 | impl std::str::FromStr for ContainerID { 59 | type Err = std::ffi::NulError; 60 | 61 | fn from_str(id: &str) -> Result { 62 | let mut id = std::ffi::CString::new(id)?.into_bytes_with_nul(); 63 | id.resize(CONTAINER_ID_LEN, 0); 64 | Ok(ContainerID { 65 | id: id.try_into().unwrap(), 66 | }) 67 | } 68 | } 69 | 70 | #[cfg(feature = "user")] 71 | impl ContainerID { 72 | pub fn as_str(&self) -> Result<&str, std::str::Utf8Error> { 73 | std::str::from_utf8(&self.id) 74 | } 75 | } 76 | 77 | #[cfg(not(feature = "user"))] 78 | impl ContainerID { 79 | /// Convert container ID to a string. 80 | /// 81 | /// # Safety 82 | /// 83 | /// Container ID is a fixed-size array which has to be nul terminated. 84 | /// Otherwise this conversion to string is going to fail. 
85 | pub unsafe fn as_str(&self) -> &str { 86 | core::str::from_utf8_unchecked(&self.id) 87 | } 88 | } 89 | 90 | #[cfg(feature = "user")] 91 | #[derive(thiserror::Error, Debug)] 92 | pub enum NewContainerIDError { 93 | #[error(transparent)] 94 | NulError(#[from] std::ffi::NulError), 95 | 96 | #[error("could not convert Vec to CString")] 97 | VecU8CStringConv, 98 | } 99 | 100 | #[cfg(feature = "user")] 101 | impl ContainerID { 102 | /// Creates a new container_id instance and converts the given Rust string 103 | /// into C fixed size char array. 104 | pub fn new(id: &str) -> Result { 105 | let mut id_b = std::ffi::CString::new(id)?.into_bytes_with_nul(); 106 | id_b.resize(CONTAINER_ID_LEN, 0); 107 | Ok(ContainerID { 108 | id: id_b 109 | .try_into() 110 | .map_err(|_| NewContainerIDError::VecU8CStringConv)?, 111 | }) 112 | } 113 | } 114 | 115 | #[derive(Copy, Clone)] 116 | #[repr(C)] 117 | pub struct Container { 118 | pub policy_level: ContainerPolicyLevel, 119 | } 120 | 121 | #[derive(Copy, Clone)] 122 | #[repr(C)] 123 | pub struct Process { 124 | pub container_id: ContainerID, 125 | } 126 | 127 | #[derive(Copy, Clone)] 128 | #[repr(C)] 129 | pub struct MountType { 130 | pub mount_type: [u8; MOUNT_TYPE_LEN], 131 | } 132 | 133 | #[derive(Copy, Clone)] 134 | #[repr(C)] 135 | pub struct Path { 136 | pub path: [u8; PATH_LEN], 137 | } 138 | 139 | #[cfg(feature = "user")] 140 | mod user { 141 | use super::*; 142 | 143 | unsafe impl aya::Pod for ContainerID {} 144 | unsafe impl aya::Pod for Container {} 145 | unsafe impl aya::Pod for Process {} 146 | } 147 | -------------------------------------------------------------------------------- /lockc/src/load.rs: -------------------------------------------------------------------------------- 1 | use std::{io, path::Path}; 2 | 3 | use aya::{ 4 | include_bytes_aligned, 5 | programs::{BtfTracePoint, Lsm, ProgramError}, 6 | Bpf, BpfError, BpfLoader, Btf, BtfError, 7 | }; 8 | use thiserror::Error; 9 | use tracing::warn; 10 | 11 | #[derive(Error, Debug)] 12 | pub enum LoadError { 13 | #[error(transparent)] 14 | IO(#[from] io::Error), 15 | 16 | #[error(transparent)] 17 | Bpf(#[from] BpfError), 18 | } 19 | 20 | /// Loads BPF programs from the object file built with clang. 
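///
/// Maps are pinned under the given base path. For example (a sketch; the path
/// below is the one `lockctl` expects to find the pinned maps under):
///
/// ```ignore
/// let mut bpf = load_bpf("/sys/fs/bpf/lockc")?;
/// attach_programs(&mut bpf)?;
/// ```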
21 | pub fn load_bpf>(path_base_r: P) -> Result { 22 | let path_base = path_base_r.as_ref(); 23 | std::fs::create_dir_all(path_base)?; 24 | 25 | #[cfg(debug_assertions)] 26 | let bpf = BpfLoader::new() 27 | .map_pin_path(path_base) 28 | .load(include_bytes_aligned!( 29 | "../../target/bpfel-unknown-none/debug/lockc" 30 | ))?; 31 | #[cfg(not(debug_assertions))] 32 | let bpf = BpfLoader::new() 33 | .map_pin_path(path_base) 34 | .load(include_bytes_aligned!( 35 | "../../target/bpfel-unknown-none/release/lockc" 36 | ))?; 37 | 38 | Ok(bpf) 39 | } 40 | 41 | #[derive(Error, Debug)] 42 | pub enum AttachError { 43 | #[error(transparent)] 44 | Btf(#[from] BtfError), 45 | 46 | #[error(transparent)] 47 | Program(#[from] ProgramError), 48 | 49 | #[error("could not load the program")] 50 | ProgLoad, 51 | } 52 | 53 | fn is_root_btrfs() -> bool { 54 | let mountinfo = std::fs::read_to_string("/proc/1/mountinfo"); 55 | if let Ok(mountinfo) = mountinfo { 56 | let root = mountinfo.lines().find(|line| line.contains(" / ")); 57 | if let Some(root) = root { 58 | root.contains("btrfs") 59 | } else { 60 | false 61 | } 62 | } else { 63 | false 64 | } 65 | } 66 | 67 | pub fn attach_programs(bpf: &mut Bpf) -> Result<(), AttachError> { 68 | let btf = Btf::from_sys_fs()?; 69 | 70 | let program: &mut BtfTracePoint = bpf 71 | .program_mut("sched_process_fork") 72 | .ok_or(AttachError::ProgLoad)? 73 | .try_into()?; 74 | program.load("sched_process_fork", &btf)?; 75 | program.attach()?; 76 | 77 | let program: &mut BtfTracePoint = bpf 78 | .program_mut("sched_process_exec") 79 | .ok_or(AttachError::ProgLoad)? 80 | .try_into()?; 81 | program.load("sched_process_exec", &btf)?; 82 | program.attach()?; 83 | 84 | let program: &mut BtfTracePoint = bpf 85 | .program_mut("sched_process_exit") 86 | .ok_or(AttachError::ProgLoad)? 87 | .try_into()?; 88 | program.load("sched_process_exit", &btf)?; 89 | program.attach()?; 90 | 91 | let program: &mut Lsm = bpf 92 | .program_mut("syslog") 93 | .ok_or(AttachError::ProgLoad)? 94 | .try_into()?; 95 | program.load("syslog", &btf)?; 96 | program.attach()?; 97 | 98 | // NOTE(vadorovsky): Mount policies work only with BTRFS for now. 99 | // TODO(vadorovsky): Add support for overlayfs. 100 | if is_root_btrfs() { 101 | let program: &mut Lsm = bpf 102 | .program_mut("sb_mount") 103 | .ok_or(AttachError::ProgLoad)? 104 | .try_into()?; 105 | program.load("sb_mount", &btf)?; 106 | program.attach()?; 107 | } else { 108 | warn!("Root filesystem is not BTRFS, skipping mount policies"); 109 | } 110 | 111 | let program: &mut Lsm = bpf 112 | .program_mut("task_fix_setuid") 113 | .ok_or(AttachError::ProgLoad)? 114 | .try_into()?; 115 | program.load("task_fix_setuid", &btf)?; 116 | program.attach()?; 117 | 118 | let program: &mut Lsm = bpf 119 | .program_mut("file_open") 120 | .ok_or(AttachError::ProgLoad)? 121 | .try_into()?; 122 | program.load("file_open", &btf)?; 123 | program.attach()?; 124 | 125 | let program: &mut Lsm = bpf 126 | .program_mut("socket_sendmsg") 127 | .ok_or(AttachError::ProgLoad)? 128 | .try_into()?; 129 | program.load("socket_sendmsg", &btf)?; 130 | program.attach()?; 131 | 132 | let program: &mut Lsm = bpf 133 | .program_mut("socket_recvmsg") 134 | .ok_or(AttachError::ProgLoad)? 
135 | .try_into()?; 136 | program.load("socket_recvmsg", &btf)?; 137 | program.attach()?; 138 | 139 | Ok(()) 140 | } 141 | 142 | #[cfg(test)] 143 | mod tests { 144 | use super::*; 145 | 146 | #[test] 147 | #[cfg_attr(not(feature = "tests_bpf"), ignore)] 148 | fn load_and_attach_bpf() { 149 | let mut bpf = load_bpf("/sys/fs/bpf/lockc-test").expect("Loading BPF failed"); 150 | attach_programs(&mut bpf).expect("Attaching BPF programs failed"); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /lockc/src/maps.rs: -------------------------------------------------------------------------------- 1 | use aya::{ 2 | maps::{HashMap, MapError}, 3 | Bpf, 4 | }; 5 | use config::ConfigError; 6 | use thiserror::Error; 7 | use tracing::{debug, warn}; 8 | 9 | use lockc_common::{Container, ContainerID, ContainerPolicyLevel, NewContainerIDError, Process}; 10 | 11 | #[derive(Error, Debug)] 12 | pub enum MapOperationError { 13 | #[error(transparent)] 14 | Config(#[from] ConfigError), 15 | 16 | #[error(transparent)] 17 | Map(#[from] MapError), 18 | 19 | #[error(transparent)] 20 | NewContainerID(#[from] NewContainerIDError), 21 | } 22 | 23 | pub fn add_container( 24 | bpf: &mut Bpf, 25 | container_id: String, 26 | pid: i32, 27 | policy_level: ContainerPolicyLevel, 28 | ) -> Result<(), MapOperationError> { 29 | debug!( 30 | container = container_id.as_str(), 31 | pid = pid, 32 | // policy_level = policy_level, 33 | map = "CONTAINERS", 34 | "adding container to eBPF map", 35 | ); 36 | 37 | let mut containers: HashMap<_, ContainerID, Container> = 38 | bpf.map_mut("CONTAINERS")?.try_into()?; 39 | let container_key = ContainerID::new(&container_id)?; 40 | let container = Container { policy_level }; 41 | containers.insert(container_key, container, 0)?; 42 | 43 | let mut processes: HashMap<_, i32, Process> = bpf.map_mut("PROCESSES")?.try_into()?; 44 | let process = Process { 45 | container_id: container_key, 46 | }; 47 | processes.insert(pid, process, 0)?; 48 | 49 | Ok(()) 50 | } 51 | 52 | pub fn delete_container(bpf: &mut Bpf, container_id: String) -> Result<(), MapOperationError> { 53 | debug!( 54 | container = container_id.as_str(), 55 | map = "CONTAINERS", 56 | "deleting container from eBPF map" 57 | ); 58 | 59 | let mut containers: HashMap<_, ContainerID, Container> = 60 | bpf.map_mut("CONTAINERS")?.try_into()?; 61 | let container_key = ContainerID::new(&container_id)?; 62 | 63 | // An error while removing a container entry is expected when lockc was 64 | // installed after some containers were running (which is always the case 65 | // on Kubernetes). Instead of returning an error, let's warn users. 66 | if let Err(e) = containers.remove(&container_key) { 67 | if let MapError::SyscallError { .. } = e { 68 | warn!( 69 | container = container_id.as_str(), 70 | error = e.to_string().as_str(), 71 | "could not remove the eBPF map container entry" 72 | ); 73 | } 74 | } 75 | 76 | // TODO(vadorovsky): Add iter_mut() to HashMap in aya. Due to lack of it, 77 | // we cannot remove elements immediately when iterating, because iter() 78 | // borrows the HashMap immutably. 
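    // As a workaround, collect the matching PIDs first and remove them in a
    // second pass once the iteration has finished.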
79 | let mut processes: HashMap<_, i32, Process> = bpf.map_mut("PROCESSES")?.try_into()?; 80 | let mut to_remove = Vec::new(); 81 | for res in processes.iter() { 82 | let (pid, process) = res?; 83 | if process.container_id.id == container_key.id { 84 | to_remove.push(pid); 85 | // processes.remove(&pid)?; 86 | } 87 | } 88 | for pid in to_remove { 89 | processes.remove(&pid)?; 90 | } 91 | 92 | Ok(()) 93 | } 94 | 95 | pub fn add_process(bpf: &mut Bpf, container_id: String, pid: i32) -> Result<(), MapOperationError> { 96 | debug!( 97 | pid = pid, 98 | container = container_id.as_str(), 99 | map = "PROCESSES", 100 | "adding process to eBPF map", 101 | ); 102 | 103 | let mut processes: HashMap<_, i32, Process> = bpf.map_mut("PROCESSES")?.try_into()?; 104 | let container_key = ContainerID::new(&container_id)?; 105 | let process = Process { 106 | container_id: container_key, 107 | }; 108 | processes.insert(pid, process, 0)?; 109 | 110 | Ok(()) 111 | } 112 | 113 | #[cfg(test)] 114 | mod tests { 115 | use tempfile::{Builder, TempDir}; 116 | 117 | use crate::load::load_bpf; 118 | 119 | use super::*; 120 | 121 | fn tmp_path_base() -> TempDir { 122 | Builder::new() 123 | .prefix("lockc-temp") 124 | .rand_bytes(5) 125 | .tempdir_in("/sys/fs/bpf") 126 | .expect("Creating temporary dir in BPFFS failed") 127 | } 128 | 129 | #[test] 130 | #[cfg_attr(not(feature = "tests_bpf"), ignore)] 131 | fn test_add_container() { 132 | let path_base = tmp_path_base(); 133 | let mut bpf = load_bpf(path_base).expect("Loading BPF failed"); 134 | add_container( 135 | &mut bpf, 136 | "5833851e673d45fab4d12105bf61c3f4892b2bbf9c12d811db509a4f22475ec9".to_string(), 137 | 42069, 138 | ContainerPolicyLevel::Baseline, 139 | ) 140 | .expect("Adding container failed"); 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /lockctl/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use aya::{ 4 | include_bytes_aligned, 5 | maps::{HashMap, MapRef, MapRefMut}, 6 | Bpf, BpfLoader, 7 | }; 8 | use clap::{Parser, Subcommand}; 9 | use cli_table::{print_stdout, Cell, Style, Table}; 10 | use lockc_common::{Container, ContainerID, ContainerPolicyLevel, Process}; 11 | 12 | const PATH_BASE: &str = "/sys/fs/bpf/lockc"; 13 | 14 | #[derive(Parser)] 15 | struct Args { 16 | #[command(subcommand)] 17 | subcommand: Sub, 18 | } 19 | 20 | #[derive(Subcommand)] 21 | enum Sub { 22 | /// Manage containers and their policies. 23 | Container { 24 | #[command(subcommand)] 25 | container: SubContainer, 26 | }, 27 | /// Manage containerized processes. 28 | Process { 29 | #[command(subcommand)] 30 | process: SubProcess, 31 | }, 32 | } 33 | 34 | #[derive(Subcommand)] 35 | enum SubContainer { 36 | /// List all containers. 37 | List, 38 | ApplyPolicy { 39 | /// The ID of the container. 40 | container_id: String, 41 | /// The policy to apply. 42 | #[clap(value_enum)] 43 | policy: ContainerPolicyLevel, 44 | }, 45 | } 46 | 47 | #[derive(Subcommand)] 48 | enum SubProcess { 49 | /// List all processes. 
50 |     List,
51 | }
52 | 
53 | fn load_bpf() -> anyhow::Result<Bpf> {
54 |     #[cfg(debug_assertions)]
55 |     let bpf = BpfLoader::new()
56 |         .map_pin_path(PATH_BASE)
57 |         .load(include_bytes_aligned!(
58 |             "../../target/bpfel-unknown-none/debug/lockc"
59 |         ))?;
60 |     #[cfg(not(debug_assertions))]
61 |     let bpf = BpfLoader::new()
62 |         .map_pin_path(PATH_BASE)
63 |         .load(include_bytes_aligned!(
64 |             "../../target/bpfel-unknown-none/release/lockc"
65 |         ))?;
66 | 
67 |     Ok(bpf)
68 | }
69 | 
70 | fn container_list() -> anyhow::Result<()> {
71 |     let bpf = load_bpf()?;
72 | 
73 |     let containers: HashMap<MapRef, ContainerID, Container> = bpf.map("CONTAINERS")?.try_into()?;
74 |     let mut table = Vec::new();
75 |     for res in containers.iter() {
76 |         let (container_id, container) = res?;
77 |         table.push(vec![
78 |             container_id.as_str()?.to_string().cell(),
79 |             format!("{}", container.policy_level).cell(),
80 |         ]);
81 |     }
82 | 
83 |     let table = table.table().title(vec![
84 |         "Container ID".cell().bold(true),
85 |         "Policy Level".cell().bold(true),
86 |     ]);
87 | 
88 |     print_stdout(table)?;
89 | 
90 |     Ok(())
91 | }
92 | 
93 | fn container_apply_policy(
94 |     container_id: String,
95 |     policy: ContainerPolicyLevel,
96 | ) -> anyhow::Result<()> {
97 |     let bpf = load_bpf()?;
98 | 
99 |     let mut containers: HashMap<MapRefMut, ContainerID, Container> =
100 |         bpf.map_mut("CONTAINERS")?.try_into()?;
101 | 
102 |     let key = ContainerID::from_str(&container_id)?;
103 |     if containers.get(&key, 0).is_err() {
104 |         return Err(anyhow::anyhow!("container {} not found", container_id));
105 |     }
106 | 
107 |     let container = Container {
108 |         policy_level: policy,
109 |     };
110 |     containers.remove(&key)?;
111 |     containers.insert(key, container, 0)?;
112 | 
113 |     Ok(())
114 | }
115 | 
116 | fn process_list() -> anyhow::Result<()> {
117 |     let bpf = load_bpf()?;
118 | 
119 |     let processes: HashMap<MapRef, i32, Process> = bpf.map("PROCESSES")?.try_into()?;
120 |     let containers: HashMap<MapRef, ContainerID, Container> = bpf.map("CONTAINERS")?.try_into()?;
121 |     let mut table = Vec::new();
122 |     for res in processes.iter() {
123 |         let (pid, process) = res?;
124 |         let (stat, running) = match procfs::process::Process::new(pid) {
125 |             Ok(stat) => (Some(stat), true),
126 |             Err(_) => (None, false),
127 |         };
128 |         let exe = match stat {
129 |             Some(stat) => stat.exe()?.to_string_lossy().to_string(),
130 |             None => "-".to_owned(),
131 |         };
132 |         let container = containers.get(&process.container_id, 0)?;
133 |         table.push(vec![
134 |             pid.to_string().cell(),
135 |             format!("{}", running).cell(),
136 |             exe.cell(),
137 |             process.container_id.as_str()?.to_string().cell(),
138 |             format!("{}", container.policy_level).cell(),
139 |         ]);
140 |     }
141 | 
142 |     let table = table.table().title(vec![
143 |         "PID".cell().bold(true),
144 |         "Running".cell().bold(true),
145 |         "Command".cell().bold(true),
146 |         "Container ID".cell().bold(true),
147 |         "Policy Level".cell().bold(true),
148 |     ]);
149 | 
150 |     print_stdout(table)?;
151 | 
152 |     Ok(())
153 | }
154 | 
155 | fn main() -> anyhow::Result<()> {
156 |     let args = Args::parse();
157 | 
158 |     match args.subcommand {
159 |         Sub::Container { container } => match container {
160 |             SubContainer::List => container_list()?,
161 |             SubContainer::ApplyPolicy {
162 |                 container_id,
163 |                 policy,
164 |             } => container_apply_policy(container_id, policy)?,
165 |         },
166 |         Sub::Process { process } => match process {
167 |             SubProcess::List => process_list()?,
168 |         },
169 |     }
170 | 
171 |     Ok(())
172 | }
173 | 
--------------------------------------------------------------------------------
/lockc/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::{env, fs, path, thread};
2 | 
3 | use aya_log::BpfLogger;
4 | use clap::{Parser, ValueEnum};
5 | use thiserror::Error;
6 | use tokio::{
7 |     runtime::Runtime,
8 |     sync::{mpsc, oneshot},
9 | };
10 | use tracing::{debug, error, Level};
11 | use tracing_log::LogTracer;
12 | use tracing_subscriber::FmtSubscriber;
13 | 
14 | use lockc_common::ContainerPolicyLevel;
15 | 
16 | mod communication;
17 | mod load;
18 | mod maps;
19 | mod runc;
20 | mod sysutils;
21 | 
22 | use communication::EbpfCommand;
23 | use load::{attach_programs, load_bpf};
24 | use maps::{add_container, add_process, delete_container};
25 | // use runc::{attach_runc_nsexec, handle_events, mark_runc_binaries};
26 | use runc::RuncWatcher;
27 | use sysutils::check_bpf_lsm_enabled;
28 | 
29 | #[derive(Error, Debug)]
30 | enum FanotifyError {
31 |     #[error("could not send the message")]
32 |     Send,
33 | }
34 | 
35 | /// Runs a fanotify-based runc watcher, which registers containers every time
36 | /// they are created or deleted.
37 | fn fanotify(
38 |     fanotify_bootstrap_rx: oneshot::Receiver<()>,
39 |     ebpf_tx: mpsc::Sender<EbpfCommand>,
40 |     default_policy_level: ContainerPolicyLevel,
41 | ) -> Result<(), anyhow::Error> {
42 |     RuncWatcher::new(fanotify_bootstrap_rx, ebpf_tx, default_policy_level)?.work_loop()?;
43 |     Ok(())
44 | }
45 | 
46 | /// Loads and attaches eBPF programs, then fetches logs and events from them.
47 | async fn ebpf(
48 |     fanotify_bootstrap_tx: oneshot::Sender<()>,
49 |     mut ebpf_rx: mpsc::Receiver<EbpfCommand>,
50 | ) -> Result<(), anyhow::Error> {
51 |     // Check whether BPF LSM is enabled in the kernel. That check should be
52 |     // omitted in Kubernetes (where lockc runs in a container) or nested
53 |     // containers, because sysctls inside containers might hide the fact
54 |     // that BPF LSM is enabled.
55 |     if env::var("LOCKC_CHECK_LSM_SKIP").is_err() {
56 |         let sys_lsm_path = path::Path::new("/sys")
57 |             .join("kernel")
58 |             .join("security")
59 |             .join("lsm");
60 |         check_bpf_lsm_enabled(sys_lsm_path)?;
61 |     }
62 | 
63 |     // let config = new_config().await?;
64 | 
65 |     let path_base = std::path::Path::new("/sys")
66 |         .join("fs")
67 |         .join("bpf")
68 |         .join("lockc");
69 |     fs::create_dir_all(&path_base)?;
70 | 
71 |     let mut bpf = load_bpf(&path_base)?;
72 |     BpfLogger::init(&mut bpf)?;
73 | 
74 |     // init_allowed_paths(&mut bpf, &config)?;
75 |     debug!("allowed paths initialized");
76 |     attach_programs(&mut bpf)?;
77 |     debug!("attached programs");
78 | 
79 |     // Bootstrap the fanotify thread.
80 |     fanotify_bootstrap_tx
81 |         .send(())
82 |         .map_err(|_| FanotifyError::Send)?;
83 | 
84 |     while let Some(cmd) = ebpf_rx.recv().await {
85 |         match cmd {
86 |             EbpfCommand::AddContainer {
87 |                 container_id,
88 |                 pid,
89 |                 policy_level,
90 |                 responder_tx,
91 |             } => {
92 |                 let res = add_container(&mut bpf, container_id, pid, policy_level);
93 |                 match responder_tx.send(res) {
94 |                     Ok(_) => {}
95 |                     Err(_) => error!(
96 |                         command = "add_container",
97 |                         "could not send eBPF command result although the operation was successful"
98 |                     ),
99 |                 }
100 |             }
101 |             EbpfCommand::DeleteContainer {
102 |                 container_id,
103 |                 responder_tx,
104 |             } => {
105 |                 let res = delete_container(&mut bpf, container_id);
106 |                 match responder_tx.send(res) {
107 |                     Ok(_) => {}
108 |                     Err(_) => error!(
109 |                         command = "delete_container",
110 |                         "could not send eBPF command result although the operation was successful"
111 |                     ),
112 |                 }
113 |             }
114 |             EbpfCommand::AddProcess {
115 |                 container_id,
116 |                 pid,
117 |                 responder_tx,
118 |             } => {
119 |                 let res = add_process(&mut bpf, container_id, pid);
120 |                 match responder_tx.send(res) {
121 |                     Ok(_) => {}
122 |                     Err(_) => error!(
123 |                         command = "add_process",
124 |                         "could not send eBPF command result although the operation was successful"
125 |                     ),
126 |                 }
127 |             }
128 |         }
129 |     }
130 | 
131 |     Ok(())
132 | }
133 | 
134 | #[derive(Parser)]
135 | #[clap(author, version, about, long_about = None)]
136 | struct Opt {
137 |     #[cfg_attr(
138 |         debug_assertions,
139 |         clap(value_enum, long, env = "LOCKC_LOG_LEVEL", default_value_t = LogLevel::Debug)
140 |     )]
141 |     #[cfg_attr(
142 |         not(debug_assertions),
143 |         clap(value_enum, long, env = "LOCKC_LOG_LEVEL", default_value_t = LogLevel::Info)
144 |     )]
145 |     log_level: LogLevel,
146 | 
147 |     #[clap(value_enum, long, env = "LOCKC_LOG_FMT", default_value_t = LogFmt::Text)]
148 |     log_fmt: LogFmt,
149 | 
150 |     #[clap(value_enum, long, env = "LOCKC_DEFAULT_POLICY_LEVEL", default_value_t = ContainerPolicyLevel::Baseline)]
151 |     default_policy_level: ContainerPolicyLevel,
152 | }
153 | 
154 | #[derive(ValueEnum, Clone)]
155 | enum LogLevel {
156 |     Trace,
157 |     Debug,
158 |     Info,
159 |     Warn,
160 |     Error,
161 | }
162 | 
163 | #[derive(ValueEnum, Clone)]
164 | enum LogFmt {
165 |     Json,
166 |     Text,
167 | }
168 | 
169 | #[derive(Error, Debug)]
170 | enum SetupTracingError {
171 |     #[error(transparent)]
172 |     SetLogger(#[from] log::SetLoggerError),
173 | 
174 |     #[error(transparent)]
175 |     SetGlobalDefault(#[from] tracing_core::dispatcher::SetGlobalDefaultError),
176 | }
177 | 
178 | fn setup_tracing(opt: &Opt) -> Result<(), SetupTracingError> {
179 |     let (level_tracing, level_log) = match opt.log_level {
180 |         LogLevel::Trace => (Level::TRACE, log::LevelFilter::Trace),
181 |         LogLevel::Debug => (Level::DEBUG, log::LevelFilter::Debug),
182 |         LogLevel::Info => (Level::INFO, log::LevelFilter::Info),
183 |         LogLevel::Warn => (Level::WARN, log::LevelFilter::Warn),
184 |         LogLevel::Error => (Level::ERROR, log::LevelFilter::Error),
185 |     };
186 | 
187 |     let builder = FmtSubscriber::builder().with_max_level(level_tracing);
188 |     match opt.log_fmt {
189 |         LogFmt::Json => {
190 |             let subscriber = builder.json().finish();
191 |             tracing::subscriber::set_global_default(subscriber)?;
192 |         }
193 |         LogFmt::Text => {
194 |             let subscriber = builder.finish();
195 |             tracing::subscriber::set_global_default(subscriber)?;
196 |         }
197 |     };
198 | 
199 |     LogTracer::builder().with_max_level(level_log).init()?;
200 | 
201 |     Ok(())
202 | }
203 | 
204 | fn main() -> Result<(), anyhow::Error> {
205 |     let opt = Opt::parse();
206 |     setup_tracing(&opt)?;
207 | 
208 |     // Step 1: Create a synchronous thread which takes care of fanotify
209 |     // polling on runc binaries. We monitor all possible runc binaries to get
210 |     // all runc execution events (and therefore all operations on
211 |     // containers).
212 |     // This thread has to be synchronous and cannot be a part of the Tokio
213 |     // runtime, because it:
214 |     // * uses the poll() function
215 |     // * blocks the filesystem operations on monitored files
216 |     // * in case of monitoring runc, we have to be sure that we register a new
217 |     //   container exactly before we allow runc to be actually executed;
218 |     //   otherwise we cannot guarantee that lockc will actually enforce
219 |     //   anything on that container.
220 | 
221 |     // Fanotify thread bootstrap channel - used later to start the real bootstrap
222 |     // of the thread. We want to bootstrap it later, after loading eBPF
223 |     // programs (which happens in async code in the Tokio runtime).
224 |     let (fanotify_bootstrap_tx, fanotify_bootstrap_rx) = oneshot::channel::<()>();
225 | 
226 |     // eBPF thread channel - used by the fanotify thread to request eBPF operations
227 |     // from the async eBPF thread.
228 |     let (ebpf_tx, ebpf_rx) = mpsc::channel::<EbpfCommand>(100);
229 | 
230 |     // Start the thread (but it's going to wait for bootstrap).
231 |     let fanotify_thread =
232 |         thread::spawn(move || fanotify(fanotify_bootstrap_rx, ebpf_tx, opt.default_policy_level));
233 | 
234 |     // Step 2: Set up a Tokio runtime for the asynchronous part of lockc, which
235 |     // takes care of:
236 |     // * loading and attaching of eBPF programs
237 |     // * fetching events/logs from eBPF programs
238 |     // After initializing the eBPF world, the thread from step 1 is going
239 |     // to be bootstrapped.
240 | 
241 |     let rt = Runtime::new()?;
242 | 
243 |     rt.block_on(ebpf(fanotify_bootstrap_tx, ebpf_rx))?;
244 | 
245 |     if let Err(e) = fanotify_thread.join() {
246 |         error!("failed to join the fanotify thread: {:?}", e);
247 |     }
248 | 
249 |     Ok(())
250 | }
251 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                                  Apache License
2 |                            Version 2.0, January 2004
3 |                         http://www.apache.org/licenses/
4 | 
5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 |    1. Definitions.
8 | 
9 |       "License" shall mean the terms and conditions for use, reproduction,
10 |       and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 |       "Licensor" shall mean the copyright owner or entity authorized by
13 |       the copyright owner that is granting the License.
14 | 
15 |       "Legal Entity" shall mean the union of the acting entity and all
16 |       other entities that control, are controlled by, or are under common
17 |       control with that entity. For the purposes of this definition,
18 |       "control" means (i) the power, direct or indirect, to cause the
19 |       direction or management of such entity, whether by contract or
20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 |       outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 |       "You" (or "Your") shall mean an individual or Legal Entity
24 |       exercising permissions granted by this License.
25 | 
26 |       "Source" form shall mean the preferred form for making modifications,
27 |       including but not limited to software source code, documentation
28 |       source, and configuration files.
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /xtask/src/install.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | ffi::OsStr, 3 | fs, 4 | io::{self, prelude::*}, 5 | os::unix::fs::{MetadataExt, PermissionsExt}, 6 | path, 7 | }; 8 | 9 | use serde::Serialize; 10 | use structopt::StructOpt; 11 | use tera::{Context, Tera}; 12 | use thiserror::Error; 13 | 14 | #[derive(Error, Debug)] 15 | enum EscalateIfNotOwnedError { 16 | #[error(transparent)] 17 | IOError(#[from] io::Error), 18 | 19 | #[error("could not escalate privileges (sudo)")] 20 | SudoError, 21 | } 22 | 23 | fn mkdir_if_not_exists(p: &path::Path) -> Result<(), io::Error> { 24 | if !p.exists() { 25 | fs::create_dir_all(p)?; 26 | } 27 | 28 | Ok(()) 29 | } 30 | 31 | fn escalate_if_not_owned(p: &path::Path) -> Result<(), EscalateIfNotOwnedError> { 32 | if p.metadata()?.uid() == 0 { 33 | match sudo::escalate_if_needed() { 34 | Ok(_) => {} 35 | Err(_) => { 36 | // sudo library always returns the std::error::Error. Not 37 | // really descriptive for users... Replace it with our own 38 | // error. 39 | return Err(EscalateIfNotOwnedError::SudoError); 40 | } 41 | } 42 | } 43 | Ok(()) 44 | } 45 | 46 | #[derive(StructOpt)] 47 | pub(crate) struct Options { 48 | #[structopt(default_value = "debug", long)] 49 | pub(crate) profile: String, 50 | 51 | #[structopt(default_value = "/", long)] 52 | pub(crate) destdir: String, 53 | 54 | #[structopt(default_value = "usr/local", long)] 55 | pub(crate) prefix: String, 56 | 57 | // Directories which belong under prefix. 58 | #[structopt(default_value = "bin", long)] 59 | pub(crate) bindir: String, 60 | #[structopt(default_value = "etc", long)] 61 | pub(crate) sysconfdir: String, 62 | #[structopt(default_value = "lib/systemd/system", long)] 63 | pub(crate) unitdir: String, 64 | } 65 | 66 | impl Options { 67 | /// Returns a destdir path. 68 | fn destdir(&self) -> path::PathBuf { 69 | path::PathBuf::from(&self.destdir) 70 | } 71 | 72 | /// Returns a prefix path. 73 | /// Should be used for templating configuration and unit files. 74 | fn prefix(&self) -> path::PathBuf { 75 | path::Path::new("/").join(&self.prefix) 76 | } 77 | 78 | /// Returns a full prefix path (destdir + path). 79 | /// Should be used as an installation target for files. 80 | fn full_prefix(&self) -> path::PathBuf { 81 | path::Path::new(&self.destdir).join(&self.prefix) 82 | } 83 | 84 | /// Returns a bindir path (prefix + bindir). 85 | /// Should be used for templating configuration and unit files. 86 | fn bindir(&self) -> path::PathBuf { 87 | path::Path::new("/").join(&self.prefix).join(&self.bindir) 88 | } 89 | 90 | /// Returns a full bindir path (destdir + prefix + bindir). 91 | /// Should be used as an installation target for files. 92 | fn full_bindir(&self) -> path::PathBuf { 93 | path::Path::new(&self.destdir) 94 | .join(&self.prefix) 95 | .join(&self.bindir) 96 | } 97 | 98 | /// Returns a sysconfdir path (sysconfdir). 99 | /// Should be used for templating configuration and unit files. 100 | fn sysconfdir(&self) -> path::PathBuf { 101 | path::Path::new("/").join(&self.sysconfdir) 102 | } 103 | 104 | /// Returns a full sysconfdir path (destdir + sysconfdir). 105 | /// Should be used as an installation target for files. 106 | fn full_sysconfdir(&self) -> path::PathBuf { 107 | path::Path::new(&self.destdir).join(&self.sysconfdir) 108 | } 109 | 110 | /// Returns an unitdir path (prefix + unitdir). 
111 | /// Should be used for templating configuration and unit files. 112 | fn unitdir(&self) -> path::PathBuf { 113 | path::Path::new("/").join(&self.prefix).join(&self.unitdir) 114 | } 115 | 116 | /// Returns a full unitdir path (destdir + prefix + unitdir). 117 | /// Should be used as an installation target for files. 118 | fn full_unitdir(&self) -> path::PathBuf { 119 | path::Path::new(&self.destdir) 120 | .join(&self.prefix) 121 | .join(&self.unitdir) 122 | } 123 | } 124 | 125 | #[derive(Serialize)] 126 | struct InstallDirs { 127 | /// Destdir path. 128 | destdir: path::PathBuf, 129 | /// Prefix path. 130 | /// Should be used for templating configuration and unit files. 131 | prefix: path::PathBuf, 132 | /// Full prefix path (destdir + prefix). 133 | /// Should be used as an installation target for files. 134 | prefix_full: path::PathBuf, 135 | /// Bindir path (prefix + bindir). 136 | /// Should be used for templating configuration and unit files. 137 | bindir: path::PathBuf, 138 | /// Full bindir path (destdir + prefix + bindir). 139 | /// Should be used as an installation target for files. 140 | bindir_full: path::PathBuf, 141 | /// Sysconfdir path. 142 | /// Should be used for templating configuration and unit files. 143 | sysconfdir: path::PathBuf, 144 | /// Full sysconfdir path. 145 | /// Should be used as an installation target for files. 146 | sysconfdir_full: path::PathBuf, 147 | /// Unitdir path. 148 | /// Should be used for templating configuration and unit files. 149 | unitdir: path::PathBuf, 150 | /// Full unitdir path. 151 | /// Should be used as an installation target for files. 152 | unitdir_full: path::PathBuf, 153 | } 154 | 155 | pub(crate) struct Installer { 156 | opts: Options, 157 | install_dirs: InstallDirs, 158 | } 159 | 160 | #[derive(Error, Debug)] 161 | enum InstallBinariesError { 162 | #[error(transparent)] 163 | IO(#[from] io::Error), 164 | 165 | #[error(transparent)] 166 | EscalateIfNotOwned(#[from] EscalateIfNotOwnedError), 167 | 168 | #[error("the project is not built (with the requested profile)")] 169 | NotBuilt, 170 | } 171 | 172 | #[derive(Error, Debug)] 173 | enum InstallConfigError { 174 | #[error(transparent)] 175 | FS(#[from] fs_extra::error::Error), 176 | 177 | #[error(transparent)] 178 | IO(#[from] io::Error), 179 | 180 | #[error(transparent)] 181 | EscalateIfNotOwned(#[from] EscalateIfNotOwnedError), 182 | } 183 | 184 | #[derive(Error, Debug)] 185 | enum InstallUnitsError { 186 | #[error(transparent)] 187 | IO(#[from] io::Error), 188 | 189 | #[error(transparent)] 190 | Tera(#[from] tera::Error), 191 | 192 | #[error(transparent)] 193 | EscalateIfNotOwned(#[from] EscalateIfNotOwnedError), 194 | 195 | #[error("could not determine a file name for a templated file")] 196 | TemplatedFileName, 197 | } 198 | 199 | impl Installer { 200 | pub(crate) fn new(opts: Options) -> Installer { 201 | Installer { 202 | install_dirs: InstallDirs { 203 | destdir: opts.destdir(), 204 | prefix: opts.prefix(), 205 | prefix_full: opts.full_prefix(), 206 | bindir: opts.bindir(), 207 | bindir_full: opts.full_bindir(), 208 | sysconfdir: opts.sysconfdir(), 209 | sysconfdir_full: opts.full_sysconfdir(), 210 | unitdir: opts.unitdir(), 211 | unitdir_full: opts.full_unitdir(), 212 | }, 213 | opts, 214 | } 215 | } 216 | 217 | fn install_binaries(&self) -> Result<(), InstallBinariesError> { 218 | let bindir_full = &self.install_dirs.bindir_full; 219 | 220 | mkdir_if_not_exists(bindir_full)?; 221 | escalate_if_not_owned(bindir_full)?; 222 | 223 | let target_path = 
path::Path::new("target").join(&self.opts.profile); 224 | if !target_path.exists() { 225 | return Err(InstallBinariesError::NotBuilt); 226 | } 227 | for entry in fs::read_dir(target_path)? { 228 | let path_cur = entry?.path(); 229 | let metadata = path_cur.metadata()?; 230 | 231 | // Skip directories. They might meet the next if statement (executable 232 | // bit), but we don't want to install them. 233 | if metadata.is_dir() { 234 | continue; 235 | } 236 | 237 | // If the file is executable. 238 | if metadata.permissions().mode() & 0o111 != 0 { 239 | let file_name = path_cur.file_name().unwrap(); 240 | 241 | // Skip xtask (which is THIS binary :) ) 242 | if file_name == "xtask" { 243 | continue; 244 | } 245 | 246 | let path_dest = bindir_full.clone().join(file_name); 247 | println!( 248 | "Installing {} to {}", 249 | file_name.to_string_lossy(), 250 | path_dest.display() 251 | ); 252 | fs::copy(path_cur, path_dest)?; 253 | } 254 | } 255 | Ok(()) 256 | } 257 | 258 | fn install_config(&self) -> Result<(), InstallConfigError> { 259 | let sysconfdir_full = &self.install_dirs.sysconfdir_full; 260 | 261 | mkdir_if_not_exists(sysconfdir_full)?; 262 | escalate_if_not_owned(sysconfdir_full)?; 263 | 264 | let config_path = path::Path::new("contrib").join("etc"); 265 | if !config_path.exists() { 266 | return Ok(()); 267 | } 268 | 269 | let mut paths = Vec::new(); 270 | for entry in fs::read_dir(config_path)? { 271 | let path_cur = entry?.path(); 272 | paths.push(path_cur); 273 | } 274 | 275 | println!("Installing config files"); 276 | let mut options = fs_extra::dir::CopyOptions::new(); 277 | options.overwrite = true; 278 | fs_extra::copy_items(&paths, sysconfdir_full, &options)?; 279 | 280 | Ok(()) 281 | } 282 | 283 | fn __install_and_template_units( 284 | &self, 285 | unit_path: &path::Path, 286 | file_name: &OsStr, 287 | ) -> Result<(), InstallUnitsError> { 288 | // Remove ".in" suffix. 289 | let file_name_new = match file_name.to_str().unwrap().get(..file_name.len() - 3) { 290 | Some(f) => f, 291 | None => return Err(InstallUnitsError::TemplatedFileName), 292 | }; 293 | let path_dest = self.install_dirs.unitdir_full.clone().join(file_name_new); 294 | 295 | let tera = Tera::new(&unit_path.join("*.in").to_string_lossy())?; 296 | let content = tera.render( 297 | &file_name.to_string_lossy(), 298 | &Context::from_serialize(&self.install_dirs)?, 299 | )?; 300 | 301 | let mut file_dst = fs::File::create(&path_dest)?; 302 | println!( 303 | "Templating and installing systemd unit {} to {}", 304 | file_name.to_string_lossy(), 305 | path_dest.display() 306 | ); 307 | file_dst.write_all(content.as_bytes())?; 308 | 309 | Ok(()) 310 | } 311 | 312 | fn __install_units( 313 | &self, 314 | path_cur: &path::Path, 315 | file_name: &OsStr, 316 | ) -> Result<(), InstallUnitsError> { 317 | let path_dest = self.install_dirs.unitdir_full.clone().join(file_name); 318 | println!( 319 | "Installing systemd unit {} to {}", 320 | file_name.to_string_lossy(), 321 | path_dest.display() 322 | ); 323 | fs::copy(path_cur, path_dest)?; 324 | 325 | Ok(()) 326 | } 327 | 328 | fn install_units(&self) -> Result<(), InstallUnitsError> { 329 | let unitdir_full = &self.install_dirs.unitdir_full; 330 | 331 | mkdir_if_not_exists(unitdir_full)?; 332 | escalate_if_not_owned(unitdir_full)?; 333 | 334 | let unit_path = path::Path::new("contrib").join("systemd"); 335 | if !unit_path.exists() { 336 | return Ok(()); 337 | } 338 | 339 | for entry in fs::read_dir(&unit_path)? 
{
340 |             let path_cur = entry?.path();
341 |             let metadata = path_cur.metadata()?;
342 | 
343 |             // No nested directories in systemd units.
344 |             if metadata.is_dir() {
345 |                 continue;
346 |             }
347 | 
348 |             let file_name = path_cur.file_name().unwrap();
349 | 
350 |             match path_cur.extension() {
351 |                 Some(ext) => {
352 |                     if ext == "in" {
353 |                         self.__install_and_template_units(&unit_path, file_name)?;
354 |                     } else {
355 |                         self.__install_units(&path_cur, file_name)?;
356 |                     }
357 |                 }
358 |                 None => {
359 |                     self.__install_units(&path_cur, file_name)?;
360 |                 }
361 |             }
362 |         }
363 | 
364 |         Ok(())
365 |     }
366 | 
367 |     pub(crate) fn do_install(&self) -> anyhow::Result<()> {
368 |         self.install_binaries()?;
369 |         self.install_config()?;
370 |         self.install_units()?;
371 |         Ok(())
372 |     }
373 | }
374 | 
--------------------------------------------------------------------------------
/lockc-ebpf/src/main.rs:
--------------------------------------------------------------------------------
1 | #![no_std]
2 | #![no_main]
3 | 
4 | use aya_bpf::{
5 |     bindings::path,
6 |     cty::{c_char, c_long},
7 |     helpers::{bpf_d_path, bpf_probe_read_kernel_str_bytes},
8 |     macros::lsm,
9 |     programs::LsmContext,
10 |     BpfContext,
11 | };
12 | use aya_log_ebpf::{debug, error, info};
13 | 
14 | use lockc_common::{ContainerPolicyLevel, PATH_LEN};
15 | 
16 | mod maps;
17 | mod policy;
18 | mod proc;
19 | #[allow(non_upper_case_globals)]
20 | #[allow(non_snake_case)]
21 | #[allow(non_camel_case_types)]
22 | #[allow(dead_code)]
23 | mod vmlinux;
24 | 
25 | use maps::{CONTAINER_INITIAL_SETUID, MOUNT_TYPE_BUF, PATH_BUF};
26 | use policy::get_container_and_policy_level;
27 | use vmlinux::{cred, file, socket};
28 | 
29 | const AF_INET: u16 = 2;
30 | const AF_INET6: u16 = 10;
31 | 
32 | /// LSM program triggered by attempts to access the kernel logs. Behavior based
33 | /// on policy levels:
34 | ///
35 | /// * restricted: deny
36 | /// * baseline: deny
37 | /// * privileged: allow
38 | #[lsm(name = "syslog")]
39 | pub fn syslog(ctx: LsmContext) -> i32 {
40 |     match try_syslog(ctx) {
41 |         Ok(ret) => ret,
42 |         Err(ret) => ret,
43 |     }
44 | }
45 | 
46 | fn try_syslog(ctx: LsmContext) -> Result<i32, i32> {
47 |     let (_, policy_level) = get_container_and_policy_level()?;
48 | 
49 |     match policy_level {
50 |         ContainerPolicyLevel::NotFound => {
51 |             return Ok(0);
52 |         }
53 |         ContainerPolicyLevel::Lockc => {
54 |             return Ok(0);
55 |         }
56 |         ContainerPolicyLevel::Restricted => {
57 |             info!(&ctx, "syslog: deny accessing syslog");
58 |             return Err(-1);
59 |         }
60 |         ContainerPolicyLevel::Offline => {
61 |             info!(&ctx, "syslog: deny accessing syslog");
62 |             return Err(-1);
63 |         }
64 |         ContainerPolicyLevel::Baseline => {
65 |             info!(&ctx, "syslog: deny accessing syslog");
66 |             return Err(-1);
67 |         }
68 |         ContainerPolicyLevel::Privileged => {
69 |             return Ok(0);
70 |         }
71 |     }
72 | }
73 | 
74 | /// LSM program triggered by any mount attempt. It denies bind mounts to
75 | /// restricted and baseline containers.
76 | #[lsm(name = "sb_mount")] 77 | pub fn sb_mount(ctx: LsmContext) -> i32 { 78 | match try_sb_mount(ctx) { 79 | Ok(ret) => ret, 80 | Err(ret) => ret, 81 | } 82 | } 83 | 84 | fn try_sb_mount(ctx: LsmContext) -> Result { 85 | let (container_id, policy_level) = get_container_and_policy_level()?; 86 | 87 | match policy_level { 88 | ContainerPolicyLevel::NotFound => { 89 | return Ok(0); 90 | } 91 | ContainerPolicyLevel::Lockc => { 92 | return Ok(0); 93 | } 94 | ContainerPolicyLevel::Restricted => {} 95 | ContainerPolicyLevel::Offline => {} 96 | ContainerPolicyLevel::Baseline => {} 97 | ContainerPolicyLevel::Privileged => { 98 | return Ok(0); 99 | } 100 | } 101 | 102 | let mount_type = unsafe { 103 | let mount_type: *const c_char = ctx.arg(2); 104 | let buf_ptr = MOUNT_TYPE_BUF.get_ptr_mut(0).ok_or(0)?; 105 | let buf = &mut *buf_ptr; 106 | core::str::from_utf8_unchecked( 107 | bpf_probe_read_kernel_str_bytes(mount_type as *const u8, &mut buf.mount_type) 108 | .map_err(|e| e as i32)?, 109 | ) 110 | }; 111 | 112 | // Apply the policy only on bind mounts, ignore all the other types. 113 | if !mount_type.starts_with("bind") { 114 | return Ok(0); 115 | } 116 | 117 | let src_path = unsafe { 118 | let dev_name: *const c_char = ctx.arg(0); 119 | let buf_ptr = PATH_BUF.get_ptr_mut(0).ok_or(0)?; 120 | let buf = &mut *buf_ptr; 121 | core::str::from_utf8_unchecked( 122 | bpf_probe_read_kernel_str_bytes(dev_name as *const u8, &mut buf.path) 123 | .map_err(|e| e as i32)?, 124 | ) 125 | }; 126 | 127 | if src_path.starts_with("/run/k3s") 128 | || src_path.starts_with("/var/lib/docker") 129 | || src_path.starts_with("/var/lib/kubelet") 130 | || src_path.starts_with("/var/lib/rancher") 131 | || src_path.starts_with("/dev/pts") 132 | { 133 | return Ok(0); 134 | } 135 | 136 | let container_id = container_id.ok_or(-1)?; 137 | let container_id = unsafe { container_id.as_str() }; 138 | error!( 139 | &ctx, 140 | "sb_mount: {}: deny bind mounting {}", container_id, src_path 141 | ); 142 | 143 | Err(-1) 144 | } 145 | 146 | /// LSM program triggered when user attempts to change the UID. It denies 147 | /// changing the UID to 0 (logging in as root) in restricted and baseline 148 | /// containers. 
149 | #[lsm(name = "task_fix_setuid")]
150 | pub fn task_fix_setuid(ctx: LsmContext) -> i32 {
151 |     match try_task_fix_setuid(ctx) {
152 |         Ok(ret) => ret,
153 |         Err(ret) => ret,
154 |     }
155 | }
156 | 
157 | fn try_task_fix_setuid(ctx: LsmContext) -> Result<i32, i32> {
158 |     let (container_id, policy_level) = get_container_and_policy_level()?;
159 |     match policy_level {
160 |         ContainerPolicyLevel::NotFound => {
161 |             return Ok(0);
162 |         }
163 |         ContainerPolicyLevel::Lockc => {
164 |             return Ok(0);
165 |         }
166 |         ContainerPolicyLevel::Restricted => {}
167 |         ContainerPolicyLevel::Offline => {
168 |             return Ok(0);
169 |         }
170 |         ContainerPolicyLevel::Baseline => {
171 |             return Ok(0);
172 |         }
173 |         ContainerPolicyLevel::Privileged => {
174 |             return Ok(0);
175 |         }
176 |     }
177 | 
178 |     let container_id = container_id.ok_or(-1)?;
179 | 
180 |     let new: *const cred = unsafe { ctx.arg(0) };
181 |     let uid_new = unsafe { (*new).uid.val };
182 | 
183 |     if let Some(initial_setuid) = unsafe { CONTAINER_INITIAL_SETUID.get(&container_id) } {
184 |         if *initial_setuid {
185 |             if uid_new == 0 {
186 |                 let container_id = unsafe { container_id.as_str() };
187 |                 error!(
188 |                     &ctx,
189 |                     "task_fix_setuid: {}: deny logging in as root", container_id
190 |                 );
191 |                 return Err(-1);
192 |             }
193 |         }
194 |     } else {
195 |         debug!(
196 |             &ctx,
197 |             "task_fix_setuid: initial setuid, policy not enforced"
198 |         );
199 |         unsafe {
200 |             CONTAINER_INITIAL_SETUID
201 |                 .insert(&container_id, &true, 0)
202 |                 .map_err(|e| e as i32)?
203 |         };
204 |     }
205 | 
206 |     Ok(0)
207 | }
208 | 
209 | // TODO(vadorovsky): Remove this once the following PR is merged:
210 | // https://github.com/aya-rs/aya/pull/257
211 | #[inline(always)]
212 | pub fn my_bpf_d_path(path: *mut path, buf: &mut [u8]) -> Result<usize, c_long> {
213 |     let ret = unsafe { bpf_d_path(path, buf.as_mut_ptr() as *mut c_char, buf.len() as u32) };
214 |     if ret < 0 {
215 |         return Err(ret);
216 |     }
217 | 
218 |     Ok(ret as usize)
219 | }
220 | 
221 | /// LSM program triggered by opening a file. It denies access to directories
222 | /// which might leak information about the host (/sys/fs, /proc/acpi etc.) to
223 | /// restricted and baseline containers.
224 | #[lsm(name = "file_open")]
225 | pub fn file_open(ctx: LsmContext) -> i32 {
226 |     match try_file_open(ctx) {
227 |         Ok(ret) => ret,
228 |         Err(ret) => ret,
229 |     }
230 | }
231 | 
232 | fn try_file_open(ctx: LsmContext) -> Result<i32, i32> {
233 |     let (container_id, policy_level) = get_container_and_policy_level()?;
234 |     match policy_level {
235 |         ContainerPolicyLevel::NotFound => {
236 |             return Ok(0);
237 |         }
238 |         ContainerPolicyLevel::Lockc => {
239 |             return Ok(0);
240 |         }
241 |         ContainerPolicyLevel::Restricted => {}
242 |         ContainerPolicyLevel::Offline => {}
243 |         ContainerPolicyLevel::Baseline => {}
244 |         ContainerPolicyLevel::Privileged => {
245 |             return Ok(0);
246 |         }
247 |     }
248 | 
249 |     let buf = unsafe {
250 |         let buf_ptr = PATH_BUF.get_ptr_mut(0).ok_or(0)?;
251 |         &mut *buf_ptr
252 |     };
253 | 
254 |     let p = unsafe {
255 |         let f: *const file = ctx.arg(0);
256 |         let p = &(*f).f_path as *const _ as *mut path;
257 |         let len = my_bpf_d_path(p, &mut buf.path).map_err(|_| 0)?;
258 |         if len >= PATH_LEN {
259 |             return Err(0);
260 |         }
261 |         core::str::from_utf8_unchecked(&buf.path[..len])
262 |     };
263 | 
264 |     let container_id = container_id.ok_or(-1)?;
265 |     let container_id = unsafe { container_id.as_str() };
266 | 
267 |     if p.starts_with("/sys/devices")
268 |         || p.starts_with("/sys/fs/cgroup")
269 |         || p.starts_with("/sys/kernel/mm")
270 |     {
271 |         return Ok(0);
272 |     }
273 | 
274 |     if p.starts_with("/proc/acpi")
275 |         || p.starts_with("/sys/")
276 |         || p.starts_with("/var/run/secrets/kubernetes.io")
277 |     {
278 |         error!(&ctx, "file_open: {}: deny opening {}", container_id, p);
279 |         return Err(-1);
280 |     }
281 | 
282 |     Ok(0)
283 | }
284 | 
285 | #[lsm(name = "socket_sendmsg")]
286 | pub fn socket_sendmsg(ctx: LsmContext) -> i32 {
287 |     match try_socket_sendmsg(ctx) {
288 |         Ok(ret) => ret,
289 |         Err(ret) => ret,
290 |     }
291 | }
292 | 
293 | fn try_socket_sendmsg(ctx: LsmContext) -> Result<i32, i32> {
294 |     let (container_id, policy_level) = get_container_and_policy_level()?;
295 |     match policy_level {
296 |         ContainerPolicyLevel::NotFound => {
297 |             return Ok(0);
298 |         }
299 |         ContainerPolicyLevel::Lockc => {
300 |             return Ok(0);
301 |         }
302 |         ContainerPolicyLevel::Restricted => {}
303 |         ContainerPolicyLevel::Offline => {
304 |             return Err(-1);
305 |         }
306 |         ContainerPolicyLevel::Baseline => {}
307 |         ContainerPolicyLevel::Privileged => {
308 |             return Ok(0);
309 |         }
310 |     }
311 | 
312 |     let container_id = container_id.ok_or(-1)?;
313 |     let container_id = unsafe { container_id.as_str() };
314 |     let pid = ctx.pid();
315 |     let sock: *const socket = unsafe { ctx.arg(0) };
316 |     let txhash = unsafe { (*(*sock).sk).sk_txhash };
317 |     debug!(
318 |         &ctx,
319 |         "socket_sendmsg: container_id: {}, pid: {}, txhash: {}", container_id, pid, txhash
320 |     );
321 | 
322 |     Ok(0)
323 | }
324 | 
325 | #[lsm(name = "socket_recvmsg")]
326 | pub fn socket_recvmsg(ctx: LsmContext) -> i32 {
327 |     match try_socket_recvmsg(ctx) {
328 |         Ok(ret) => ret,
329 |         Err(ret) => ret,
330 |     }
331 | }
332 | 
333 | fn try_socket_recvmsg(ctx: LsmContext) -> Result<i32, i32> {
334 |     let (container_id, policy_level) = get_container_and_policy_level()?;
335 |     match policy_level {
336 |         ContainerPolicyLevel::NotFound => {
337 |             return Ok(0);
338 |         }
339 |         ContainerPolicyLevel::Lockc => {
340 |             return Ok(0);
341 |         }
342 |         ContainerPolicyLevel::Restricted => {}
343 |         ContainerPolicyLevel::Offline => {
344 |             return Err(-1);
345 |         }
346 |         ContainerPolicyLevel::Baseline => {}
347 |         ContainerPolicyLevel::Privileged => {
348 |             return Ok(0);
349 |         }
350 |     }
351 | 
352 | 
let container_id = container_id.ok_or(-1)?; 353 | let container_id = unsafe { container_id.as_str() }; 354 | let pid = ctx.pid(); 355 | let sock: *const socket = unsafe { ctx.arg(0) }; 356 | let txhash = unsafe { (*(*sock).sk).sk_txhash }; 357 | match unsafe { (*(*sock).sk).__sk_common.skc_family } { 358 | AF_INET => { 359 | let src_addr = u32::from_be(unsafe { 360 | (*(*sock).sk) 361 | .__sk_common 362 | .__bindgen_anon_1 363 | .__bindgen_anon_1 364 | .skc_rcv_saddr 365 | }); 366 | debug!( 367 | &ctx, 368 | "socket_recvmsg: container_id: {}, pid: {}, src_addr: {:ipv4}, txhash: {}", 369 | container_id, 370 | pid, 371 | src_addr, 372 | txhash 373 | ); 374 | } 375 | AF_INET6 => { 376 | let src_addr = unsafe { (*(*sock).sk).__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr8 }; 377 | debug!( 378 | &ctx, 379 | "socket_recvmsg: container_id: {}, pid: {}, src_addr: {:ipv6}, txhash: {}", 380 | container_id, 381 | pid, 382 | src_addr, 383 | txhash 384 | ); 385 | } 386 | _ => {} 387 | }; 388 | 389 | Ok(0) 390 | } 391 | 392 | #[panic_handler] 393 | fn panic(_info: &core::panic::PanicInfo) -> ! { 394 | unsafe { core::hint::unreachable_unchecked() } 395 | } 396 | -------------------------------------------------------------------------------- /contrib/etc/lockc/lockc.toml: -------------------------------------------------------------------------------- 1 | # Container runtime process names to monitor. 2 | runtimes = ["runc"] 3 | 4 | # Paths which are allowed to bind mount from host filesystem to container 5 | # filesystem in containers with "restricted" policy. 6 | # By default, these are only directories used by container runtimes (i.e. runc), 7 | # engines (i.e. containerd, cri-o, podman) and kubelet. 8 | allowed_paths_mount_restricted = [ 9 | # Path to Pseudo-Terminal Device, needed for -it option in container runtimes. 10 | "/dev/pts", 11 | # Storage directory used by libpod (podman, cri-o). 12 | "/var/lib/containers/storage", 13 | # Storage directory used by docker (aufs driver). 14 | "/var/lib/docker/aufs", 15 | # Storage directory used by docker (btrfs driver). 16 | "/var/lib/docker/btrfs", 17 | # Storage directory used by docker (devmapper driver). 18 | "/var/lib/docker/devmapper", 19 | # Storage directory used by docker (overlay driver) 20 | "/var/lib/docker/overlay", 21 | # Storage directory used by docker (overlay2 driver). 22 | "/var/lib/docker/overlay2", 23 | # Storage directory used by docker (vfs driver). 24 | "/var/lib/docker/vfs", 25 | # Storage directory used by docker (zfs driver). 26 | "/var/lib/docker/zfs", 27 | # Storage directory used by containerd. 28 | "/var/run/container", 29 | # Storage directory used by CRI containerd. 30 | "/run/containerd/io.containerd.runtime.v1.linux", 31 | # Storage directory used by CRI containerd. 32 | "/run/containerd/io.containerd.runtime.v2.task", 33 | # Data directory used by docker. 34 | "/var/lib/docker/containers", 35 | # Sandbox directory used by containerd. 36 | "/run/containerd/io.containerd.grpc.v1.cri/sandboxes", 37 | # Sandbox directory used by containerd. 38 | "/var/lib/containerd/io.containerd.grpc.v1.cri/sandboxes", 39 | # Misc cgroup controller. 40 | "/sys/fs/cgroup/misc", 41 | # RDMA controller. 42 | "/sys/fs/cgroup/rdma", 43 | # Block I/O controller for libpod (podman, cri-o). 44 | "/sys/fs/cgroup/blkio/machine.slice", 45 | # CPU accounting controller for libpod (podman, cri-o). 46 | "/sys/fs/cgroup/cpu,cpuacct/machine.slice", 47 | # Cpusets for libpod (podman, cri-o). 
48 | "/sys/fs/cgroup/cpuset/machine.slice", 49 | # Device allowlist controller for libpod (podman, cri-o). 50 | "/sys/fs/cgroup/devices/machine.slice", 51 | # Cgroup freezer for libpod (podman, cri-o). 52 | "/sys/fs/cgroup/freezer/machine.slice", 53 | # HugeTLB controller for libpod (podman, cri-o). 54 | "/sys/fs/cgroup/hugetlb/machine.slice", 55 | # Memory controller for libpod (podman, cri-o). 56 | "/sys/fs/cgroup/memory/machine.slice", 57 | # Network classifier and priority controller for libpod (podman, cri-o). 58 | "/sys/fs/cgroup/net_cls,net_prio/machine.slice", 59 | # Perf event controller for libpod (podman, cri-o). 60 | "/sys/fs/cgroup/perf_event/machine.slice", 61 | # Process number controller for libpod (podman, cri-o). 62 | "/sys/fs/cgroup/pids/machine.slice", 63 | # Cgroup v1 hierarchy (used by systemd) for libpod (podman, cri-o). 64 | "/sys/fs/cgroup/systemd/machine.slice", 65 | # Cgroup v2 hierarchy (used by systemd) for libpod (podman, cri-o). 66 | "/sys/fs/cgroup/unified/machine.slice", 67 | # Block I/O controller for kubelet. 68 | "/sys/fs/cgroup/blkio/kubepods.slice", 69 | # CPU accounting controller for kubelet. 70 | "/sys/fs/cgroup/cpu,cpuacct/kubepods.slice", 71 | # Cpusets for libpod for kubelet. 72 | "/sys/fs/cgroup/cpuset/kubepods.slice", 73 | # Device allowlist controller for kubelet. 74 | "/sys/fs/cgroup/devices/kubepods.slice", 75 | # Cgroup freezer for kubelet. 76 | "/sys/fs/cgroup/freezer/kubepods.slice", 77 | # HugeTLB controller for kubelet. 78 | "/sys/fs/cgroup/hugetlb/kubepods.slice", 79 | # Memory controller for kubelet. 80 | "/sys/fs/cgroup/memory/kubepods.slice", 81 | # Network classifier and priority controller for kubelet. 82 | "/sys/fs/cgroup/net_cls,net_prio/kubepods.slice", 83 | # Perf event controller for kubelet. 84 | "/sys/fs/cgroup/perf_event/kubepods.slice", 85 | # Process number controller for kubelet. 86 | "/sys/fs/cgroup/pids/kubepods.slice", 87 | # Cgroup v1 hierarchy (used by systemd) for kubelet. 88 | "/sys/fs/cgroup/systemd/kubepods.slice", 89 | # Cgroup v2 hierarchy (used by systemd) for kubelet. 90 | "/sys/fs/cgroup/unified/kubepods.slice", 91 | # Block I/O controller for kubelet. 92 | "/sys/fs/cgroup/blkio/kubepods-besteffort", 93 | # CPU accounting controller for kubelet. 94 | "/sys/fs/cgroup/cpu,cpuacct/kubepods-besteffort", 95 | # Cpusets for libpod for kubelet. 96 | "/sys/fs/cgroup/cpuset/kubepods-besteffort", 97 | # Device allowlist controller for kubelet. 98 | "/sys/fs/cgroup/devices/kubepods-besteffort", 99 | # Cgroup freezer for kubelet. 100 | "/sys/fs/cgroup/freezer/kubepods-besteffort", 101 | # HugeTLB controller for kubelet. 102 | "/sys/fs/cgroup/hugetlb/kubepods-besteffort", 103 | # Memory controller for kubelet. 104 | "/sys/fs/cgroup/memory/kubepods-besteffort", 105 | # Network classifier and priority controller for kubelet. 106 | "/sys/fs/cgroup/net_cls,net_prio/kubepods-besteffort", 107 | # Perf event controller for kubelet. 108 | "/sys/fs/cgroup/perf_event/kubepods-besteffort", 109 | # Process number controller for kubelet. 110 | "/sys/fs/cgroup/pids/kubepods-besteffort", 111 | # Cgroup v1 hierarchy (used by systemd) for kubelet. 112 | "/sys/fs/cgroup/systemd/kubepods-besteffort", 113 | # Cgroup v2 hierarchy (used by systemd) for kubelet. 114 | "/sys/fs/cgroup/unified/kubepods-besteffort", 115 | # Block I/O controller for containerd. 116 | "/sys/fs/cgroup/blkio/system.slice/containerd.service", 117 | # CPU accounting controller for containerd. 
118 | "/sys/fs/cgroup/cpu,cpuacct/system.slice/containerd.service", 119 | # Cpusets for libpod for containerd. 120 | "/sys/fs/cgroup/cpuset/system.slice/containerd.service", 121 | # Device allowlist controller for containerd. 122 | "/sys/fs/cgroup/devices/system.slice/containerd.service", 123 | # Cgroup freezer for containerd. 124 | "/sys/fs/cgroup/freezer/system.slice/containerd.service", 125 | # HugeTLB controller for containerd. 126 | "/sys/fs/cgroup/hugetlb/system.slice/containerd.service", 127 | # Memory controller for containerd. 128 | "/sys/fs/cgroup/memory/system.slice/containerd.service", 129 | # Network classifier and priority controller for containerd. 130 | "/sys/fs/cgroup/net_cls,net_prio/system.slice/containerd.service", 131 | # Perf event controller for containerd. 132 | "/sys/fs/cgroup/perf_event/system.slice/containerd.service", 133 | # Process number controller for containerd. 134 | "/sys/fs/cgroup/pids/system.slice/containerd.service", 135 | # Cgroup v1 hierarchy (used by systemd) for containerd. 136 | "/sys/fs/cgroup/systemd/system.slice/containerd.service", 137 | # Cgroup v2 hierarchy (used by systemd) for containerd. 138 | "/sys/fs/cgroup/unified/system.slice/containerd.service", 139 | # Block I/O controller for docker. 140 | "/sys/fs/cgroup/blkio/docker", 141 | # CPU accounting controller for docker. 142 | "/sys/fs/cgroup/cpu,cpuacct/docker", 143 | # Cpusets for docker. 144 | "/sys/fs/cgroup/cpuset/docker", 145 | # Device allowlist controller for docker. 146 | "/sys/fs/cgroup/devices/docker", 147 | # Cgroup freezer for docker. 148 | "/sys/fs/cgroup/freezer/docker", 149 | # HugeTLB controller for docker. 150 | "/sys/fs/cgroup/hugetlb/docker", 151 | # Memory controller for docker. 152 | "/sys/fs/cgroup/memory/docker", 153 | # Network classifier and priority controller for docker. 154 | "/sys/fs/cgroup/net_cls,net_prio/docker", 155 | # Perf event controller for docker. 156 | "/sys/fs/cgroup/perf_event/docker", 157 | # Process number controller for docker. 158 | "/sys/fs/cgroup/pids/docker", 159 | # Cgroup v1 hierarchy (used by systemd) for docker. 160 | "/sys/fs/cgroup/systemd/docker", 161 | # Cgroup v2 hierarchy (used by systemd) for docker. 162 | "/sys/fs/cgroup/unified/docker", 163 | # State and ephemeral storage for kubelet. 164 | "/var/lib/kubelet/pods", 165 | ] 166 | 167 | # Paths which are allowed to bind mount from host filesystem to container 168 | # filesystem in containers with "baseline" policy. 169 | # By default, these are: 170 | # * /home 171 | # * /var/data 172 | # * directories used by container runtimes, engines and kubelet 173 | allowed_paths_mount_baseline = [ 174 | # Directories used by container runtimes, engines and kubelet. 175 | 176 | # Path to Pseudo-Terminal Device, needed for -it option in container runtimes. 177 | "/dev/pts", 178 | # Storage directory used by libpod (podman, cri-o). 179 | "/var/lib/containers/storage", 180 | # Storage directory used by docker (aufs driver). 181 | "/var/lib/docker/aufs", 182 | # Storage directory used by docker (btrfs driver). 183 | "/var/lib/docker/btrfs", 184 | # Storage directory used by docker (devmapper driver). 185 | "/var/lib/docker/devmapper", 186 | # Storage directory used by docker (overlay driver) 187 | "/var/lib/docker/overlay", 188 | # Storage directory used by docker (overlay2 driver). 189 | "/var/lib/docker/overlay2", 190 | # Storage directory used by docker (vfs driver). 191 | "/var/lib/docker/vfs", 192 | # Storage directory used by docker (zfs driver). 
193 | "/var/lib/docker/zfs", 194 | # Storage directory used by containerd. 195 | "/var/run/container", 196 | # Storage directory used by CRI containerd. 197 | "/run/containerd/io.containerd.runtime.v1.linux", 198 | # Storage directory used by CRI containerd. 199 | "/run/containerd/io.containerd.runtime.v2.task", 200 | # Data directory used by docker. 201 | "/var/lib/docker/containers", 202 | # Sandbox directory used by containerd. 203 | "/run/containerd/io.containerd.grpc.v1.cri/sandboxes", 204 | # Sandbox directory used by containerd. 205 | "/var/lib/containerd/io.containerd.grpc.v1.cri/sandboxes", 206 | # Misc cgroup controller. 207 | "/sys/fs/cgroup/misc", 208 | # RDMA controller. 209 | "/sys/fs/cgroup/rdma", 210 | # Block I/O controller for libpod (podman, cri-o). 211 | "/sys/fs/cgroup/blkio/machine.slice", 212 | # CPU accounting controller for libpod (podman, cri-o). 213 | "/sys/fs/cgroup/cpu,cpuacct/machine.slice", 214 | # Cpusets for libpod (podman, cri-o). 215 | "/sys/fs/cgroup/cpuset/machine.slice", 216 | # Device allowlist controller for libpod (podman, cri-o). 217 | "/sys/fs/cgroup/devices/machine.slice", 218 | # Cgroup freezer for libpod (podman, cri-o). 219 | "/sys/fs/cgroup/freezer/machine.slice", 220 | # HugeTLB controller for libpod (podman, cri-o). 221 | "/sys/fs/cgroup/hugetlb/machine.slice", 222 | # Memory controller for libpod (podman, cri-o). 223 | "/sys/fs/cgroup/memory/machine.slice", 224 | # Network classifier and priority controller for libpod (podman, cri-o). 225 | "/sys/fs/cgroup/net_cls,net_prio/machine.slice", 226 | # Perf event controller for libpod (podman, cri-o). 227 | "/sys/fs/cgroup/perf_event/machine.slice", 228 | # Process number controller for libpod (podman, cri-o). 229 | "/sys/fs/cgroup/pids/machine.slice", 230 | # Cgroup v1 hierarchy (used by systemd) for libpod (podman, cri-o). 231 | "/sys/fs/cgroup/systemd/machine.slice", 232 | # Cgroup v2 hierarchy (used by systemd) for libpod (podman, cri-o). 233 | "/sys/fs/cgroup/unified/machine.slice", 234 | # Block I/O controller for kubelet. 235 | "/sys/fs/cgroup/blkio/kubepods.slice", 236 | # CPU accounting controller for kubelet. 237 | "/sys/fs/cgroup/cpu,cpuacct/kubepods.slice", 238 | # Cpusets for libpod for kubelet. 239 | "/sys/fs/cgroup/cpuset/kubepods.slice", 240 | # Device allowlist controller for kubelet. 241 | "/sys/fs/cgroup/devices/kubepods.slice", 242 | # Cgroup freezer for kubelet. 243 | "/sys/fs/cgroup/freezer/kubepods.slice", 244 | # HugeTLB controller for kubelet. 245 | "/sys/fs/cgroup/hugetlb/kubepods.slice", 246 | # Memory controller for kubelet. 247 | "/sys/fs/cgroup/memory/kubepods.slice", 248 | # Network classifier and priority controller for kubelet. 249 | "/sys/fs/cgroup/net_cls,net_prio/kubepods.slice", 250 | # Perf event controller for kubelet. 251 | "/sys/fs/cgroup/perf_event/kubepods.slice", 252 | # Process number controller for kubelet. 253 | "/sys/fs/cgroup/pids/kubepods.slice", 254 | # Cgroup v1 hierarchy (used by systemd) for kubelet. 255 | "/sys/fs/cgroup/systemd/kubepods.slice", 256 | # Cgroup v2 hierarchy (used by systemd) for kubelet. 257 | "/sys/fs/cgroup/unified/kubepods.slice", 258 | # Block I/O controller for kubelet. 259 | "/sys/fs/cgroup/blkio/kubepods-besteffort", 260 | # CPU accounting controller for kubelet. 261 | "/sys/fs/cgroup/cpu,cpuacct/kubepods-besteffort", 262 | # Cpusets for libpod for kubelet. 263 | "/sys/fs/cgroup/cpuset/kubepods-besteffort", 264 | # Device allowlist controller for kubelet. 
265 | "/sys/fs/cgroup/devices/kubepods-besteffort", 266 | # Cgroup freezer for kubelet. 267 | "/sys/fs/cgroup/freezer/kubepods-besteffort", 268 | # HugeTLB controller for kubelet. 269 | "/sys/fs/cgroup/hugetlb/kubepods-besteffort", 270 | # Memory controller for kubelet. 271 | "/sys/fs/cgroup/memory/kubepods-besteffort", 272 | # Network classifier and priority controller for kubelet. 273 | "/sys/fs/cgroup/net_cls,net_prio/kubepods-besteffort", 274 | # Perf event controller for kubelet. 275 | "/sys/fs/cgroup/perf_event/kubepods-besteffort", 276 | # Process number controller for kubelet. 277 | "/sys/fs/cgroup/pids/kubepods-besteffort", 278 | # Cgroup v1 hierarchy (used by systemd) for kubelet. 279 | "/sys/fs/cgroup/systemd/kubepods-besteffort", 280 | # Cgroup v2 hierarchy (used by systemd) for kubelet. 281 | "/sys/fs/cgroup/unified/kubepods-besteffort", 282 | # Block I/O controller for containerd. 283 | "/sys/fs/cgroup/blkio/system.slice/containerd.service", 284 | # CPU accounting controller for containerd. 285 | "/sys/fs/cgroup/cpu,cpuacct/system.slice/containerd.service", 286 | # Cpusets for libpod for containerd. 287 | "/sys/fs/cgroup/cpuset/system.slice/containerd.service", 288 | # Device allowlist controller for containerd. 289 | "/sys/fs/cgroup/devices/system.slice/containerd.service", 290 | # Cgroup freezer for containerd. 291 | "/sys/fs/cgroup/freezer/system.slice/containerd.service", 292 | # HugeTLB controller for containerd. 293 | "/sys/fs/cgroup/hugetlb/system.slice/containerd.service", 294 | # Memory controller for containerd. 295 | "/sys/fs/cgroup/memory/system.slice/containerd.service", 296 | # Network classifier and priority controller for containerd. 297 | "/sys/fs/cgroup/net_cls,net_prio/system.slice/containerd.service", 298 | # Perf event controller for containerd. 299 | "/sys/fs/cgroup/perf_event/system.slice/containerd.service", 300 | # Process number controller for containerd. 301 | "/sys/fs/cgroup/pids/system.slice/containerd.service", 302 | # Cgroup v1 hierarchy (used by systemd) for containerd. 303 | "/sys/fs/cgroup/systemd/system.slice/containerd.service", 304 | # Cgroup v2 hierarchy (used by systemd) for containerd. 305 | "/sys/fs/cgroup/unified/system.slice/containerd.service", 306 | # Block I/O controller for docker. 307 | "/sys/fs/cgroup/blkio/docker", 308 | # CPU accounting controller for docker. 309 | "/sys/fs/cgroup/cpu,cpuacct/docker", 310 | # Cpusets for docker. 311 | "/sys/fs/cgroup/cpuset/docker", 312 | # Device allowlist controller for docker. 313 | "/sys/fs/cgroup/devices/docker", 314 | # Cgroup freezer for docker. 315 | "/sys/fs/cgroup/freezer/docker", 316 | # HugeTLB controller for docker. 317 | "/sys/fs/cgroup/hugetlb/docker", 318 | # Memory controller for docker. 319 | "/sys/fs/cgroup/memory/docker", 320 | # Network classifier and priority controller for docker. 321 | "/sys/fs/cgroup/net_cls,net_prio/docker", 322 | # Perf event controller for docker. 323 | "/sys/fs/cgroup/perf_event/docker", 324 | # Process number controller for docker. 325 | "/sys/fs/cgroup/pids/docker", 326 | # Cgroup v1 hierarchy (used by systemd) for docker. 327 | "/sys/fs/cgroup/systemd/docker", 328 | # Cgroup v2 hierarchy (used by systemd) for docker. 329 | "/sys/fs/cgroup/unified/docker", 330 | # State and ephemeral storage for kubelet. 331 | "/var/lib/kubelet/pods", 332 | 333 | # Directories mounted by container engine user. 
334 | 335 | "/home", 336 | "/var/data", 337 | ] 338 | 339 | allowed_paths_access_restricted = [ 340 | "cgroup:", 341 | "ipc:", 342 | "mnt:", 343 | "net:", 344 | "pid:", 345 | "pipe:", 346 | "time:", 347 | "user:", 348 | "uts:", 349 | "/bin", 350 | "/dev/console", 351 | "/dev/full", 352 | "/dev/null", 353 | "/dev/pts", 354 | "/dev/tty", 355 | "/dev/urandom", 356 | "/dev/zero", 357 | "/etc", 358 | "/home", 359 | "/lib", 360 | "/lib64", 361 | "/opt", 362 | "/pause", 363 | "/proc", 364 | "/run", 365 | "/sys/fs/cgroup", 366 | "/sys/kernel/mm", 367 | "/tmp", 368 | "/usr", 369 | "/var", 370 | ] 371 | 372 | allowed_paths_access_baseline = [ 373 | "cgroup:", 374 | "ipc:", 375 | "mnt:", 376 | "net:", 377 | "pid:", 378 | "pipe:", 379 | "time:", 380 | "user:", 381 | "uts:", 382 | "/bin", 383 | "/dev/console", 384 | "/dev/full", 385 | "/dev/null", 386 | "/dev/pts", 387 | "/dev/tty", 388 | "/dev/urandom", 389 | "/dev/zero", 390 | "/etc", 391 | "/home", 392 | "/lib", 393 | "/lib64", 394 | "/opt", 395 | "/pause", 396 | "/proc", 397 | "/run", 398 | "/sys/fs/cgroup", 399 | "/sys/kernel/mm", 400 | "/tmp", 401 | "/usr", 402 | "/var", 403 | ] 404 | 405 | denied_paths_access_restricted = [ 406 | "/proc/acpi", 407 | "/proc/sys", 408 | "/var/run/secrets/kubernetes.io", 409 | ] 410 | 411 | denied_paths_access_baseline = [ 412 | "/proc/acpi", 413 | "/var/run/secrets/kubernetes.io", 414 | ] 415 | -------------------------------------------------------------------------------- /lockc-ebpf/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | GNU GENERAL PUBLIC LICENSE 3 | Version 2, June 1991 4 | 5 | Copyright (C) 1989, 1991 Free Software Foundation, Inc. 6 | 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 7 | Everyone is permitted to copy and distribute verbatim copies 8 | of this license document, but changing it is not allowed. 9 | 10 | Preamble 11 | 12 | The licenses for most software are designed to take away your 13 | freedom to share and change it. By contrast, the GNU General Public 14 | License is intended to guarantee your freedom to share and change free 15 | software--to make sure the software is free for all its users. This 16 | General Public License applies to most of the Free Software 17 | Foundation's software and to any other program whose authors commit to 18 | using it. (Some other Free Software Foundation software is covered by 19 | the GNU Library General Public License instead.) You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | this service if you wish), that you receive source code or can get it 26 | if you want it, that you can change the software or use pieces of it 27 | in new free programs; and that you know you can do these things. 28 | 29 | To protect your rights, we need to make restrictions that forbid 30 | anyone to deny you these rights or to ask you to surrender the rights. 31 | These restrictions translate to certain responsibilities for you if you 32 | distribute copies of the software, or if you modify it. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must give the recipients all the rights that 36 | you have. You must make sure that they, too, receive or can get the 37 | source code. And you must show them these terms so they know their 38 | rights. 
39 | 40 | We protect your rights with two steps: (1) copyright the software, and 41 | (2) offer you this license which gives you legal permission to copy, 42 | distribute and/or modify the software. 43 | 44 | Also, for each author's protection and ours, we want to make certain 45 | that everyone understands that there is no warranty for this free 46 | software. If the software is modified by someone else and passed on, we 47 | want its recipients to know that what they have is not the original, so 48 | that any problems introduced by others will not reflect on the original 49 | authors' reputations. 50 | 51 | Finally, any free program is threatened constantly by software 52 | patents. We wish to avoid the danger that redistributors of a free 53 | program will individually obtain patent licenses, in effect making the 54 | program proprietary. To prevent this, we have made it clear that any 55 | patent must be licensed for everyone's free use or not licensed at all. 56 | 57 | The precise terms and conditions for copying, distribution and 58 | modification follow. 59 | 60 | GNU GENERAL PUBLIC LICENSE 61 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 62 | 63 | 0. This License applies to any program or other work which contains 64 | a notice placed by the copyright holder saying it may be distributed 65 | under the terms of this General Public License. The "Program", below, 66 | refers to any such program or work, and a "work based on the Program" 67 | means either the Program or any derivative work under copyright law: 68 | that is to say, a work containing the Program or a portion of it, 69 | either verbatim or with modifications and/or translated into another 70 | language. (Hereinafter, translation is included without limitation in 71 | the term "modification".) Each licensee is addressed as "you". 72 | 73 | Activities other than copying, distribution and modification are not 74 | covered by this License; they are outside its scope. The act of 75 | running the Program is not restricted, and the output from the Program 76 | is covered only if its contents constitute a work based on the 77 | Program (independent of having been made by running the Program). 78 | Whether that is true depends on what the Program does. 79 | 80 | 1. You may copy and distribute verbatim copies of the Program's 81 | source code as you receive it, in any medium, provided that you 82 | conspicuously and appropriately publish on each copy an appropriate 83 | copyright notice and disclaimer of warranty; keep intact all the 84 | notices that refer to this License and to the absence of any warranty; 85 | and give any other recipients of the Program a copy of this License 86 | along with the Program. 87 | 88 | You may charge a fee for the physical act of transferring a copy, and 89 | you may at your option offer warranty protection in exchange for a fee. 90 | 91 | 2. You may modify your copy or copies of the Program or any portion 92 | of it, thus forming a work based on the Program, and copy and 93 | distribute such modifications or work under the terms of Section 1 94 | above, provided that you also meet all of these conditions: 95 | 96 | a) You must cause the modified files to carry prominent notices 97 | stating that you changed the files and the date of any change. 
98 | 99 | b) You must cause any work that you distribute or publish, that in 100 | whole or in part contains or is derived from the Program or any 101 | part thereof, to be licensed as a whole at no charge to all third 102 | parties under the terms of this License. 103 | 104 | c) If the modified program normally reads commands interactively 105 | when run, you must cause it, when started running for such 106 | interactive use in the most ordinary way, to print or display an 107 | announcement including an appropriate copyright notice and a 108 | notice that there is no warranty (or else, saying that you provide 109 | a warranty) and that users may redistribute the program under 110 | these conditions, and telling the user how to view a copy of this 111 | License. (Exception: if the Program itself is interactive but 112 | does not normally print such an announcement, your work based on 113 | the Program is not required to print an announcement.) 114 | 115 | These requirements apply to the modified work as a whole. If 116 | identifiable sections of that work are not derived from the Program, 117 | and can be reasonably considered independent and separate works in 118 | themselves, then this License, and its terms, do not apply to those 119 | sections when you distribute them as separate works. But when you 120 | distribute the same sections as part of a whole which is a work based 121 | on the Program, the distribution of the whole must be on the terms of 122 | this License, whose permissions for other licensees extend to the 123 | entire whole, and thus to each and every part regardless of who wrote it. 124 | 125 | Thus, it is not the intent of this section to claim rights or contest 126 | your rights to work written entirely by you; rather, the intent is to 127 | exercise the right to control the distribution of derivative or 128 | collective works based on the Program. 129 | 130 | In addition, mere aggregation of another work not based on the Program 131 | with the Program (or with a work based on the Program) on a volume of 132 | a storage or distribution medium does not bring the other work under 133 | the scope of this License. 134 | 135 | 3. You may copy and distribute the Program (or a work based on it, 136 | under Section 2) in object code or executable form under the terms of 137 | Sections 1 and 2 above provided that you also do one of the following: 138 | 139 | a) Accompany it with the complete corresponding machine-readable 140 | source code, which must be distributed under the terms of Sections 141 | 1 and 2 above on a medium customarily used for software interchange; or, 142 | 143 | b) Accompany it with a written offer, valid for at least three 144 | years, to give any third party, for a charge no more than your 145 | cost of physically performing source distribution, a complete 146 | machine-readable copy of the corresponding source code, to be 147 | distributed under the terms of Sections 1 and 2 above on a medium 148 | customarily used for software interchange; or, 149 | 150 | c) Accompany it with the information you received as to the offer 151 | to distribute corresponding source code. (This alternative is 152 | allowed only for noncommercial distribution and only if you 153 | received the program in object code or executable form with such 154 | an offer, in accord with Subsection b above.) 155 | 156 | The source code for a work means the preferred form of the work for 157 | making modifications to it. 
For an executable work, complete source 158 | code means all the source code for all modules it contains, plus any 159 | associated interface definition files, plus the scripts used to 160 | control compilation and installation of the executable. However, as a 161 | special exception, the source code distributed need not include 162 | anything that is normally distributed (in either source or binary 163 | form) with the major components (compiler, kernel, and so on) of the 164 | operating system on which the executable runs, unless that component 165 | itself accompanies the executable. 166 | 167 | If distribution of executable or object code is made by offering 168 | access to copy from a designated place, then offering equivalent 169 | access to copy the source code from the same place counts as 170 | distribution of the source code, even though third parties are not 171 | compelled to copy the source along with the object code. 172 | 173 | 4. You may not copy, modify, sublicense, or distribute the Program 174 | except as expressly provided under this License. Any attempt 175 | otherwise to copy, modify, sublicense or distribute the Program is 176 | void, and will automatically terminate your rights under this License. 177 | However, parties who have received copies, or rights, from you under 178 | this License will not have their licenses terminated so long as such 179 | parties remain in full compliance. 180 | 181 | 5. You are not required to accept this License, since you have not 182 | signed it. However, nothing else grants you permission to modify or 183 | distribute the Program or its derivative works. These actions are 184 | prohibited by law if you do not accept this License. Therefore, by 185 | modifying or distributing the Program (or any work based on the 186 | Program), you indicate your acceptance of this License to do so, and 187 | all its terms and conditions for copying, distributing or modifying 188 | the Program or works based on it. 189 | 190 | 6. Each time you redistribute the Program (or any work based on the 191 | Program), the recipient automatically receives a license from the 192 | original licensor to copy, distribute or modify the Program subject to 193 | these terms and conditions. You may not impose any further 194 | restrictions on the recipients' exercise of the rights granted herein. 195 | You are not responsible for enforcing compliance by third parties to 196 | this License. 197 | 198 | 7. If, as a consequence of a court judgment or allegation of patent 199 | infringement or for any other reason (not limited to patent issues), 200 | conditions are imposed on you (whether by court order, agreement or 201 | otherwise) that contradict the conditions of this License, they do not 202 | excuse you from the conditions of this License. If you cannot 203 | distribute so as to satisfy simultaneously your obligations under this 204 | License and any other pertinent obligations, then as a consequence you 205 | may not distribute the Program at all. For example, if a patent 206 | license would not permit royalty-free redistribution of the Program by 207 | all those who receive copies directly or indirectly through you, then 208 | the only way you could satisfy both it and this License would be to 209 | refrain entirely from distribution of the Program. 
210 | 211 | If any portion of this section is held invalid or unenforceable under 212 | any particular circumstance, the balance of the section is intended to 213 | apply and the section as a whole is intended to apply in other 214 | circumstances. 215 | 216 | It is not the purpose of this section to induce you to infringe any 217 | patents or other property right claims or to contest validity of any 218 | such claims; this section has the sole purpose of protecting the 219 | integrity of the free software distribution system, which is 220 | implemented by public license practices. Many people have made 221 | generous contributions to the wide range of software distributed 222 | through that system in reliance on consistent application of that 223 | system; it is up to the author/donor to decide if he or she is willing 224 | to distribute software through any other system and a licensee cannot 225 | impose that choice. 226 | 227 | This section is intended to make thoroughly clear what is believed to 228 | be a consequence of the rest of this License. 229 | 230 | 8. If the distribution and/or use of the Program is restricted in 231 | certain countries either by patents or by copyrighted interfaces, the 232 | original copyright holder who places the Program under this License 233 | may add an explicit geographical distribution limitation excluding 234 | those countries, so that distribution is permitted only in or among 235 | countries not thus excluded. In such case, this License incorporates 236 | the limitation as if written in the body of this License. 237 | 238 | 9. The Free Software Foundation may publish revised and/or new versions 239 | of the General Public License from time to time. Such new versions will 240 | be similar in spirit to the present version, but may differ in detail to 241 | address new problems or concerns. 242 | 243 | Each version is given a distinguishing version number. If the Program 244 | specifies a version number of this License which applies to it and "any 245 | later version", you have the option of following the terms and conditions 246 | either of that version or of any later version published by the Free 247 | Software Foundation. If the Program does not specify a version number of 248 | this License, you may choose any version ever published by the Free Software 249 | Foundation. 250 | 251 | 10. If you wish to incorporate parts of the Program into other free 252 | programs whose distribution conditions are different, write to the author 253 | to ask for permission. For software which is copyrighted by the Free 254 | Software Foundation, write to the Free Software Foundation; we sometimes 255 | make exceptions for this. Our decision will be guided by the two goals 256 | of preserving the free status of all derivatives of our free software and 257 | of promoting the sharing and reuse of software generally. 258 | 259 | NO WARRANTY 260 | 261 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 262 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 263 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 264 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 265 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 266 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 267 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE 268 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 269 | REPAIR OR CORRECTION. 270 | 271 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 272 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 273 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 274 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 275 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 276 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 277 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 278 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 279 | POSSIBILITY OF SUCH DAMAGES. 280 | 281 | END OF TERMS AND CONDITIONS 282 | 283 | How to Apply These Terms to Your New Programs 284 | 285 | If you develop a new program, and you want it to be of the greatest 286 | possible use to the public, the best way to achieve this is to make it 287 | free software which everyone can redistribute and change under these terms. 288 | 289 | To do so, attach the following notices to the program. It is safest 290 | to attach them to the start of each source file to most effectively 291 | convey the exclusion of warranty; and each file should have at least 292 | the "copyright" line and a pointer to where the full notice is found. 293 | 294 | 295 | Copyright (C) 296 | 297 | This program is free software; you can redistribute it and/or modify 298 | it under the terms of the GNU General Public License as published by 299 | the Free Software Foundation; either version 2 of the License, or 300 | (at your option) any later version. 301 | 302 | This program is distributed in the hope that it will be useful, 303 | but WITHOUT ANY WARRANTY; without even the implied warranty of 304 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 305 | GNU General Public License for more details. 306 | 307 | You should have received a copy of the GNU General Public License 308 | along with this program; if not, write to the Free Software 309 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 310 | 311 | 312 | Also add information on how to contact you by electronic and paper mail. 313 | 314 | If the program is interactive, make it output a short notice like this 315 | when it starts in an interactive mode: 316 | 317 | Gnomovision version 69, Copyright (C) year name of author 318 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 319 | This is free software, and you are welcome to redistribute it 320 | under certain conditions; type `show c' for details. 321 | 322 | The hypothetical commands `show w' and `show c' should show the appropriate 323 | parts of the General Public License. Of course, the commands you use may 324 | be called something other than `show w' and `show c'; they could even be 325 | mouse-clicks or menu items--whatever suits your program. 326 | 327 | You should also get your employer (if you work as a programmer) or your 328 | school, if any, to sign a "copyright disclaimer" for the program, if 329 | necessary. Here is a sample; alter the names: 330 | 331 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 332 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 
333 | 334 | , 1 April 1989 335 | Ty Coon, President of Vice 336 | 337 | This General Public License does not permit incorporating your program into 338 | proprietary programs. If your program is a subroutine library, you may 339 | consider it more useful to permit linking proprietary applications with the 340 | library. If this is what you want to do, use the GNU Library General 341 | Public License instead of this License. 342 | -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0 2 | # 3 | # clang-format configuration file. Intended for clang-format >= 4. 4 | # 5 | # For more information, see: 6 | # 7 | # Documentation/process/clang-format.rst 8 | # https://clang.llvm.org/docs/ClangFormat.html 9 | # https://clang.llvm.org/docs/ClangFormatStyleOptions.html 10 | # 11 | --- 12 | AccessModifierOffset: -4 13 | AlignAfterOpenBracket: Align 14 | AlignConsecutiveAssignments: false 15 | AlignConsecutiveDeclarations: false 16 | #AlignEscapedNewlines: Left # Unknown to clang-format-4.0 17 | AlignOperands: true 18 | AlignTrailingComments: false 19 | AllowAllParametersOfDeclarationOnNextLine: false 20 | AllowShortBlocksOnASingleLine: false 21 | AllowShortCaseLabelsOnASingleLine: false 22 | AllowShortFunctionsOnASingleLine: None 23 | AllowShortIfStatementsOnASingleLine: false 24 | AllowShortLoopsOnASingleLine: false 25 | AlwaysBreakAfterDefinitionReturnType: None 26 | AlwaysBreakAfterReturnType: None 27 | AlwaysBreakBeforeMultilineStrings: false 28 | AlwaysBreakTemplateDeclarations: false 29 | BinPackArguments: true 30 | BinPackParameters: true 31 | BraceWrapping: 32 | AfterClass: false 33 | AfterControlStatement: false 34 | AfterEnum: false 35 | AfterFunction: true 36 | AfterNamespace: true 37 | AfterObjCDeclaration: false 38 | AfterStruct: false 39 | AfterUnion: false 40 | #AfterExternBlock: false # Unknown to clang-format-5.0 41 | BeforeCatch: false 42 | BeforeElse: false 43 | IndentBraces: false 44 | #SplitEmptyFunction: true # Unknown to clang-format-4.0 45 | #SplitEmptyRecord: true # Unknown to clang-format-4.0 46 | #SplitEmptyNamespace: true # Unknown to clang-format-4.0 47 | BreakBeforeBinaryOperators: None 48 | BreakBeforeBraces: Custom 49 | #BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0 50 | BreakBeforeTernaryOperators: false 51 | BreakConstructorInitializersBeforeComma: false 52 | #BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0 53 | BreakAfterJavaFieldAnnotations: false 54 | BreakStringLiterals: false 55 | ColumnLimit: 80 56 | CommentPragmas: '^ IWYU pragma:' 57 | #CompactNamespaces: false # Unknown to clang-format-4.0 58 | ConstructorInitializerAllOnOneLineOrOnePerLine: false 59 | ConstructorInitializerIndentWidth: 8 60 | ContinuationIndentWidth: 8 61 | Cpp11BracedListStyle: false 62 | DerivePointerAlignment: false 63 | DisableFormat: false 64 | ExperimentalAutoDetectBinPacking: false 65 | #FixNamespaceComments: false # Unknown to clang-format-4.0 66 | 67 | # Taken from: 68 | # git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \ 69 | # | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \ 70 | # | sort | uniq 71 | ForEachMacros: 72 | - 'apei_estatus_for_each_section' 73 | - 'ata_for_each_dev' 74 | - 'ata_for_each_link' 75 | - '__ata_qc_for_each' 76 | - 'ata_qc_for_each' 77 | - 'ata_qc_for_each_raw' 78 | - 'ata_qc_for_each_with_internal' 79 | - 
'ax25_for_each' 80 | - 'ax25_uid_for_each' 81 | - '__bio_for_each_bvec' 82 | - 'bio_for_each_bvec' 83 | - 'bio_for_each_bvec_all' 84 | - 'bio_for_each_integrity_vec' 85 | - '__bio_for_each_segment' 86 | - 'bio_for_each_segment' 87 | - 'bio_for_each_segment_all' 88 | - 'bio_list_for_each' 89 | - 'bip_for_each_vec' 90 | - 'bitmap_for_each_clear_region' 91 | - 'bitmap_for_each_set_region' 92 | - 'blkg_for_each_descendant_post' 93 | - 'blkg_for_each_descendant_pre' 94 | - 'blk_queue_for_each_rl' 95 | - 'bond_for_each_slave' 96 | - 'bond_for_each_slave_rcu' 97 | - 'bpf_for_each_spilled_reg' 98 | - 'btree_for_each_safe128' 99 | - 'btree_for_each_safe32' 100 | - 'btree_for_each_safe64' 101 | - 'btree_for_each_safel' 102 | - 'card_for_each_dev' 103 | - 'cgroup_taskset_for_each' 104 | - 'cgroup_taskset_for_each_leader' 105 | - 'cpufreq_for_each_entry' 106 | - 'cpufreq_for_each_entry_idx' 107 | - 'cpufreq_for_each_valid_entry' 108 | - 'cpufreq_for_each_valid_entry_idx' 109 | - 'css_for_each_child' 110 | - 'css_for_each_descendant_post' 111 | - 'css_for_each_descendant_pre' 112 | - 'device_for_each_child_node' 113 | - 'displayid_iter_for_each' 114 | - 'dma_fence_chain_for_each' 115 | - 'do_for_each_ftrace_op' 116 | - 'drm_atomic_crtc_for_each_plane' 117 | - 'drm_atomic_crtc_state_for_each_plane' 118 | - 'drm_atomic_crtc_state_for_each_plane_state' 119 | - 'drm_atomic_for_each_plane_damage' 120 | - 'drm_client_for_each_connector_iter' 121 | - 'drm_client_for_each_modeset' 122 | - 'drm_connector_for_each_possible_encoder' 123 | - 'drm_for_each_bridge_in_chain' 124 | - 'drm_for_each_connector_iter' 125 | - 'drm_for_each_crtc' 126 | - 'drm_for_each_crtc_reverse' 127 | - 'drm_for_each_encoder' 128 | - 'drm_for_each_encoder_mask' 129 | - 'drm_for_each_fb' 130 | - 'drm_for_each_legacy_plane' 131 | - 'drm_for_each_plane' 132 | - 'drm_for_each_plane_mask' 133 | - 'drm_for_each_privobj' 134 | - 'drm_mm_for_each_hole' 135 | - 'drm_mm_for_each_node' 136 | - 'drm_mm_for_each_node_in_range' 137 | - 'drm_mm_for_each_node_safe' 138 | - 'flow_action_for_each' 139 | - 'for_each_acpi_dev_match' 140 | - 'for_each_active_dev_scope' 141 | - 'for_each_active_drhd_unit' 142 | - 'for_each_active_iommu' 143 | - 'for_each_aggr_pgid' 144 | - 'for_each_available_child_of_node' 145 | - 'for_each_bio' 146 | - 'for_each_board_func_rsrc' 147 | - 'for_each_bvec' 148 | - 'for_each_card_auxs' 149 | - 'for_each_card_auxs_safe' 150 | - 'for_each_card_components' 151 | - 'for_each_card_dapms' 152 | - 'for_each_card_pre_auxs' 153 | - 'for_each_card_prelinks' 154 | - 'for_each_card_rtds' 155 | - 'for_each_card_rtds_safe' 156 | - 'for_each_card_widgets' 157 | - 'for_each_card_widgets_safe' 158 | - 'for_each_cgroup_storage_type' 159 | - 'for_each_child_of_node' 160 | - 'for_each_clear_bit' 161 | - 'for_each_clear_bit_from' 162 | - 'for_each_cmsghdr' 163 | - 'for_each_compatible_node' 164 | - 'for_each_component_dais' 165 | - 'for_each_component_dais_safe' 166 | - 'for_each_comp_order' 167 | - 'for_each_console' 168 | - 'for_each_cpu' 169 | - 'for_each_cpu_and' 170 | - 'for_each_cpu_not' 171 | - 'for_each_cpu_wrap' 172 | - 'for_each_dapm_widgets' 173 | - 'for_each_dev_addr' 174 | - 'for_each_dev_scope' 175 | - 'for_each_dma_cap_mask' 176 | - 'for_each_dpcm_be' 177 | - 'for_each_dpcm_be_rollback' 178 | - 'for_each_dpcm_be_safe' 179 | - 'for_each_dpcm_fe' 180 | - 'for_each_drhd_unit' 181 | - 'for_each_dss_dev' 182 | - 'for_each_dtpm_table' 183 | - 'for_each_efi_memory_desc' 184 | - 'for_each_efi_memory_desc_in_map' 185 | - 'for_each_element' 186 
| - 'for_each_element_extid' 187 | - 'for_each_element_id' 188 | - 'for_each_endpoint_of_node' 189 | - 'for_each_evictable_lru' 190 | - 'for_each_fib6_node_rt_rcu' 191 | - 'for_each_fib6_walker_rt' 192 | - 'for_each_free_mem_pfn_range_in_zone' 193 | - 'for_each_free_mem_pfn_range_in_zone_from' 194 | - 'for_each_free_mem_range' 195 | - 'for_each_free_mem_range_reverse' 196 | - 'for_each_func_rsrc' 197 | - 'for_each_hstate' 198 | - 'for_each_if' 199 | - 'for_each_iommu' 200 | - 'for_each_ip_tunnel_rcu' 201 | - 'for_each_irq_nr' 202 | - 'for_each_link_codecs' 203 | - 'for_each_link_cpus' 204 | - 'for_each_link_platforms' 205 | - 'for_each_lru' 206 | - 'for_each_matching_node' 207 | - 'for_each_matching_node_and_match' 208 | - 'for_each_member' 209 | - 'for_each_memcg_cache_index' 210 | - 'for_each_mem_pfn_range' 211 | - '__for_each_mem_range' 212 | - 'for_each_mem_range' 213 | - '__for_each_mem_range_rev' 214 | - 'for_each_mem_range_rev' 215 | - 'for_each_mem_region' 216 | - 'for_each_migratetype_order' 217 | - 'for_each_msi_entry' 218 | - 'for_each_msi_entry_safe' 219 | - 'for_each_net' 220 | - 'for_each_net_continue_reverse' 221 | - 'for_each_netdev' 222 | - 'for_each_netdev_continue' 223 | - 'for_each_netdev_continue_rcu' 224 | - 'for_each_netdev_continue_reverse' 225 | - 'for_each_netdev_feature' 226 | - 'for_each_netdev_in_bond_rcu' 227 | - 'for_each_netdev_rcu' 228 | - 'for_each_netdev_reverse' 229 | - 'for_each_netdev_safe' 230 | - 'for_each_net_rcu' 231 | - 'for_each_new_connector_in_state' 232 | - 'for_each_new_crtc_in_state' 233 | - 'for_each_new_mst_mgr_in_state' 234 | - 'for_each_new_plane_in_state' 235 | - 'for_each_new_private_obj_in_state' 236 | - 'for_each_node' 237 | - 'for_each_node_by_name' 238 | - 'for_each_node_by_type' 239 | - 'for_each_node_mask' 240 | - 'for_each_node_state' 241 | - 'for_each_node_with_cpus' 242 | - 'for_each_node_with_property' 243 | - 'for_each_nonreserved_multicast_dest_pgid' 244 | - 'for_each_of_allnodes' 245 | - 'for_each_of_allnodes_from' 246 | - 'for_each_of_cpu_node' 247 | - 'for_each_of_pci_range' 248 | - 'for_each_old_connector_in_state' 249 | - 'for_each_old_crtc_in_state' 250 | - 'for_each_old_mst_mgr_in_state' 251 | - 'for_each_oldnew_connector_in_state' 252 | - 'for_each_oldnew_crtc_in_state' 253 | - 'for_each_oldnew_mst_mgr_in_state' 254 | - 'for_each_oldnew_plane_in_state' 255 | - 'for_each_oldnew_plane_in_state_reverse' 256 | - 'for_each_oldnew_private_obj_in_state' 257 | - 'for_each_old_plane_in_state' 258 | - 'for_each_old_private_obj_in_state' 259 | - 'for_each_online_cpu' 260 | - 'for_each_online_node' 261 | - 'for_each_online_pgdat' 262 | - 'for_each_pci_bridge' 263 | - 'for_each_pci_dev' 264 | - 'for_each_pci_msi_entry' 265 | - 'for_each_pcm_streams' 266 | - 'for_each_physmem_range' 267 | - 'for_each_populated_zone' 268 | - 'for_each_possible_cpu' 269 | - 'for_each_present_cpu' 270 | - 'for_each_prime_number' 271 | - 'for_each_prime_number_from' 272 | - 'for_each_process' 273 | - 'for_each_process_thread' 274 | - 'for_each_prop_codec_conf' 275 | - 'for_each_prop_dai_codec' 276 | - 'for_each_prop_dai_cpu' 277 | - 'for_each_prop_dlc_codecs' 278 | - 'for_each_prop_dlc_cpus' 279 | - 'for_each_prop_dlc_platforms' 280 | - 'for_each_property_of_node' 281 | - 'for_each_registered_fb' 282 | - 'for_each_requested_gpio' 283 | - 'for_each_requested_gpio_in_range' 284 | - 'for_each_reserved_mem_range' 285 | - 'for_each_reserved_mem_region' 286 | - 'for_each_rtd_codec_dais' 287 | - 'for_each_rtd_components' 288 | - 'for_each_rtd_cpu_dais' 289 
| - 'for_each_rtd_dais' 290 | - 'for_each_set_bit' 291 | - 'for_each_set_bit_from' 292 | - 'for_each_set_clump8' 293 | - 'for_each_sg' 294 | - 'for_each_sg_dma_page' 295 | - 'for_each_sg_page' 296 | - 'for_each_sgtable_dma_page' 297 | - 'for_each_sgtable_dma_sg' 298 | - 'for_each_sgtable_page' 299 | - 'for_each_sgtable_sg' 300 | - 'for_each_sibling_event' 301 | - 'for_each_subelement' 302 | - 'for_each_subelement_extid' 303 | - 'for_each_subelement_id' 304 | - '__for_each_thread' 305 | - 'for_each_thread' 306 | - 'for_each_unicast_dest_pgid' 307 | - 'for_each_vsi' 308 | - 'for_each_wakeup_source' 309 | - 'for_each_zone' 310 | - 'for_each_zone_zonelist' 311 | - 'for_each_zone_zonelist_nodemask' 312 | - 'fwnode_for_each_available_child_node' 313 | - 'fwnode_for_each_child_node' 314 | - 'fwnode_graph_for_each_endpoint' 315 | - 'gadget_for_each_ep' 316 | - 'genradix_for_each' 317 | - 'genradix_for_each_from' 318 | - 'hash_for_each' 319 | - 'hash_for_each_possible' 320 | - 'hash_for_each_possible_rcu' 321 | - 'hash_for_each_possible_rcu_notrace' 322 | - 'hash_for_each_possible_safe' 323 | - 'hash_for_each_rcu' 324 | - 'hash_for_each_safe' 325 | - 'hctx_for_each_ctx' 326 | - 'hlist_bl_for_each_entry' 327 | - 'hlist_bl_for_each_entry_rcu' 328 | - 'hlist_bl_for_each_entry_safe' 329 | - 'hlist_for_each' 330 | - 'hlist_for_each_entry' 331 | - 'hlist_for_each_entry_continue' 332 | - 'hlist_for_each_entry_continue_rcu' 333 | - 'hlist_for_each_entry_continue_rcu_bh' 334 | - 'hlist_for_each_entry_from' 335 | - 'hlist_for_each_entry_from_rcu' 336 | - 'hlist_for_each_entry_rcu' 337 | - 'hlist_for_each_entry_rcu_bh' 338 | - 'hlist_for_each_entry_rcu_notrace' 339 | - 'hlist_for_each_entry_safe' 340 | - 'hlist_for_each_entry_srcu' 341 | - '__hlist_for_each_rcu' 342 | - 'hlist_for_each_safe' 343 | - 'hlist_nulls_for_each_entry' 344 | - 'hlist_nulls_for_each_entry_from' 345 | - 'hlist_nulls_for_each_entry_rcu' 346 | - 'hlist_nulls_for_each_entry_safe' 347 | - 'i3c_bus_for_each_i2cdev' 348 | - 'i3c_bus_for_each_i3cdev' 349 | - 'ide_host_for_each_port' 350 | - 'ide_port_for_each_dev' 351 | - 'ide_port_for_each_present_dev' 352 | - 'idr_for_each_entry' 353 | - 'idr_for_each_entry_continue' 354 | - 'idr_for_each_entry_continue_ul' 355 | - 'idr_for_each_entry_ul' 356 | - 'in_dev_for_each_ifa_rcu' 357 | - 'in_dev_for_each_ifa_rtnl' 358 | - 'inet_bind_bucket_for_each' 359 | - 'inet_lhash2_for_each_icsk_rcu' 360 | - 'key_for_each' 361 | - 'key_for_each_safe' 362 | - 'klp_for_each_func' 363 | - 'klp_for_each_func_safe' 364 | - 'klp_for_each_func_static' 365 | - 'klp_for_each_object' 366 | - 'klp_for_each_object_safe' 367 | - 'klp_for_each_object_static' 368 | - 'kunit_suite_for_each_test_case' 369 | - 'kvm_for_each_memslot' 370 | - 'kvm_for_each_vcpu' 371 | - 'list_for_each' 372 | - 'list_for_each_codec' 373 | - 'list_for_each_codec_safe' 374 | - 'list_for_each_continue' 375 | - 'list_for_each_entry' 376 | - 'list_for_each_entry_continue' 377 | - 'list_for_each_entry_continue_rcu' 378 | - 'list_for_each_entry_continue_reverse' 379 | - 'list_for_each_entry_from' 380 | - 'list_for_each_entry_from_rcu' 381 | - 'list_for_each_entry_from_reverse' 382 | - 'list_for_each_entry_lockless' 383 | - 'list_for_each_entry_rcu' 384 | - 'list_for_each_entry_reverse' 385 | - 'list_for_each_entry_safe' 386 | - 'list_for_each_entry_safe_continue' 387 | - 'list_for_each_entry_safe_from' 388 | - 'list_for_each_entry_safe_reverse' 389 | - 'list_for_each_entry_srcu' 390 | - 'list_for_each_prev' 391 | - 'list_for_each_prev_safe' 392 | - 
'list_for_each_safe' 393 | - 'llist_for_each' 394 | - 'llist_for_each_entry' 395 | - 'llist_for_each_entry_safe' 396 | - 'llist_for_each_safe' 397 | - 'mci_for_each_dimm' 398 | - 'media_device_for_each_entity' 399 | - 'media_device_for_each_intf' 400 | - 'media_device_for_each_link' 401 | - 'media_device_for_each_pad' 402 | - 'nanddev_io_for_each_page' 403 | - 'netdev_for_each_lower_dev' 404 | - 'netdev_for_each_lower_private' 405 | - 'netdev_for_each_lower_private_rcu' 406 | - 'netdev_for_each_mc_addr' 407 | - 'netdev_for_each_uc_addr' 408 | - 'netdev_for_each_upper_dev_rcu' 409 | - 'netdev_hw_addr_list_for_each' 410 | - 'nft_rule_for_each_expr' 411 | - 'nla_for_each_attr' 412 | - 'nla_for_each_nested' 413 | - 'nlmsg_for_each_attr' 414 | - 'nlmsg_for_each_msg' 415 | - 'nr_neigh_for_each' 416 | - 'nr_neigh_for_each_safe' 417 | - 'nr_node_for_each' 418 | - 'nr_node_for_each_safe' 419 | - 'of_for_each_phandle' 420 | - 'of_property_for_each_string' 421 | - 'of_property_for_each_u32' 422 | - 'pci_bus_for_each_resource' 423 | - 'pcl_for_each_chunk' 424 | - 'pcl_for_each_segment' 425 | - 'pcm_for_each_format' 426 | - 'ping_portaddr_for_each_entry' 427 | - 'plist_for_each' 428 | - 'plist_for_each_continue' 429 | - 'plist_for_each_entry' 430 | - 'plist_for_each_entry_continue' 431 | - 'plist_for_each_entry_safe' 432 | - 'plist_for_each_safe' 433 | - 'pnp_for_each_card' 434 | - 'pnp_for_each_dev' 435 | - 'protocol_for_each_card' 436 | - 'protocol_for_each_dev' 437 | - 'queue_for_each_hw_ctx' 438 | - 'radix_tree_for_each_slot' 439 | - 'radix_tree_for_each_tagged' 440 | - 'rb_for_each' 441 | - 'rbtree_postorder_for_each_entry_safe' 442 | - 'rdma_for_each_block' 443 | - 'rdma_for_each_port' 444 | - 'rdma_umem_for_each_dma_block' 445 | - 'resource_list_for_each_entry' 446 | - 'resource_list_for_each_entry_safe' 447 | - 'rhl_for_each_entry_rcu' 448 | - 'rhl_for_each_rcu' 449 | - 'rht_for_each' 450 | - 'rht_for_each_entry' 451 | - 'rht_for_each_entry_from' 452 | - 'rht_for_each_entry_rcu' 453 | - 'rht_for_each_entry_rcu_from' 454 | - 'rht_for_each_entry_safe' 455 | - 'rht_for_each_from' 456 | - 'rht_for_each_rcu' 457 | - 'rht_for_each_rcu_from' 458 | - '__rq_for_each_bio' 459 | - 'rq_for_each_bvec' 460 | - 'rq_for_each_segment' 461 | - 'scsi_for_each_prot_sg' 462 | - 'scsi_for_each_sg' 463 | - 'sctp_for_each_hentry' 464 | - 'sctp_skb_for_each' 465 | - 'shdma_for_each_chan' 466 | - '__shost_for_each_device' 467 | - 'shost_for_each_device' 468 | - 'sk_for_each' 469 | - 'sk_for_each_bound' 470 | - 'sk_for_each_entry_offset_rcu' 471 | - 'sk_for_each_from' 472 | - 'sk_for_each_rcu' 473 | - 'sk_for_each_safe' 474 | - 'sk_nulls_for_each' 475 | - 'sk_nulls_for_each_from' 476 | - 'sk_nulls_for_each_rcu' 477 | - 'snd_array_for_each' 478 | - 'snd_pcm_group_for_each_entry' 479 | - 'snd_soc_dapm_widget_for_each_path' 480 | - 'snd_soc_dapm_widget_for_each_path_safe' 481 | - 'snd_soc_dapm_widget_for_each_sink_path' 482 | - 'snd_soc_dapm_widget_for_each_source_path' 483 | - 'tb_property_for_each' 484 | - 'tcf_exts_for_each_action' 485 | - 'udp_portaddr_for_each_entry' 486 | - 'udp_portaddr_for_each_entry_rcu' 487 | - 'usb_hub_for_each_child' 488 | - 'v4l2_device_for_each_subdev' 489 | - 'v4l2_m2m_for_each_dst_buf' 490 | - 'v4l2_m2m_for_each_dst_buf_safe' 491 | - 'v4l2_m2m_for_each_src_buf' 492 | - 'v4l2_m2m_for_each_src_buf_safe' 493 | - 'virtio_device_for_each_vq' 494 | - 'while_for_each_ftrace_op' 495 | - 'xa_for_each' 496 | - 'xa_for_each_marked' 497 | - 'xa_for_each_range' 498 | - 'xa_for_each_start' 499 | - 
'xas_for_each' 500 | - 'xas_for_each_conflict' 501 | - 'xas_for_each_marked' 502 | - 'xbc_array_for_each_value' 503 | - 'xbc_for_each_key_value' 504 | - 'xbc_node_for_each_array_value' 505 | - 'xbc_node_for_each_child' 506 | - 'xbc_node_for_each_key_value' 507 | - 'zorro_for_each_dev' 508 | 509 | #IncludeBlocks: Preserve # Unknown to clang-format-5.0 510 | IncludeCategories: 511 | - Regex: '.*' 512 | Priority: 1 513 | IncludeIsMainRegex: '(Test)?$' 514 | IndentCaseLabels: false 515 | #IndentPPDirectives: None # Unknown to clang-format-5.0 516 | IndentWidth: 8 517 | IndentWrappedFunctionNames: false 518 | JavaScriptQuotes: Leave 519 | JavaScriptWrapImports: true 520 | KeepEmptyLinesAtTheStartOfBlocks: false 521 | MacroBlockBegin: '' 522 | MacroBlockEnd: '' 523 | MaxEmptyLinesToKeep: 1 524 | NamespaceIndentation: None 525 | #ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0 526 | ObjCBlockIndentWidth: 8 527 | ObjCSpaceAfterProperty: true 528 | ObjCSpaceBeforeProtocolList: true 529 | 530 | # Taken from git's rules 531 | #PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0 532 | PenaltyBreakBeforeFirstCallParameter: 30 533 | PenaltyBreakComment: 10 534 | PenaltyBreakFirstLessLess: 0 535 | PenaltyBreakString: 10 536 | PenaltyExcessCharacter: 100 537 | PenaltyReturnTypeOnItsOwnLine: 60 538 | 539 | PointerAlignment: Right 540 | ReflowComments: false 541 | SortIncludes: false 542 | #SortUsingDeclarations: false # Unknown to clang-format-4.0 543 | SpaceAfterCStyleCast: false 544 | SpaceAfterTemplateKeyword: true 545 | SpaceBeforeAssignmentOperators: true 546 | #SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0 547 | #SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0 548 | SpaceBeforeParens: ControlStatements 549 | #SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0 550 | SpaceInEmptyParentheses: false 551 | SpacesBeforeTrailingComments: 1 552 | SpacesInAngles: false 553 | SpacesInContainerLiterals: false 554 | SpacesInCStyleCastParentheses: false 555 | SpacesInParentheses: false 556 | SpacesInSquareBrackets: false 557 | Standard: Cpp03 558 | TabWidth: 8 559 | UseTab: Always 560 | ... 
561 | -------------------------------------------------------------------------------- /lockc/src/runc.rs: -------------------------------------------------------------------------------- 1 | use std::{collections, fs, io, os::unix::fs::PermissionsExt, path::Path, string::String}; 2 | 3 | use fanotify::{ 4 | high_level::{Event, Fanotify, FanotifyMode, FanotifyResponse}, 5 | low_level::FAN_OPEN_EXEC_PERM, 6 | }; 7 | use k8s_openapi::api::core::v1; 8 | use lockc_common::ContainerPolicyLevel; 9 | use nix::poll::{poll, PollFd, PollFlags}; 10 | use procfs::{process::Process, ProcError}; 11 | use scopeguard::defer; 12 | use serde::Deserialize; 13 | use serde_json::Value; 14 | use thiserror::Error; 15 | use tokio::{ 16 | runtime::Builder, 17 | sync::{mpsc, oneshot}, 18 | }; 19 | use tracing::{debug, error, warn}; 20 | use walkdir::WalkDir; 21 | 22 | use crate::{communication::EbpfCommand, maps::MapOperationError}; 23 | 24 | // static LABEL_NAMESPACE: &str = "io.kubernetes.pod.namespace"; 25 | static LABEL_POLICY_ENFORCE: &str = "pod-security.kubernetes.io/enforce"; 26 | // static LABEL_POLICY_AUDIT: &str = "pod-security.kubernetes.io/audit"; 27 | // static LABEL_POLICY_WARN: &str = "pod-security.kubernetes.io/warn"; 28 | 29 | static ANNOTATION_CONTAINERD_LOG_DIRECTORY: &str = "io.kubernetes.cri.sandbox-log-directory"; 30 | static ANNOTATION_CONTAINERD_SANDBOX_ID: &str = "io.kubernetes.cri.sandbox-id"; 31 | 32 | /// Type of Kubernetes container determined by annotations. 33 | enum KubernetesContainerType { 34 | /// Containerd CRI, main container with own log directory. 35 | ContainerdMain, 36 | /// Containerd CRI, part of another sandbox which has its own log 37 | /// directory. 38 | ContainerdPartOfSandbox, 39 | /// Unknown type of Kubernetes annotations. 40 | Unknown, 41 | } 42 | 43 | fn kubernetes_type(annotations: &collections::HashMap<String, String>) -> KubernetesContainerType { 44 | if annotations.contains_key(ANNOTATION_CONTAINERD_LOG_DIRECTORY) { 45 | return KubernetesContainerType::ContainerdMain; 46 | } else if annotations.contains_key(ANNOTATION_CONTAINERD_SANDBOX_ID) { 47 | return KubernetesContainerType::ContainerdPartOfSandbox; 48 | } 49 | KubernetesContainerType::Unknown 50 | } 51 | 52 | /// Type of container by engine/runtime.
53 | enum ContainerType { 54 | Docker, 55 | KubernetesContainerd, 56 | Unknown, 57 | } 58 | 59 | #[derive(Debug, Deserialize)] 60 | #[serde(rename_all = "camelCase")] 61 | struct Mount { 62 | source: String, 63 | } 64 | 65 | #[derive(Debug, Deserialize)] 66 | #[serde(rename_all = "camelCase")] 67 | struct ContainerConfig { 68 | mounts: Vec<Mount>, 69 | annotations: Option<collections::HashMap<String, String>>, 70 | } 71 | 72 | #[derive(Error, Debug)] 73 | pub enum ContainerError { 74 | #[error(transparent)] 75 | Status(#[from] io::Error), 76 | 77 | #[error(transparent)] 78 | Json(#[from] serde_json::Error), 79 | 80 | #[error("could not get the file name of container log file")] 81 | LogFileName, 82 | 83 | #[error("could not parse k8s namespace")] 84 | K8sNamespace, 85 | } 86 | 87 | fn container_type_data<P: AsRef<Path>>( 88 | container_bundle: P, 89 | ) -> Result<(ContainerType, Option<String>), ContainerError> { 90 | let bundle_path = container_bundle.as_ref(); 91 | let config_path = bundle_path.join("config.json"); 92 | let f = fs::File::open(&config_path)?; 93 | let r = io::BufReader::new(f); 94 | 95 | let config: ContainerConfig = serde_json::from_reader(r)?; 96 | 97 | // Kubernetes 98 | if let Some(annotations) = config.annotations { 99 | debug!( 100 | bundle = ?bundle_path, 101 | config = ?config_path, 102 | "detected kubernetes container", 103 | ); 104 | match kubernetes_type(&annotations) { 105 | KubernetesContainerType::ContainerdMain => { 106 | // containerd doesn't expose k8s namespaces directly. They have 107 | // to be parsed from the log directory path, where the first 108 | // part of the filename is the namespace. 109 | let log_directory = &annotations[ANNOTATION_CONTAINERD_LOG_DIRECTORY]; 110 | debug!( 111 | log_directory = log_directory.as_str(), 112 | "detected k8s+containerd container", 113 | ); 114 | let log_path = std::path::PathBuf::from(log_directory); 115 | let file_name = log_path 116 | .file_name() 117 | .ok_or(ContainerError::LogFileName)? 118 | .to_str() 119 | .ok_or(ContainerError::LogFileName)?; 120 | let mut splitter = file_name.split('_'); 121 | let namespace = splitter 122 | .next() 123 | .ok_or(ContainerError::K8sNamespace)? 124 | .to_string(); 125 | 126 | return Ok((ContainerType::KubernetesContainerd, Some(namespace))); 127 | } 128 | KubernetesContainerType::ContainerdPartOfSandbox => { 129 | // When a container is running as a part of a previously created 130 | // pod, the log directory path has to be retrieved from the 131 | // sandbox container. 132 | let sandbox_id = &annotations[ANNOTATION_CONTAINERD_SANDBOX_ID]; 133 | debug!( 134 | sandbox_id = sandbox_id.as_str(), 135 | "detected k8s+containerd container", 136 | ); 137 | 138 | // Go one directory up from the current bundle. 139 | let mut ancestors = bundle_path.ancestors(); 140 | ancestors.next(); 141 | if let Some(v) = ancestors.next() { 142 | // Then go to sandbox_id directory (sandbox's bundle). 143 | let new_bundle = v.join(sandbox_id); 144 | return container_type_data(new_bundle); 145 | } 146 | } 147 | KubernetesContainerType::Unknown => {} 148 | } 149 | // TODO(vadorovsky): Support more Kubernetes CRI implementations. 150 | // They all come with their own annotations, so we will have to 151 | // handle more keys here.
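// Illustration (hypothetical values): for a pod "nginx" running in the
// "default" namespace, containerd sets the sandbox-log-directory annotation
// to something like "/var/log/pods/default_nginx_<pod-uid>". The file name
// of that path split on '_' therefore yields "default" as the Kubernetes
// namespace extracted above.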
152 | } 153 | 154 | // Docker 155 | for mount in config.mounts { 156 | let source: Vec<&str> = mount.source.split('/').collect(); 157 | if source.len() > 1 && source[source.len() - 1] == "hostname" { 158 | let config_v2 = str::replace(&mount.source, "hostname", "config.v2.json"); 159 | debug!( 160 | config_path = config_v2.as_str(), 161 | "detected docker container" 162 | ); 163 | return Ok((ContainerType::Docker, Some(config_v2))); 164 | } 165 | } 166 | 167 | Ok((ContainerType::Unknown, None)) 168 | } 169 | 170 | /// Finds the policy for the given Kubernetes namespace based on its labels. 171 | /// If the namespace has no enforce label, the default policy level is returned. 172 | async fn policy_kubernetes( 173 | default_policy_level: ContainerPolicyLevel, 174 | namespace: String, 175 | ) -> Result<ContainerPolicyLevel, kube::Error> { 176 | // Apply the privileged policy for kube-system containers immediately. 177 | // Otherwise the core k8s components (apiserver, scheduler) won't be able 178 | // to run. 179 | // If the container has no k8s namespace, apply the baseline policy. 180 | if namespace.as_str() == "kube-system" { 181 | return Ok(ContainerPolicyLevel::Privileged); 182 | } 183 | 184 | let client = kube::Client::try_default().await?; 185 | 186 | let namespaces: kube::api::Api<v1::Namespace> = kube::api::Api::all(client); 187 | let namespace = namespaces.get(&namespace).await?; 188 | 189 | match namespace.metadata.labels { 190 | Some(v) => match v.get(LABEL_POLICY_ENFORCE) { 191 | Some(v) => match v.as_str() { 192 | "restricted" => Ok(ContainerPolicyLevel::Restricted), 193 | "baseline" => Ok(ContainerPolicyLevel::Baseline), 194 | "privileged" => Ok(ContainerPolicyLevel::Privileged), 195 | _ => Ok(default_policy_level), 196 | }, 197 | None => Ok(default_policy_level), 198 | }, 199 | None => Ok(default_policy_level), 200 | } 201 | } 202 | 203 | #[derive(Error, Debug)] 204 | pub enum PolicyKubernetesSyncError { 205 | #[error(transparent)] 206 | IO(#[from] io::Error), 207 | 208 | #[error(transparent)] 209 | Kube(#[from] kube::Error), 210 | } 211 | 212 | /// Makes the `policy_kubernetes` function synchronous. We use it together with 213 | /// the poll(2) syscall, which is definitely not meant for multithreaded code. 214 | fn policy_kubernetes_sync( 215 | default_policy_level: ContainerPolicyLevel, 216 | namespace: String, 217 | ) -> Result<ContainerPolicyLevel, PolicyKubernetesSyncError> { 218 | match Builder::new_current_thread() 219 | .enable_all() 220 | .build()? 221 | .block_on(policy_kubernetes(default_policy_level, namespace)) 222 | { 223 | Ok(p) => Ok(p), 224 | Err(e) => Err(PolicyKubernetesSyncError::from(e)), 225 | } 226 | } 227 | 228 | fn policy_docker<P: AsRef<Path>>( 229 | default_policy_level: ContainerPolicyLevel, 230 | docker_bundle: P, 231 | ) -> Result<ContainerPolicyLevel, ContainerError> { 232 | let config_path = docker_bundle.as_ref(); 233 | let f = std::fs::File::open(config_path)?; 234 | let r = std::io::BufReader::new(f); 235 | 236 | let l: Value = serde_json::from_reader(r)?; 237 | 238 | let x = l["Config"]["Labels"]["org.lockc.policy"].as_str(); 239 | 240 | match x { 241 | Some(x) => match x { 242 | "restricted" => Ok(ContainerPolicyLevel::Restricted), 243 | "baseline" => Ok(ContainerPolicyLevel::Baseline), 244 | "privileged" => Ok(ContainerPolicyLevel::Privileged), 245 | _ => Ok(default_policy_level), 246 | }, 247 | None => Ok(default_policy_level), 248 | } 249 | } 250 | 251 | enum ShimOptParsingAction { 252 | NoPositional, 253 | Skip, 254 | ContainerId, 255 | } 256 | 257 | enum ShimContainerAction { 258 | Other, 259 | Delete, 260 | } 261 | 262 | /// Types of options (prepositioned by `--`).
263 | enum OptParsingAction { 264 | /// Option not followed by a positional argument. 265 | NoPositional, 266 | /// Option followed by a positional argument we don't want to store. 267 | Skip, 268 | /// --bundle option which we want to store. 269 | Bundle, 270 | } 271 | 272 | /// Types of positional arguments. 273 | enum ArgParsingAction { 274 | /// Argument we don't want to store. 275 | None, 276 | /// Container ID which we want to store. 277 | ContainerId, 278 | } 279 | 280 | /// Types of actions performed on the container, defined by a runc subcommand. 281 | enum ContainerAction { 282 | /// Types we don't explicitly handle, except for registering the process as 283 | /// containerized. 284 | Other, 285 | /// Action of creating the container, when we want to register the new 286 | /// container. 287 | Create, 288 | /// Action of deleting the container, when we want to remove the registered 289 | /// container. 290 | Delete, 291 | } 292 | 293 | pub struct RuncWatcher { 294 | bootstrap_rx: oneshot::Receiver<()>, 295 | ebpf_tx: mpsc::Sender<EbpfCommand>, 296 | fd: Fanotify, 297 | default_policy_level: ContainerPolicyLevel, 298 | } 299 | 300 | #[derive(Error, Debug)] 301 | pub enum HandleRuncEventError { 302 | #[error(transparent)] 303 | IO(#[from] io::Error), 304 | 305 | #[error(transparent)] 306 | Errno(#[from] nix::errno::Errno), 307 | 308 | #[error(transparent)] 309 | CommandSend(#[from] mpsc::error::SendError<EbpfCommand>), 310 | 311 | #[error(transparent)] 312 | CommandRecv(#[from] oneshot::error::RecvError), 313 | 314 | #[error(transparent)] 315 | BootstrapTryRecv(#[from] oneshot::error::TryRecvError), 316 | 317 | #[error(transparent)] 318 | Proc(#[from] ProcError), 319 | 320 | #[error(transparent)] 321 | Container(#[from] ContainerError), 322 | 323 | #[error(transparent)] 324 | PolicyKubernetes(#[from] PolicyKubernetesSyncError), 325 | 326 | #[error(transparent)] 327 | MapOperation(#[from] MapOperationError), 328 | 329 | #[error("container data missing")] 330 | ContainerData, 331 | 332 | #[error("container ID missing")] 333 | ContainerID, 334 | } 335 | 336 | impl RuncWatcher { 337 | pub fn new( 338 | bootstrap_rx: oneshot::Receiver<()>, 339 | ebpf_tx: mpsc::Sender<EbpfCommand>, 340 | default_policy_level: ContainerPolicyLevel, 341 | ) -> Result<Self, HandleRuncEventError> { 342 | let runc_paths = vec![ 343 | "/usr/bin/runc", 344 | "/usr/sbin/runc", 345 | "/usr/local/bin/runc", 346 | "/usr/local/sbin/runc", 347 | "/run/torcx/unpack/docker/bin/runc", 348 | "/host/usr/bin/runc", 349 | "/host/usr/sbin/runc", 350 | "/host/usr/local/bin/runc", 351 | "/host/usr/local/sbin/runc", 352 | "/host/run/torcx/unpack/docker/bin/runc", 353 | ]; 354 | let fd = Fanotify::new_with_blocking(FanotifyMode::CONTENT); 355 | 356 | for runc_path in runc_paths { 357 | debug!(path = runc_path, "checking runc"); 358 | let p = Path::new(&runc_path); 359 | if p.exists() { 360 | let metadata = p.metadata()?; 361 | 362 | // When the source for a host mount in Kubernetes does not 363 | // exist, an empty directory is created. Also, directories 364 | // have the executable bit set. Skip directories before any other 365 | // checks. 366 | if metadata.is_dir() { 367 | continue; 368 | } 369 | 370 | // If the file is executable.
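// Note: 0o111 masks the execute bits for owner, group and other. Executable
// runc binaries found here are marked below with FAN_OPEN_EXEC_PERM, a
// fanotify permission event: the kernel holds any exec of the marked binary
// until lockc replies with an allow/deny response, which is what lets the
// watcher inspect runc invocations before they run.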
371 |                 if metadata.permissions().mode() & 0o111 != 0 {
372 |                     debug!(path = runc_path, "executable runc binary found");
373 |                     fd.add_path(FAN_OPEN_EXEC_PERM, runc_path)?;
374 |                     debug!(path = runc_path, "added runc to fanotify");
375 |                 }
376 |             }
377 |         }
378 | 
379 |         let runc_lookup_paths = vec![
380 |             Path::new("/var/lib/rancher/k3s/data"),
381 |             Path::new("/host/var/lib/rancher/k3s/data"),
382 |         ];
383 |         for path in runc_lookup_paths {
384 |             debug!("looking for runc in: {}", path.display());
385 |             for entry in WalkDir::new(path) {
386 |                 match entry {
387 |                     Ok(entry) => {
388 |                         let path = entry.path();
389 |                         if path.is_file() && path.file_name().unwrap().to_string_lossy() == "runc" {
390 |                             debug!("executable runc binary found: {}", path.display());
391 |                             fd.add_path(FAN_OPEN_EXEC_PERM, path)?;
392 |                             debug!("added runc to fanotify: {}", path.display());
393 |                         }
394 |                     }
395 |                     Err(e) => {
396 |                         warn!(
397 |                             error = e.to_string().as_str(),
398 |                             "could not process the walkdir entry"
399 |                         );
400 |                     }
401 |                 }
402 |             }
403 |         }
404 | 
405 |         Ok(RuncWatcher {
406 |             bootstrap_rx,
407 |             ebpf_tx,
408 |             fd,
409 |             default_policy_level,
410 |         })
411 |     }
412 | 
413 |     async fn add_container(
414 |         &self,
415 |         container_id: String,
416 |         pid: i32,
417 |         policy_level: ContainerPolicyLevel,
418 |     ) -> Result<(), HandleRuncEventError> {
419 |         let (responder_tx, responder_rx) = oneshot::channel();
420 | 
421 |         self.ebpf_tx
422 |             .send(EbpfCommand::AddContainer {
423 |                 container_id,
424 |                 pid,
425 |                 policy_level,
426 |                 responder_tx,
427 |             })
428 |             .await?;
429 |         responder_rx.await??;
430 | 
431 |         Ok(())
432 |     }
433 | 
434 |     fn add_container_sync(
435 |         &self,
436 |         container_id: String,
437 |         pid: i32,
438 |         policy_level: ContainerPolicyLevel,
439 |     ) -> Result<(), HandleRuncEventError> {
440 |         debug!(container_id = container_id.as_str(), "adding container");
441 | 
442 |         Builder::new_current_thread()
443 |             .build()?
444 |             .block_on(self.add_container(container_id, pid, policy_level))
445 |     }
446 | 
447 |     async fn delete_container(&self, container_id: String) -> Result<(), HandleRuncEventError> {
448 |         let (responder_tx, responder_rx) = oneshot::channel();
449 | 
450 |         self.ebpf_tx
451 |             .send(EbpfCommand::DeleteContainer {
452 |                 container_id,
453 |                 responder_tx,
454 |             })
455 |             .await?;
456 |         responder_rx.await??;
457 | 
458 |         Ok(())
459 |     }
460 | 
461 |     fn delete_container_sync(&self, container_id: String) -> Result<(), HandleRuncEventError> {
462 |         debug!(container_id = container_id.as_str(), "deleting container");
463 | 
464 |         Builder::new_current_thread()
465 |             .build()?
466 |             .block_on(self.delete_container(container_id))
467 |     }
468 | 
469 |     async fn add_process(
470 |         &self,
471 |         container_id: String,
472 |         pid: i32,
473 |     ) -> Result<(), HandleRuncEventError> {
474 |         let (responder_tx, responder_rx) = oneshot::channel();
475 | 
476 |         self.ebpf_tx
477 |             .send(EbpfCommand::AddProcess {
478 |                 container_id,
479 |                 pid,
480 |                 responder_tx,
481 |             })
482 |             .await?;
483 |         responder_rx.await??;
484 | 
485 |         Ok(())
486 |     }
487 | 
488 |     fn add_process_sync(&self, container_id: String, pid: i32) -> Result<(), HandleRuncEventError> {
489 |         debug!(
490 |             container = container_id.as_str(),
491 |             pid = pid,
492 |             "adding process"
493 |         );
494 | 
495 |         Builder::new_current_thread()
496 |             .build()?
497 |             .block_on(self.add_process(container_id, pid))
498 |     }
499 | 
500 |     fn handle_containerd_shim_event(
501 |         &self,
502 |         containerd_shim_process: Process,
503 |     ) -> Result<(), HandleRuncEventError> {
504 |         let mut opt_parsing_action = ShimOptParsingAction::NoPositional;
505 |         let mut container_action = ShimContainerAction::Other;
506 | 
507 |         let mut container_id_o: Option<String> = None;
508 | 
509 |         for arg in containerd_shim_process.cmdline()? {
510 |             debug!(argument = arg.as_str(), "containerd-shim");
511 |             match arg.as_str() {
512 |                 "-address" => opt_parsing_action = ShimOptParsingAction::Skip,
513 |                 "-bundle" => opt_parsing_action = ShimOptParsingAction::Skip,
514 |                 "-id" => opt_parsing_action = ShimOptParsingAction::ContainerId,
515 |                 "-namespace" => opt_parsing_action = ShimOptParsingAction::Skip,
516 |                 "-publish-binary" => opt_parsing_action = ShimOptParsingAction::Skip,
517 |                 _ => {}
518 |             }
519 |             if arg.starts_with('-') {
520 |                 continue;
521 |             }
522 | 
523 |             match opt_parsing_action {
524 |                 ShimOptParsingAction::NoPositional => {}
525 |                 ShimOptParsingAction::Skip => {
526 |                     opt_parsing_action = ShimOptParsingAction::NoPositional;
527 |                     continue;
528 |                 }
529 |                 ShimOptParsingAction::ContainerId => {
530 |                     container_id_o = Some(arg);
531 |                     opt_parsing_action = ShimOptParsingAction::NoPositional;
532 |                     continue;
533 |                 }
534 |             }
535 | 
536 |             if arg.as_str() == "delete" {
537 |                 container_action = ShimContainerAction::Delete
538 |             }
539 |         }
540 | 
541 |         match container_action {
542 |             ShimContainerAction::Other => {}
543 |             ShimContainerAction::Delete => {
544 |                 let container_id = container_id_o.ok_or(HandleRuncEventError::ContainerID)?;
545 |                 debug!(container = container_id.as_str(), "deleting container");
546 | 
547 |                 self.delete_container_sync(container_id)?;
548 |             }
549 |         }
550 | 
551 |         Ok(())
552 |     }
553 | 
554 |     fn handle_runc_event(&self, runc_process: Process) -> Result<(), HandleRuncEventError> {
555 |         let mut opt_parsing_action = OptParsingAction::NoPositional;
556 |         let mut arg_parsing_action = ArgParsingAction::None;
557 |         let mut container_action = ContainerAction::Other;
558 | 
559 |         let mut container_bundle_o: Option<String> = None;
560 |         let mut container_id_o: Option<String> = None;
561 | 
562 |         // for arg in cmdline.split(CMDLINE_DELIMITER) {
563 |         for arg in runc_process.cmdline()? {
564 |             debug!(argument = arg.as_str(), "runc");
565 |             match arg.as_str() {
566 |                 // Options which are followed by a positional argument we don't
567 |                 // want to store.
568 |                 "--log" => opt_parsing_action = OptParsingAction::Skip,
569 |                 "--log-format" => opt_parsing_action = OptParsingAction::Skip,
570 |                 "--pid-file" => opt_parsing_action = OptParsingAction::Skip,
571 |                 "--process" => opt_parsing_action = OptParsingAction::Skip,
572 |                 "--console-socket" => opt_parsing_action = OptParsingAction::Skip,
573 |                 "--root" => opt_parsing_action = OptParsingAction::Skip,
574 |                 // We want to explicitly store the value of the --bundle
575 |                 // option.
576 |                 "--bundle" => opt_parsing_action = OptParsingAction::Bundle,
577 |                 _ => {}
578 |             }
579 |             if arg.starts_with('-') {
580 |                 // After handling the option, start parsing the next argument.
581 |                 continue;
582 |             }
583 | 
584 |             match opt_parsing_action {
585 |                 OptParsingAction::NoPositional => {}
586 |                 OptParsingAction::Skip => {
587 |                     opt_parsing_action = OptParsingAction::NoPositional;
588 |                     continue;
589 |                 }
590 |                 OptParsingAction::Bundle => {
591 |                     container_bundle_o = Some(arg);
592 |                     opt_parsing_action = OptParsingAction::NoPositional;
593 |                     continue;
594 |                 }
595 |             }
596 |             match arg_parsing_action {
597 |                 ArgParsingAction::None => {}
598 |                 ArgParsingAction::ContainerId => {
599 |                     container_id_o = Some(arg);
600 |                     arg_parsing_action = ArgParsingAction::None;
601 |                     continue;
602 |                 }
603 |             }
604 | 
605 |             match arg.as_str() {
606 |                 "checkpoint" => arg_parsing_action = ArgParsingAction::ContainerId,
607 |                 "create" => {
608 |                     arg_parsing_action = ArgParsingAction::ContainerId;
609 |                     container_action = ContainerAction::Create;
610 |                 }
611 |                 "delete" => {
612 |                     arg_parsing_action = ArgParsingAction::ContainerId;
613 |                     container_action = ContainerAction::Delete;
614 |                 }
615 |                 "events" => arg_parsing_action = ArgParsingAction::ContainerId,
616 |                 "exec" => arg_parsing_action = ArgParsingAction::ContainerId,
617 |                 "kill" => arg_parsing_action = ArgParsingAction::ContainerId,
618 |                 "pause" => arg_parsing_action = ArgParsingAction::ContainerId,
619 |                 "ps" => arg_parsing_action = ArgParsingAction::ContainerId,
620 |                 "restore" => arg_parsing_action = ArgParsingAction::ContainerId,
621 |                 "resume" => arg_parsing_action = ArgParsingAction::ContainerId,
622 |                 "run" => arg_parsing_action = ArgParsingAction::ContainerId,
623 |                 "start" => {
624 |                     arg_parsing_action = ArgParsingAction::ContainerId;
625 |                 }
626 |                 "state" => arg_parsing_action = ArgParsingAction::ContainerId,
627 |                 "update" => arg_parsing_action = ArgParsingAction::ContainerId,
628 |                 _ => {}
629 |             }
630 |         }
631 | 
632 |         match container_action {
633 |             ContainerAction::Other => {
634 |                 debug!("other container action");
635 |                 if let Some(container_id) = container_id_o {
636 |                     self.add_process_sync(container_id, runc_process.pid)?;
637 |                 }
638 |             }
639 |             ContainerAction::Create => {
640 |                 let container_id = container_id_o.ok_or(HandleRuncEventError::ContainerID)?;
641 |                 let container_bundle = match container_bundle_o {
642 |                     Some(v) => std::path::PathBuf::from(v),
643 |                     None => std::env::current_dir()?,
644 |                 };
645 | 
646 |                 // let policy;
647 |                 let (container_type, container_data) = container_type_data(container_bundle)?;
648 |                 let policy: ContainerPolicyLevel = match container_type {
649 |                     ContainerType::Docker => policy_docker(
650 |                         self.default_policy_level,
651 |                         container_data.ok_or(HandleRuncEventError::ContainerData)?,
652 |                     )?,
653 |                     ContainerType::KubernetesContainerd => policy_kubernetes_sync(
654 |                         self.default_policy_level,
655 |                         container_data.ok_or(HandleRuncEventError::ContainerData)?,
656 |                     )?,
657 |                     ContainerType::Unknown => ContainerPolicyLevel::Baseline,
658 |                 };
659 | 
660 |                 self.add_container_sync(container_id, runc_process.pid, policy)?;
661 |             }
662 |             ContainerAction::Delete => {
663 |                 let container_id = container_id_o.ok_or(HandleRuncEventError::ContainerID)?;
664 |                 self.delete_container_sync(container_id)?;
665 |             }
666 |         }
667 | 
668 |         Ok(())
669 |     }
670 | 
671 |     fn handle_event(&self, event: Event) -> Result<(), HandleRuncEventError> {
672 |         // Let the process execute again.
673 |         defer!(self.fd.send_response(event.fd, FanotifyResponse::Allow));
674 | 
675 |         debug!(
676 |             path = event.path.as_str(),
677 |             pid = event.pid,
678 |             "received fanotify event"
679 |         );
680 | 
681 |         let p = Process::new(event.pid)?;
682 | 
683 |         // Usually fanotify receives two notifications about executing runc:
684 |         // 1) from containerd-shim (or similar)
685 |         // 2) from runc
686 |         // Dispatch on the process name, so that runc and containerd-shim
687 |         // arguments are parsed by their respective handlers.
688 |         let comm = p.stat()?.comm;
689 |         match comm.as_str() {
690 |             "runc" => {
691 |                 self.handle_runc_event(p)?;
692 |             }
693 |             "containerd-shim" => {
694 |                 self.handle_containerd_shim_event(p)?;
695 |             }
696 |             _ => {}
697 |         }
698 | 
699 |         Ok(())
700 |     }
701 | 
702 |     pub fn work_loop(&mut self) -> Result<(), HandleRuncEventError> {
703 |         // Wait for the bootstrap request from the main, asynchronous part of
704 |         // lockc.
705 |         loop {
706 |             match self.bootstrap_rx.try_recv() {
707 |                 Ok(_) => {
708 |                     break;
709 |                 }
710 |                 Err(oneshot::error::TryRecvError::Empty) => {
711 |                     // Keep waiting.
712 |                 }
713 |                 Err(e) => return Err(HandleRuncEventError::from(e)),
714 |             }
715 |         }
716 | 
717 |         debug!("starting work loop");
718 | 
719 |         let mut fds = [PollFd::new(self.fd.as_raw_fd(), PollFlags::POLLIN)];
720 |         loop {
721 |             let poll_num = poll(&mut fds, -1)?;
722 |             if poll_num > 0 {
723 |                 for event in self.fd.read_event() {
724 |                     match self.handle_event(event) {
725 |                         Ok(_) => {}
726 |                         Err(e) => error!(error = e.to_string().as_str(), "failed to handle event"),
727 |                     };
728 |                 }
729 |             } else {
730 |                 debug!("poll_num <= 0!");
731 |                 break;
732 |             }
733 |         }
734 | 
735 |         Ok(())
736 |     }
737 | }
738 | 
--------------------------------------------------------------------------------
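Note: the watcher above is only one half of the picture; the channels it holds are created and serviced by the asynchronous part of lockc elsewhere in the crate. As a rough, non-authoritative sketch of how it might be wired up — assuming the code lives inside the lockc crate so that RuncWatcher, EbpfCommand, ContainerPolicyLevel and HandleRuncEventError are in scope, and with an illustrative helper name and channel capacity that are not taken from the lockc sources — the setup could look like this:

    use tokio::sync::{mpsc, oneshot};

    // Illustrative only: function name, channel capacity and error handling
    // are assumptions, not part of the lockc sources.
    fn spawn_runc_watcher(
        default_policy_level: ContainerPolicyLevel,
    ) -> Result<(oneshot::Sender<()>, mpsc::Receiver<EbpfCommand>), HandleRuncEventError> {
        // The watcher waits on bootstrap_rx before handling fanotify events;
        // the async side signals it once the eBPF programs and maps are loaded.
        let (bootstrap_tx, bootstrap_rx) = oneshot::channel();
        // Commands for the eBPF-handling task flow through this channel.
        let (ebpf_tx, ebpf_rx) = mpsc::channel::<EbpfCommand>(32);

        let mut watcher = RuncWatcher::new(bootstrap_rx, ebpf_tx, default_policy_level)?;

        // work_loop() blocks on poll(2), so it gets its own OS thread instead
        // of running inside the tokio runtime.
        std::thread::spawn(move || {
            if let Err(e) = watcher.work_loop() {
                eprintln!("runc watcher failed: {e}");
            }
        });

        Ok((bootstrap_tx, ebpf_rx))
    }

The caller would then drain ebpf_rx from an async task that applies the commands to the eBPF maps, and send on bootstrap_tx once loading has finished so that work_loop() leaves its try_recv() busy-wait and starts reading fanotify events.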