├── .gitignore
├── peserver
├── src
│ ├── lib.rs
│ ├── api.rs
│ ├── gh.rs
│ ├── testclient.rs
│ └── util.rs
└── Cargo.toml
├── attic
├── makevsockhello.sh
├── makepivotrootfs.sh
├── init3
├── init2
├── makesqfstest.sh
├── debug.sh
├── makesquashfs.sh
├── analyzetrace.py
├── makepmemsized.py
├── qemumyinitdebug.sh
├── make_strace_relative_time.py
├── analyzeboottimes.py
├── analyzesqfs.py
├── cloudhypervisormyinit.sh
├── cloudhypervisortracing.sh
├── pivot_rootfs.c
├── init1
├── vsockhello.c
├── mallocstacks.py
├── config-rootless.json
├── oci-runtime-spec-defaults.json
├── containerbuildquestion.sh
├── config.json
├── cloudhypervisorapi.sh
└── chperf.py
├── peerofs
├── fuzz
│ ├── .gitignore
│ ├── Cargo.toml
│ └── fuzz_targets
│ │ ├── fuzz_builder.rs
│ │ └── fuzz_decompress_lz4.rs
├── src
│ ├── lib.rs
│ ├── decompressor.rs
│ └── dump.rs
├── scripts
│ ├── fuzz-builder.sh
│ └── fuzz-decompress-lz4.sh
└── Cargo.toml
├── pearchive
├── fuzz
│ ├── .gitignore
│ ├── fuzz_targets
│ │ └── fuzz_target_1.rs
│ └── Cargo.toml
├── scripts
│ ├── fuzz.sh
│ └── dirdigest.sh
├── test.sh
├── Cargo.toml
└── src
│ ├── open.rs
│ └── main.rs
├── quadlets
├── caddy-files.volume
├── caddy-data.volume
├── caddy.socket
├── caddy-dev.container
├── caddy.container
├── pe-server-gh.container
├── pe-server-lb.container
├── pe-image-service.container
└── pe-server-worker.container
├── peimage
├── src
│ ├── lib.rs
│ ├── bin
│ │ ├── squasherofs.rs
│ │ ├── squashbin.rs
│ │ ├── squashpodman.rs
│ │ ├── squashoci.rs
│ │ └── tardiff.rs
│ ├── mkfs.rs
│ ├── podman.rs
│ └── index.rs
├── go.mod
├── tartest.sh
├── Cargo.toml
└── go.sum
├── containers
├── pe-caddy
├── pe-caddy-dev
├── pe-server-lb
├── pe-server-gh
├── pe-image-service
└── pe-server-worker
├── SECURITY.md
├── peoci
├── src
│ ├── lib.rs
│ ├── ocidir.rs
│ ├── compression.rs
│ └── bin
│ │ └── ocidist.rs
└── Cargo.toml
├── peinit
├── src
│ └── notabin.rs
└── Cargo.toml
├── perunner
├── fetch-seccomp.sh
├── Cargo.toml
└── src
│ ├── blocktest.rs
│ └── iofile.rs
├── pefrontend
├── nginx.sh
├── .gitignore
├── src
│ ├── petoml.ts
│ ├── style.css
│ ├── util.ts
│ ├── api.ts
│ ├── urlstate.ts
│ └── pearchive.ts
├── README.md
├── nginx.conf
├── tsconfig.json
├── vite.config.ts
├── package.json
├── privacy.html
└── index.html
├── README.md
├── waitid_timeout
└── Cargo.toml
├── scripts
├── setupquadlets.sh
├── build-containers.sh
├── devserver.sh
├── build.sh
├── build-initramfs.sh
└── inspecttar.py
├── caddy
├── dev.caddyfile
└── prod.caddyfile
├── pevub
└── Cargo.toml
├── pegh
├── Cargo.toml
└── src
│ └── main.rs
├── LICENSE
├── peimage-service
├── Cargo.toml
└── src
│ ├── bin
│ └── testclient.rs
│ └── lib.rs
├── initramfs.file
└── Cargo.toml
/.gitignore:
--------------------------------------------------------------------------------
1 | pefrontend/node_modules
2 | target
3 |
--------------------------------------------------------------------------------
/peserver/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod api;
2 | pub mod util;
3 |
--------------------------------------------------------------------------------
/attic/makevsockhello.sh:
--------------------------------------------------------------------------------
1 | gcc -Wall -static -o vsockhello vsockhello.c
2 |
--------------------------------------------------------------------------------
/peerofs/fuzz/.gitignore:
--------------------------------------------------------------------------------
1 | target
2 | corpus
3 | artifacts
4 | coverage
5 |
--------------------------------------------------------------------------------
/attic/makepivotrootfs.sh:
--------------------------------------------------------------------------------
1 | gcc -Wall -static -o pivot_rootfs pivot_rootfs.c
2 |
--------------------------------------------------------------------------------
/pearchive/fuzz/.gitignore:
--------------------------------------------------------------------------------
1 | target
2 | corpus
3 | artifacts
4 | coverage
5 |
--------------------------------------------------------------------------------
/peerofs/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod build;
2 | pub mod decompressor;
3 | pub mod disk;
4 |
--------------------------------------------------------------------------------
/quadlets/caddy-files.volume:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Caddy public website files
3 |
--------------------------------------------------------------------------------
/pearchive/scripts/fuzz.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cargo +nightly fuzz run fuzz_target_1
4 |
--------------------------------------------------------------------------------
/attic/init3:
--------------------------------------------------------------------------------
1 | #!/bin/busybox sh
2 |
3 | crun run --bundle /run/bundle containerid-1234
4 |
--------------------------------------------------------------------------------
/peerofs/scripts/fuzz-builder.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cargo +nightly fuzz run fuzz_builder
4 |
--------------------------------------------------------------------------------
/quadlets/caddy-data.volume:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Caddy persistent data files like certs
3 |
--------------------------------------------------------------------------------
/peimage/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod index;
2 | pub mod mkfs;
3 | pub mod podman;
4 | pub mod squash;
5 |
--------------------------------------------------------------------------------
/containers/pe-caddy:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/caddy:2.9.1
2 |
3 | COPY caddy/prod.caddyfile /etc/caddy/Caddyfile
4 |
--------------------------------------------------------------------------------
/containers/pe-caddy-dev:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/caddy:2.9.1
2 |
3 | COPY caddy/dev.caddyfile /etc/caddy/Caddyfile
4 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | To report a security vulnerability or concern, please email aconz2 at gmail with the subject [Program Explorer Security]
2 |
--------------------------------------------------------------------------------
/attic/init2:
--------------------------------------------------------------------------------
1 | #!/bin/busybox sh
2 |
3 | busybox mount --rbind / /abc
4 | cd /abc
5 | busybox mount --move . /
6 | strace busybox chroot . /bin/init3
7 |
--------------------------------------------------------------------------------
/peoci/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod blobcache;
2 | pub mod compression;
3 | pub mod ocidir;
4 | pub mod ocidist;
5 | pub mod ocidist_cache;
6 | pub mod spec;
7 |
--------------------------------------------------------------------------------
/containers/pe-server-lb:
--------------------------------------------------------------------------------
1 | FROM scratch
2 |
3 | COPY target/x86_64-unknown-linux-musl/release/lb /bin/pe-lb
4 |
5 | ENV RUST_LOG=info
6 |
7 | ENTRYPOINT ["/bin/pe-lb"]
8 |
--------------------------------------------------------------------------------
/peinit/src/notabin.rs:
--------------------------------------------------------------------------------
1 | fn main() {
2 | println!("you shouldn't be running this because it wants to shut your computer down");
3 | std::process::exit(1);
4 | }
5 |
--------------------------------------------------------------------------------
/perunner/fetch-seccomp.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | wget https://raw.githubusercontent.com/containers/common/refs/heads/main/pkg/seccomp/seccomp.json
6 |
--------------------------------------------------------------------------------
/containers/pe-server-gh:
--------------------------------------------------------------------------------
1 | FROM scratch
2 |
3 | COPY target/x86_64-unknown-linux-musl/release/ghserver /bin/ghserver
4 |
5 | ENV RUST_LOG=info
6 |
7 | ENTRYPOINT ["/bin/ghserver"]
8 |
9 |
--------------------------------------------------------------------------------
/pefrontend/nginx.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | podman run --net=host --rm -v $(realpath nginx.conf):/etc/nginx/nginx.conf:z,ro -p 8000:8000 -p 6188:6188 -p 5173:5173 docker.io/library/nginx
4 |
--------------------------------------------------------------------------------
/attic/makesqfstest.sh:
--------------------------------------------------------------------------------
1 | extra="-fsanitize=address"
2 | gcc $extra -g -Wall -o sqfstest -lsquashfs sqfstest.c
3 |
4 | # ls sqfstest.c makesqfstest.sh | entr -c bash -c 'bash makesqfstest.sh && ./sqfstest'
5 |
--------------------------------------------------------------------------------
/containers/pe-image-service:
--------------------------------------------------------------------------------
1 | FROM scratch
2 |
3 | COPY target/x86_64-unknown-linux-musl/release/peimage-service /bin/peimage-service
4 |
5 | ENV RUST_LOG=info
6 |
7 | ENTRYPOINT ["/bin/peimage-service"]
8 |
9 |
--------------------------------------------------------------------------------
/quadlets/caddy.socket:
--------------------------------------------------------------------------------
1 | [Socket]
2 | ListenStream=[::]:80
3 | ListenStream=[::]:443
4 | #ListenStream=[::]:8000
5 | #ListenStream=[::]:4430
6 | BindIPv6Only=both
7 |
8 | [Install]
9 | WantedBy=sockets.target
10 |
--------------------------------------------------------------------------------
/pearchive/fuzz/fuzz_targets/fuzz_target_1.rs:
--------------------------------------------------------------------------------
1 | #![no_main]
2 |
3 | use libfuzzer_sys::fuzz_target;
4 | use pearchive::unpack_to_hashmap;
5 |
6 | fuzz_target!(|data: &[u8]| {
7 | let _ = unpack_to_hashmap(data);
8 | });
9 |
--------------------------------------------------------------------------------
/peerofs/scripts/fuzz-decompress-lz4.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # https://llvm.org/docs/LibFuzzer.html
4 | # a small len_control value makes it try long input lengths sooner (0 disables length control entirely)
5 | cargo +nightly fuzz run fuzz_decompress_lz4 -- -max_len=10000 -len_control=1
6 |
--------------------------------------------------------------------------------
/pearchive/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | cargo build
6 |
7 | cargo run pack . /tmp/pearchive.pear
8 | rm -rf /tmp/dest
9 | mkdir /tmp/dest
10 | cargo run unpack /tmp/pearchive.pear /tmp/dest
11 |
12 | ./scripts/dirdigest.sh $(pwd) /tmp/dest
13 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Program Explorer is a playground for easily running containers from your browser and a proving ground for the underlying container runtime.
2 |
3 | There is an architectural diagram in [./docs/arch.svg](./docs/arch.svg) that might provide some overview. More docs to come.
4 |
--------------------------------------------------------------------------------
/attic/debug.sh:
--------------------------------------------------------------------------------
1 | # lldb -o 'gdb-remote localhost:1234' -o 'break set -H -r ".*pivot_root.*"' ~/Repos/linux/vmlinux
2 | # gdb -ex 'target remote localhost:1234' ~/Repos/linux/vmlinux -ex 'hbreak namespace.c:4197'
3 |
4 | lldb -o 'gdb-remote localhost:1234' -o 'break set -H -f namespace.c -l 4197' ~/Repos/linux/vmlinux
5 |
6 |
--------------------------------------------------------------------------------
/pearchive/scripts/dirdigest.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function inspectdir() {
4 | cat <(cd $1 && find -type f -exec sha256sum '{}' '+' | sort) <(cd $1 && find -type d | sort)
5 | }
6 |
7 | function hashdir() {
8 | inspectdir $1 | sha256sum
9 | }
10 |
11 | for dir in "$@"; do
12 | h=$(hashdir "$dir")
13 | echo "$h $dir"
14 | done
15 |
--------------------------------------------------------------------------------
/pefrontend/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 |
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 |
15 | # Editor directories and files
16 | .vscode/*
17 | !.vscode/extensions.json
18 | .idea
19 | .DS_Store
20 | *.suo
21 | *.ntvs*
22 | *.njsproj
23 | *.sln
24 | *.sw?
25 |
--------------------------------------------------------------------------------
/quadlets/caddy-dev.container:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=caddy
3 |
4 | [Service]
5 | Restart=always
6 | RuntimeDirectory=program-explorer
7 | RuntimeDirectoryPreserve=yes
8 |
9 | [Container]
10 | Image=localhost/pe-caddy-dev:latest
11 | Network=host
12 | Volume=${RUNTIME_DIRECTORY}:${RUNTIME_DIRECTORY}:z
13 | Environment=RUNTIME_DIRECTORY=${RUNTIME_DIRECTORY}
14 |
15 | [Install]
16 | WantedBy=default.target
17 |
--------------------------------------------------------------------------------
/attic/makesquashfs.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | version=14.1.0
4 | #version=13.3.0
5 | #sqfstar=sqfstar
6 | sqfstar=~/Repos/squashfs-tools/squashfs-tools/sqfstar
7 | outfile=gcc-${version}.sqfs
8 |
9 | rm -f $outfile
10 |
11 | id=$(podman create docker.io/library/gcc:${version})
12 | trap "podman rm $id" EXIT
13 |
14 | podman export "$id" | $sqfstar -uid-gid-offset 1000 -comp zstd $outfile
15 |
16 | python makepmemsized.py $outfile
17 |
--------------------------------------------------------------------------------
/pearchive/fuzz/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pearchive-fuzz"
3 | version = "0.0.0"
4 | publish = false
5 | edition = "2021"
6 |
7 | [package.metadata]
8 | cargo-fuzz = true
9 |
10 | [dependencies]
11 | libfuzzer-sys = { workspace = true }
12 | pearchive = { workspace = true }
13 |
14 | [[bin]]
15 | name = "fuzz_target_1"
16 | path = "fuzz_targets/fuzz_target_1.rs"
17 | test = false
18 | doc = false
19 | bench = false
20 |
--------------------------------------------------------------------------------
/waitid_timeout/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "waitid_timeout"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [lib]
7 | name = "waitid_timeout"
8 | path = "src/lib.rs"
9 | doctest = false
10 |
11 | [dependencies]
12 | libc = { workspace = true }
13 | mio = { workspace = true, features = ["os-ext"] }
14 | mio-pidfd = { workspace = true }
15 | # syscalls = { version = "0.6.18", default-features = false, features = ["std"] }
16 |
17 | [lints]
18 | workspace = true
19 |
--------------------------------------------------------------------------------
/pefrontend/src/petoml.ts:
--------------------------------------------------------------------------------
1 | import {parse} from 'toml';
2 |
3 | export type PeToml = {
4 | env: string | null,
5 | cmd: string | null,
6 | image: string | null,
7 | stdin: string | null,
8 | };
9 |
10 | export function parsePeToml(s: string): PeToml {
11 | let parsed = parse(s);
12 | return {
13 | env: parsed.env ?? null,
14 | cmd: parsed.cmd ?? null,
15 | image: parsed.image ?? null,
16 | stdin: parsed.stdin ?? null,
17 | };
18 | }
19 |
--------------------------------------------------------------------------------
/pefrontend/README.md:
--------------------------------------------------------------------------------
1 | # `create-preact`
2 |
3 |
4 |
5 |
6 |
7 | Get started using Preact and Vite!
8 |
9 | ## Getting Started
10 |
11 | - `npm run dev` - Starts a dev server at http://localhost:5173/
12 |
13 | - `npm run build` - Builds for production, emitting to `dist/`
14 |
15 | - `npm run preview` - Starts a server at http://localhost:4173/ to test production build locally
16 |
--------------------------------------------------------------------------------
/attic/analyzetrace.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import json
3 | with open(sys.argv[1]) as fh:
4 | j = json.load(fh)
5 |
6 | out = []
7 | for group, events in j['events'].items():
8 | for event in events:
9 | name = event['event']
10 | duration = event['end_timestamp']['nanos'] - event['timestamp']['nanos']
11 | key = f'{group} {name}'
12 | out.append((key, duration / 1000 / 1000))
13 |
14 | out.sort(key=lambda x: x[1])
15 | for k, v in out:
16 | print(f'{k:40s} {v:0.2f}ms')
17 |
--------------------------------------------------------------------------------
/peerofs/src/decompressor.rs:
--------------------------------------------------------------------------------
1 | pub trait Decompressor {
2 |     fn decompress(&self, _src: &[u8], _dst: &mut [u8], _original_size: usize) -> Option<usize> {
3 | None
4 | }
5 | }
6 |
7 | #[allow(dead_code)]
8 | pub struct Lz4Decompressor;
9 |
10 | impl Decompressor for Lz4Decompressor {
11 | #[cfg(feature = "lz4")]
12 |     fn decompress(&self, src: &[u8], dst: &mut [u8], original_size: usize) -> Option<usize> {
13 | lzzzz::lz4::decompress_partial(src, dst, original_size).ok()
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/pefrontend/nginx.conf:
--------------------------------------------------------------------------------
1 | events {}
2 | http {
3 | server {
4 | listen 8000;
5 | server_name localhost;
6 | location / { # vite
7 | proxy_pass http://localhost:5173;
8 | # make websockets work for hotreload
9 | proxy_http_version 1.1;
10 | proxy_set_header Upgrade $http_upgrade;
11 | proxy_set_header Connection "upgrade";
12 | }
13 | location /api {
14 | proxy_pass http://localhost:6188;
15 | }
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/pefrontend/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES2020",
4 | "module": "ESNext",
5 | "moduleResolution": "bundler",
6 | "noEmit": true,
7 | "allowJs": true,
8 | "checkJs": true,
9 |
10 | /* Preact Config */
11 | "jsx": "react-jsx",
12 | "jsxImportSource": "preact",
13 | "skipLibCheck": true,
14 | "paths": {
15 | "react": ["./node_modules/preact/compat/"],
16 | "react-dom": ["./node_modules/preact/compat/"]
17 | }
18 | },
19 | "include": ["node_modules/vite/client.d.ts", "src/*.tsx"]
20 | }
21 |
--------------------------------------------------------------------------------
/quadlets/caddy.container:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=caddy
3 |
4 | [Service]
5 | Restart=always
6 | RuntimeDirectory=program-explorer
7 | RuntimeDirectoryPreserve=yes
8 |
9 | [Container]
10 | Image=localhost/pe-caddy:latest
11 | Network=host
12 | Volume=${RUNTIME_DIRECTORY}:${RUNTIME_DIRECTORY}:z
13 | Volume=caddy-files.volume:/www
14 | # NOTE: the docker.io/library/caddy dockerfile sets the env var XDG_DATA_HOME to /data
15 | Volume=caddy-data.volume:/data
16 | Environment=RUNTIME_DIRECTORY=${RUNTIME_DIRECTORY}
17 |
18 | [Install]
19 | WantedBy=default.target
20 |
--------------------------------------------------------------------------------
/pefrontend/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vite';
2 | import { dirname , resolve } from 'node:path'
3 | import { fileURLToPath } from 'node:url'
4 | import preact from '@preact/preset-vite';
5 |
6 | // https://vitejs.dev/config/
7 | export default defineConfig({
8 | plugins: [preact()],
9 | build: {
10 | rollupOptions: {
11 | input: {
12 | main: resolve(__dirname , 'index.html'),
13 | privacy: resolve(__dirname , 'privacy.html'),
14 | },
15 | },
16 | },
17 | });
18 |
--------------------------------------------------------------------------------
/pefrontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "type": "module",
4 | "scripts": {
5 | "dev": "vite",
6 | "build": "vite build",
7 | "preview": "vite preview",
8 | "check": "tsc --noEmit -p ."
9 | },
10 | "dependencies": {
11 | "@preact/signals": "^1.3.1",
12 | "codemirror": "^6.0.1",
13 | "monaco-editor": "^0.52.0",
14 | "preact": "^10.22.1",
15 | "shlex": "^2.1.2",
16 | "toml": "^3.0.0"
17 | },
18 | "devDependencies": {
19 | "@preact/preset-vite": "^2.9.0",
20 | "typescript": "^5.7.2",
21 | "vite": "^6.2.5"
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/scripts/setupquadlets.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | TARGET=~/.config/containers/systemd/program-explorer-dev
6 | mkdir -p ~/.config/containers/systemd
7 | if [ ! -d $TARGET ]; then
8 | ln -s $(realpath quadlets) $TARGET
9 | fi
10 |
11 | systemctl --user daemon-reload
12 |
13 | /usr/lib/systemd/system-generators/podman-system-generator --user --dryrun
14 |
15 | #systemctl --user start pe-server-lb.service
16 | #journalctl --user -feu pe-server-lb.service
17 |
18 | #systemctl --user daemon-reload && systemctl --user restart pe-server-lb.service
19 |
--------------------------------------------------------------------------------
/pearchive/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pearchive"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [lib]
7 | name = "pearchive"
8 | path = "src/lib.rs"
9 | doctest = false
10 |
11 | [[bin]]
12 | name = "pearchive"
13 | path = "src/main.rs"
14 | test = false
15 |
16 | [dependencies]
17 | byteorder = { workspace = true }
18 | memmap2 = { workspace = true }
19 | rustix = { workspace = true, features = ["fs", "process", "thread"] }
20 | thiserror = { workspace = true }
21 |
22 | [dev-dependencies]
23 | rand = { workspace = true }
24 |
25 | [lints]
26 | workspace = true
27 |
--------------------------------------------------------------------------------
/attic/makepmemsized.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | alignment = 0x20_0000
5 |
6 | filename = sys.argv[1]
7 | fd = os.open(filename, os.O_RDWR)
8 | assert fd > 0
9 |
10 | size = os.fstat(fd).st_size
11 |
12 | if size % alignment == 0:
13 | print(f'Size {size} is already aligned')
14 | sys.exit(0)
15 |
16 | remainder = size % alignment
17 | extra = alignment - remainder
18 | new_size = size + extra
19 | assert new_size % alignment == 0
20 |
21 | os.ftruncate(fd, new_size)
22 |
23 | new_size = os.fstat(fd).st_size
24 | assert new_size % alignment == 0
25 | print(f'Size {new_size} now aligned')
26 |
--------------------------------------------------------------------------------
/quadlets/pe-server-gh.container:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=PE gh gist service
3 |
4 | [Service]
5 | Restart=always
6 | StandardError=journal
7 | RuntimeDirectory=program-explorer
8 | RuntimeDirectoryPreserve=yes
9 |
10 | [Container]
11 | Image=localhost/pe-server-gh:latest
12 | Network=host
13 |
14 | # okay so using journald doesn't actually pass through JOURNAL_STREAM, so do it manually
15 | Environment=JOURNAL_STREAM=${JOURNAL_STREAM}
16 | LogDriver=passthrough
17 |
18 | Volume=${RUNTIME_DIRECTORY}:${RUNTIME_DIRECTORY}:z
19 |
20 | Exec=--uds=${RUNTIME_DIRECTORY}/gh.sock
21 |
22 | [Install]
23 | WantedBy=default.target
24 |
--------------------------------------------------------------------------------
/containers/pe-server-worker:
--------------------------------------------------------------------------------
1 | FROM scratch
2 |
3 | ADD --chmod=755 --checksum=sha256:a250a9347d0ea9e93f88b54b25df3cdc6a9ba3c57f292aaf74bb664fb5c87496 https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/v43.0/cloud-hypervisor-static /bin/cloud-hypervisor
4 |
5 | # TODO build this or something
6 | COPY target/release/vmlinux /bin/vmlinux
7 | COPY target/release/initramfs /bin/initramfs
8 | COPY target/x86_64-unknown-linux-musl/release/worker /bin/pe-worker
9 |
10 | ENV RUST_LOG=info
11 |
12 | ENTRYPOINT ["/bin/pe-worker", "--ch=/bin/cloud-hypervisor", "--kernel=/bin/vmlinux", "--initramfs=/bin/initramfs"]
13 |
--------------------------------------------------------------------------------
/caddy/dev.caddyfile:
--------------------------------------------------------------------------------
1 | # admin api left on so we can check the config with
2 | # curl localhost:2019/config/ | jq | less
3 |
4 | {
5 | #debug
6 | }
7 |
8 | http://{$CADDY_HOST:localhost}:{$CADDY_PORT:8080}
9 |
10 | encode zstd gzip
11 |
12 | handle_path /api/gh/* {
13 | # uri is passed on with /api/gh stripped. Maybe the other should be like that too?
14 | reverse_proxy unix/{$RUNTIME_DIRECTORY}/gh.sock
15 | }
16 | reverse_proxy /api/* unix/{$RUNTIME_DIRECTORY}/lb.sock
17 |
18 | # this takes care of the websockets upgrade thing that we had to
19 | # tell nginx to take care of
20 | reverse_proxy * {$FRONTEND_SERVER:localhost:5173}
21 |
--------------------------------------------------------------------------------
/pevub/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pevub"
3 | version = "0.1.0"
4 | edition = "2024"
5 |
6 | [features]
7 | event_idx = []
8 |
9 | [dependencies]
10 | env_logger = { workspace = true }
11 | log = { workspace = true, features = ["release_max_level_warn"] }
12 | smallvec = { workspace = true }
13 | thiserror = { workspace = true }
14 | vhost = { workspace = true, features = ["vhost-user-backend"] }
15 | vhost-user-backend = { workspace = true }
16 | virtio-bindings = { workspace = true }
17 | virtio-queue = { workspace = true }
18 | vm-memory = { workspace = true, features = ["backend-atomic", "backend-mmap"] }
19 | vmm-sys-util = { workspace = true }
20 |
21 | [lints]
22 | workspace = true
23 |
--------------------------------------------------------------------------------
/attic/qemumyinitdebug.sh:
--------------------------------------------------------------------------------
1 | k=/home/andrew/Repos/linux/vmlinux
2 |
3 | # -S pauses the cpu at startup
4 |
5 | # -S \
6 | #-device pvpanic-pci \
7 | qemu-system-x86_64 \
8 | -M microvm,pit=off,pic=off,isa-serial=off,rtc=off \
9 | -nographic -no-user-config -nodefaults \
10 | -gdb tcp::1234 \
11 | -enable-kvm \
12 | -cpu host -smp 1 -m 1G \
13 | -kernel $k -append "console=hvc0" \
14 | -device virtio-blk-device,drive=test \
15 | -drive id=test,file=gcc-squashfs.sqfs,read-only=on,format=raw,if=none \
16 | -initrd init1.initramfs \
17 | -chardev stdio,id=virtiocon0 \
18 | -device virtio-serial-device \
19 | -device virtconsole,chardev=virtiocon0 $@
20 |
--------------------------------------------------------------------------------
/quadlets/pe-server-lb.container:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=PE server lb
3 |
4 | [Service]
5 | Restart=always
6 | RuntimeDirectory=program-explorer
7 | # idk if this is really the right place/way to do this
8 | RuntimeDirectoryPreserve=yes
9 | StandardError=journal
10 |
11 | [Container]
12 | Image=localhost/pe-server-lb:latest
13 | Network=none
14 |
15 | # okay so using journald doesn't actually pass through JOURNAL_STREAM, so do it manually
16 | Environment=JOURNAL_STREAM=${JOURNAL_STREAM}
17 | LogDriver=passthrough
18 |
19 | Volume=${RUNTIME_DIRECTORY}:${RUNTIME_DIRECTORY}:z
20 |
21 | Exec=--uds=${RUNTIME_DIRECTORY}/lb.sock --worker=uds:${RUNTIME_DIRECTORY}/worker.sock
22 |
23 | [Install]
24 | WantedBy=default.target
25 |
--------------------------------------------------------------------------------
/pegh/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pegh"
3 | version = "0.1.0"
4 | edition = "2024"
5 |
6 | [dependencies]
7 | reqwest = { workspace = true, features = ["http2", "rustls-tls", "json"] }
8 | thiserror = {workspace = true }
9 | tokio = { workspace = true, features = ["fs", "io-util", "macros", "rt"] }
10 | clap = { workspace = true, features = ["derive"] }
11 | env_logger = { workspace = true }
12 | log = { workspace = true }
13 | serde = { workspace = true, features = ["derive"] }
14 | serde_json = { workspace = true }
15 | futures = { workspace = true }
16 | chrono.workspace = true
17 |
18 | [lib]
19 | path = "src/lib.rs"
20 |
21 | [[bin]]
22 | name = "gh"
23 | path = "src/main.rs"
24 |
25 | [lints]
26 | workspace = true
27 |
--------------------------------------------------------------------------------
/attic/make_strace_relative_time.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from functools import partial
3 | import string
4 |
5 | pids = {}
6 |
7 | def pid_letter(pid):
8 | if pid in pids:
9 | return pids[pid]
10 | pids[pid] = string.ascii_uppercase[len(pids)]
11 | return pids[pid]
12 |
13 | def xform(line, t0=0):
14 | pid, time, msg = line.split(' ', maxsplit=2)
15 | t = float(time)
16 | p = pid_letter(pid)
17 | t_off = (t - t0) * 1000
18 | return f'{t_off: 8.2f} {p} {msg}'
19 |
20 | f = sys.argv[1]
21 |
22 | with open(f, 'r') as fh:
23 | lines = list(fh)
24 |
25 | t0 = float(lines[0].split(' ', maxsplit=2)[1])
26 |
27 | out = map(partial(xform, t0=t0), lines)
28 |
29 | with open(f, 'w') as fh:
30 | fh.write(''.join(out))
31 |
--------------------------------------------------------------------------------
/peerofs/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "peerofs"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [features]
7 | lz4 = ["dep:lzzzz"]
8 |
9 | [dependencies]
10 | byteorder = { workspace = true }
11 | bytes = { workspace = true }
12 | log = { workspace = true }
13 | lzzzz = { workspace = true, optional = true }
14 | memmap2 = { workspace = true }
15 | rustix = { workspace = true, features = ["fs"] }
16 | thiserror = { workspace = true }
17 | zerocopy = { workspace = true, features = ["derive", "std"] }
18 | env_logger = { workspace = true }
19 |
20 | [lib]
21 | path = "src/lib.rs"
22 |
23 | [[bin]]
24 | name = "erofs-dump"
25 | path = "src/dump.rs"
26 |
27 | [lints]
28 | workspace = true
29 |
30 | [dev-dependencies]
31 | tempfile = { workspace = true }
32 |
--------------------------------------------------------------------------------
/caddy/prod.caddyfile:
--------------------------------------------------------------------------------
1 | # based on https://github.com/eriksjolund/podman-caddy-socket-activation/blob/main/examples/example3/Caddyfile
2 | {
3 | admin off
4 | }
5 |
6 |
7 | http://{$CADDY_HOST:localhost} {
8 | bind fd/3 {
9 | protocols h1
10 | }
11 | redir https://{host}{uri}
12 | }
13 |
14 | https://{$CADDY_HOST:localhost} {
15 | bind fd/4 {
16 | protocols h1 h2
17 | }
18 |
19 | encode zstd gzip
20 |
21 | handle_path /api/gh/* {
22 | # uri is passed on with /api/gh stripped. Maybe the other should be like that too?
23 | reverse_proxy unix/{$RUNTIME_DIRECTORY}/gh.sock
24 | }
25 | reverse_proxy /api/* unix/{$RUNTIME_DIRECTORY}/lb.sock
26 | root * {$CADDY_FILE_ROOT:/www}
27 | file_server
28 | }
29 |
--------------------------------------------------------------------------------
/peerofs/fuzz/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "peerofs-fuzz"
3 | version = "0.0.0"
4 | publish = false
5 | edition = "2021"
6 |
7 | [package.metadata]
8 | cargo-fuzz = true
9 |
10 | [dependencies]
11 | arbitrary = { workspace = true, features = ["derive"] }
12 | libfuzzer-sys = { workspace = true }
13 | peerofs = { workspace = true, features = ["lz4"] }
14 | memmap2 = { workspace = true }
15 | tempfile = { workspace = true }
16 | rustix = { workspace = true, features = ["fs"] }
17 |
18 | [[bin]]
19 | name = "fuzz_builder"
20 | path = "fuzz_targets/fuzz_builder.rs"
21 | test = false
22 | doc = false
23 | bench = false
24 |
25 | [[bin]]
26 | name = "fuzz_decompress_lz4"
27 | path = "fuzz_targets/fuzz_decompress_lz4.rs"
28 | test = false
29 | doc = false
30 | bench = false
31 |
--------------------------------------------------------------------------------
/pefrontend/privacy.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Program Explorer: Privacy Policy
8 |
9 |
10 |
11 | The files you send are never written to durable storage. They are not retained after your request is complete. They do not appear in server logs. Your IP may be used in rate limiting to ensure quality of service but is not durably stored for other purposes. The container image you choose may be counted for metric collection but is not tied to any identifying information.
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/quadlets/pe-image-service.container:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=PE image service
3 |
4 | [Service]
5 | Restart=always
6 | StandardError=journal
7 | RuntimeDirectory=program-explorer
8 | RuntimeDirectoryPreserve=yes
9 |
10 | [Container]
11 | Image=localhost/pe-image-service:latest
12 | Network=host
13 |
14 | # okay so using journald doesn't actually pass through JOURNAL_STREAM, so do it manually
15 | Environment=JOURNAL_STREAM=${JOURNAL_STREAM}
16 | LogDriver=passthrough
17 |
18 | Volume=${HOME}/.local/share/peoci:/peoci:z
19 | Volume=${RUNTIME_DIRECTORY}:${RUNTIME_DIRECTORY}:z
20 |
21 | Secret=container-registries
22 |
23 | Exec=--listen=${RUNTIME_DIRECTORY}/image-service.sock \
24 | --auth=/run/secrets/container-registries \
25 | --cache=/peoci
26 |
27 | [Install]
28 | WantedBy=default.target
29 |
--------------------------------------------------------------------------------
/peimage/src/bin/squasherofs.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use peimage::mkfs::squash_erofs;
4 | use peoci::ocidir::load_layers_from_oci;
5 |
6 | // NOTE this uses mkfs.erofs
7 |
8 | fn main() {
9 | let args: Vec<_> = env::args().collect();
10 | let dir = args.get(1).expect("give me an oci dir");
11 | let image = args.get(2).expect("give me an image name or digest");
12 | let outfile = args.get(3).expect("give me an output file");
13 |
14 | if !outfile.ends_with(".erofs") {
15 | eprintln!("outfile should end with .erofs");
16 | std::process::exit(1);
17 | }
18 |
19 | let mut readers: Vec<_> = load_layers_from_oci(dir, image).expect("getting layers failed");
20 |
21 | let stats = squash_erofs(&mut readers, outfile).unwrap();
22 | eprintln!("{stats:?}");
23 | }
24 |
--------------------------------------------------------------------------------
/quadlets/pe-server-worker.container:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=PE server worker
3 |
4 | [Service]
5 | Restart=always
6 | StandardError=journal
7 | RuntimeDirectory=program-explorer
8 | RuntimeDirectoryPreserve=yes
9 |
10 | [Container]
11 | Image=localhost/pe-server-worker:latest
12 | Network=none
13 |
14 | # okay so using journald doesn't actually pass through JOURNAL_STREAM, so do it manually
15 | Environment=JOURNAL_STREAM=${JOURNAL_STREAM}
16 | LogDriver=passthrough
17 |
18 | Volume=${RUNTIME_DIRECTORY}:${RUNTIME_DIRECTORY}:z
19 | PodmanArgs=--device=/dev/kvm
20 |
21 | Exec=--uds=${RUNTIME_DIRECTORY}/worker.sock \
22 | --image-service=${RUNTIME_DIRECTORY}/image-service.sock \
23 | --server-cpuset=0-3 \
24 | --worker-cpuset=4:2:2
25 |
26 | Tmpfs=/tmp
27 |
28 | [Install]
29 | WantedBy=default.target
30 |
--------------------------------------------------------------------------------
/scripts/build-containers.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash

set -e

# disabling for now b/c I have to run this build step in a container,
# but then don't have access to podman inside the container
# ./scripts/build.sh release

tag=latest

# the worker image wants the kernel next to the built binaries
# ugh copy of symlink won't work, should really build this in a container or something
cp vmlinux target/release/vmlinux

# each image name doubles as its Containerfile name under containers/
for name in pe-server-lb pe-server-worker pe-image-service pe-caddy pe-caddy-dev pe-server-gh; do
    podman build -t "$name:$tag" -f "containers/$name" .
done
--------------------------------------------------------------------------------
/peimage/src/bin/squashbin.rs:
--------------------------------------------------------------------------------
1 | use std::fs::File;
2 | use std::io::BufWriter;
3 | use std::os::fd::FromRawFd;
4 |
5 | use peimage::squash::squash_to_tar;
6 | use peoci::compression::Compression;
7 |
8 | fn main() {
9 | let mut layers: Vec<_> = std::env::args()
10 | .skip(1)
11 | .map(|x| (Compression::Gzip, File::open(x).unwrap()))
12 | .collect();
13 |
14 | let mut out = BufWriter::with_capacity(32 * 1024, unsafe { File::from_raw_fd(1) });
15 | squash_to_tar(&mut layers, &mut out).unwrap();
16 | }
17 |
18 | // cargo run --package peimage --bin squash /mnt/storage/program-explorer/ocidir/blobs/sha256/{7cf63256a31a4cc44f6defe8e1af95363aee5fa75f30a248d95cae684f87c53c,780fcebf8d094ef0ab389c7651dd0b1cc4530c9aba473c44359bf39bb0d770a8,e4d974df5c807a317b10ac80cf137857c9f5b7cd768fb54113f7d1cc1756504f}
19 |
--------------------------------------------------------------------------------
/scripts/devserver.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Run the whole dev stack locally. Every service is backgrounded into this
# shell's process group; the trap below tears the group down on Ctrl-C/exit.

set -e

# on interrupt or exit: clear the trap, then kill the entire process group
trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT

export RUST_LOG=debug

# build first so the backgrounded `cargo run`s below start from warm targets
# (presumably avoids several concurrent builds fighting over the lock — confirm)
cargo build --bin peimage-service --bin lb --bin worker

# image service: fetches/caches container images, served over a unix socket
cargo run --bin peimage-service -- --listen /tmp/image.sock --auth ~/Secure/container-registries.json &

# load balancer fronting the worker socket
cargo run --bin lb -- --uds /tmp/lb.sock --worker uds:/tmp/worker.sock &

# github gist backend
cargo run --bin ghserver -- --uds /tmp/gh.sock &

# VM worker; extra trace-level logging for local debugging
env RUST_LOG=trace cargo run --bin worker -- --uds /tmp/worker.sock --image-service /tmp/image.sock --worker-cpuset 0:2:2 --kernel target/debug/vmlinux --initramfs target/debug/initramfs --ch cloud-hypervisor-static &

# frontend dev server
(cd pefrontend && npm run dev -- --clearScreen=false) &

# caddy ties the unix sockets together like production would
env RUNTIME_DIRECTORY=/tmp caddy run --config caddy/dev.caddyfile &

# block until the children exit (or we are interrupted)
wait
--------------------------------------------------------------------------------
/peimage/src/bin/squashpodman.rs:
--------------------------------------------------------------------------------
1 | use std::fs::File;
2 | use std::io::{BufWriter, Cursor};
3 | use std::os::fd::FromRawFd;
4 |
5 | use peimage::podman::load_layers_from_podman;
6 | use peimage::squash::squash_to_tar;
7 |
8 | // trying out this method of dealing with multiple error types
9 | // https://doc.rust-lang.org/rust-by-example/error/multiple_error_types/boxing_errors.html
10 |
11 | fn main() {
12 | let args: Vec<_> = std::env::args().collect();
13 | let image = args.get(1).expect("give me an image name");
14 |
15 | let mut layers: Vec<_> = load_layers_from_podman(image)
16 | .expect("getting layers failed")
17 | .into_iter()
18 | .map(|(c, b)| (c, Cursor::new(b)))
19 | .collect();
20 |
21 | let mut out = BufWriter::with_capacity(32 * 1024, unsafe { File::from_raw_fd(1) });
22 | squash_to_tar(&mut layers, &mut out).unwrap();
23 | }
24 |
--------------------------------------------------------------------------------
/peinit/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "peinit"
3 | version = "0.1.0"
4 | edition = "2021"
5 | default-run = "notabin"
6 |
7 | [lib]
8 | name = "peinit"
9 | path = "src/lib.rs"
10 | doctest = false
11 |
12 | [[bin]]
13 | name = "notabin"
14 | path = "src/notabin.rs"
15 |
16 | [dependencies]
17 | waitid_timeout = { workspace = true }
18 | base16ct = { workspace = true, features = ["alloc"] }
19 | bincode = { workspace = true }
20 | byteorder = { workspace = true }
21 | libc = { workspace = true }
22 | serde = { workspace = true, features = ["derive"] }
23 | sha2 = { workspace = true }
24 | serde_json = { workspace = true }
25 | rustix = { workspace = true, features = ["fs", "mount", "net", "process", "system"] }
26 | command-fds = { workspace = true }
27 | vsock = { workspace = true, optional = true }
28 |
29 | [lints]
30 | workspace = true
31 |
32 | [features]
33 | snapshotting = ["dep:vsock"]
34 | blocktesting = []
35 |
--------------------------------------------------------------------------------
/peimage/go.mod:
--------------------------------------------------------------------------------
1 | module peimage
2 |
3 | go 1.23.2
4 |
5 | require (
6 | github.com/google/go-containerregistry v0.20.2
7 | github.com/sylabs/oci-tools v0.16.0
8 | )
9 |
10 | require (
11 | github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
12 | github.com/docker/cli v27.1.1+incompatible // indirect
13 | github.com/docker/distribution v2.8.2+incompatible // indirect
14 | github.com/docker/docker-credential-helpers v0.7.0 // indirect
15 | github.com/klauspost/compress v1.16.5 // indirect
16 | github.com/mitchellh/go-homedir v1.1.0 // indirect
17 | github.com/opencontainers/go-digest v1.0.0 // indirect
18 | github.com/opencontainers/image-spec v1.1.0 // indirect
19 | github.com/pkg/errors v0.9.1 // indirect
20 | github.com/sirupsen/logrus v1.9.1 // indirect
21 | github.com/vbatts/tar-split v0.11.3 // indirect
22 | golang.org/x/sync v0.2.0 // indirect
23 | golang.org/x/sys v0.22.0 // indirect
24 | )
25 |
--------------------------------------------------------------------------------
/attic/analyzeboottimes.py:
--------------------------------------------------------------------------------
import sys

# the linux/scripts/bootgraph.pl script says kernel params initcall_debug printk.time=1
# perl ~/Repos/linux/scripts/bootgraph.pl < boottimes

def parse_events(lines):
    """Yield (timestamp_seconds, event_text) for each dmesg-style '[ time] msg' line.

    Lines not starting with '[' are ignored. The event text keeps everything
    after the closing ']' (including any leading space) minus the trailing
    newline, matching the original script's slicing.
    """
    for line in lines:
        if line.startswith('['):
            i_end = line.find(']')
            time = float(line[1:i_end].strip())
            event = line[i_end+1:-1]
            yield time, event

def compute_durations(events):
    """Return [(duration, event)]: time attributed to each event until the next one.

    The last event gets no duration (there is nothing after it to measure to).
    """
    stats = []
    prev_time = None
    prev_event = None
    for time, event in events:
        if prev_time is not None:
            stats.append((time - prev_time, prev_event))
        prev_time = time
        prev_event = event
    return stats

def main():
    with open(sys.argv[1]) as fh:
        stats = compute_durations(parse_events(fh))

    # longest phases first
    stats.sort(reverse=True)
    for duration, event in stats:
        print('{:8.2f} ms {}'.format(duration * 1000, event))

if __name__ == '__main__':
    main()
29 |
--------------------------------------------------------------------------------
/scripts/build.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Build the deployable musl binaries, the initramfs, and (for release) the frontend.

set -e

profile=${1:-debug}
mkdir -p "target/$profile"

# for whatever reason you have to use --profile=dev to get ./target/debug/...
case "$profile" in
    debug) cargo_profile="dev" ;;
    *)     cargo_profile="$profile" ;;
esac

# this is not really relevant to deploy
# for package in perunner; do
#     cargo build --package=${package} --profile=${cargo_profile}
# done

# todo would get this building in a container, but it seems caching deps locally is hard
# peserver with musl requires musl-gcc (cmake OR zlib-ng-devel)
# pingora requires flate2 with the zlib-ng feature

for package in peinit pearchive peserver peimage-service; do
    cargo build --package="${package}" --profile="${cargo_profile}" --target x86_64-unknown-linux-musl
done

./scripts/build-initramfs.sh "$profile"

if [[ "$profile" == "release" ]]; then
    (cd pefrontend && npm run build)
fi
33 |
--------------------------------------------------------------------------------
/peimage/tartest.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Diff the flattened tar produced by our squash-oci against podman's export of
# the same image, to check that squashing matches the reference flattening.

set -e

# fail fast if podman isn't installed (also prints its path)
which podman

ocidir=/mnt/storage/program-explorer/ocidir
image=${1:-index.docker.io/library/gcc:13.3.0}

echo "img is $image"

# Export a flattened rootfs tar of image $1 via a temporary container.
# NOTE(review): the EXIT trap fires at script exit, not function return, and a
# second call would overwrite the first trap — fine while this is called once;
# confirm if ever reused.
function my-podman-export() {
    id=$(podman create $1)
    trap "podman rm $id" EXIT
    podman export $id
}

# echo "checking left=peimage(go) right=squash-oci"
# cargo run --release --bin tardiff -- \
# <(peimage export-notf $ocidir $image) \
# <(cargo run --release --bin squash-oci -- $ocidir $image)
#
echo "=================================================="

# if the tag is the same but the sha is different, may need to

echo "checking left=podman right=squash-oci"
cargo run --release --bin tardiff -- \
<(my-podman-export $image) \
<(cargo run --release --bin squash-oci -- $ocidir $image)

echo "if things are different, maybe try checking again with"
echo "skopeo copy oci:$ocidir:$image containers-storage:$image"
34 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2025 Andrew Consroe
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/peoci/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "peoci"
3 | version = "0.1.0"
4 | edition = "2024"
5 |
6 | [dependencies]
7 | arc-swap = { workspace = true }
8 | bincode = { workspace = true }
9 | bytes = { workspace = true }
10 | chrono = { workspace = true, features = ["now"] }
11 | clap = { workspace = true, features = ["derive"] }
12 | env_logger = { workspace = true }
13 | futures = { workspace = true }
14 | hex = { workspace = true }
15 | log = { workspace = true }
16 | moka = { workspace = true, features = ["future"] }
17 | nom = { workspace = true }
18 | oci-spec = { workspace = true }
19 | reqwest = { workspace = true, features = ["http2", "rustls-tls", "json"] }
20 | rustix = { workspace = true, features = ["fs"] }
21 | serde = { workspace = true }
22 | serde_json = { workspace = true }
23 | sha2 = { workspace = true }
24 | thiserror = { workspace = true }
25 | tokio = { workspace = true, features = ["fs", "io-util", "macros", "rt"] }
26 |
27 | [lib]
28 | path = "src/lib.rs"
29 |
30 | [[bin]]
31 | name = "oci-dist"
32 | path = "src/bin/ocidist.rs"
33 |
34 | [lints]
35 | workspace = true
36 |
--------------------------------------------------------------------------------
/peimage-service/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "peimage-service"
3 | version = "0.1.0"
4 | edition = "2024"
5 | default-run = "peimage-service"
6 |
7 | [dependencies]
8 | anyhow = { workspace = true }
9 | bincode = { workspace = true }
10 | clap = { workspace = true, features = ["derive"] }
11 | env_logger = { workspace = true }
12 | log = { workspace = true }
13 | memmap2 = { workspace = true }
14 | moka = { workspace = true, features = ["future"] }
15 | oci-spec = { workspace = true }
16 | peerofs = { workspace = true }
17 | peimage = { workspace = true }
18 | peoci = { workspace = true }
19 | rustix = { workspace = true, features = ["net"] }
20 | serde = { workspace = true }
21 | serde_json = { workspace = true }
22 | thiserror = { workspace = true }
23 | tokio = { workspace = true, features = ["macros", "rt", "signal"] }
24 | tokio-seqpacket = { workspace = true }
25 |
26 | [lints]
27 | workspace = true
28 |
29 | [lib]
30 | path = "src/lib.rs"
31 |
32 | [[bin]]
33 | name = "peimage-service"
34 | path = "src/main.rs"
35 |
36 | [[bin]]
37 | name = "peimage-service-testclient"
38 | path = "src/bin/testclient.rs"
39 |
--------------------------------------------------------------------------------
/pegh/src/main.rs:
--------------------------------------------------------------------------------
1 | use pegh::Client;
2 |
3 | use clap::Parser;
4 |
5 | #[derive(Parser, Debug)]
6 | #[command(version, about, long_about = None, disable_version_flag = true)]
7 | struct Args {
8 | gist: String,
9 |
10 | #[arg(long)]
11 | version: Option,
12 | }
13 |
14 | #[tokio::main(flavor = "current_thread")]
15 | async fn main() {
16 | env_logger::init();
17 | let args = Args::parse();
18 |
19 | let client = Client::new().unwrap();
20 |
21 | //let gist = if let Some(version) = args.version {
22 | // client.get_gist_version(&args.gist, &version).await.unwrap()
23 | //} else {
24 | // client.get_gist_latest(&args.gist).await.unwrap()
25 | //};
26 |
27 | let gist = client
28 | .get_gist(&args.gist, args.version.as_deref())
29 | .await
30 | .unwrap();
31 |
32 | if let Some(gist) = gist {
33 | println!("gist.version = {}", gist.version);
34 | println!("gist.versions:");
35 | for version in gist.versions {
36 | println!("- {version}");
37 | }
38 | for (name, contents) in &gist.files {
39 | println!("=== {name} ===");
40 | println!("{contents}");
41 | }
42 | } else {
43 | println!("oops not found");
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/attic/analyzesqfs.py:
--------------------------------------------------------------------------------
1 | from PySquashfsImage import SquashFsImage
2 | import sys
3 | import hashlib
4 |
def file_hash(f):
    """SHA-256 digest of a squashfs file entry, streamed block by block."""
    hasher = hashlib.sha256()
    for chunk in f.iter_bytes():
        hasher.update(chunk)
    return hasher.digest()
10 |
def file_hashes(im):
    """Walk a squashfs image; return (dir_count, {sha256_digest: file_size})."""
    hashes = {}
    dir_count = 0
    for entry in im:
        if entry.is_file:
            hashes[file_hash(entry)] = entry.size
        elif entry.is_dir:
            dir_count += 1

    return dir_count, hashes
21 |
# compare two squashfs images by content: how much file data do they share?
path_a = sys.argv[1]
path_b = sys.argv[2]

image_a = SquashFsImage.from_file(path_a)
image_b = SquashFsImage.from_file(path_b)

dirs_a, hashes_a = file_hashes(image_a)
dirs_b, hashes_b = file_hashes(image_b)

total_a = sum(hashes_a.values())
total_b = sum(hashes_b.values())

# files present (byte-identical) in both images, sized from image A
shared = set(hashes_a) & set(hashes_b)
shared_size = sum(hashes_a[k] for k in shared)

print('{:10} {:10.2f} Mb (compressed) {:10.2f} Mb (uncompressed) {:10} files {:10} dirs'.format(path_a, image_a.size / 1e6, total_a / 1e6, len(hashes_a), dirs_a))
print('{:10} {:10.2f} Mb (compressed) {:10.2f} Mb (uncompressed) {:10} files {:10} dirs'.format(path_b, image_b.size / 1e6, total_b / 1e6, len(hashes_b), dirs_b))

print('{} {:10.2f} Mb shared {:5.2f}%'.format(len(shared), shared_size / 1e6, shared_size / max(total_a, total_b) * 100))
41 |
--------------------------------------------------------------------------------
/peimage/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "peimage"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [dependencies]
7 | byteorder = { workspace = true }
8 | oci-spec = { workspace = true }
9 | peinit = { workspace = true }
10 | serde = { workspace = true, features = ["derive"] }
11 | serde_json = { workspace = true }
12 | flate2 = { workspace = true, features = ["zlib-ng"] }
13 | tar = { workspace = true }
14 | sha2 = { workspace = true }
15 | base16ct = { workspace = true }
16 | tempfile = { workspace = true }
17 | rand = { workspace = true }
18 | rustix = { workspace = true }
19 | zstd = { workspace = true }
20 | peerofs = { workspace = true }
21 | peoci = { workspace = true }
22 | thiserror = { workspace = true }
23 | anyhow = { workspace = true }
24 |
25 | [features]
26 | # skip CRC32 calculation when reading from gzip files, not sure this is a good idea or not
27 | nocrc = []
28 |
29 | [lib]
30 | path = "src/lib.rs"
31 |
32 | [[bin]]
33 | name = "squash"
34 | path = "src/bin/squashbin.rs"
35 |
36 | [[bin]]
37 | name = "squash-podman"
38 | path = "src/bin/squashpodman.rs"
39 |
40 | [[bin]]
41 | name = "squash-oci"
42 | path = "src/bin/squashoci.rs"
43 |
44 | [[bin]]
45 | name = "squash-erofs"
46 | path = "src/bin/squasherofs.rs"
47 |
48 | [[bin]]
49 | name = "tardiff"
50 | path = "src/bin/tardiff.rs"
51 |
52 | [lints]
53 | workspace = true
54 |
--------------------------------------------------------------------------------
/scripts/build-initramfs.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Build the initramfs cpio for the VM from initramfs.file, bundling a pinned
# crun binary ($CRUN if supplied, otherwise downloaded into vendor/).

set -e

profile=${1:-debug}
crun=${CRUN}
crun_url=https://github.com/containers/crun/releases/download/1.20/crun-1.20-linux-amd64
outfile=target/$profile/initramfs

# fall back to the vendored crun, downloading and checksum-verifying on first use
if [[ -z $crun || ! -f $crun ]]; then
crun=vendor/$(basename $crun_url)
if [ ! -f $crun ]; then
(cd vendor && wget $crun_url)
fi
echo 'e19a9a35484f3c75567219a7b6a4a580b43a0baa234df413655f48db023a200e vendor/crun-1.20-linux-amd64' | sha256sum -c
fi


echo "using profile=$profile crun=$crun" 1>&2

# compile the kernel tree's gen_init_cpio helper on first use
if [ ! -f vendor/gen_init_cpio ]; then
gcc -O1 vendor/gen_init_cpio.c -o vendor/gen_init_cpio
fi

# Render initramfs.file: substitute $PROFILE and $CRUN placeholders, then
# process "#@ REMOVE_IN_RELEASE" markers according to the build profile.
function gen_file() {
sed \
-e "s/\$PROFILE/$profile/" \
-e "s!\$CRUN!$crun!" \
initramfs.file | \
(if [[ "$profile" = "release" ]];
# this one removes the whole line
then sed -e "s/.*#@ REMOVE_IN_RELEASE//";
# this one removes trailing whitespace and the marker
# gen_init_cpio doesn't like having anything else in the line
else sed -e "s/ *#@ REMOVE_IN_RELEASE//";
fi)
}

echo "=========== using initrams.file =========="
gen_file
echo "=========================================="

# build the cpio from the rendered file list
./vendor/gen_init_cpio <(gen_file) > $outfile

echo "wrote to $outfile"
ls -lh $outfile
cpio -vt < $outfile
49 |
--------------------------------------------------------------------------------
/peimage/src/bin/squashoci.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 | use std::fs::File;
3 | use std::io::BufWriter;
4 | use std::os::fd::FromRawFd;
5 |
6 | use peoci::ocidir::load_layers_from_oci;
7 | use peimage::squash::{squash_to_erofs, squash_to_tar};
8 |
9 | fn main() {
10 | let args: Vec<_> = env::args().collect();
11 | let dir = args.get(1).expect("give me an oci dir");
12 | let image = args.get(2).expect("give me an image name or digest");
13 | let stdin = "-".to_string();
14 | let output = args.get(3).unwrap_or(&stdin);
15 |
16 | let mut readers: Vec<_> = load_layers_from_oci(dir, image).expect("getting layers failed");
17 |
18 | if output == "-" {
19 | let mut out = BufWriter::with_capacity(32 * 1024, unsafe { File::from_raw_fd(1) });
20 | let stats = squash_to_tar(&mut readers, &mut out).unwrap();
21 | eprintln!("{stats:?}");
22 | } else if output.ends_with(".tar") {
23 | let mut out = BufWriter::with_capacity(32 * 1024, File::create(output).unwrap());
24 | let stats = squash_to_tar(&mut readers, &mut out).unwrap();
25 | eprintln!("{stats:?}");
26 | } else if output.ends_with(".erofs") {
27 | let out = File::create(output).unwrap();
28 | let builder = peerofs::build::Builder::new(out, peerofs::build::BuilderConfig::default()).unwrap();
29 | let (squash_stats, erofs_stats) = squash_to_erofs(&mut readers, builder).unwrap();
30 | eprintln!("{squash_stats:?}");
31 | eprintln!("{erofs_stats:?}");
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/peimage-service/src/bin/testclient.rs:
--------------------------------------------------------------------------------
1 | use std::time::Instant;
2 |
3 | use memmap2::MmapOptions;
4 | use oci_spec::image::{Arch, Os};
5 | use peerofs::disk::Erofs;
6 | use peimage_service::{Request, request_erofs_image};
7 |
8 | async fn main_() -> anyhow::Result<()> {
9 | let args = std::env::args().collect::>();
10 | let socket_path = args.get(1).expect("give me a socket path");
11 | let reference = args.get(2).expect("give me an image reference");
12 |
13 | let request = Request::new(reference, &Arch::Amd64, &Os::Linux).unwrap();
14 | let t0 = Instant::now();
15 | let response = request_erofs_image(socket_path, request).await?;
16 | let elapsed = t0.elapsed().as_secs_f32();
17 | println!("got response in {elapsed:.3}s");
18 |
19 | let mmap = unsafe { MmapOptions::new().map(&response.fd)? };
20 | let erofs = Erofs::new(&mmap)?;
21 | let dir = erofs.get_root_inode()?;
22 | let dirents = erofs.get_dirents(&dir)?;
23 |
24 | for item in dirents.iter()? {
25 | let item = item?;
26 | let inode = erofs.get_inode_from_dirent(&item)?;
27 | println!(
28 | " {:>20} {:4} {:?} {}/{} {:o}",
29 | item.name.escape_ascii().to_string(),
30 | item.disk_id,
31 | item.file_type,
32 | inode.uid(),
33 | inode.gid(),
34 | inode.mode()
35 | );
36 | }
37 | Ok(())
38 | }
39 |
// Entry point: a single-threaded runtime is enough for this test client.
#[tokio::main(flavor = "current_thread")]
async fn main() {
    env_logger::init();
    // panic with the full error chain on any failure; acceptable for a test client
    main_().await.unwrap();
}
45 |
--------------------------------------------------------------------------------
/attic/cloudhypervisormyinit.sh:
--------------------------------------------------------------------------------
# Boot a cloud-hypervisor VM with the custom init, feeding it a packed input
# archive over a pmem device and the gcc image as a second pmem device.

set -e

k=/home/andrew/Repos/linux/vmlinux
ch=${ch:-/home/andrew/Repos/cloud-hypervisor/target/x86_64-unknown-linux-musl/profiling/cloud-hypervisor}

# exit children when we ctrl-c
#trap "pkill -P $$" EXIT

#strace --decode-pids=comm --trace=!ioctl,close,mmap,munmap,io_uring_enter -f -o chstrace.out ./cloud-hypervisor-static \

# stale API socket from a previous run would make ch fail to bind
rm -f /tmp/ch.sock

rm -rf /tmp/_out
mkdir /tmp/_out

# build a small input tree: a stdin file plus a dir with one file
rm -rf /tmp/_in
mkdir -p /tmp/_in/dir
echo 'hello this is stdin' > /tmp/_in/stdin
echo 'this is the contents of file1' > /tmp/_in/dir/file1

# (cd /tmp/_in && mksquashfs . input.sqfs -no-compression -no-xattrs -force-uid 0 -force-gid 0)
# pack the input dir, then pad it to a pmem-compatible size
./pearchive/target/release/pearchive pack /tmp/_in/dir /tmp/in.pack
python makepmemsized.py /tmp/in.pack

#--disk path=gcc-14.1.0.sqfs,readonly=on,id=gcc14 \
#strace --decode-pids=comm -f ./cloud-hypervisor-static \
time $ch \
--kernel $k \
--initramfs initramfs \
--serial off \
--pmem file=gcc-14.1.0.sqfs,discard_writes=on \
file=/tmp/in.pack,discard_writes=on \
--cmdline "console=hvc0" \
--cpus boot=1 \
--memory size=1024M,thp=on \
--api-socket /tmp/ch.sock \
$@

echo $?

#cpio --list < /tmp/_out/output
#mkdir /tmp/_out/outout
#(cd /tmp/_out/outout; cpio --extract < /tmp/_out/output)
#ls -l /tmp/_out/outout
# "sh", "-c", "echo 'into file' > /output/file1; echo 'to stdout'; echo 'to stderr' 1>&2"
45 | # "sh", "-c", "echo 'into file' > /output/file1; echo 'to stdout'; echo 'to stderr' 1>&2"
46 |
--------------------------------------------------------------------------------
/peserver/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "peserver"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [[bin]]
7 | name = "testclient"
8 | path = "src/testclient.rs"
9 |
10 | [[bin]]
11 | name = "lb"
12 | path = "src/lb.rs"
13 |
14 | [[bin]]
15 | name = "worker"
16 | path = "src/worker.rs"
17 |
18 | [[bin]]
19 | name = "ghserver"
20 | path = "src/gh.rs"
21 |
22 | [lib]
23 | name = "peserver"
24 | path = "src/lib.rs"
25 | doctest = false
26 |
27 | [dependencies]
28 | arc-swap = { workspace = true }
29 | async-trait = { workspace = true }
30 | axum = { workspace = true }
31 | base64 = { workspace = true }
32 | byteorder = { workspace = true }
33 | bytes = { workspace = true }
34 | clap = { workspace = true }
35 | env_logger = { workspace = true }
36 | flate2 = { workspace = true }
37 | http = { workspace = true }
38 | log = { workspace = true }
39 | moka = { workspace = true, features = ["future"] }
40 | oci-spec = { workspace = true }
41 | once_cell = { workspace = true }
42 | pearchive = { workspace = true }
43 | pegh = { workspace = true }
44 | peimage = { workspace = true }
45 | peimage-service = { workspace = true }
46 | peinit = { workspace = true }
47 | perunner = { workspace = true, features = ["asynk"] }
48 | pingora = { workspace = true, features = ["proxy", "lb"] }
49 | pingora-limits = { workspace = true }
50 | pingora-timeout = { workspace = true }
51 | prometheus = { workspace = true }
52 | rustix = { workspace = true, features = ["thread"] }
53 | serde = { workspace = true, features = ["serde_derive"] }
54 | serde_json = { workspace = true }
55 | sha2 = { workspace = true }
56 | tempfile = { workspace = true }
thiserror = { workspace = true }
58 | tokio = { workspace = true, features = ["io-util"] }
59 |
60 | [lints]
61 | workspace = true
62 |
--------------------------------------------------------------------------------
/perunner/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "perunner"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [lib]
7 | name = "perunner"
8 | path = "src/lib.rs"
9 |
10 | [[bin]]
11 | name = "snapshot-test"
12 | path = "src/snapshot-test.rs"
13 |
14 | [[bin]]
15 | name = "snapshot-read"
16 | path = "src/snapshot-read.rs"
17 |
18 | [[bin]]
19 | name = "blocktest"
20 | path = "src/blocktest.rs"
21 |
22 | [dependencies]
23 | api_client = { workspace = true }
24 | pearchive = { workspace = true }
25 | peinit = { workspace = true }
26 | waitid_timeout = { workspace = true }
27 | libc = { workspace = true }
28 | rand = { workspace = true }
29 | wait-timeout = { workspace = true }
30 | serde = { workspace = true }
31 | oci-spec = { workspace = true }
32 | serde_json = { workspace = true }
33 | base16ct = { workspace = true, features = ["alloc"] }
34 | sha2 = { workspace = true }
35 | byteorder = { workspace = true }
36 | crossbeam = { workspace = true, features = ["crossbeam-channel"] }
37 | nix = { workspace = true, features = ["sched"] }
38 | tempfile = { workspace = true }
39 | memmap2 = { workspace = true }
40 | clap = { workspace = true, features = ["derive"] }
41 | peimage = { workspace = true }
42 | tracing = { workspace = true }
43 | tracing-subscriber = { workspace = true }
44 | tokio = { workspace = true, features = ["rt", "sync"], optional = true }
45 | once_cell = { workspace = true }
46 | log = { workspace = true }
47 | rustix = { workspace = true }
48 | peoci = { workspace = true }
49 | thiserror = { workspace = true }
50 | peimage-service = { workspace = true }
51 | command-fds = { workspace = true }
52 | env_logger = { workspace = true }
53 |
54 | [features]
55 | default = ["asynk"]
56 | asynk = ["tokio"]
57 | tokio = ["dep:tokio"]
58 |
59 | [lints]
60 | workspace = true
61 |
--------------------------------------------------------------------------------
/attic/cloudhypervisortracing.sh:
--------------------------------------------------------------------------------
# Boot-time tracing/profiling harness: launches cloud-hypervisor directly with
# a kernel + initramfs. The commented-out strace/perf invocations are
# alternative measurement recipes kept for reference.
k=/home/andrew/Repos/linux/vmlinux
ch=/home/andrew//Repos/cloud-hypervisor/target/x86_64-unknown-linux-musl/release/cloud-hypervisor
ch=/home/andrew/Repos/cloud-hypervisor/target/x86_64-unknown-linux-musl/debug/cloud-hypervisor
# last assignment wins: the profiling build is the one actually used
ch=/home/andrew/Repos/cloud-hypervisor/target/x86_64-unknown-linux-musl/profiling/cloud-hypervisor
#ch=~/Repos/cloud-hypervisor/target/debug/cloud-hypervisor

set -e

#strace -o /tmp/strace.out -f $ch \
#--seccomp log --log-file ch.log \
#strace --decode-pids=comm -f $ch
#strace --stack-traces -f --absolute-timestamps=format:unix,precision:us -o strace.out $ch \
#$ch \

# strace -f --absolute-timestamps=format:unix,precision:us -o strace.out $ch \
#     --seccomp log \
#     --kernel $k \
#     --initramfs initramfs \
#     --cmdline "console=hvc0 tp_printk trace_event=initcall:*" \
#     --disk path=gcc-14.1.0.sqfs,readonly=on,id=container-bundle-squashfs \
#     --cpus boot=1 \
#     --memory size=1024M
#
#

# needs sudo
# perf stat -e 'kvm:*' $ch \
#perf record --freq 5000 $ch \
#strace -f --absolute-timestamps=format:unix,precision:us -o strace.out --trace=!ioctl,close $ch \
#perf record --freq 5000 --call-graph dwarf $ch \
#perf record --call-graph lbr --all-user --user-callchains -g \
#perf record --freq 10000 -g \
$ch \
    --seccomp log \
    --kernel $k \
    --initramfs initramfs \
    --console off \
    --cmdline "console=hvc0" \
    --disk path=gcc-14.1.0.sqfs,readonly=on,id=container-bundle-squashfs \
    --cpus boot=1 \
    --memory size=1024M

#python3 make_strace_relative_time.py strace.out
#cat strace.out
--------------------------------------------------------------------------------
/attic/pivot_rootfs.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Do the thing in https://github.com/containers/bubblewrap/issues/592#issuecomment-2243087731
3 | * unshare --mount
4 | * mount --rbind / /abc --mkdir
5 | * cd /abc
6 | * mount --move . /
7 | * chroot .
8 | */
9 |
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mount.h>
#include <unistd.h>
16 |
17 | int main(int argc, char** argv) {
18 | if (argc < 3) {
19 | fputs("args: ...\n", stderr);
20 | exit(EXIT_FAILURE);
21 | }
22 |
23 | if (unshare(CLONE_NEWNS) < 0) {
24 | perror("unshare --mount");
25 | exit(EXIT_FAILURE);
26 | }
27 |
28 | if (mount("/", argv[1], NULL, MS_BIND | MS_REC | MS_SILENT, NULL) < 0) {
29 | perror("mount --rbind / ");
30 | exit(EXIT_FAILURE);
31 | }
32 |
33 | // note: this can't be an fchdir with a dirfd opened previous to the mount
34 | if (chdir(argv[1]) < 0) {
35 | perror("fchdir dirfd");
36 | exit(EXIT_FAILURE);
37 | }
38 |
39 | if (mount(argv[1], "/", NULL, MS_MOVE | MS_SILENT, NULL) < 0) {
40 | perror("mount --move . /");
41 | exit(EXIT_FAILURE);
42 | }
43 |
44 | if (chroot(".") < 0) {
45 | perror("chroot .");
46 | exit(EXIT_FAILURE);
47 | }
48 |
49 | // this is not necessary though chroot(1) does do this
50 | // if (chdir("/") < 0) {
51 | // perror("chdir /");
52 | // exit(EXIT_FAILURE);
53 | // }
54 |
55 | if (setuid(1000) < 0) {
56 | perror("setuid");
57 | exit(EXIT_FAILURE);
58 | }
59 |
60 | if (execvp(argv[2], &argv[2]) < 0) {
61 | perror("execvp");
62 | exit(EXIT_FAILURE);
63 | }
64 |
65 | return 1;
66 | }
67 |
--------------------------------------------------------------------------------
/attic/init1:
--------------------------------------------------------------------------------
#!/bin/busybox sh
# PID-1 init for the microVM: mounts pseudo-filesystems, assembles the
# container rootfs from pmem-backed squashfs images plus an overlay, runs the
# container via crun with stdio redirected to input/output mounts, then the
# EXIT trap powers the VM off.

#set -e

export PATH=/bin

# otherwise we get a kernel panic and the vmm process hangs
trap "busybox poweroff -f" EXIT

# crun needs /proc/self/exe for stuff, cgroup_root for containers, and devtmpfs for mounting our sqfs
busybox mount -t proc none /proc
busybox mount -t cgroup2 none /sys/fs/cgroup
busybox mount -t devtmpfs none /dev

# this works!
# busybox inotifyd - /dev:n &
#
# busybox ls -l /dev/pmem*

#busybox mount -t squashfs -o loop /dev/vda /run/bundle/rootfs
# pmem0 carries the read-only container image rootfs
busybox mount -t squashfs -o loop /dev/pmem0 /mnt/rootfs

# pmem1 carries the read-only job input (includes /run/input/stdin used below)
busybox mount -t squashfs -o loop /dev/pmem1 /run/input

# small writable tmpfs for the job's output files
busybox mount -t tmpfs -o size=2M,mode=777 none /run/output
busybox mkdir --mode 777 /run/output/dir

# busybox mount -t tmpfs -o size=2M none /mnt/upper/scratch

# overlay writable /{scratch,input,output} dirs
busybox mount -t overlay -o lowerdir=/mnt/rootfs,upperdir=/mnt/upper,workdir=/mnt/work none /run/bundle/rootfs

busybox ls -l /
busybox ls -l /run/
# busybox ls -l /run/io/input/dir
# busybox ls -l /run/bundle
# busybox ls -l /run/bundle/rootfs

# pivot into /abc then exec crun with stdio wired to the mounts prepared above
pivot_rootfs /abc \
/bin/crun run --bundle /run/bundle containerid-1234 \
< /run/input/stdin \
> /run/output/stdout \
2> /run/output/stderr

# record the container's exit status alongside its output
echo $? > /run/output/exit

echo '---------from guest--------------'
echo '-------- stdout -----------'
busybox cat /run/output/stdout
echo '---------------------------'

echo '-------- stderr -----------'
busybox cat /run/output/stderr
echo '---------------------------'

# busybox ls -l /run/io/output
# for f in /run/io/output/*; do
# echo "$f"
# busybox cat "$f"
# done
# TODO this should probably run as a different user too
# (cd /run/output; busybox find . -print -depth | busybox cpio -H newc -ov > /dev/pmem2)
63 |
--------------------------------------------------------------------------------
/pefrontend/src/style.css:
--------------------------------------------------------------------------------
1 | :root {
2 | color-scheme: light dark;
3 | }
4 |
5 | summary { cursor: pointer; }
6 |
7 | /* these are internal to code-mirror */
8 | .cm-editor { height: 100%; }
9 | .cm-scroller { overflow: auto; }
10 |
11 | .editor-container {
12 | .tab-outer {
13 | font-family: monospace;
14 | margin-right: 5px;
15 |
16 | button {
17 | background: none;
18 | border: none;
19 | }
20 | }
21 |
22 | .tab-close {
23 | text-align: center;
24 | width: 3ch;
25 | display: inline-block;
26 | padding: 0;
27 | }
28 |
29 | .tab-name:hover, .tab-close:hover, .tab-new:hover { cursor: pointer; }
30 |
31 | .tab-close:after { content: "•"; }
32 | .tab-close:hover:after { content: "✕"; }
33 |
34 | .tab-close:hover, .tab-new:hover { background: whitesmoke; }
35 |
36 | .tab-close, .tab-new { border-radius: 4px; }
37 |
38 | .tab-outer.selected { border-bottom: 2px solid cornflowerblue; }
39 |
40 | .tab-outer.selected .tab-close:after { content: "✕"; }
41 |
42 | }
43 |
44 | input[name="cmd"] { width: 90%; }
45 | input[name="image"] { width: 90%; }
46 | #input-output-container { display: flex; }
47 | #input-container, #output-container { width: 50%; }
48 |
49 | #input-output-container {
50 | .cm-container {
51 | height: 80vh;
52 | padding-top: 2px;
53 | }
54 | }
55 |
56 | #input-output-container {
57 | .cm-container {}
58 | }
59 |
60 | .mono {
61 | font-family: monospace;
62 | }
63 |
64 | .inline { display: inline; }
65 |
/* is this comment messing things up? */
67 | code {
68 | padding: 1px 5px;
69 | border: 1px solid #e8e8e8;
70 | border-radius: 3px;
71 | }
72 |
73 | label.inline-label {
74 | padding-right: 3px;
75 | }
76 |
77 | #top-bar {
78 | display: flex;
79 | justify-content: space-between;
80 | }
81 |
82 | #disclaimer {
83 | padding: 5px;
84 | background: light-dark(azure, darkslategray);
85 | }
86 |
87 | #github-svg {
88 | display: inline;
89 | height: 2ch;
90 | }
91 |
--------------------------------------------------------------------------------
/peerofs/fuzz/fuzz_targets/fuzz_builder.rs:
--------------------------------------------------------------------------------
1 | #![no_main]
2 |
3 | use std::io::Cursor;
4 | use std::path::PathBuf;
5 |
6 | use arbitrary::Arbitrary;
7 | use libfuzzer_sys::fuzz_target;
8 |
9 | use rustix::fs::Mode;
10 |
11 | use peerofs::build::{Builder, BuilderConfig, Meta, XattrMap};
12 |
// Arbitrary-derivable mirror of build::Meta. The mode is carried as raw u16
// bits (rustix's Mode presumably can't derive Arbitrary -- confirm) and is
// truncated to valid bits in the From impl below.
#[derive(Arbitrary, Debug)]
struct ArbMeta {
    uid: u32,
    gid: u32,
    mtime: u64,
    mode: u16,
    xattrs: XattrMap,
}
21 |
22 | impl From for Meta {
23 | fn from(val: ArbMeta) -> Self {
24 | Meta {
25 | uid: val.uid,
26 | gid: val.gid,
27 | mtime: val.mtime,
28 | mode: Mode::from_bits_truncate(val.mode.into()),
29 | xattrs: val.xattrs,
30 | }
31 | }
32 | }
33 |
34 | #[derive(Arbitrary, Debug)]
35 | enum Op {
36 | File {
37 | path: PathBuf,
38 | meta: ArbMeta,
39 | data: Vec,
40 | },
41 | Dir {
42 | path: PathBuf,
43 | meta: ArbMeta,
44 | },
45 | Symlink {
46 | path: PathBuf,
47 | target: PathBuf,
48 | meta: ArbMeta,
49 | },
50 | Link {
51 | path: PathBuf,
52 | target: PathBuf,
53 | meta: ArbMeta,
54 | },
55 | }
56 |
57 | fuzz_target!(|ops: Vec| {
58 | let mut builder = Builder::new(Cursor::new(vec![]), BuilderConfig::default()).unwrap();
59 | for op in ops {
60 | match op {
61 | Op::File { path, meta, data } => {
62 | let _ = builder.add_file(path, meta.into(), data.len(), &mut Cursor::new(data));
63 | }
64 | Op::Dir { path, meta } => {
65 | let _ = builder.upsert_dir(path, meta.into());
66 | }
67 | Op::Symlink { path, target, meta } => {
68 | let _ = builder.add_symlink(path, target, meta.into());
69 | }
70 | Op::Link { path, target, meta } => {
71 | let _ = builder.add_link(path, target, meta.into());
72 | }
73 | }
74 | }
75 | let _ = builder.into_inner();
76 | });
77 |
--------------------------------------------------------------------------------
/pearchive/src/open.rs:
--------------------------------------------------------------------------------
1 | use crate::{Error, FILE_MODE, MKDIR_MODE};
2 | use std::ffi::CStr;
3 |
4 | use rustix::{
5 | fd::{AsFd, OwnedFd},
6 | fs::{Mode, OFlags, ResolveFlags},
7 | };
8 |
9 | // idk if openat2 is useful here since we work in a chroot anyways
10 |
11 | pub(crate) fn openat(fd: &Fd, name: &CStr) -> Result {
12 | rustix::fs::openat2(
13 | fd,
14 | name,
15 | OFlags::RDONLY | OFlags::CLOEXEC,
16 | Mode::empty(),
17 | ResolveFlags::BENEATH,
18 | )
19 | .map_err(Error::OpenAt)
20 | }
21 |
22 | pub(crate) fn openat_w(fd: &Fd, name: &CStr) -> Result {
23 | rustix::fs::openat2(
24 | fd,
25 | name,
26 | OFlags::WRONLY | OFlags::CREATE | OFlags::CLOEXEC,
27 | Mode::from_bits_truncate(FILE_MODE),
28 | ResolveFlags::BENEATH,
29 | )
30 | .map_err(Error::OpenAt)
31 | }
32 |
33 | pub(crate) fn opendir(name: &CStr) -> Result {
34 | rustix::fs::open(
35 | name,
36 | OFlags::RDONLY | OFlags::DIRECTORY | OFlags::CLOEXEC,
37 | Mode::empty(),
38 | )
39 | .map_err(Error::OpenAt)
40 | }
41 |
42 | pub(crate) fn opendirat(fd: &Fd, name: &CStr) -> Result {
43 | rustix::fs::openat2(
44 | fd,
45 | name,
46 | OFlags::RDONLY | OFlags::DIRECTORY | OFlags::CLOEXEC,
47 | Mode::empty(),
48 | ResolveFlags::BENEATH,
49 | )
50 | .map_err(Error::OpenAt)
51 | }
52 |
53 | pub(crate) fn opendirat_cwd(name: &CStr) -> Result {
54 | opendirat(&rustix::fs::CWD, name)
55 | }
56 |
57 | pub(crate) fn openpathat(fd: &Fd, name: &CStr) -> Result {
58 | rustix::fs::openat2(
59 | fd,
60 | name,
61 | OFlags::PATH | OFlags::DIRECTORY | OFlags::CLOEXEC,
62 | Mode::empty(),
63 | ResolveFlags::BENEATH,
64 | )
65 | .map_err(Error::OpenAt)
66 | }
67 |
68 | pub(crate) fn mkdirat(fd: &Fd, name: &CStr) -> Result<(), Error> {
69 | rustix::fs::mkdirat(fd, name, Mode::from_bits_truncate(MKDIR_MODE)).map_err(Error::MkdirAt)
70 | }
71 |
--------------------------------------------------------------------------------
/pefrontend/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Program Explorer
8 |
9 |
10 |
11 |
12 | Alpha preview, site may be unavailable without notice
13 |
14 |
26 |
27 |
28 |
29 |
32 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/pefrontend/src/util.ts:
--------------------------------------------------------------------------------
1 |
2 | export function bufToHex(data: ArrayBuffer, length: number): string {
3 | let n = Math.min(data.byteLength, length);
4 | let acc = '';
5 | let hexDigit = (i) => '0123456789abcdef'[i];
6 | if (data instanceof ArrayBuffer) {
7 | let buf = new Uint8Array(data);
8 | for (let i = 0; i < n; i++) {
9 | let b = buf[i];
10 | acc += hexDigit((b >> 4) & 0xf) + hexDigit(b & 0xf);
11 | }
12 | return acc;
13 | }
14 | throw new Error('bad type');
15 | }
16 |
// Return a wrapper around `f` that delays invocation until `wait` ms pass
// without another call; each call cancels and reschedules the pending timer.
// Browser-only: relies on window.setTimeout/clearTimeout.
export function debounce(f, wait) {
  let timeoutId = null;
  return (...args) => {
    // cancel any pending invocation before scheduling a new one
    window.clearTimeout(timeoutId);
    timeoutId = window.setTimeout(() => {
      f(...args);
    }, wait);
  };
}
26 |
27 | export function parseEnvText(s: string): string[] {
28 | let ret = [];
29 | for (let line of s.split('\n')) {
30 | if (line.startsWith('#')) {
31 | continue;
32 | }
33 | // TODO do some validation like VAR=blah
34 | ret.push(line);
35 | }
36 | return ret;
37 | }
38 |
// Fast path: native Uint8Array.prototype.toBase64 (not yet in TS lib types,
// hence the ts-ignore).
function bufToBase64Native(x: ArrayBuffer): string {
  // @ts-ignore:next-line
  return (new Uint8Array(x)).toBase64();
}
// Fallback: build a binary string then window.btoa (browser-only).
function bufToBase64Slow(x: ArrayBuffer): string {
  let ret = '';
  const bytes = new Uint8Array(x);
  const len = bytes.byteLength;
  for (let i = 0; i < len; i++) {
    ret += String.fromCharCode(bytes[i]);
  }
  return window.btoa(ret);
}

// Decoders return null on malformed base64 rather than throwing.
function bufFromBase64Native(x: string): ArrayBuffer | null {
  try {
    // @ts-ignore:next-line
    return Uint8Array.fromBase64(x).buffer;
  } catch {
    return null;
  }
}

function bufFromBase64Slow(x: string): ArrayBuffer | null {
  try {
    return new Uint8Array(Array.from(window.atob(x), x => x.charCodeAt(0))).buffer;
  } catch {
    return null;
  }
}

// Feature-detect once at module load and export the best implementation.
// @ts-ignore:next-line
export const bufToBase64 = Uint8Array.prototype.toBase64 === undefined ? bufToBase64Slow : bufToBase64Native;

// @ts-ignore:next-line
export const bufFromBase64 = Uint8Array.fromBase64 === undefined ? bufFromBase64Slow : bufFromBase64Native;
75 |
--------------------------------------------------------------------------------
/peerofs/fuzz/fuzz_targets/fuzz_decompress_lz4.rs:
--------------------------------------------------------------------------------
1 | #![no_main]
2 |
3 | use std::fs;
4 | use std::process::Command;
5 |
6 | use libfuzzer_sys::fuzz_target;
7 | use memmap2::MmapOptions;
8 | use tempfile::{tempdir, NamedTempFile};
9 |
10 | use peerofs::disk::{Erofs, Layout};
11 |
12 | // TODO: not sure how effective this is, I have tried the -len_control flag to get it to use longer
13 | // inputs but still seems to use short ones. Maybe it is better to take a sequence of Arbitrary Ops
14 | // that are like len, kind, seed, where kind is one of Random, Repeat, Cycle, or something so that
15 | // a small number of ops can produce a much bigger output.
16 | // Basically I'm trying to produce outputs which have the property that some sections are well
17 | // compressed so they end up in a pcluster spanning multiple lclusters of varying length, other
18 | // sections become Plain literal blocks and all of this spanning different parts of the block size
19 | // boundaries
20 |
21 | fuzz_target!(|data: Vec| {
22 | let mut data = data;
23 | // this is enough to (almost?) always trigger compression
24 | for i in 0..4200 {
25 | data.push(i as u8);
26 | }
27 | println!("data len {}", data.len());
28 | let dir = tempdir().unwrap();
29 | let dest = NamedTempFile::new().unwrap();
30 |
31 | let filename = "file";
32 | let file = dir.path().join(&filename);
33 | fs::write(&file, &data).unwrap();
34 |
35 | let out = Command::new("mkfs.erofs")
36 | .arg(dest.path())
37 | .arg(dir.path())
38 | .arg(format!("-zlz4"))
39 | .arg("-b4096")
40 | .arg("-Elegacy-compress")
41 | .output()
42 | .unwrap();
43 | assert!(out.status.success());
44 |
45 | let mmap = unsafe { MmapOptions::new().map(&dest).unwrap() };
46 | let erofs = Erofs::new(&mmap).unwrap();
47 |
48 | let inode = erofs.lookup(&filename).unwrap().unwrap();
49 | let data_out = match inode.layout() {
50 | Layout::FlatInline | Layout::FlatPlain => {
51 | let (head, tail) = erofs.get_data(&inode).unwrap();
52 | [head, tail].concat()
53 | }
54 | Layout::CompressedFull => {
55 | erofs.get_compressed_data_vec(&inode).unwrap()
56 | }
57 | l => {
58 | panic!("not expecting layout {:?}", l)
59 | }
60 | };
61 | assert!(data == data_out);
62 | });
63 |
--------------------------------------------------------------------------------
/peoci/src/ocidir.rs:
--------------------------------------------------------------------------------
1 | use std::fs::File;
2 | use std::path::Path;
3 |
4 | use crate::compression::Compression;
5 |
6 | use oci_spec::image::{Descriptor, Digest, ImageIndex, ImageManifest};
7 |
/// Errors from reading an OCI layout directory.
// NOTE(review): thiserror's derive is used here only for the
// std::error::Error impl and the #[from] conversions; since no variant has an
// #[error(...)] attribute, Display comes from the manual impl below.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    NoMatchingManifest,
    OciSpec(#[from] oci_spec::OciSpecError),
    NoMediaType,
    BadMediaType,
    Io(#[from] std::io::Error),
}
16 |
17 | // how wrong is this?
18 | impl std::fmt::Display for Error {
19 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
20 | write!(f, "{:?}", self)
21 | }
22 | }
23 |
24 | // sha256:foo -> sha256/foo
25 | fn digest_path(d: &Digest) -> String {
26 | d.to_string().replacen(":", "/", 1)
27 | }
28 |
29 | fn load_blob(blobs: &Path, layer: &Descriptor) -> Result<(Compression, File), Error> {
30 | // grr the image spec is a bit complicated with old stuff, there is both mediaType and
31 | // artifactType and we have to handle the docker ones in mediaType and the OCI ones in artifact
32 | // type
33 | let compression = layer.try_into().map_err(|_| Error::BadMediaType)?;
34 | let file = File::open(blobs.join(digest_path(layer.digest()))).map_err(Into::::into)?;
35 | Ok((compression, file))
36 | }
37 |
38 | pub fn load_layers_from_oci>(
39 | dir: P,
40 | image: &str,
41 | ) -> Result, Error> {
42 | let dir = dir.as_ref();
43 | let blobs = dir.join("blobs");
44 |
45 | let index = ImageIndex::from_file(dir.join("index.json"))?;
46 | let manifest = (if image.starts_with("sha256:") {
47 | index
48 | .manifests()
49 | .iter()
50 | .find(|x| x.digest().to_string() == image)
51 | } else {
52 | index.manifests().iter().find(|x| {
53 | if let Some(annotations) = x.annotations() {
54 | if let Some(name) = annotations.get("org.opencontainers.image.ref.name") {
55 | return image == name;
56 | }
57 | }
58 | false
59 | })
60 | })
61 | .ok_or(Error::NoMatchingManifest)?;
62 |
63 | let image_manifest = ImageManifest::from_file(blobs.join(digest_path(manifest.digest())))?;
64 |
65 | // is there a nicer way to coerce things into the right error type here??
66 |
67 | image_manifest
68 | .layers()
69 | .iter()
70 | .map(|x| load_blob(&blobs, x))
71 | .collect()
72 | }
73 |
--------------------------------------------------------------------------------
/initramfs.file:
--------------------------------------------------------------------------------
# file <name> <location> <mode> <uid> <gid> [<hard links>]
# dir <name> <mode> <uid> <gid>
# nod <name> <mode> <uid> <gid> <dev_type> <maj> <min>
# slink <name> <target> <mode> <uid> <gid>
# pipe <name> <mode> <uid> <gid>
# sock <name> <mode> <uid> <gid>
#
# <name>        name of the file/dir/nod/etc in the archive
# <location>    location of the file in the current filesystem
#               expands shell variables quoted with ${}
# <target>      link target
# <mode>        mode/permissions of the file
# <uid>         user id (0=root)
# <gid>         group id (0=root)
# <dev_type>    device type (b=block, c=character)
# <maj>         major number of nod
# <min>         minor number of nod
# <hard links>  space separated list of other links to file
19 | #
20 | # VARIABLES SUBSTITUTED
21 | # PROFILE -- to select between target/{debug,release}/pefoo
22 | # CRUN -- path to crun binary
23 | #
24 | # TODO some of this might be faster/easier to just create in peinit
25 | # dirs should be faster in here since it is all kernel esp the owner + perms
26 |
27 | dir /dev 0755 0 0
28 | dir /proc 0755 0 0
29 |
30 | dir /sys 0755 0 0
31 | dir /sys/fs 0755 0 0
32 | dir /sys/fs/cgroup 0755 0 0
33 |
34 | # pivot dir for parent rootfs
35 | dir /abc 0755 0 0
36 |
37 | dir /mnt 0755 0 0
38 | # workdir for overlayfs
39 | dir /mnt/work 0755 0 0
40 | # where the squashfs/erofs multi-image gets mounted
41 | dir /mnt/image 0755 0 0
42 | # where we bind mount the actual image's rootfs
43 | dir /mnt/rootfs 0755 0 0
44 | # root of upper in overlayfs to overlay the /run dir over rootfs
45 | dir /mnt/upper 0777 1000 1000
46 |
47 | dir /run 0777 0 0
48 |
49 | dir /run/bundle 0755 0 0
50 | dir /run/bundle/rootfs 0755 0 0
51 |
52 | dir /run/crun 0777 0 0
53 |
54 | dir /run/input 0777 1000 1000
55 | # this is a mount point for a tmpfs so we don't create /run/output/dir here
56 | dir /run/output 0777 1000 1000
57 |
58 | dir /bin 0755 0 0
59 |
60 | file /bin/crun $CRUN 0555 0 0
61 |
62 | file /bin/pearchive target/x86_64-unknown-linux-musl/$PROFILE/pearchive 0555 0 0
63 | file /init target/x86_64-unknown-linux-musl/$PROFILE/peinit 0555 0 0
64 |
65 | # TODO pull these from maybe somewhere else
66 | # busybox is https://www.busybox.net/downloads/binaries/1.35.0-x86_64-linux-musl/busybox
67 | # but strace was a custom build locally
68 | file /bin/busybox busybox 0555 0 0 #@ REMOVE_IN_RELEASE
69 | slink /bin/sh /bin/busybox 0555 0 0 #@ REMOVE_IN_RELEASE
70 | file /bin/strace strace 0555 0 0 #@ REMOVE_IN_RELEASE
71 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | resolver = "2"
3 | members = ["pearchive", "pearchive/fuzz", "peerofs", "peerofs/fuzz", "pegh", "peimage", "peimage-service", "peinit", "peoci", "perunner", "peserver", "pevub", "tools", "waitid_timeout"]
4 |
5 | [workspace.lints.clippy]
6 | redundant_field_names = "allow"
7 |
8 | [workspace.dependencies]
9 | anyhow = "1.0.98"
10 | api_client = { version = "0.1.0", path = "../cloud-hypervisor/api_client" }
11 | arbitrary = "1.4.1"
12 | arc-swap = "1.7.1"
13 | async-trait = "0.1.83"
14 | aws-config = "1.8.0"
15 | aws-sdk-s3 = "1.93.0"
16 | base16ct = "0.2.0"
17 | base64 = "0.22.1"
18 | byteorder = "1.5.0"
19 | bytes = "1.10.1"
20 | clap = "4.5.40"
21 | command-fds = "0.3.1"
22 | crossbeam = "0.8.4"
23 | env_logger = "0.11.8"
24 | flate2 = "1.1.1"
25 | hex = "0.4.3"
26 | http = "1.1.0"
27 | libc = "0.2.164"
28 | libfuzzer-sys = "0.4"
29 | log = "0.4.27"
30 | memmap2 = "0.9.5"
31 | mio = "1.0.2"
32 | mio-pidfd = "0.4.0"
33 | moka = "0.12.10"
34 | nix = "0.29.0"
35 | nom = "8.0.0"
36 | oci-spec = "0.8.1"
37 | once_cell = "1.20.2"
38 | pearchive = { version = "0.1.0", path = "pearchive" }
39 | peerofs = { version = "0.1.0", path = "peerofs" }
40 | peimage = { version = "0.1.0", path = "peimage" }
41 | peimage-service = { version = "0.1.0", path = "peimage-service" }
42 | peinit = { version = "0.1.0", path = "peinit" }
43 | peoci = { version = "0.1.0", path = "peoci" }
44 | perunner = { version = "0.1.0", path = "perunner" }
45 | pingora = "0.5.0"
46 | pingora-limits = "0.5.0"
47 | pingora-timeout = "0.5.0"
48 | prometheus = "0.13.4"
49 | reqwest = { version = "0.12.15", default-features = false }
50 | rustix = "1.0.7"
51 | serde = "1.0.219"
52 | serde_json = "1.0.140"
53 | sha2 = "0.10.9"
54 | smallvec = "1.15.1"
55 | tar = "0.4.44"
56 | tempfile = "3.19.1"
57 | thiserror = "2.0.12"
58 | tokio = "1.45.1"
59 | tokio-seqpacket = "0.8.0"
60 | tracing = "0.1.40"
61 | tracing-subscriber = "0.3.18"
62 | vhost = "0.14.0"
63 | vhost-user-backend = "0.19.0"
64 | virtio-bindings = "0.2.5"
65 | virtio-queue = "0.15.0"
66 | vm-memory = "0.16.2"
67 | vmm-sys-util = "0.14.0"
68 | vsock = "0.5.1"
69 | wait-timeout = "0.2.0"
70 | waitid_timeout = { version = "0.1.0", path = "waitid_timeout" }
71 | zerocopy = "0.8.24"
72 | zstd = "0.13.3"
73 | bincode = "2.0.1"
74 | rand = "0.9.1"
75 | lzzzz = "2.0.0"
76 | chrono = "0.4.41"
77 | axum = "0.8.4"
78 | pegh = { version = "0.1.0", path = "pegh" }
79 | futures = "0.3.31"
80 |
81 | # reminder on how to do this
82 | # [patch.crates-io]
83 | # libz-ng-sys = { path = '../libz-sys' }
84 |
85 | # in testing pevub, added some debug/trace logs to these
86 | # [patch.crates-io]
87 | # vhost-user-backend = { path = '../vhost/vhost-user-backend' }
88 | # vhost = { path = '../vhost/vhost' }
89 |
--------------------------------------------------------------------------------
/attic/vsockhello.c:
--------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

#include <linux/vm_sockets.h>
9 |
10 |
11 | // [...]
12 | // location is v or u
13 | // fd is 0 | 1
14 | int main(int argc, char **argv)
15 | {
16 | if (argc < 3) {
17 | fputs(" \n", stderr);
18 | exit(EXIT_FAILURE);
19 | }
20 | const char* location = argv[1];
21 |
22 | int fd = atoi(argv[2]);
23 | if (!((fd == 0) || (fd == 1))) {
24 | fputs(" must be 0 or 1\n", stderr);
25 | exit(EXIT_FAILURE);
26 | }
27 |
28 | int ret;
29 | int dupfd;
30 |
31 | if (location[0] == 'u') {
32 | struct sockaddr_un addr;
33 | memset(&addr, 0, sizeof(addr));
34 | addr.sun_family = AF_UNIX;
35 | strncpy(addr.sun_path, &location[1], strlen(&location[1]));
36 | int sock = socket(AF_UNIX, SOCK_STREAM, 0);
37 | if (sock < 0) {perror("socket"); exit(EXIT_FAILURE);}
38 | ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
39 | if (ret < 0) {perror("bind"); exit(EXIT_FAILURE);}
40 | ret = listen(sock, 0);
41 | if (ret < 0) {perror("listen"); exit(EXIT_FAILURE);}
42 | dupfd = accept(sock, NULL, 0);
43 | if (dupfd < 0) {perror("accept"); exit(EXIT_FAILURE);}
44 | ret = close(sock);
45 | if (ret < 0) {perror("close sock"); exit(EXIT_FAILURE);}
46 |
47 | } else if (location[0] == 'v') {
48 | int port = atoi(&location[1]);
49 | struct sockaddr_vm addr;
50 | memset(&addr, 0, sizeof(addr));
51 | addr.svm_family = AF_VSOCK;
52 | addr.svm_reserved1 = 0;
53 | addr.svm_cid = VMADDR_CID_HOST;
54 | addr.svm_port = port;
55 | int sock = socket(AF_VSOCK, SOCK_STREAM, 0);
56 | if (sock < 0) {perror("socket"); exit(EXIT_FAILURE);}
57 | ret = connect(sock, (struct sockaddr *)&addr, sizeof(addr));
58 | if (ret < 0) {perror("connect"); exit(EXIT_FAILURE);}
59 | dupfd = sock;
60 | } else {
61 | fputs(" must be u or v\n", stderr);
62 | exit(EXIT_FAILURE);
63 | }
64 |
65 | // looking back, dup2 does the close, right?
66 | ret = close(fd);
67 | if (ret < 0) {perror("close fd"); exit(EXIT_FAILURE);}
68 |
69 | ret = dup2(dupfd, fd);
70 | if (ret < 0) {perror("dup2"); exit(EXIT_FAILURE);}
71 |
72 | if (argc >= 4) {
73 | ret = execvp(argv[3], &argv[3]);
74 | if (ret < 0) {perror("execvp"); exit(EXIT_FAILURE);}
75 | }
76 | return 0;
77 | }
78 |
79 | /*
80 | * saved from how this was used
81 | # ON HOST
82 | ./vsockhello u/tmp/ch.sock_123 1 cat < /tmp/_stdin &
83 | ./vsockhello u/tmp/ch.sock_124 0 cpio -i -D /tmp/_out &
84 | ./vsockhello u/tmp/ch.sock_124 0 cat > /tmp/_out.cpio &
85 |
86 | # ON GUEST
87 | vsockhello v123 0 /bin/busybox cat > /input/_stdin
88 |
89 | echo -e '_stdout\n_stderr' | vsockhello v124 1 busybox cpio -H newc -o
90 | */
91 |
--------------------------------------------------------------------------------
/scripts/inspecttar.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import tarfile
4 | import json
5 | from pathlib import Path
6 | import argparse
7 |
# Reverse-map tarfile's member-type byte constants to their names for display.
types = {getattr(tarfile, k): k
         for k in 'REGTYPE AREGTYPE LNKTYPE SYMTYPE DIRTYPE FIFOTYPE CONTTYPE CHRTYPE BLKTYPE GNUTYPE_SPARSE'.split()}

# Same for the archive format constants.
formats = {getattr(tarfile, k): k for k in 'USTAR_FORMAT GNU_FORMAT PAX_FORMAT'.split()}
15 |
def print_tarfile(filename, layer_index=None):
    """Print one metadata line per member of the tar archive at `filename`.

    layer_index, when given, prefixes every member line so output from
    multiple layers can be distinguished/grepped.
    """
    # fix: close the archive when done instead of leaking the file handle
    with tarfile.open(filename) as tf:
        format_s = formats[tf.format]
        print(f'(unknown): format={format_s}')

        if tf.pax_headers:
            print('--- PAX ---')
            for k, v in tf.pax_headers.items():
                print(f'{k:20} {v}')

        for x in tf:
            type_s = types[x.type]
            # identical output to before; only the layer= prefix differs
            prefix = '' if layer_index is None else f'layer={layer_index} '
            print(f'{prefix}size={x.size:10} mtime={x.mtime} mode={x.mode:06o} type={type_s} uid/gid={x.uid}/{x.gid} uname/gname={x.uname}/{x.gname} dev={x.devmajor},{x.devminor} {x.pax_headers} {x.name} ')
36 |
# expects an OCI index.json; follows it to the (single) manifest and prints
# the tar listing of one layer, or of every layer when layer_index is None
def main_json(index_filename, layer_index=None):
    def digest_path(digest):
        # 'sha256:abc' -> <dir>/blobs/sha256/abc
        return index_filename.parent / 'blobs' / digest.replace(':', '/')

    with open(index_filename) as fh:
        index = json.load(fh)
    if len(index['manifests']) != 1: raise Exception('expecting 1 manifest')
    # bugfix: the original error payload referenced `m`, which is not defined
    # until further down -- the failure path itself raised a NameError
    if index['manifests'][0]['mediaType'] != 'application/vnd.oci.image.manifest.v1+json': raise Exception('expecting manifest+v1', index['manifests'][0]['mediaType'])

    manifest_digest = index['manifests'][0]['digest']
    with open(digest_path(manifest_digest)) as fh:
        m = json.load(fh)

    if layer_index is None:
        for i, layer in enumerate(m['layers']):
            digest = layer['digest']
            print(f'-- layer {i} {digest}')
            print_tarfile(digest_path(digest), layer_index=i)
    else:
        layer = m['layers'][layer_index]
        digest = layer['digest']
        print(f'-- layer {layer_index} {digest}')
        print_tarfile(digest_path(digest), layer_index=layer_index)
62 |
63 |
def main(args):
    """Dispatch: OCI-index mode when --json is set or the file ends in .json."""
    use_json = args.json or args.file.suffix == '.json'
    if use_json:
        main_json(args.file, args.layer)
    else:
        print_tarfile(args.file)
69 |
def args():
    """Parse the command line: optional --json / --layer plus the input path."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--json', default=False, action='store_true')
    parser.add_argument('--layer', default=None, type=int)
    parser.add_argument('file', type=Path)
    return parser.parse_args()
77 |
78 | main(args())
79 |
--------------------------------------------------------------------------------
/pefrontend/src/api.ts:
--------------------------------------------------------------------------------
// Client-side mirror of the server's JSON API types plus URL builders.
export namespace Api {
  // Process termination info; exactly one variant key is present,
  // mirroring the serialized Rust enum.
  export type Siginfo =
    {Exited: number}
    | {Killed: number}
    | {Dumped: number}
    | {Stopped: number}
    | {Trapped: number}
    | {Continued: number};

  export type TimeVal = {
    sec: number, // TODO these are i64 so maybe blow up in json
    usec: number,
  };

  // Mirrors struct rusage from getrusage(2).
  export type Rusage = {
    ru_utime : TimeVal, /* user CPU time used */
    ru_stime : TimeVal, /* system CPU time used */
    ru_maxrss : number, /* maximum resident set size */
    ru_ixrss : number, /* integral shared memory size */
    ru_idrss : number, /* integral unshared data size */
    ru_isrss : number, /* integral unshared stack size */
    ru_minflt : number, /* page reclaims (soft page faults) */
    ru_majflt : number, /* page faults (hard page faults) */
    ru_nswap : number, /* swaps */
    ru_inblock : number, /* block input operations */
    ru_oublock : number, /* block output operations */
    ru_msgsnd : number, /* IPC messages sent */
    ru_msgrcv : number, /* IPC messages received */
    ru_nsignals : number, /* signals received */
    ru_nvcsw : number, /* voluntary context switches */
    ru_nivcsw : number, /* involuntary context switches */
  };

  // Request/response shapes for the run-image endpoint.
  export namespace Runi {
    export type Request = {
      stdin?: string,
      entrypoint?: string[],
      cmd?: string[],
    };
    // Tagged union keyed on `kind`; Overtime carries the same payload as Ok.
    export type Response =
      | {kind: "Ok", siginfo: Siginfo, rusage: Rusage}
      | {kind: "Overtime", siginfo: Siginfo, rusage: Rusage}
      | {kind: "Panic", message: string};
  }

  // Image metadata returned by the image listing/lookup endpoints.
  export type Image = {
    links: {
      runi: string,
      upstream: string,
    },
    info: {
      digest: string,
      repository: string,
      registry: string,
      tag: string,
    },
    config: {
      created: string,
      architecture: string,
      os: string,
      config: {
        Cmd?: string[],
        Entrypoint?: string[],
        Env?: string[],
      },
      // NOTE(review): in the OCI image configuration spec, rootfs is a single
      // {type, diff_ids} object, not an array of them — confirm against what
      // the server actually serializes here.
      rootfs: {type: string, diff_ids: string[]}[],
      history: any, // todo
    },
  };

  // Build the run-image endpoint URL for a given reference/arch/os.
  export function apiv2_runi(reference: string, arch: string, os: string) : string {
    return `${window.location.origin}/api/v2/runi/${arch}/${os}/${reference}`;
  }

  // Build the gist proxy URL; version is appended only when given.
  export function gh_gist(id: string, version: string | null) : string {
    let suffix = version === null ? '' : `/${version}`;
    return `${window.location.origin}/api/gh/gist/${id}${suffix}`;
  }

}
81 |
--------------------------------------------------------------------------------
/peoci/src/compression.rs:
--------------------------------------------------------------------------------
1 | use crate::spec;
2 | use oci_spec::image::{Descriptor, MediaType};
3 |
/// Compression algorithm applied to an OCI image layer blob.
///
/// A plain data-less tag enum, so it derives the cheap value traits
/// (`Clone`/`Copy`/`PartialEq`/`Eq`) to let callers compare and copy it freely.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Compression {
    /// Uncompressed tar.
    None,
    /// gzip-compressed tar (the common docker/OCI default).
    Gzip,
    /// zstd-compressed tar.
    Zstd,
}
10 |
/// Error for a layer descriptor whose media/artifact type does not map to a
/// known [`Compression`]. Carries both fields for debugging.
#[derive(Debug, thiserror::Error)]
pub struct Error {
    // the unrecognized mediaType from the descriptor
    pub media_type: MediaType,
    // NOTE(review): the generic parameter appears stripped in this dump
    // (likely `Option<MediaType>`) — confirm against the real file
    pub artifact_type: Option,
}
16 |
17 | // how wrong is this?
18 | impl std::fmt::Display for Error {
19 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
20 | write!(f, "{:?}", self)
21 | }
22 | }
23 |
24 | // OCI Descriptor for an image layer has two fields, mediaType and artifactType.
25 | // mediaType is the "old" docker style media type
// artifactType is set for ... (I'm not sure I actually understand this)
27 |
28 | // so I originally had this, then add TryFrom<&Descriptor> because I hit a weird case that required
29 | // inspecting artifact_type, but maybe I didn't? Anyways just leaving from Descriptor for now
30 | // because it is more general and means you can just pass a layer (Descriptor)
31 |
32 | //impl TryFrom<&MediaType> for Compression {
33 | // type Error = ();
34 | // fn try_from(x: &MediaType) -> Result {
35 | // match x {
36 | // MediaType::ImageLayer => Ok(Compression::None),
37 | // MediaType::ImageLayerGzip => Ok(Compression::Gzip),
38 | // MediaType::ImageLayerZstd => Ok(Compression::Zstd),
39 | // _ => Err(()),
40 | // }
41 | // }
42 | //}
43 |
impl TryFrom<&Descriptor> for Compression {
    type Error = Error;
    // Determine layer compression from a full OCI descriptor, accepting both
    // OCI media types and the legacy docker gzip media type.
    // NOTE(review): return-type generics appear stripped in this dump
    // (likely `Result<Self, Self::Error>`) — confirm against the real file.
    fn try_from(x: &Descriptor) -> Result {
        match (x.media_type(), x.artifact_type()) {
            // is this a thing? I don't think so
            //MediaType::Other(s) if s == "application/vnd.docker.image.rootfs.diff.tar" => Compression::None,
            (MediaType::Other(s), _)
                if s == "application/vnd.docker.image.rootfs.diff.tar.gzip" =>
            {
                Ok(Compression::Gzip)
            }

            // I don't think this ever made its way into the wild?
            //MediaType::Other(s) if s == "application/vnd.docker.image.rootfs.diff.tar.zstd" => Compression::Zstd,
            (MediaType::ImageLayer, _) => Ok(Compression::None),
            (MediaType::ImageLayerGzip, _) => Ok(Compression::Gzip),
            (MediaType::ImageLayerZstd, _) => Ok(Compression::Zstd),
            // anything else is unsupported; report both fields for debugging
            (media_type, artifact_type) => Err(Error {
                media_type: media_type.clone(),
                artifact_type: artifact_type.clone(),
            }),
        }
    }
}
68 |
69 | impl From<&spec::LayerDescriptor> for Compression {
70 | fn from(x: &spec::LayerDescriptor) -> Self {
71 | match x.media_type {
72 | spec::MediaType::ImageLayer => Compression::None,
73 | spec::MediaType::ImageLayerGzip => Compression::Gzip,
74 | spec::MediaType::DockerImageLayerGzip => Compression::Gzip,
75 | spec::MediaType::ImageLayerZstd => Compression::Zstd,
76 | }
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/attic/mallocstacks.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # mallocstacks Trace malloc() calls in a process and print the full
4 | # stack trace for all callsites.
5 | # For Linux, uses BCC, eBPF. Embedded C.
6 | #
7 | # This script is a basic example of the new Linux 4.6+ BPF_STACK_TRACE
8 | # table API.
9 | #
10 | # Copyright 2016 GitHub, Inc.
11 | # Licensed under the Apache License, Version 2.0 (the "License")
12 |
13 | # needs dnf install bcc kernel-modules-core
14 | # after mega battle got this running with
15 | # sudo podman run -v /sys:/sys --privileged -it --rm -w $(pwd) -v $(pwd):$(pwd) registry.fedoraproject.org/fedora-toolbox:40
16 | # but doesn't even give byte locations
17 | # so f it
18 |
19 | import os
20 | import sys
21 | import signal
22 |
23 | from bcc import BPF
24 | from bcc.utils import printb
25 | from time import sleep
26 |
27 | # if len(sys.argv) < 2:
28 | # print("USAGE: mallocstacks PID [NUM_STACKS=1024]")
29 | # exit()
30 | # pid = int(sys.argv[1])
31 | # if len(sys.argv) == 3:
32 | # try:
33 | # assert int(sys.argv[2]) > 0, ""
34 | # except (ValueError, AssertionError) as e:
35 | # print("USAGE: mallocstacks PID [NUM_STACKS=1024]")
36 | # print("NUM_STACKS must be a non-zero, positive integer")
37 | # exit()
38 | # stacks = sys.argv[2]
39 | # else:
# maximum number of distinct stacks the BPF stack-trace table will hold
stacks = 10

# Fork the traced program, stop it immediately so we can attach the uprobe
# before it runs, then exec the requested command in the child.
pid = os.fork()
if pid == 0: # child
    print('child', os.getpid())
    os.kill(os.getpid(), signal.SIGSTOP)
    os.execvp(sys.argv[1], sys.argv[1:])

# Dump our capability lines (debugging aid for the BPF permission battles
# described at the top of the file).
with open('/proc/self/status') as fh:
    lines = list(fh)
    for line in lines:
        if line.startswith('Cap'):
            print(line[:-1])


# TODO why doesn't it pick this up
os.environ['BCC_KERNEL_SOURCE'] = '/lib/modules/6.10.11-200.fc40.x86_64/source'

# load BPF program
# calls: stack-id -> total bytes allocated from that callsite
# NOTE(review): the #include target appears lost in this dump (likely
# <uapi/linux/ptrace.h>) — confirm against the real file.
b = BPF(text=f"""
#include

BPF_HASH(calls, int);
BPF_STACK_TRACE(stack_traces, {stacks});

int alloc_enter(struct pt_regs *ctx, size_t size) {{
    int key = stack_traces.get_stackid(ctx, BPF_F_USER_STACK);
    if (key < 0)
        return 0;

    // could also use `calls.increment(key, size);`
    u64 zero = 0, *val;
    val = calls.lookup_or_try_init(&key, &zero);
    if (val) {{
        (*val) += size;
    }}
    return 0;
}};
""", debug=0x20)

# Probe libc's malloc in the (still stopped) child only.
b.attach_uprobe(name="c", sym="malloc", fn_name="alloc_enter", pid=pid)
print("Attaching to malloc in pid %d, Ctrl+C to quit." % pid)

# Probe is in place: let the child resume and exec the target.
os.kill(pid, signal.SIGCONT)

# sleep until Ctrl-C
# try:
#     sleep(99999999)
# except KeyboardInterrupt:
#     pass

# Block until the child exits, then read out the accumulated tables.
os.wait()

calls = b.get_table("calls")
stack_traces = b.get_table("stack_traces")

# Print callsites ordered by total bytes allocated, largest first,
# symbolizing each frame of the recorded user stack.
for k, v in reversed(sorted(calls.items(), key=lambda c: c[1].value)):
    print("%d bytes allocated at:" % v.value)
    if k.value > 0 :
        for addr in stack_traces.walk(k.value):
            printb(b"\t%s" % b.sym(addr, pid, show_offset=True))
    print(" %d\n" % v.value)
102 |
--------------------------------------------------------------------------------
/perunner/src/blocktest.rs:
--------------------------------------------------------------------------------
1 | use clap::Parser;
2 | use std::fs::File;
3 | use std::process::Command;
4 |
5 | // build a peinit with blocktesting
6 | // cargo build --features=blocktesting --package=peinit --profile=dev --target x86_64-unknown-linux-musl && (cd .. && ./scripts/build-initramfs.sh)
7 | //
8 | // with vhost_user_block from ch
9 | // cargo run -- --user-block /tmp/vhost_user_block.sock
10 | // cd ~/Repos/cloud-hypervisor/vhost_user_block
11 | // cargo run -- --block-backend path=../../program-explorer/busybox.erofs,socket=/tmp/vhost_user_block.sock,readonly=true
12 | //
13 | // with pevub
14 | // cargo run -- --user-block /tmp/pevub.sock
15 | // cd pevub
16 | // env RUST_LOG=trace cargo run -- /tmp/pevub.sock
17 | //
18 | // with disk
19 | // cargo run -- --disk ../busybox.erofs
20 |
// CLI options for the blocktest harness. Plain `//` comments are used on
// purpose: clap derive would turn `///` doc comments into --help text.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    // run cloud-hypervisor under strace and dump the trace afterwards
    #[arg(long)]
    strace: bool,

    // also dump cloud-hypervisor's log file after the run
    #[arg(long)]
    ch_log: bool,

    // path to a vhost-user-block socket; mutually exclusive with --disk
    // NOTE(review): generic parameter appears stripped in this dump
    // (likely Option<String>) — confirm against the real file
    #[arg(long)]
    user_block: Option,

    // path to a disk image to attach directly; mutually exclusive with --user-block
    #[arg(long)]
    disk: Option,
}
36 | fn main() {
37 | env_logger::init();
38 | let args = Args::parse();
39 |
40 | if args.user_block.is_none() && args.disk.is_none()
41 | || args.user_block.is_some() && args.disk.is_some()
42 | {
43 | println!("must give --user-block or --disk");
44 | }
45 |
46 | let mut cmd = Command::new(if args.strace {
47 | "strace"
48 | } else {
49 | "cloud-hypervisor"
50 | });
51 | if args.strace {
52 | cmd.arg("-o")
53 | .arg("/tmp/strace.out")
54 | .arg("-f")
55 | .arg("--")
56 | .arg("cloud-hypervisor");
57 | }
58 | cmd.arg("-v")
59 | .arg("--memory")
60 | .arg("size=1G,shared=on")
61 | .arg("--cpus")
62 | .arg("boot=1")
63 | .arg("--kernel")
64 | .arg("../vmlinux")
65 | .arg("--initramfs")
66 | .arg("../target/debug/initramfs")
67 | .arg("--cmdline")
68 | .arg("console=hvc0")
69 | .arg("--console")
70 | .arg("file=/tmp/ch-console")
71 | .arg("--log-file")
72 | .arg("/tmp/ch-log");
73 |
74 | if let Some(disk) = args.disk {
75 | cmd.arg("--disk")
76 | .arg(format!("path={},readonly=on,id=12345", disk));
77 | } else if let Some(socket) = args.user_block {
78 | cmd.arg("--disk")
79 | .arg(format!("vhost_user=on,socket={},id=12345,readonly=on", socket));
80 | } else {
81 | panic!("no --disk or --user-block");
82 | }
83 |
84 | let mut child = cmd.spawn().unwrap();
85 | let status = child.wait().unwrap();
86 | assert!(status.success());
87 |
88 | std::io::copy(
89 | &mut File::open("/tmp/ch-console").unwrap(),
90 | &mut std::io::stdout(),
91 | )
92 | .unwrap();
93 | if args.ch_log {
94 | std::io::copy(
95 | &mut File::open("/tmp/ch-log").unwrap(),
96 | &mut std::io::stdout(),
97 | )
98 | .unwrap();
99 | }
100 | if args.strace {
101 | std::io::copy(
102 | &mut File::open("/tmp/strace.out").unwrap(),
103 | &mut std::io::stdout(),
104 | )
105 | .unwrap();
106 | }
107 | println!();
108 | }
109 |
--------------------------------------------------------------------------------
/attic/config-rootless.json:
--------------------------------------------------------------------------------
1 | {
2 | "ociVersion": "1.0.0",
3 | "process": {
4 | "terminal": true,
5 | "user": {
6 | "uid": 0,
7 | "gid": 0
8 | },
9 | "args": [
10 | "sh"
11 | ],
12 | "env": [
13 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
14 | "TERM=xterm"
15 | ],
16 | "cwd": "/",
17 | "capabilities": {
18 | "bounding": [
19 | "CAP_AUDIT_WRITE",
20 | "CAP_KILL",
21 | "CAP_NET_BIND_SERVICE"
22 | ],
23 | "effective": [
24 | "CAP_AUDIT_WRITE",
25 | "CAP_KILL",
26 | "CAP_NET_BIND_SERVICE"
27 | ],
28 | "inheritable": [
29 | ],
30 | "permitted": [
31 | "CAP_AUDIT_WRITE",
32 | "CAP_KILL",
33 | "CAP_NET_BIND_SERVICE"
34 | ],
35 | "ambient": [
36 | "CAP_AUDIT_WRITE",
37 | "CAP_KILL",
38 | "CAP_NET_BIND_SERVICE"
39 | ]
40 | },
41 | "rlimits": [
42 | {
43 | "type": "RLIMIT_NOFILE",
44 | "hard": 1024,
45 | "soft": 1024
46 | }
47 | ],
48 | "noNewPrivileges": true
49 | },
50 | "root": {
51 | "path": "rootfs",
52 | "readonly": true
53 | },
54 | "hostname": "crun",
55 | "mounts": [
56 | {
57 | "destination": "/proc",
58 | "type": "proc",
59 | "source": "proc"
60 | },
61 | {
62 | "destination": "/dev",
63 | "type": "tmpfs",
64 | "source": "tmpfs",
65 | "options": [
66 | "nosuid",
67 | "strictatime",
68 | "mode=755",
69 | "size=65536k"
70 | ]
71 | },
72 | {
73 | "destination": "/dev/pts",
74 | "type": "devpts",
75 | "source": "devpts",
76 | "options": [
77 | "nosuid",
78 | "noexec",
79 | "newinstance",
80 | "ptmxmode=0666",
81 | "mode=0620"
82 | ]
83 | },
84 | {
85 | "destination": "/dev/shm",
86 | "type": "tmpfs",
87 | "source": "shm",
88 | "options": [
89 | "nosuid",
90 | "noexec",
91 | "nodev",
92 | "mode=1777",
93 | "size=65536k"
94 | ]
95 | },
96 | {
97 | "destination": "/dev/mqueue",
98 | "type": "mqueue",
99 | "source": "mqueue",
100 | "options": [
101 | "nosuid",
102 | "noexec",
103 | "nodev"
104 | ]
105 | },
106 | {
107 | "destination": "/sys",
108 | "type": "sysfs",
109 | "source": "sysfs",
110 | "options": [
111 | "nosuid",
112 | "noexec",
113 | "nodev",
114 | "ro"
115 | ]
116 | },
117 | {
118 | "destination": "/sys/fs/cgroup",
119 | "type": "cgroup",
120 | "source": "cgroup",
121 | "options": [
122 | "nosuid",
123 | "noexec",
124 | "nodev",
125 | "relatime",
126 | "ro"
127 | ]
128 | }
129 | ],
130 | "linux": {
131 | "resources": {
132 | "devices": [
133 | {
134 | "allow": false,
135 | "access": "rwm"
136 | }
137 | ]
138 | },
139 | "namespaces": [
140 | {
141 | "type": "pid"
142 | },
143 | {
144 | "type": "network"
145 | },
146 | {
147 | "type": "ipc"
148 | },
149 | {
150 | "type": "uts"
151 | },
152 | {
153 | "type": "user"
154 | },
155 | {
156 | "type": "cgroup"
157 | },
158 | {
159 | "type": "mount"
160 | }
161 | ],
162 | "maskedPaths": [
163 | "/proc/acpi",
164 | "/proc/asound",
165 | "/proc/kcore",
166 | "/proc/keys",
167 | "/proc/latency_stats",
168 | "/proc/timer_list",
169 | "/proc/timer_stats",
170 | "/proc/sched_debug",
171 | "/sys/firmware",
172 | "/proc/scsi"
173 | ],
174 | "readonlyPaths": [
175 | "/proc/bus",
176 | "/proc/fs",
177 | "/proc/irq",
178 | "/proc/sys",
179 | "/proc/sysrq-trigger"
180 | ]
181 | }
182 | }
183 |
--------------------------------------------------------------------------------
/pefrontend/src/urlstate.ts:
--------------------------------------------------------------------------------
1 | import {bufFromBase64} from './util';
2 |
3 | export type UrlHashState = {
4 | // just for dev
5 | expand: {
6 | help: boolean,
7 | more: boolean,
8 | },
9 | cmd: string | null,
10 | stdin: string | null,
11 | env: string | null,
12 | image: string | null,
13 | files: {path: string, data: string}[] | null,
14 | gist: string,
15 | }
16 |
// Shape of the base64(JSON) blob carried under the `s=` fragment key.
// Files carry either inline string data (`s`) or base64 binary data (`b`).
type UrlHashStateSettings = {
  cmd?: string | null,
  stdin?: string | null,
  env?: string | null,
  image?: string | null,
  files?: ({p: string, s: string} | {p: string, b: string})[],
}
24 |
25 | export function loadUrlHashState(): UrlHashState { return parseUrlHashState(window.location.hash); }
26 | export function encodeUrlHashState(x: {
27 | cmd: string,
28 | stdin: string,
29 | env: string,
30 | image: string,
31 | files: ({p: string, s: string} | {p: string, b: string})[]
32 | }): string {
33 | return window.btoa(JSON.stringify(x));
34 | }
35 | // chrome doesn't support https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Uint8Array/fromBase64 yet
36 | // so we can't do Uint8Array.fromBase64 yet; punt and only handle strings for now
37 | function tryBase64Decode(x: string | null | undefined): string | null {
38 | if (x == null) return null;
39 | try {
40 | return window.atob(x);
41 | } catch {
42 | return null;
43 | }
44 | }
45 | function checkString(x): string | null { return typeof x === 'string' ? x : null; }
46 | function checkStringArray(x): string[] | null {
47 | if (!Array.isArray(x)) return null;
48 | if (!x.every((y) => typeof y === 'string')) return null;
49 | return x;
50 | }
51 | function checkFiles(x): ({path: string, data: string | ArrayBuffer})[] | null {
52 | if (!Array.isArray(x)) return null;
53 | let ret = [];
54 | for (let y of x) {
55 | let path = y.p;
56 | if (path == null) return null;
57 | let data;
58 | if (y.s != null && typeof y.s === 'string') {
59 | data = y.s;
60 | } else if (y.b != null && typeof y.b === 'string') {
61 | data = bufFromBase64(y.b);
62 | if (data == null) {
63 | console.error('got null data from atob?');
64 | return null;
65 | }
66 | } else {
67 | console.error('unhandled case');
68 | return null;
69 | }
70 | ret.push({path, data});
71 | }
72 | return ret;
73 | }
74 |
75 | function decodeBase64Json(s): object {
76 | try {
77 | return JSON.parse(window.atob(s));
78 | } catch (e) {
79 | console.error('error decoding json', e);
80 | return {};
81 | }
82 | }
83 |
84 | function decodeSettings(s: string): UrlHashStateSettings {
85 | return decodeBase64Json(s);
86 | }
87 |
88 | function parseUrlHashState(s): UrlHashState {
89 | let ret = {
90 | expand: { help: false, more: false, },
91 | cmd: null,
92 | stdin: null,
93 | env: null,
94 | image: null,
95 | files: null,
96 | gist: null,
97 | };
98 | let parts = s.substring(1).split('&');
99 | for (let part of parts) {
100 | let [a, b] = part.split('=');
101 | if (a === 'help' && b === 'x') { ret.expand.help = true; }
102 | else if (a === 'more' && b === 'x') { ret.expand.more = true; }
103 | else if (a === 's') {
104 | let settings = decodeSettings(b);
105 | ret.cmd = checkString(settings.cmd);
106 | ret.stdin = checkString(settings.stdin);
107 | ret.image = checkString(settings.image);
108 | ret.env = checkString(settings.env);
109 | ret.files = checkFiles(settings.files);
110 | }
111 | else if (a === 'gist') {
112 | ret.gist = b;
113 | }
114 | }
115 | return ret;
116 | }
117 |
118 |
--------------------------------------------------------------------------------
/attic/oci-runtime-spec-defaults.json:
--------------------------------------------------------------------------------
1 | {
2 | "ociVersion": "1.0.2-dev",
3 | "root": {
4 | "path": "rootfs",
5 | "readonly": true
6 | },
7 | "mounts": [
8 | {
9 | "destination": "/proc",
10 | "type": "proc",
11 | "source": "proc"
12 | },
13 | {
14 | "destination": "/dev",
15 | "type": "tmpfs",
16 | "source": "tmpfs",
17 | "options": [
18 | "nosuid",
19 | "strictatime",
20 | "mode=755",
21 | "size=65536k"
22 | ]
23 | },
24 | {
25 | "destination": "/dev/pts",
26 | "type": "devpts",
27 | "source": "devpts",
28 | "options": [
29 | "nosuid",
30 | "noexec",
31 | "newinstance",
32 | "ptmxmode=0666",
33 | "mode=0620",
34 | "gid=5"
35 | ]
36 | },
37 | {
38 | "destination": "/dev/shm",
39 | "type": "tmpfs",
40 | "source": "shm",
41 | "options": [
42 | "nosuid",
43 | "noexec",
44 | "nodev",
45 | "mode=1777",
46 | "size=65536k"
47 | ]
48 | },
49 | {
50 | "destination": "/dev/mqueue",
51 | "type": "mqueue",
52 | "source": "mqueue",
53 | "options": [
54 | "nosuid",
55 | "noexec",
56 | "nodev"
57 | ]
58 | },
59 | {
60 | "destination": "/sys",
61 | "type": "sysfs",
62 | "source": "sysfs",
63 | "options": [
64 | "nosuid",
65 | "noexec",
66 | "nodev",
67 | "ro"
68 | ]
69 | },
70 | {
71 | "destination": "/sys/fs/cgroup",
72 | "type": "cgroup",
73 | "source": "cgroup",
74 | "options": [
75 | "nosuid",
76 | "noexec",
77 | "nodev",
78 | "relatime",
79 | "ro"
80 | ]
81 | }
82 | ],
83 | "process": {
84 | "terminal": false,
85 | "user": {
86 | "uid": 0,
87 | "gid": 0
88 | },
89 | "args": [
90 | "sh"
91 | ],
92 | "env": [
93 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
94 | "TERM=xterm"
95 | ],
96 | "cwd": "/",
97 | "capabilities": {
98 | "bounding": [
99 | "CAP_AUDIT_WRITE",
100 | "CAP_NET_BIND_SERVICE",
101 | "CAP_KILL"
102 | ],
103 | "effective": [
104 | "CAP_AUDIT_WRITE",
105 | "CAP_NET_BIND_SERVICE",
106 | "CAP_KILL"
107 | ],
108 | "inheritable": [
109 | "CAP_AUDIT_WRITE",
110 | "CAP_NET_BIND_SERVICE",
111 | "CAP_KILL"
112 | ],
113 | "permitted": [
114 | "CAP_AUDIT_WRITE",
115 | "CAP_NET_BIND_SERVICE",
116 | "CAP_KILL"
117 | ],
118 | "ambient": [
119 | "CAP_AUDIT_WRITE",
120 | "CAP_NET_BIND_SERVICE",
121 | "CAP_KILL"
122 | ]
123 | },
124 | "rlimits": [
125 | {
126 | "type": "RLIMIT_NOFILE",
127 | "hard": 1024,
128 | "soft": 1024
129 | }
130 | ],
131 | "noNewPrivileges": true
132 | },
133 | "hostname": "youki",
134 | "annotations": {},
135 | "linux": {
136 | "resources": {
137 | "devices": []
138 | },
139 | "namespaces": [
140 | {
141 | "type": "pid"
142 | },
143 | {
144 | "type": "network"
145 | },
146 | {
147 | "type": "ipc"
148 | },
149 | {
150 | "type": "uts"
151 | },
152 | {
153 | "type": "mount"
154 | },
155 | {
156 | "type": "cgroup"
157 | }
158 | ],
159 | "maskedPaths": [
160 | "/proc/acpi",
161 | "/proc/asound",
162 | "/proc/kcore",
163 | "/proc/keys",
164 | "/proc/latency_stats",
165 | "/proc/timer_list",
166 | "/proc/timer_stats",
167 | "/proc/sched_debug",
168 | "/sys/firmware",
169 | "/proc/scsi"
170 | ],
171 | "readonlyPaths": [
172 | "/proc/bus",
173 | "/proc/fs",
174 | "/proc/irq",
175 | "/proc/sys",
176 | "/proc/sysrq-trigger"
177 | ]
178 | }
179 | }
180 |
--------------------------------------------------------------------------------
/attic/containerbuildquestion.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
# Print the layer blob paths (sha256/<hex>) of the first manifest of the OCI
# layout rooted at the current directory. Quotes the blob path in case of
# unexpected characters; digest is scoped local to avoid leaking a global.
function get_layers() {
    local image_manifest
    image_manifest=$(jq -r '.manifests[0].digest' index.json | sed 's_:_/_')
    jq -r '.layers[].digest' "blobs/$image_manifest" | sed 's_:_/_'
}
7 |
# List the contents of every layer except the first (base) layer of the OCI
# layout directory given as $1. Variable expansions are quoted so paths with
# unusual characters can't word-split.
function show_layers() {
    pushd . &>/dev/null
    cd "$1"
    for layer in $(get_layers | tail -n+2); do
        echo "$layer"
        tar tvf "blobs/$layer"
        echo
    done
    popd &>/dev/null
}
18 |
19 | mkdir -p /tmp/question
20 | cd /tmp/question
21 | name=githubquestion
22 |
23 | # use Dockerfile name b/c I don't know how to get buildkit to use a different name
24 | cat << "EOF" > Dockerfile
25 | FROM docker.io/library/busybox
26 | RUN echo hi > a
27 | RUN echo hi > b
28 | EOF
29 |
30 | trap 'trap - SIGTERM && kill 0' SIGINT SIGTERM EXIT
31 |
32 | # NOTE: we have to remove the image between builds otherwise it will use the build cache
33 | # even though we use --dns=none --no-hosts --no-hostname the second time around
34 |
35 | echo "# podman build -f Dockerfile"
36 | podman rmi --ignore $name >/dev/null
37 | podman build -f Dockerfile -t $name >/dev/null
38 | rm -rf oci-dir && mkdir oci-dir
39 | podman save --format oci-archive $name | tar xf - -C oci-dir
40 | show_layers oci-dir
41 |
42 | echo -e "---------------------------------\n"
43 |
44 | echo "# podman build --dns=none --no-hosts --no-hostname -f Dockerfile"
45 | podman rmi $name >/dev/null
46 | podman build --dns=none --no-hosts --no-hostname -f Dockerfile -t $name >/dev/null
47 | rm -rf oci-dir && mkdir oci-dir
48 | podman save --format oci-archive $name | tar xf - -C oci-dir
49 | show_layers oci-dir
50 |
51 | echo -e "---------------------------------\n"
52 |
53 | mkdir -p varrun
54 | trap 'kill $(jobs -p)' EXIT
55 |
56 | echo "# docker build . (containerized docker)"
57 | podman run --privileged --rm -v $(realpath varrun):/var/run/ docker:latest &>/dev/null &
58 | sleep 2 # wait for daemon to load
59 | podman run --privileged --rm -v $(realpath varrun):/var/run -v $(realpath .):/$(realpath .) -w $(realpath .) docker:latest build -t $name . &>/dev/null
60 | rm -rf oci-dir && mkdir oci-dir
61 | podman run --privileged --rm -v $(realpath varrun):/var/run -v $(realpath .):/$(realpath .) -w $(realpath .) docker:latest save $name | tar xf - -C oci-dir
62 | show_layers oci-dir
63 | kill %%
64 |
65 | echo -e "---------------------------------\n"
66 |
67 | # varrun (from above) has root owned files, docker fails when trying to load them as context
68 | mkdir -p clean
69 | cp Dockerfile clean/
70 | cd clean
71 |
72 | # NOTE: for this I have an externally running command like
73 | # wget https://download.docker.com/linux/static/stable/x86_64/docker-27.3.1.tgz
74 | # tar xf docker-27.3.1.tgz # unpacks a docker dir
75 | # wget https://github.com/moby/buildkit/releases/download/v0.17.0/buildkit-v0.17.0.linux-amd64.tar.gz
76 | # tar xf buildkit-v0.17.0.linux-amd64.tar.gz # unpacks a bin dir
77 | # PATH=$(realpath docker):$PATH sudo --preserve-env=PATH dockerd
78 | # PATH=$(realpath bin):$PATH sudo --preserve-env=PATH buildkitd
79 | # sudo chown $USER:$USER /var/run/docker.sock
80 | # sudo chown $USER:$USER /var/run/buildkit/buildkitd.sock
81 |
82 | echo "# docker build . (non-containerized docker)"
83 | # tried to get this to work but to no avail
84 | #sudo --preserve-env=PATH dockerd &
85 | #sudo chown $USER:$USER /var/run/docker.sock
86 | docker build -f Dockerfile -t $name . &>/dev/null
87 | rm -rf oci-dir && mkdir oci-dir
88 | docker save $name | tar xf - -C oci-dir
89 | show_layers oci-dir
90 |
91 | echo -e "---------------------------------\n"
92 |
93 | echo "# buildctl build --frontend dockerfile.v0 --local dockerfile=. (non-containerized buildkit)"
94 | # podman run --privileged --rm docker.io/moby/buildkit:latest & # tried getting this to work but no avail
95 | rm -rf oci-dir && mkdir oci-dir
96 | #buildctl --addr=podman-container://buildkitd build --frontend dockerfile.v0 --local context=. --local dockerfile=. --output type=oci | tar xf - -C oci-dir
97 | buildctl build --frontend dockerfile.v0 --local dockerfile=. --output type=oci 2>/dev/null | tar xf - -C oci-dir
98 | show_layers oci-dir
99 |
100 |
--------------------------------------------------------------------------------
/peimage-service/src/lib.rs:
--------------------------------------------------------------------------------
1 | use std::io::IoSliceMut;
2 | use std::os::fd::OwnedFd;
3 | use std::path::Path;
4 |
5 | use oci_spec::{
6 | distribution::Reference,
7 | image::{Arch, Os},
8 | };
9 | use tokio_seqpacket::{UnixSeqpacket, ancillary::OwnedAncillaryMessage};
10 |
11 | const MAX_MESSAG_LEN: usize = 1024;
12 |
// Client errors for the image service. The first group are transport /
// encoding failures; the trailing group mirrors server-side outcomes
// (see WireResponse) so callers can surface them directly to users.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    Io(#[from] std::io::Error),
    Encode(#[from] bincode::error::EncodeError),
    Decode(#[from] bincode::error::DecodeError),
    PeOciSpec(#[from] peoci::spec::Error),
    BadDigest,
    BadReference,
    // server replied Ok but no fd arrived in the ancillary data
    MissingFd,
    // ancillary data was truncated
    MessageTooBig,
    // server-side failure with its message
    ServerError(String),
    Unknown,

    // these are the user facing errors
    NoMatchingManifest,
    ManifestNotFound,
    ImageTooBig,
    RatelimitExceeded,
}
32 |
33 | // how wrong is this?
34 | impl std::fmt::Display for Error {
35 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
36 | write!(f, "{:?}", self)
37 | }
38 | }
39 |
/// Wire request sent to the image service over the seqpacket socket.
#[derive(Debug, bincode::Encode, bincode::Decode)]
pub struct Request {
    // image reference string; validated at construction in Request::new
    reference: String,
    arch: peoci::spec::Arch,
    os: peoci::spec::Os,
    // TODO I think this has to take a duration since we'd rather not have the requester do a
    // timeout and cancel the request
}
48 |
impl Request {
    // Build a request: checks that `reference` parses as an OCI distribution
    // reference (Error::BadReference otherwise) and converts arch/os into the
    // internal spec types (Error::PeOciSpec via `?` on failure).
    // NOTE(review): generic parameters appear stripped in this dump (likely
    // `Result<Self, Error>` and `Result<Reference, _>`) — confirm against the
    // real file.
    pub fn new(reference: &str, arch: &Arch, os: &Os) -> Result {
        let Ok(_ref): Result = reference.parse() else {
            return Err(Error::BadReference);
        };
        Ok(Request {
            reference: reference.to_string(),
            arch: arch.try_into()?,
            os: os.try_into()?,
        })
    }
}
61 |
impl Request {
    // Re-parse the stored reference string; None if it no longer parses.
    // NOTE(review): return generic appears stripped in this dump (likely
    // `Option<Reference>`) — confirm against the real file.
    pub fn parse_reference(&self) -> Option {
        self.reference.parse().ok()
    }
}
67 |
// this should maybe not be pub but pub(crate) doesn't work with main.rs I think?
// On-the-wire reply: Ok carries metadata (the erofs image fd travels
// separately as ancillary data); the other variants are mapped onto the
// matching Error variants by request_erofs_image.
#[derive(Debug, bincode::Encode, bincode::Decode)]
pub enum WireResponse {
    Ok {
        manifest_digest: String,
        config: peoci::spec::ImageConfiguration,
    },
    NoMatchingManifest,
    ManifestNotFound,
    ImageTooBig,
    RatelimitExceeded,
    Err {
        message: String,
    },
}
83 |
/// Decoded server reply plus the erofs image file descriptor that was passed
/// over the socket alongside it.
pub struct Response {
    pub manifest_digest: String,
    pub config: peoci::spec::ImageConfiguration,
    // read-only fd for the erofs image
    pub fd: OwnedFd,
}
89 |
/// Connect to the image service at `socket_addr`, send `req`, and receive the
/// reply message together with an erofs image fd delivered as ancillary data
/// (SCM_RIGHTS) on the seqpacket socket.
// NOTE(review): several generic parameters appear stripped in this dump
// (e.g. `AsRef<Path>`, `Result<Response, Error>`,
// `decode_from_slice::<WireResponse, _>`) — confirm against the real file.
pub async fn request_erofs_image(
    socket_addr: impl AsRef,
    req: Request,
) -> Result {
    let socket = UnixSeqpacket::connect(socket_addr).await?;
    let mut buf = [0; MAX_MESSAG_LEN];
    let n = bincode::encode_into_slice(&req, &mut buf, bincode::config::standard())?;
    let _ = socket.send(&buf[..n]).await?;

    // 128 bytes of ancillary space is plenty for a single SCM_RIGHTS fd
    let mut ancillary_buffer = [0; 128];
    let (n, ancillary) = socket
        .recv_vectored_with_ancillary(&mut [IoSliceMut::new(&mut buf)], &mut ancillary_buffer)
        .await?;

    if ancillary.is_truncated() {
        return Err(Error::MessageTooBig);
    }

    let (wire_response, _) =
        bincode::decode_from_slice::(&buf[..n], bincode::config::standard())?;

    // take the first fd of the first ancillary message, if any arrived
    let fd = if let Some(OwnedAncillaryMessage::FileDescriptors(mut fds)) =
        ancillary.into_messages().next()
    {
        fds.next()
    } else {
        None
    };

    // only an Ok reply paired with an fd is a success; every other wire
    // variant maps onto its corresponding Error variant, and an Ok without
    // an fd is MissingFd
    match (fd, wire_response) {
        (
            Some(fd),
            WireResponse::Ok {
                manifest_digest,
                config,
            },
        ) => Ok(Response {
            config,
            manifest_digest,
            fd,
        }),
        (_, WireResponse::NoMatchingManifest) => Err(Error::NoMatchingManifest),
        (_, WireResponse::ManifestNotFound) => Err(Error::ManifestNotFound),
        (_, WireResponse::ImageTooBig) => Err(Error::ImageTooBig),
        (_, WireResponse::RatelimitExceeded) => Err(Error::RatelimitExceeded),
        (_, WireResponse::Err { message }) => Err(Error::ServerError(message)),
        (None, _) => Err(Error::MissingFd),
    }
}
139 |
--------------------------------------------------------------------------------
/attic/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "ociVersion": "1.0.0",
3 | "process": {
4 | "terminal": false,
5 | "user": {
6 | "uid": 1000,
7 | "gid": 1000
8 | },
9 | "args": [
10 | "sh", "-c", "echo hiiiiiiiiiiiiiiiiiiiiiiii; cat /etc/passwd; id; cat /proc/self/uid_map; ls -nl /; pwd; echo hi > foo; mv foo /output"
11 | ],
12 | "env": [
13 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
14 | "TERM=xterm"
15 | ],
16 | "cwd": "/scratch",
17 | "capabilities": {
18 | "bounding": [
19 | "CAP_AUDIT_WRITE",
20 | "CAP_KILL",
21 | "CAP_NET_BIND_SERVICE"
22 | ],
23 | "effective": [
24 | "CAP_AUDIT_WRITE",
25 | "CAP_KILL",
26 | "CAP_NET_BIND_SERVICE"
27 | ],
28 | "inheritable": [
29 | ],
30 | "permitted": [
31 | "CAP_AUDIT_WRITE",
32 | "CAP_KILL",
33 | "CAP_NET_BIND_SERVICE"
34 | ],
35 | "ambient": [
36 | "CAP_AUDIT_WRITE",
37 | "CAP_KILL",
38 | "CAP_NET_BIND_SERVICE"
39 | ]
40 | },
41 | "rlimits": [
42 | {
43 | "type": "RLIMIT_NOFILE",
44 | "hard": 1024,
45 | "soft": 1024
46 | }
47 | ],
48 | "noNewPrivileges": true
49 | },
50 | "root": {
51 | "path": "rootfs",
52 | "readonly": true
53 | },
54 | "hostname": "crun",
55 | "mounts": [
56 | {
57 | "destination": "/scratch",
58 | "type": "tmpfs",
59 | "source": "tmpfs",
60 | "options": [
61 | "nosuid",
62 | "strictatime",
63 | "mode=777",
64 | "size=8M"
65 | ]
66 | },
67 | {
68 | "destination": "/output",
69 | "type": "bind",
70 | "source": "/run/output/dir",
71 | "options": [
72 | "rw",
73 | "rprivate",
74 | "rbind"
75 | ]
76 | },
77 | {
78 | "destination": "/input",
79 | "type": "bind",
80 | "source": "/run/input/dir",
81 | "options": [
        "ro",
83 | "rprivate",
84 | "rbind"
85 | ]
86 | },
87 | {
88 | "destination": "/proc",
89 | "type": "proc",
90 | "source": "proc"
91 | },
92 | {
93 | "destination": "/dev",
94 | "type": "tmpfs",
95 | "source": "tmpfs",
96 | "options": [
97 | "nosuid",
98 | "strictatime",
99 | "mode=755",
100 | "size=65536k"
101 | ]
102 | },
103 | {
104 | "destination": "/dev/pts",
105 | "type": "devpts",
106 | "source": "devpts",
107 | "options": [
108 | "nosuid",
109 | "noexec",
110 | "newinstance",
111 | "ptmxmode=0666",
112 | "mode=0620"
113 | ]
114 | },
115 | {
116 | "destination": "/dev/shm",
117 | "type": "tmpfs",
118 | "source": "shm",
119 | "options": [
120 | "nosuid",
121 | "noexec",
122 | "nodev",
123 | "mode=1777",
124 | "size=65536k"
125 | ]
126 | },
127 | {
128 | "destination": "/dev/mqueue",
129 | "type": "mqueue",
130 | "source": "mqueue",
131 | "options": [
132 | "nosuid",
133 | "noexec",
134 | "nodev"
135 | ]
136 | },
137 | {
138 | "destination": "/sys",
139 | "type": "sysfs",
140 | "source": "sysfs",
141 | "options": [
142 | "nosuid",
143 | "noexec",
144 | "nodev",
145 | "ro"
146 | ]
147 | },
148 | {
149 | "destination": "/sys/fs/cgroup",
150 | "type": "cgroup",
151 | "source": "cgroup",
152 | "options": [
153 | "nosuid",
154 | "noexec",
155 | "nodev",
156 | "relatime",
157 | "ro"
158 | ]
159 | }
160 | ],
161 | "linux": {
162 | "resources": {
163 | "devices": [
164 | {
165 | "allow": false,
166 | "access": "rwm"
167 | }
168 | ]
169 | },
170 | "uidMappings": [{"containerID": 1000, "hostID": 1000, "size": 1000}],
171 | "gidMappings": [{"containerID": 1000, "hostID": 1000, "size": 1000}],
172 | "namespaces": [
173 | {
174 | "type": "pid"
175 | },
176 | {
177 | "type": "network"
178 | },
179 | {
180 | "type": "ipc"
181 | },
182 | {
183 | "type": "uts"
184 | },
185 | {
186 | "type": "user"
187 | },
188 | {
189 | "type": "cgroup"
190 | },
191 | {
192 | "type": "mount"
193 | }
194 | ],
195 | "maskedPaths": [
196 | "/proc/acpi",
197 | "/proc/asound",
198 | "/proc/kcore",
199 | "/proc/keys",
200 | "/proc/latency_stats",
201 | "/proc/timer_list",
202 | "/proc/timer_stats",
203 | "/proc/sched_debug",
204 | "/sys/firmware",
205 | "/proc/scsi"
206 | ],
207 | "readonlyPaths": [
208 | "/proc/bus",
209 | "/proc/fs",
210 | "/proc/irq",
211 | "/proc/sys",
212 | "/proc/sysrq-trigger"
213 | ]
214 | }
215 | }
216 |
--------------------------------------------------------------------------------
/perunner/src/iofile.rs:
--------------------------------------------------------------------------------
1 | use std::fs::File;
2 | use std::io;
3 | use std::io::{Read, Seek, SeekFrom, Write};
4 | use std::os::fd::{AsRawFd, BorrowedFd, OwnedFd};
5 |
6 | use rustix::fd::AsFd;
7 | use rustix::fs::{fcntl_add_seals, fstat, ftruncate, memfd_create, MemfdFlags, SealFlags};
8 |
/// Alignment granularity required for pmem-backed files: 2 MiB.
const PMEM_ALIGN_SIZE: u64 = 0x20_0000; // 2 MB

/// A finished, sealed, pmem-aligned memfd. Read-only from the seals'
/// perspective: it can no longer grow, shrink, or gain new seals.
pub struct IoFile {
    file: File,
}

/// Write-side handle used to fill the memfd before sealing it into an
/// [`IoFile`] via `finish`.
pub struct IoFileBuilder {
    file: File,
}
18 |
19 | impl IoFileBuilder {
20 | pub fn new() -> rustix::io::Result {
21 | let fd = memfd_create(
22 | "peiofile",
23 | MemfdFlags::ALLOW_SEALING | MemfdFlags::NOEXEC_SEAL | MemfdFlags::CLOEXEC,
24 | )?;
25 | Ok(Self { file: fd.into() })
26 | }
27 |
28 | pub fn finish(mut self) -> rustix::io::Result {
29 | let _ = round_up_file_to_pmem_size(&mut self.file)?;
30 | fcntl_add_seals(&self.file, SealFlags::SHRINK | SealFlags::GROW)?;
31 | fcntl_add_seals(&self.file, SealFlags::SEAL)?;
32 | Ok(IoFile { file: self.file })
33 | }
34 | }
35 |
36 | impl AsRawFd for IoFileBuilder {
37 | fn as_raw_fd(&self) -> i32 {
38 | self.file.as_raw_fd()
39 | }
40 | }
41 |
42 | impl AsFd for IoFileBuilder {
43 | fn as_fd(&self) -> BorrowedFd {
44 | self.file.as_fd()
45 | }
46 | }
47 |
48 | impl Write for IoFileBuilder {
49 | fn write(&mut self, data: &[u8]) -> io::Result {
50 | self.file.write(data)
51 | }
52 | fn flush(&mut self) -> io::Result<()> {
53 | self.file.flush()
54 | }
55 | }
56 |
57 | impl Seek for IoFileBuilder {
58 | fn seek(&mut self, from: SeekFrom) -> io::Result {
59 | self.file.seek(from)
60 | }
61 | }
62 |
63 | impl IoFile {
64 | pub fn into_inner(self) -> File {
65 | self.file
66 | }
67 | }
68 |
69 | impl Read for IoFile {
70 | fn read(&mut self, buf: &mut [u8]) -> io::Result {
71 | self.file.read(buf)
72 | }
73 | }
74 |
75 | impl Seek for IoFile {
76 | fn seek(&mut self, from: SeekFrom) -> io::Result {
77 | self.file.seek(from)
78 | }
79 | }
80 |
81 | impl AsFd for IoFile {
82 | fn as_fd(&self) -> BorrowedFd {
83 | self.file.as_fd()
84 | }
85 | }
86 |
87 | impl AsRawFd for IoFile {
88 | fn as_raw_fd(&self) -> i32 {
89 | self.file.as_raw_fd()
90 | }
91 | }
92 |
93 | impl From for OwnedFd {
94 | fn from(io_file: IoFile) -> OwnedFd {
95 | io_file.file.into()
96 | }
97 | }
98 |
/// Rounds `x` up to the next multiple of `N`. Zero rounds up to `N` so the
/// result is never an empty (zero-length) file.
fn round_up_to<const N: u64>(x: u64) -> u64 {
    if x == 0 {
        return N;
    }
    x.div_ceil(N) * N
}
105 |
106 | pub fn round_up_file_to_pmem_size(f: F) -> rustix::io::Result {
107 | let stat = fstat(&f)?;
108 | let cur = stat.st_size.try_into().unwrap_or(0);
109 | let newlen = round_up_to::(cur);
110 | if cur != newlen {
111 | ftruncate(f, newlen)?;
112 | }
113 | Ok(newlen)
114 | }
115 |
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_iofile() {
        let mut io_file = {
            let mut builder = IoFileBuilder::new().unwrap();
            builder.write_all(b"hello world").unwrap();
            builder.finish().unwrap().into_inner()
        };
        // finish() rounds the file up to the pmem alignment
        let len = io_file.metadata().unwrap().len();
        assert_eq!(len, PMEM_ALIGN_SIZE);

        io_file.seek(SeekFrom::Start(0)).unwrap();
        let mut buf = [0u8; 11];
        assert_eq!(11, io_file.read(&mut buf).unwrap());
        assert_eq!(&buf, b"hello world");

        // can write 2MB of stuff
        io_file.seek(SeekFrom::Start(0)).unwrap();
        let data = &[0xff].repeat(PMEM_ALIGN_SIZE as usize);
        io_file.write_all(data).unwrap();

        // but can't write 1 byte more (GROW seal)
        assert!(io_file.write_all(&[0xff]).is_err());

        // can't shrink (SHRINK seal)
        assert!(io_file.set_len(1024).is_err());
    }

    #[test]
    fn test_round_up_to() {
        assert_eq!(PMEM_ALIGN_SIZE, round_up_to::<PMEM_ALIGN_SIZE>(0));
        assert_eq!(
            PMEM_ALIGN_SIZE,
            round_up_to::<PMEM_ALIGN_SIZE>(PMEM_ALIGN_SIZE - 1)
        );
        assert_eq!(
            PMEM_ALIGN_SIZE,
            round_up_to::<PMEM_ALIGN_SIZE>(PMEM_ALIGN_SIZE)
        );
        assert_eq!(
            2 * PMEM_ALIGN_SIZE,
            round_up_to::<PMEM_ALIGN_SIZE>(PMEM_ALIGN_SIZE + 1)
        );
    }
}
163 |
--------------------------------------------------------------------------------
/peimage/src/bin/tardiff.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeSet;
2 | use std::fs::File;
3 | use std::io::Read;
4 | use std::path::PathBuf;
5 | use std::{env, error, fmt, io};
6 |
7 | use sha2::{Digest, Sha256};
8 | use tar::{Archive, EntryType};
9 |
/// Errors specific to tar diffing.
#[derive(Debug)]
enum TardiffError {
    /// A link/symlink entry carried no link target in its header.
    NoLink,
}

impl fmt::Display for TardiffError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Display just reuses the Debug rendering; these are diagnostic-only
        write!(f, "{:?}", self)
    }
}

impl error::Error for TardiffError {}
21 |
/// Tar entry kinds we know how to compare; anything else aborts the diff.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum EntryTyp {
    File,
    Dir,
    Link,
    Symlink,
    Fifo,
}

/// PAX extension records as (key, raw value bytes) pairs.
type Ext = Vec<(String, Vec<u8>)>;

/// Normalized view of one tar entry; Ord so entries can live in a BTreeSet
/// and be compared by set-difference.
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
struct Entry {
    typ: EntryTyp,
    path: PathBuf,
    data: Option<String>, // sha256 hex digest of contents (regular files only)
    ext: Ext,
    link: Option<PathBuf>, // target for hard links / symlinks
    mtime: u64,
    uid: u64,
    gid: u64,
    mode: u32,
}
45 |
46 | #[derive(Debug)]
47 | struct Diffs {
48 | in_left_but_not_right: Vec,
49 | in_right_but_not_left: Vec,
50 | }
51 |
52 | fn sha_reader(reader: &mut R) -> io::Result {
53 | let mut hash = Sha256::new();
54 | io::copy(reader, &mut hash)?;
55 |
56 | Ok(base16ct::lower::encode_string(&hash.finalize()))
57 | }
58 |
59 | fn gather_entries(ar: &mut Archive) -> Result, Box> {
60 | let mut ret = BTreeSet::new();
61 |
62 | for entry in ar.entries()? {
63 | let mut entry = entry?;
64 | let path: PathBuf = entry.path()?.into();
65 |
66 | let header = entry.header();
67 | let uid = header.uid().unwrap();
68 | let gid = header.gid().unwrap();
69 | let mode = header.mode().unwrap();
70 | let mtime = header.mtime().unwrap();
71 | let entry_type = header.entry_type();
72 |
73 | let typ = match entry_type {
74 | EntryType::Regular => EntryTyp::File,
75 | EntryType::Directory => EntryTyp::Dir,
76 | EntryType::Link => EntryTyp::Link,
77 | EntryType::Symlink => EntryTyp::Symlink,
78 | EntryType::Fifo => EntryTyp::Fifo,
79 | x => {
80 | panic!("unhandled entry type {x:?}");
81 | }
82 | };
83 |
84 | let link = match entry_type {
85 | tar::EntryType::Link | tar::EntryType::Symlink => {
86 | Some(entry.link_name()?.ok_or(TardiffError::NoLink)?.into())
87 | }
88 | _ => None,
89 | };
90 |
91 | let data = match entry_type {
92 | tar::EntryType::Regular => Some(sha_reader(&mut entry)?),
93 | _ => None,
94 | };
95 |
96 | let ext = {
97 | if let Some(ext) = entry.pax_extensions().unwrap() {
98 | ext.into_iter()
99 | .map(|x| x.unwrap())
100 | .map(|x| (x.key().unwrap().to_string(), Vec::from(x.value_bytes())))
101 | .collect()
102 | } else {
103 | vec![]
104 | }
105 | };
106 |
107 | let e = Entry {
108 | typ,
109 | path,
110 | link,
111 | ext,
112 | data,
113 | uid,
114 | gid,
115 | mode,
116 | mtime,
117 | };
118 |
119 | ret.insert(e);
120 | }
121 |
122 | Ok(ret)
123 | }
124 |
125 | fn tardiff(left: R, right: R) -> Result> {
126 | let left = gather_entries(&mut Archive::new(left))?;
127 | let right = gather_entries(&mut Archive::new(right))?;
128 | Ok(Diffs {
129 | in_left_but_not_right: left.difference(&right).cloned().collect(),
130 | in_right_but_not_left: right.difference(&left).cloned().collect(),
131 | })
132 | }
133 |
134 | fn main() {
135 | let args: Vec<_> = env::args().collect();
136 | let left = args.get(1).expect("give me a left file");
137 | let right = args.get(2).expect("give me a right file");
138 |
139 | let diffs = tardiff(
140 | File::open(left).expect("couldn't open left"),
141 | File::open(right).expect("couldn't open left"),
142 | )
143 | .unwrap();
144 |
145 | println!("-------------------- in left but not right ----------------------");
146 | for entry in diffs.in_left_but_not_right.iter() {
147 | println!("{entry:?}");
148 | }
149 |
150 | println!("-------------------- in right but not left ----------------------");
151 | for entry in diffs.in_right_but_not_left.iter() {
152 | println!("{entry:?}");
153 | }
154 | //println!("{:?}", diffs.differing);
155 | }
156 |
--------------------------------------------------------------------------------
/peimage/src/mkfs.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 | use std::fs::{remove_file, OpenOptions};
3 | use std::io::{BufWriter, Read};
4 | use std::path::{Path, PathBuf};
5 | use std::process::Command;
6 |
7 | use rustix::fs::{mknodat, open, FileType, Mode, OFlags};
8 |
9 | use crate::squash::{squash_to_tar, Stats};
10 | use peoci::compression::Compression;
11 |
12 | // TODO allow passing more args into mkfs.erofs, wait with timeout
13 | //
14 | // notes on mkfs.erofs
15 | // without multithreading uses these syscalls
16 | // access
17 | // arch_prctl
18 | // brk
19 | // close
20 | // copy_file_range
21 | // dup
22 | // exit_group
23 | // fallocate
24 | // fstat
25 | // fstatfs
26 | // ftruncate
27 | // getcwd
28 | // getpid
29 | // getppid
30 | // getrandom
31 | // gettid
32 | // getuid
33 | // ioctl
34 | // lseek
35 | // mmap
36 | // mprotect
37 | // munmap
38 | // openat
39 | // pread64
40 | // prlimit64
41 | // pwrite64
42 | // read
43 | // readlink
44 | // rseq
45 | // rt_sigprocmask
46 | // set_robust_list
47 | // set_tid_address
48 | // write
49 | //
50 | // it first truncates the dest file to 1Tib (2**40), then copies each file's data portion at 1Tib
51 | // going forward and does so without compression. It does this by reading from the pipe, lseeking, then
52 | // writing to the file in 32k chunks; I don't know why it doesn't use copy_file_range here.
53 | // It then begins filling in the file by reading from the end of the file and copying the data to
54 | // the beginning of the file. For files less than 4K, there is no compression (these could be
55 | // written in place already I think). It does use copy_file_range in this phase but I'm not sure
56 | // what for. Strangely uses a mix of pwrite64 on the write side and seek+read on the read side (all
57 | // of the same file). I would think using pwritev would be useful here when writing larger files.
58 | // It then writes loads of pwrite64 of size 64 which are the large inode size with a mix of things
59 | // like symlinks but these are all sequential, so could also use pwritev (or buffer in mem then
60 | // flush). I think some of these are also small files with inline data. Not sure yet what the dir
61 | // ent writes are. I'm thinking about how to seccomp the binary with a patch I think, but also just
62 | // thinking about writing my own builder.
63 | //
64 | // But good to keep in mind that building a erofs on tmpfs will consume at peak the sum of all file
65 | // sizes uncompressed + the maps and stuff overhead in memory. Vs if you build on disk, then you
66 | // are first writing out the sum of all file sizes, then reading them back and writing the sum of
67 | // all compressed file sizes.
68 | //
69 | // it does a fallocate(4, FALLOC_FL_KEEP_SIZE|FALLOC_FL_PUNCH_HOLE, 51175371, 53)
70 | //
71 | // trying out a seccomp of mkfs.erofs gives about a 7% overhead, probably because of the high
72 | // number of syscalls (387,772 on silkeh/clang:17. top 4:
73 | // 209202 pwrite64
74 | // 74073 read
75 | // 55014 write
76 | // 48811 lseek
77 |
78 | pub fn squash_erofs(
79 | layer_readers: &mut [(Compression, R)],
80 | outfile: P,
81 | ) -> Result
82 | where
83 | R: Read,
84 | P: AsRef,
85 | {
86 | let fifo = mkfifo()?;
87 |
88 | let mut child = Command::new("mkfs.erofs")
89 | .arg("--quiet")
90 | .arg("--tar=f")
91 | .arg("-zlz4")
92 | .arg(outfile.as_ref().as_os_str())
93 | .arg(fifo.clone())
94 | .spawn()?;
95 |
96 | // Linux fifo size is 16 pages, should we match that?
97 | let fifo_file = OpenOptions::new()
98 | .write(true)
99 | .open(&fifo)?;
100 | let _fifo_file_remover = UnlinkFile { path: fifo.clone() };
101 |
102 | let mut out = BufWriter::with_capacity(4096 * 8, fifo_file);
103 |
104 | let stats = squash_to_tar(layer_readers, &mut out)?;
105 | let _ = out.into_inner(); // close fifo
106 | let status = child.wait()?;
107 |
108 | if status.success() {
109 | Ok(stats)
110 | } else {
111 | Err(anyhow::anyhow!("mkfs.erofs non-zero exit"))
112 | }
113 | }
114 |
115 | fn mkfifo() -> rustix::io::Result {
116 | use rand::distr::{Alphanumeric, SampleString};
117 |
118 | let rng = Alphanumeric.sample_string(&mut rand::rng(), 16);
119 | let temp_dir = env::temp_dir();
120 | let dir = open(&temp_dir, OFlags::DIRECTORY | OFlags::RDONLY, Mode::empty())?;
121 | let path = format!("pe-fifo-{rng}");
122 |
123 | // rustix doesn't have mkfifo https://github.com/bytecodealliance/rustix/issues/1391
124 | mknodat(dir, &path, FileType::Fifo, Mode::RUSR | Mode::WUSR, 0)?;
125 |
126 | Ok(temp_dir.join(path))
127 | }
128 |
/// RAII guard that removes `path` on drop; removal errors are deliberately
/// ignored (best-effort cleanup).
struct UnlinkFile {
    path: PathBuf,
}

impl Drop for UnlinkFile {
    fn drop(&mut self) {
        let _ = remove_file(&self.path);
    }
}
138 |
--------------------------------------------------------------------------------
/pearchive/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 | use std::fs::File;
3 | use std::io::{Seek, SeekFrom};
4 | use std::os::fd::FromRawFd;
5 | use std::path::Path;
6 |
7 | use pearchive::{
8 | pack_dir_to_file, unpack_data_to_dir_with_unshare_chroot,
9 | unpack_file_to_dir_with_unshare_chroot,
10 | };
11 |
12 | use byteorder::{WriteBytesExt, LE};
13 | use memmap2::MmapOptions;
14 |
/// Argument and mmap failures surfaced (via unwrap) by the CLI subcommands.
#[derive(Debug)]
enum Error {
    MissingArg,
    Mmap,
}
20 |
21 | /// args:
22 | #[allow(clippy::get_first)]
23 | fn pack(args: &[String]) {
24 | let indir = args.get(0).ok_or(Error::MissingArg).unwrap();
25 | let outname = args.get(1).ok_or(Error::MissingArg).unwrap();
26 | let indirpath = Path::new(indir);
27 | assert!(indirpath.is_dir(), "{:?} should be a dir", indirpath);
28 |
29 | let fileout = File::create(outname).unwrap();
30 |
31 | pack_dir_to_file(indirpath, fileout).unwrap();
32 | }
33 |
34 | /// args:
35 | #[allow(clippy::get_first)]
36 | fn unpack(args: &[String]) {
37 | let inname = args.get(0).ok_or(Error::MissingArg).unwrap();
38 | let outname = args.get(1).ok_or(Error::MissingArg).unwrap();
39 |
40 | let inpath = Path::new(&inname);
41 | let outpath = Path::new(&outname);
42 | // this fails when we try to use /dev/pmem
43 | // assert!(inpath.is_file(), "{:?} should be a file", inpath);
44 | assert!(outpath.is_dir(), "{:?} should be a dir", outpath);
45 |
46 | let file = File::open(inpath).unwrap();
47 |
48 | unpack_file_to_dir_with_unshare_chroot(file, outpath).unwrap();
49 | }
50 |
51 | /// args:
52 | /// uses stream offset as beginning of map
53 | #[allow(clippy::get_first)]
54 | fn unpackfd(args: &[String]) {
55 | let in_fd = args
56 | .get(0)
57 | .ok_or(Error::MissingArg)
58 | .unwrap()
59 | .parse::()
60 | .unwrap();
61 | let outname = args.get(1).ok_or(Error::MissingArg).unwrap();
62 | let len = args
63 | .get(2)
64 | .ok_or(Error::MissingArg)
65 | .unwrap()
66 | .parse::()
67 | .unwrap();
68 |
69 | let outpath = Path::new(&outname);
70 |
71 | assert!(outpath.is_dir(), "{:?} should be a dir", outpath);
72 |
73 | let mut file = unsafe { File::from_raw_fd(in_fd) };
74 | let offset = file.stream_position().unwrap();
75 |
76 | let mmap = unsafe {
77 | MmapOptions::new()
78 | .offset(offset)
79 | .len(len)
80 | .map(&file)
81 | .map_err(|_| Error::Mmap)
82 | .unwrap()
83 | };
84 |
85 | unpack_data_to_dir_with_unshare_chroot(mmap.as_ref(), outpath).unwrap();
86 | }
87 |
88 | /// args:
89 | #[allow(clippy::get_first)]
90 | fn packfd(args: &[String]) {
91 | let indir = args.get(0).ok_or(Error::MissingArg).unwrap();
92 | let out_fd = args
93 | .get(1)
94 | .ok_or(Error::MissingArg)
95 | .unwrap()
96 | .parse::()
97 | .unwrap();
98 | let indirpath = Path::new(indir);
99 | assert!(indirpath.is_dir(), "{:?} should be a dir", indirpath);
100 |
101 | let mut fileout = unsafe { File::from_raw_fd(out_fd) };
102 | let offset = fileout.stream_position().unwrap();
103 |
104 | // its a bit quirky that we move fileout in and get it back out, which should be the same as an
105 | // &mut, but then the type of BufWriter<&mut File> gets weird and I don't know what to do
106 | let mut fileout = pack_dir_to_file(indirpath, fileout).unwrap();
107 |
108 | let ending_offset = fileout.stream_position().unwrap();
109 | assert!(ending_offset > offset);
110 | let archive_size = ending_offset - offset;
111 | let encoded_size: u32 = archive_size.try_into().unwrap();
112 | fileout.seek(SeekFrom::Start(0)).unwrap();
113 | fileout.write_u32::(encoded_size).unwrap();
114 | // this is to be extra sure the write through the pmem device has finished
115 | // only hit a bad case in the panic handler's write not getting sync'd
116 | fileout.sync_data().unwrap();
117 | }
118 |
119 | fn main() {
120 | let args: Vec = env::args().collect();
121 | match args.get(1).map(|s| s.as_str()) {
122 | Some("pack") => {
123 | pack(&args[2..]);
124 | }
125 | Some("unpack") => {
126 | unpack(&args[2..]);
127 | }
128 | Some("packfd") => {
129 | packfd(&args[2..]);
130 | }
131 | Some("unpackfd") => {
132 | unpackfd(&args[2..]);
133 | }
134 | _ => {
135 | println!("pack ");
136 | println!("unpack ");
137 | println!("packdev ");
138 | println!("unpackfd ");
139 | std::process::exit(1);
140 | }
141 | }
142 | }
143 |
--------------------------------------------------------------------------------
/peserver/src/api.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 |
pub const APPLICATION_JSON: &str = "application/json";
pub const APPLICATION_X_PE_ARCHIVEV1: &str = "application/x.pe.archivev1";

// max requests per second per client
pub const MAX_REQ_PER_SEC: isize = 2;
// max request body size in bytes
pub const MAX_BODY_SIZE: usize = 65536;
// max time we will wait trying to get a place in line for the worker.
// browsers have maybe a 60s total timeout so we have to get in there pretty
// quick to then hope to actually get our request through
// (this comment previously sat above MAX_BODY_SIZE)
pub const MAX_WAIT_TIMEOUT: Duration = Duration::from_secs(30);
// these are per read/write call
pub const DOWNSTREAM_READ_TIMEOUT: Duration = Duration::from_secs(5);
pub const DOWNSTREAM_WRITE_TIMEOUT: Duration = Duration::from_secs(5);
16 |
17 | pub enum ContentType {
18 | ApplicationJson,
19 | PeArchiveV1, //
20 | }
21 |
22 | impl TryFrom<&str> for ContentType {
23 | type Error = ();
24 |
25 | fn try_from(s: &str) -> Result {
26 | match s {
27 | APPLICATION_JSON => Ok(ContentType::ApplicationJson),
28 | APPLICATION_X_PE_ARCHIVEV1 => Ok(ContentType::PeArchiveV1),
29 | _ => Err(()),
30 | }
31 | }
32 | }
33 |
34 | impl From for &str {
35 | fn from(val: ContentType) -> Self {
36 | match val {
37 | ContentType::ApplicationJson => APPLICATION_JSON,
38 | ContentType::PeArchiveV1 => APPLICATION_X_PE_ARCHIVEV1,
39 | }
40 | }
41 | }
42 |
43 | pub mod v2 {
44 | pub mod runi {
45 | use super::super::ContentType;
46 | use oci_spec::image::{Arch, Os};
47 | use peinit;
48 | use serde::{Deserialize, Serialize};
49 |
50 | pub const PREFIX: &str = "/api/v2/runi/";
51 |
52 | #[derive(Serialize, Deserialize)]
53 | pub struct Request {
54 | pub stdin: Option, // filename that will be set as stdin, noop
55 | // for content-type: application/json
56 | pub entrypoint: Option>, // as per oci image config
57 | pub cmd: Option>, // as per oci image config
58 | pub env: Option>, // as per oci image config
59 | }
60 |
61 | pub type Response = peinit::Response;
62 |
63 | #[derive(Debug)]
64 | pub struct ParsedPath<'a> {
65 | pub reference: &'a str,
66 | pub arch: Arch,
67 | pub os: Os,
68 | }
69 |
70 | // TODO would be nice to validate the reference I think? Right now we push string all the
71 | // way through to image-service so that it is a single string but could probably add a
72 | // peoci_spec::Reference with each field registry, repository, and TagOrDigest
73 | // /api/v2/runi///
74 | pub fn parse_path<'a>(s: &'a str) -> Option> {
75 | let rest = s.strip_prefix(PREFIX)?;
76 | let (arch, rest) = rest.split_once('/')?;
77 | let (os, reference) = rest.split_once('/')?;
78 | // https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-manifests
79 | if reference.len() > 255 {
80 | return None;
81 | }
82 | Some(ParsedPath {
83 | reference,
84 | arch: arch.try_into().ok()?,
85 | os: os.try_into().ok()?,
86 | })
87 | }
88 |
89 | pub fn parse_request(body: &[u8], content_type: &ContentType) -> Option<(usize, Request)> {
90 | match content_type {
91 | ContentType::ApplicationJson => {
92 | let req = serde_json::from_slice(body).ok()?;
93 | Some((0, req))
94 | }
95 | ContentType::PeArchiveV1 => {
96 | if body.len() < 4 {
97 | return None;
98 | }
99 | let json_size =
100 | u32::from_le_bytes([body[0], body[1], body[2], body[3]]) as usize;
101 | let slice = body.get(4..4 + json_size)?;
102 | let req = serde_json::from_slice(slice).ok()?;
103 | Some((4 + json_size, req))
104 | }
105 | }
106 | }
107 |
108 | // assumes pearchivev1 format
109 | //
110 | pub fn parse_response(body: &[u8]) -> Option<(Response, &[u8])> {
111 | if body.len() < 4 {
112 | return None;
113 | }
114 | let json_size = u32::from_le_bytes([body[0], body[1], body[2], body[3]]) as usize;
115 | let slice = body.get(4..4 + json_size)?;
116 | let response: Response = serde_json::from_slice(slice).ok()?;
117 | let rem = body.get(4 + json_size..)?;
118 | Some((response, rem))
119 | }
120 | }
121 | }
122 |
--------------------------------------------------------------------------------
/attic/cloudhypervisorapi.sh:
--------------------------------------------------------------------------------
1 | #set -e
2 |
3 | # https://raw.githubusercontent.com/cloud-hypervisor/cloud-hypervisor/master/vmm/src/api/openapi/cloud-hypervisor.yaml
4 |
5 | k=/home/andrew/Repos/linux/vmlinux
6 | ch=$(realpath cloud-hypervisor-static)
7 | #ch=/home/andrew/Repos/cloud-hypervisor/target/debug/cloud-hypervisor
8 |
9 | trap "pkill -P $$" EXIT KILL TERM
10 |
11 | socket_path=/tmp/chapi.sock
12 |
13 | rm -f ${socket_path}
14 |
15 | # $ch \
16 | # --kernel $k \
17 | # --initramfs initramfs \
18 | # --serial off \
19 | # --cmdline "console=hvc0" \
20 | # --cpus boot=1 \
21 | # --memory size=1024M \
22 | # --event-monitor fd=2 \
23 | # -v \
24 | # --api-socket ${socket_path} > /tmp/ch.out &
25 | $ch -vvv --event-monitor path=/tmp/ch.events --api-socket path=${socket_path} > /tmp/ch.out 2> /tmp/ch.err &
26 | #trap "cat /tmp/ch.out" EXIT KILL TERM
27 |
28 | cat > /tmp/ch.config.json < /dev/null
66 |
67 | curl --unix-socket ${socket_path} \
68 | -i -X PUT 'http://localhost/api/v1/vm.boot' \
69 | -H 'Accept: application/json' &> /dev/null
70 | # curl --unix-socket ${socket_path} \
71 | # -i -X PUT 'http://localhost/api/v1/vm.add-pmem' \
72 | # -H 'Content-Type: application/json' \
73 | # -H 'Accept: application/json' \
74 | # -d '{"file": "ocismall.erofs", "discard_writes": true}'
75 |
76 | #curl --unix-socket ${socket_path} \
77 | # -i -X PUT 'http://localhost/api/v1/vm.add-pmem' \
78 | # -H 'Content-Type: application/json' \
79 | # -H 'Accept: application/json' \
80 | # -d '{"file": "/tmp/perunner-io-file", "discard_writes": false}'
81 |
82 | # curl --unix-socket ${socket_path} \
83 | # 'http://localhost/api/v1/vm.info' \
84 | # -H 'Accept: application/json' | jq
85 |
86 | #cat /tmp/ch.out
87 |
88 |
89 | sleep 2
90 | kill %%
91 | cat /tmp/ch.out
92 | echo '-----------------------------'
93 | cat /tmp/ch.err
94 | echo '-----------------------------'
95 | cat /tmp/ch.events
96 | echo '-----------------------------'
97 | #curl --unix-socket ${socket_path} \
98 | # -i -X PUT 'http://localhost/api/v1/vm.reboot'
99 | #sleep 1
100 | #wait
101 |
102 |
103 | # (./cloud-hypervisor-static -v --event-monitor path=/tmp/chevent --api-socket ${socket_path} | ts "%H:%M:%.S") > /tmp/chout 2> /tmp/cherr &
104 | #
105 | # config='{
106 | # "cpus": {"boot_vcpus": 1, "max_vcpus": 1},
107 | # "memory": {"size": 1073741824},
108 | # "payload": {"kernel": "/home/andrew/Repos/linux/vmlinux", "cmdline": "console=hvc0", "initramfs": "initramfs"},
109 | # "pmem": [{"file": "gcc-14.1.0.sqfs", "discard_writes": true}, {"file": "pmemtestfile"}],
110 | # "console": {"mode": "Tty"}
111 | # }'
112 | #
113 | # time curl --unix-socket ${socket_path} -i \
114 | # -X PUT 'http://localhost/api/v1/vm.create' \
115 | # -H 'Accept: application/json' \
116 | # -H 'Content-Type: application/json' \
117 | # -d "${config}"
118 | #
119 | # echo 'pre boot' | ts "%H:%M:%.S"
120 | # time curl --unix-socket ${socket_path} -i -X PUT 'http://localhost/api/v1/vm.boot'
121 | # echo 'post boot' | ts "%H:%M:%.S"
122 | # sleep 1
123 | #
124 | # echo 'rebooting'
125 | #
126 | # echo 'pre reboot' | ts "%H:%M:%.S"
127 | # time curl --unix-socket ${socket_path} -i -X PUT 'http://localhost/api/v1/vm.reboot'
128 | # echo 'post reboot' | ts "%H:%M:%.S"
129 | # time curl --unix-socket ${socket_path} -X GET 'http://localhost/api/v1/vm.info'
130 | # sleep 1
131 | # time curl --unix-socket ${socket_path} -i -X PUT 'http://localhost/api/v1/vm.shutdown'
132 | # time curl --unix-socket ${socket_path} -i -X PUT 'http://localhost/api/v1/vm.delete'
133 |
134 | #sleep 1
135 | #time curl --unix-socket ${socket_path} -i -X PUT 'http://localhost/api/v1/vm.shutdown'
136 |
137 | #time curl --unix-socket ${socket_path} -i \
138 | # -X PUT 'http://localhost/api/v1/vm.create' \
139 | # -H 'Accept: application/json' \
140 | # -H 'Content-Type: application/json' \
141 | # -d "${config}"
142 | #
143 | #time curl --unix-socket ${socket_path} -i -X PUT 'http://localhost/api/v1/vm.boot'
144 | #sleep 2
145 | #time curl --unix-socket ${socket_path} -i -X PUT 'http://localhost/api/v1/vm.boot'
146 | #time curl --unix-socket ${socket_path} -X GET 'http://localhost/api/v1/vm.info' | jq
147 |
148 | # wait
149 | #
150 | # cat /tmp/chout
151 | # cat /tmp/cherr
152 |
--------------------------------------------------------------------------------
/peimage/src/podman.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use std::ffi::OsStr;
3 | use std::io::{Cursor, Read, Write};
4 | use std::process::{Command, Stdio};
5 |
6 | use tar::Archive;
7 | use tempfile::NamedTempFile;
8 | use oci_spec::image::{Digest, ImageIndex, ImageManifest};
9 |
10 | use peoci::compression::Compression;
11 |
12 | #[derive(Debug, thiserror::Error)]
13 | pub enum Error {
14 | NoManifest,
15 | NoIndex,
16 | MissingBlob,
17 | BadBlobPath,
18 | NonUtf8Path,
19 | PodmanExport,
20 | PodmanBuild,
21 | PodmanRm,
22 | PodmanCreate,
23 | PodmanCreateId,
24 | Tempfile,
25 | OciSpec(#[from] oci_spec::OciSpecError),
26 | Io(#[from] std::io::Error),
27 | }
28 |
29 | // how wrong is this?
30 | impl std::fmt::Display for Error {
31 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
32 | write!(f, "{:?}", self)
33 | }
34 | }
35 |
36 | fn digest_to_string(digest: &Digest) -> Result {
37 | digest
38 | .to_string()
39 | .strip_prefix("sha256:")
40 | .map(|x| x.into())
41 | .ok_or(Error::BadBlobPath)
42 | }
43 |
44 | pub fn load_layers_from_podman(image: &str) -> Result)>, Error> {
45 | let mut child = Command::new("podman")
46 | .arg("image")
47 | .arg("save")
48 | .arg("--format=oci-archive")
49 | .arg(image)
50 | .stdout(Stdio::piped())
51 | .spawn()?;
52 |
53 | let stdout = child.stdout.take().expect("handle present");
54 | let mut archive = Archive::new(stdout);
55 | let mut blobs = BTreeMap::new();
56 | let mut index: Option = None;
57 | for entry in archive.entries()? {
58 | let mut entry = entry?;
59 | if entry.path()? == >::as_ref("index.json") {
60 | let _ = index.replace(ImageIndex::from_reader(&mut entry)?);
61 | } else {
62 | // have to read first before checking otherwise we try to take a mutable borrow
63 | // while we have an immutable borrow (annoying)
64 | let mut buf = vec![];
65 | entry.read_to_end(&mut buf)?;
66 | if let Ok(blob) = entry.path()?.strip_prefix("blobs/sha256/") {
67 | let name = blob.to_str().ok_or(Error::BadBlobPath)?.to_string();
68 | blobs.insert(name, buf);
69 | }
70 | }
71 | }
72 |
73 | let _ = child.wait()?;
74 |
75 | let index = index.ok_or(Error::NoIndex)?;
76 | let manifest = index.manifests().first().ok_or(Error::NoManifest)?;
77 | // Digest should really implement Borrow
78 | let manifest_blob = blobs
79 | .get(&digest_to_string(manifest.digest())?)
80 | .ok_or(Error::MissingBlob)?;
81 | let manifest = ImageManifest::from_reader(Cursor::new(manifest_blob))?;
82 | manifest
83 | .layers()
84 | .iter()
85 | .map(|x| {
86 | blobs
87 | .remove(&digest_to_string(x.digest())?)
88 | .ok_or(Error::MissingBlob)
89 | .map(|b| (Compression::Gzip, b))
90 | })
91 | .collect()
92 | }
93 |
94 | pub struct Rootfs {
95 | pub layers: Vec<(Compression, Vec)>,
96 | pub combined: Vec,
97 | }
98 |
/// Builds an image from `containerfile` via `podman build` and returns both
/// its layers and a flattened rootfs tarball.
///
/// Steps: hermetic build (network disabled, image id written to a temp
/// `--iidfile`), per-layer blobs via `load_layers_from_podman`, then
/// `podman create` + `podman export` for the combined filesystem, with the
/// temporary container and image removed afterwards.
///
/// NOTE(review): the exit statuses of `podman build` (`child.wait`) and of
/// the cleanup commands are ignored; a failed build surfaces indirectly as
/// an empty iidfile. Consider checking `.success()`.
pub fn build_with_podman(containerfile: &str) -> Result {
    let mut id_file = NamedTempFile::new()?;
    // --network=none / --no-hosts / --no-hostname keep the build hermetic;
    // the image id goes to the temp file instead of being parsed from stdout.
    let mut child = Command::new("podman")
        .arg("build")
        .arg("--file=-")
        .arg("--no-hosts")
        .arg("--no-hostname")
        .arg("--network=none")
        .arg(format!(
            "--iidfile={}",
            id_file.path().to_str().ok_or(Error::NonUtf8Path)?
        ))
        .stdin(Stdio::piped())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .spawn()?;

    // Feed the Containerfile on stdin, then drop the handle so podman sees EOF.
    let mut stdin = child.stdin.take().expect("handle present");
    stdin
        .write_all(containerfile.as_bytes())?;
    drop(stdin);

    let _ = child.wait()?;

    // Image id written by --iidfile.
    let iid = {
        let mut buf = String::new();
        id_file.read_to_string(&mut buf)?;
        buf
    };

    let layers = load_layers_from_podman(&iid)?;

    // Create a (never started) container so its filesystem can be exported.
    let cid = {
        let output = Command::new("podman")
            .arg("create")
            .arg(&iid)
            .output()?;

        String::from_utf8(output.stdout)
            .map_err(|_| Error::PodmanCreateId)?
            .trim()
            .to_string()
    };

    // Flattened rootfs tar of the container.
    let combined = {
        let output = Command::new("podman")
            .arg("export")
            .arg(&cid)
            .output()?;
        output.stdout
    };

    // Best-effort cleanup of the temporary container and image.
    let _ = Command::new("podman")
        .arg("rm")
        .arg(cid)
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()?;

    let _ = Command::new("podman")
        .arg("rmi")
        .arg(&iid)
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()?;

    Ok(Rootfs { layers, combined })
}
167 |
--------------------------------------------------------------------------------
/peserver/src/gh.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 |
3 | use axum::{
4 | extract::{Path, State},
5 | response::IntoResponse,
6 | routing::get,
7 | Router,
8 | };
9 | use clap::Parser;
10 | use http::{header, StatusCode};
11 | use log::{error, info};
12 | use moka::future::Cache;
13 |
14 | use peserver::util::setup_logs;
15 |
16 | // Note: this will double store the response for a gist at latest version if it is also requested
17 | // at that specific version. Arc> would allow sharing, but we don't know the latest
18 | // version until we've already gotten it and we can't then change the key. Maybe a simpler cache
19 | // with a map of RwLock would be better?
20 |
/// Shared application state: the GitHub client plus the response cache.
struct Ctx {
    client: pegh::Client,
    // can't use Arc> because http_body::Body trait not implemented for it
    // NOTE(review): generic params appear stripped in this copy — from the
    // weigher in main() this is Cache<String, Box<[u8]>> keyed "gist:version".
    cache: Cache>,
}
26 |
/// Service errors, mapped to HTTP status codes by the `From` impl below.
/// NOTE(review): derives thiserror::Error without per-variant #[error]
/// attributes while Display is implemented manually — confirm this is the
/// intended combination in the repo.
#[derive(Debug, Clone, thiserror::Error)]
pub enum Error {
    NotFound,
    Internal,
}
32 |
// Translate service errors into the HTTP status returned to clients.
impl From for StatusCode {
    fn from(e: Error) -> StatusCode {
        match e {
            Error::NotFound => StatusCode::NOT_FOUND,
            Error::Internal => StatusCode::INTERNAL_SERVER_ERROR,
        }
    }
}
41 |
42 | impl std::fmt::Display for Error {
43 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
44 | write!(f, "{:?}", self)
45 | }
46 | }
47 |
/// GET /gist/{gist}: serve the gist at its latest version.
async fn get_gist(
    State(ctx): State>,
    Path(gist): Path,
) -> Result {
    get_gist_impl(ctx, gist, None).await
}
54 |
/// GET /gist/{gist}/{version}: serve the gist pinned to a specific version.
async fn get_gist_version(
    State(ctx): State>,
    Path((gist, version)): Path<(String, String)>,
) -> Result {
    get_gist_impl(ctx, gist, Some(version)).await
}
61 |
/// Shared handler: return a gist's JSON from cache, fetching on miss.
///
/// Cache key is "gist:version" ("gist:" when no version was requested);
/// `or_try_insert_with` also deduplicates concurrent fetches for one key.
async fn get_gist_impl(
    ctx: Arc,
    gist: String,
    version: Option,
) -> Result {
    let key = format!("{gist}:{}", version.as_deref().unwrap_or_default());
    let entry = ctx
        .cache
        .entry_by_ref(&key)
        .or_try_insert_with(retreive_gist(&ctx.client, &gist, version.as_deref()))
        .await
        .map_err(|e| StatusCode::from((*e).clone()))?;

    // is_fresh() is true only when this call performed the insert (a miss).
    if entry.is_fresh() {
        info!("get_gist miss {key}");
    } else {
        info!("get_gist hit {key}");
    }
    let value: Box<[u8]> = entry.into_value();
    // A version-pinned gist can never change; the latest view may, so cap
    // client caching at one hour.
    let cache_header = if version.is_some() {
        "immutable"
    } else {
        "max-age=3600"
    };
    let headers = [
        (header::CONTENT_TYPE, "application/json"),
        (header::CACHE_CONTROL, cache_header),
    ];
    Ok((headers, value))
}
92 |
// Fetches a gist (optionally at a specific version) from GitHub and
// serializes it to JSON bytes suitable for caching. NotFound when the gist
// does not exist; Internal for transport or serialization failures.
// (The "retreive" spelling is kept: the cache insert above calls it by name.)
async fn retreive_gist(
    client: &pegh::Client,
    gist: &str,
    version: Option<&str>,
) -> Result, Error> {
    if let Some(gist) = client
        .get_gist(gist, version)
        .await
        .inspect_err(|e| error!("retreive_gist {gist}:{version:?} failed {e:?}"))
        .map_err(|_| Error::Internal)?
    {
        let bytes = serde_json::to_vec(&gist).map_err(|_| Error::Internal)?;
        Ok(bytes.into())
    } else {
        Err(Error::NotFound)
    }
}
110 |
// Command-line options; exactly one of --tcp / --uds must be provided.
// (Plain `//` comments used deliberately: `///` on clap fields becomes
// --help text and would change program output.)
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    // TCP listen address (exclusive with --uds)
    #[arg(long)]
    tcp: Option,

    // unix domain socket path to listen on (exclusive with --tcp)
    #[arg(long)]
    uds: Option,

    // max total cache weight in bytes (key + value sizes, see the weigher)
    #[arg(long, default_value_t = 100_000_000)]
    capacity: u64,
}
123 |
124 | #[tokio::main(flavor = "current_thread")]
125 | async fn main() {
126 | setup_logs();
127 |
128 | let args = Args::parse();
129 | let client = pegh::Client::new().unwrap();
130 | let cache = Cache::builder()
131 | .max_capacity(args.capacity)
132 | .weigher(|k: &String, v: &Box<[u8]>| (k.len() + v.len()).try_into().unwrap_or(u32::MAX))
133 | .build();
134 |
135 | let ctx = Arc::new(Ctx {
136 | client: client,
137 | cache: cache,
138 | });
139 | let app = Router::new()
140 | .route("/gist/{gist}", get(get_gist))
141 | .route("/gist/{gist}/{version}", get(get_gist_version))
142 | .with_state(ctx);
143 |
144 | match (args.tcp, args.uds) {
145 | (Some(addr), None) => {
146 | let listener = tokio::net::TcpListener::bind(addr).await.unwrap();
147 | axum::serve(listener, app)
148 | .with_graceful_shutdown(async {
149 | tokio::signal::ctrl_c().await.unwrap();
150 | })
151 | .await
152 | .unwrap();
153 | }
154 | (None, Some(addr)) => {
155 | let _ = std::fs::remove_file(&addr);
156 | let listener = tokio::net::UnixListener::bind(addr).unwrap();
157 | axum::serve(listener, app)
158 | .with_graceful_shutdown(async {
159 | tokio::signal::ctrl_c().await.unwrap();
160 | })
161 | .await
162 | .unwrap();
163 | }
164 | (Some(_), Some(_)) => panic!("cannot use --tcp and --uds"),
165 | (None, None) => panic!("muse use --tcp or --uds"),
166 | };
167 | }
168 |
--------------------------------------------------------------------------------
/peimage/go.sum:
--------------------------------------------------------------------------------
1 | github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
2 | github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
3 | github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
4 | github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
6 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
7 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
8 | github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE=
9 | github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
10 | github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
11 | github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
12 | github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
13 | github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
14 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
15 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
16 | github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
17 | github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
18 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
19 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
20 | github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
21 | github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
22 | github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
23 | github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
24 | github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
25 | github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
26 | github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
27 | github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
28 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
29 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
30 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
31 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
32 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
33 | github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
34 | github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
35 | github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
36 | github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
37 | github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
38 | github.com/sirupsen/logrus v1.9.1 h1:Ou41VVR3nMWWmTiEUnj0OlsgOSCUFgsPAOl6jRIcVtQ=
39 | github.com/sirupsen/logrus v1.9.1/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
40 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
41 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
42 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
43 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
44 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
45 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
46 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
47 | github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
48 | github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
49 | github.com/sylabs/oci-tools v0.16.0 h1:4pdwS7HtNT9Y+3jpwNQo590Vj5218vbsestGilgSVtA=
50 | github.com/sylabs/oci-tools v0.16.0/go.mod h1:278n9ttZ0B9vTwbQ4896HCwwgZf3DvU82XD5wS+fZwI=
51 | github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0=
52 | github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E=
53 | github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
54 | github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
55 | github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
56 | golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
57 | golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
58 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
59 | golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
60 | golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
61 | golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
62 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
63 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
64 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
65 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
66 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
67 | gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
68 | gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
69 |
--------------------------------------------------------------------------------
/peoci/src/bin/ocidist.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use std::io::Write;
3 | use std::path::Path;
4 | use std::process::{Command, Stdio};
5 |
6 | use clap::Parser;
7 | use oci_spec::{
8 | distribution::Reference,
9 | image::{Arch, Os},
10 | };
11 | use serde::Deserialize;
12 | use tokio::{
13 | fs::File,
14 | io::{AsyncWriteExt, BufWriter},
15 | };
16 |
17 | use peoci::ocidist::{Auth, AuthMap};
18 |
/// One registry credential pair as stored in the PEOCI_AUTH json file.
#[derive(Deserialize)]
struct AuthEntry {
    username: String,
    password: String,
}

// Registry host -> credentials, the on-disk auth file schema.
// NOTE(review): type params read stripped here — presumably
// BTreeMap<String, AuthEntry>.
type StoredAuth = BTreeMap;
26 |
27 | fn load_stored_auth(p: impl AsRef) -> AuthMap {
28 | let stored: StoredAuth = serde_json::from_str(&std::fs::read_to_string(p).unwrap()).unwrap();
29 | stored
30 | .into_iter()
31 | .map(|(k, v)| (k, Auth::UserPass(v.username, v.password)))
32 | .collect()
33 | }
34 |
// CLI options. Plain `//` comments used deliberately: `///` on clap fields
// becomes --help text and would change program output.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    // image reference, e.g. "index.docker.io/library/busybox:latest"
    image_ref: String,

    // also fetch the layer blobs, not just manifest + configuration
    #[arg(long)]
    blobs: bool,

    // use the on-disk caching client (default); --cache=false for raw client
    #[arg(long, default_value = "true", action=clap::ArgAction::Set)]
    cache: bool,

    // pretty-print manifest/config json by piping through `jq`
    #[arg(long)]
    json: bool,

    // write the first layer blob to this path (raw client mode only)
    #[arg(long)]
    outfile: Option,
}
52 |
53 | #[tokio::main(flavor = "current_thread")]
54 | async fn main() {
55 | env_logger::init();
56 |
57 | let args = Args::parse();
58 |
59 | let image_ref: Reference = args.image_ref.parse().unwrap();
60 |
61 | let auth = if let Some(v) =
62 | std::env::vars().find_map(|(k, v)| if k == "PEOCI_AUTH" { Some(v) } else { None })
63 | {
64 | load_stored_auth(v)
65 | } else {
66 | BTreeMap::new()
67 | };
68 |
69 | println!("{:?}", image_ref);
70 |
71 | if args.cache {
72 | let peoci_cache_dir = std::env::vars()
73 | .find(|(k, _v)| k == "PEOCI_CACHE")
74 | .map(|(_, v)| Path::new(&v).to_owned())
75 | .unwrap_or_else(|| {
76 | Path::new(
77 | &std::env::vars()
78 | .find(|(k, _v)| k == "HOME")
79 | .map(|(_, v)| v)
80 | .unwrap(),
81 | )
82 | .join(".local/share/peoci")
83 | });
84 | let client = peoci::ocidist_cache::Client::builder()
85 | .dir(peoci_cache_dir)
86 | .load_from_disk(true)
87 | .auth(auth)
88 | .build()
89 | .await
90 | .unwrap();
91 |
92 | let res = client
93 | .get_image_manifest_and_configuration(&image_ref, Arch::Amd64, Os::Linux)
94 | .await
95 | .unwrap();
96 | let manifest_config = res.get().unwrap();
97 | println!("got manifest {:#?}", manifest_config.manifest);
98 | println!("got configuration {:#?}", manifest_config.configuration);
99 |
100 | //let manifest: oci_spec::image::ImageManifest = manifest_config.manifest.try_into().unwrap();
101 | //let configuration: oci_spec::image::ImageConfiguration = manifest_config.configuration.try_into().unwrap();
102 |
103 | //let _fd = client
104 | // .get_blob(&image_ref, manifest.layers()[0].digest())
105 | // .await
106 | // .unwrap();
107 | //println!("got blob {:?}", manifest.layers()[0].digest());
108 |
109 | if args.blobs {
110 | let layers = client
111 | .get_layers(&image_ref, &manifest_config.manifest)
112 | .await
113 | .unwrap();
114 | println!("got layers {:?}", layers);
115 | }
116 |
117 | println!("{:#?}", client.stats().await);
118 |
119 | client.persist().unwrap();
120 | } else {
121 | let client = peoci::ocidist::Client::new().unwrap();
122 |
123 | // manual testing
124 | //use std::time::{Instant, Duration};
125 | //client.ratelimit.write().await.insert("index.docker.io".to_string(), Instant::now() + Duration::from_secs(30));
126 |
127 | client.set_auth(auth).await;
128 |
129 | let outfile = args.outfile;
130 |
131 | let image_ref = if image_ref.digest().is_some() {
132 | image_ref
133 | } else {
134 | let manifest_descriptor = client
135 | .get_matching_descriptor_from_index(&image_ref, Arch::Amd64, Os::Linux)
136 | .await
137 | .unwrap()
138 | .unwrap();
139 | image_ref.clone_with_digest(manifest_descriptor.digest().to_string())
140 | };
141 |
142 | let manifest_response = client
143 | .get_image_manifest(&image_ref)
144 | .await
145 | .unwrap()
146 | .unwrap();
147 | let manifest = manifest_response.get().unwrap();
148 | if args.json {
149 | jq(manifest_response.data());
150 | } else {
151 | println!("got manifest {:#?}", manifest);
152 | }
153 |
154 | let configuration_response = client
155 | .get_image_configuration(&image_ref, manifest.config())
156 | .await
157 | .unwrap()
158 | .unwrap();
159 | let config = configuration_response.get().unwrap();
160 | if args.json {
161 | jq(configuration_response.data());
162 | } else {
163 | println!("got configuration {:#?}", config);
164 | }
165 |
166 | if let Some(outfile) = outfile {
167 | let mut writer = BufWriter::new(File::create(outfile).await.unwrap());
168 | let size = client
169 | .get_blob(&image_ref, &manifest.layers()[0], &mut writer)
170 | .await
171 | .unwrap()
172 | .unwrap();
173 | writer.flush().await.unwrap();
174 | let file = writer.into_inner();
175 | println!(
176 | "wrote {size} bytes, file size is {}",
177 | file.metadata().await.unwrap().len()
178 | );
179 | }
180 | }
181 | }
182 |
/// Pretty-prints a JSON buffer by piping it through the `jq` binary
/// (inherits our stdout). Panics if `jq` is missing or the pipe breaks.
fn jq(buf: impl AsRef<[u8]>) {
    let mut child = Command::new("jq").stdin(Stdio::piped()).spawn().unwrap();
    {
        let mut stdin = child.stdin.take().unwrap();
        stdin.write_all(buf.as_ref()).unwrap();
        // stdin drops here, closing the pipe so jq sees EOF
    }
    child.wait().unwrap();
}
188 |
--------------------------------------------------------------------------------
/peserver/src/testclient.rs:
--------------------------------------------------------------------------------
1 | use std::io::{Read, Write};
2 | use std::path::Path;
3 |
4 | use bytes::Bytes;
5 | use clap::Parser;
6 | use flate2::read::GzDecoder;
7 | use http::Method;
8 | use pingora::prelude::{HttpPeer, RequestHeader};
9 | use std::time::Duration;
10 |
11 | use pearchive::{unpack_visitor, PackMemToVec, PackMemVisitor, UnpackVisitor};
12 | use peserver::api;
13 | use peserver::api::v2 as apiv2;
14 |
15 | use peserver::util::read_full_client_response_body;
16 |
/// Writes `input` to stdout with every byte escaped via
/// `std::ascii::escape_default`, followed by a trailing newline.
fn escape_dump(input: &[u8]) {
    let escaped: Vec<u8> = input
        .iter()
        .flat_map(|b| std::ascii::escape_default(*b))
        .collect();
    std::io::stdout().write_all(&escaped).unwrap();
    println!();
}
27 |
/// Gzip-decompresses `input` fully into a newly allocated Vec.
/// NOTE(review): the return type reads `Result>` — generic parameter appears
/// stripped in this copy; presumably `std::io::Result<Vec<u8>>`.
fn zcat(input: &[u8]) -> std::io::Result> {
    let mut gz = GzDecoder::new(input);
    let mut ret = Vec::with_capacity(4096);
    gz.read_to_end(&mut ret)?;
    Ok(ret)
}
34 |
/// UnpackVisitor that prints each archive entry's name, size, and escaped
/// contents to stdout.
struct UnpackVisitorPrinter {}

impl UnpackVisitor for UnpackVisitorPrinter {
    // Always returns true so unpacking continues through every file.
    fn on_file(&mut self, name: &Path, data: &[u8]) -> bool {
        println!("=== {:?} ({}) ===", name, data.len());
        if !data.is_empty() {
            escape_dump(data);
        }
        true
    }
}
46 |
// Test-client options. Several flags deliberately trigger server error
// paths (oversized body, corrupt archive, header limits). Plain `//`
// comments used deliberately: `///` on clap fields becomes --help text.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    // server address: host:port, or a unix socket path when starting with '/'
    #[arg(long, default_value = "localhost:6188")]
    addr: String,

    // image to run the command in
    #[arg(long, default_value = "index.docker.io/library/busybox:1.36.0")]
    image: String,

    // stdin file name forwarded in the request
    #[arg(long)]
    stdin: Option,

    // environment variables for the container
    #[arg(long)]
    env: Vec,

    // request a gzip-encoded response
    #[arg(long)]
    gzip: bool,

    // pack an oversized file to exercise the body size limit
    #[arg(long)]
    body_too_big: bool,

    // send ~1000 headers to exercise the header-count limit
    #[arg(long)]
    header_too_many: bool,

    // send one very large header value
    #[arg(long)]
    header_too_big: bool,

    // append an invalid archive tag byte to the body
    #[arg(long)]
    corrupt_body: bool,

    // remaining args become the command to run
    #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
    args: Vec,
}
80 |
81 | fn print_headers(prefix: &str, headers: &http::HeaderMap) {
82 | for (k, v) in headers.iter() {
83 | println!("{}{}: {:?}", prefix, k, v);
84 | }
85 | }
86 |
87 | //fn hexdump(buf: &[u8]) {
88 | // for chunk in buf.chunks(16) {
89 | // for byte in chunk {
90 | // print!("{:02x} ", byte);
91 | // }
92 | // println!();
93 | // }
94 | //}
95 |
/// Test client: POST a runi request (json header + packed input files) to
/// the server, print the response, and unpack the returned archive.
#[tokio::main]
async fn main() {
    let args = Args::parse();

    let connector = pingora::connectors::http::v1::Connector::new(None);
    // A leading '/' selects a unix domain socket peer; otherwise host:port TCP.
    let peer = if args.addr.starts_with("/") {
        HttpPeer::new_uds(&args.addr, false, "".to_string()).unwrap()
    } else {
        HttpPeer::new(&args.addr, false, "".to_string())
    };
    let (mut session, _) = connector.get_http_session(&peer).await.unwrap();
    session.read_timeout = Some(Duration::from_secs(5));
    session.write_timeout = Some(Duration::from_secs(5));

    let api_req = apiv2::runi::Request {
        cmd: Some(args.args),
        entrypoint: Some(vec![]),
        stdin: args.stdin,
        env: Some(args.env),
    };

    // Body layout: u32 little-endian json length, the json request, then the
    // packed archive of input files.
    let buf = {
        let json = serde_json::to_vec(&api_req).unwrap();
        let jsonlen: u32 = json.len().try_into().unwrap();
        let mut buf: Vec = jsonlen.to_le_bytes().into_iter().collect();
        buf.extend_from_slice(&json);
        let mut v = PackMemToVec::with_vec(buf);
        v.file("file1", b"data1").unwrap();
        if args.body_too_big {
            // Oversized file to exercise the server's body-size limit.
            let too_much_data = vec![0; 65536];
            v.file("file2", &too_much_data).unwrap();
        }
        if args.corrupt_body {
            // A trailing 4 is an invalid archive tag, to test rejection.
            let mut v = v.into_vec().unwrap();
            v.push(4); // BadTag
            v
        } else {
            v.into_vec().unwrap()
        }
    };

    let url = apiv2::runi::PREFIX.to_owned() + &args.image;
    let req = {
        let mut x = RequestHeader::build(Method::POST, url.as_bytes(), Some(3)).unwrap();
        x.insert_header("Content-Type", api::APPLICATION_X_PE_ARCHIVEV1)
            .unwrap();
        x.insert_header("Content-Length", buf.len()).unwrap();
        if args.gzip {
            x.insert_header("Accept-Encoding", "gzip").unwrap();
        }
        if args.header_too_many {
            // Flood with headers to exercise the server's header-count limit.
            for i in 0..1000 {
                x.insert_header(format!("my-header-{}", i), "blah-blah-blah")
                    .unwrap();
            }
        }
        if args.header_too_big {
            // okay doesn't seem like there is an upper limit yet...
            let mut s = String::with_capacity(4096 * 16);
            for _ in 0..s.capacity() {
                s.push('x');
            }
            x.insert_header("my-big-header", s).unwrap();
        }
        Box::new(x)
    };

    // Echo the outgoing request line and headers.
    println!("{} {:?} {}", req.method, req.version, req.uri);
    print_headers("> ", &req.headers);

    let _ = session.write_request_header(req).await.unwrap();
    let _ = session.write_body(&buf).await.unwrap();
    let _ = session.read_response().await.unwrap();
    let res_parts: &http::response::Parts = session.resp_header().unwrap();
    let status = res_parts.status;

    println!("{} {:?}", status, res_parts.version);
    print_headers("< ", &res_parts.headers);

    // Warn when gzip was requested but the server did not compress.
    if args.gzip
        && res_parts
            .headers
            .get("Content-encoding")
            .and_then(|x| x.to_str().ok())
            != Some("gzip")
    {
        println!("yoooooooooooooooooo gzip not there");
    }

    // Decompress the body if gzip was negotiated.
    let body = {
        let body = read_full_client_response_body(&mut session).await.unwrap();
        if args.gzip {
            Bytes::from(zcat(&body).unwrap())
        } else {
            body
        }
    };
    if status != 200 {
        println!("ERROR {:?}", body);
        return;
    }
    //hexdump(&body[..min(body.len(), 256)]);
    let (response, archive) = apiv2::runi::parse_response(&body).unwrap();
    println!("api response {:#?}", response);

    // Dump every file in the returned archive.
    let mut unpacker = UnpackVisitorPrinter {};
    unpack_visitor(archive, &mut unpacker).unwrap();
}
204 |
--------------------------------------------------------------------------------
/peserver/src/util.rs:
--------------------------------------------------------------------------------
1 | use std::io::Write;
2 | use std::net::{IpAddr, Ipv6Addr};
3 |
4 | use base64::prelude::{Engine, BASE64_STANDARD};
5 | use bytes::{Bytes, BytesMut};
6 | use env_logger;
7 | use http::{Response, StatusCode};
8 | use log::Level;
9 | use rustix::fd::AsFd;
10 | use serde::Serialize;
11 | use sha2::{Digest, Sha256};
12 |
13 | use pingora;
14 | use pingora::protocols::http::ServerSession;
15 | use pingora::proxy::Session;
16 |
17 | use crate::api::{APPLICATION_JSON, APPLICATION_X_PE_ARCHIVEV1};
18 |
19 | // taken from https://github.com/swsnr/systemd-journal-logger.rs/blob/main/src/lib.rs
20 | // which does more than I want by trying to connect to /run/systemd/journal/socket
21 | fn connected_to_journal() -> bool {
22 | rustix::fs::fstat(std::io::stderr().as_fd())
23 | .map(|stat| format!("{}:{}", stat.st_dev, stat.st_ino))
24 | .ok()
25 | .and_then(|stderr| {
26 | std::env::var_os("JOURNAL_STREAM").map(|s| s.to_string_lossy() == stderr.as_str())
27 | })
28 | .unwrap_or(false)
29 | }
30 |
31 | pub fn setup_logs() {
32 | if connected_to_journal() {
33 | env_logger::builder()
34 | .format(|buf, record| {
35 | let priority = match record.level() {
36 | Level::Error => "3",
37 | Level::Warn => "4",
38 | Level::Info => "5",
39 | Level::Debug => "6",
40 | Level::Trace => "7",
41 | };
42 | writeln!(buf, "<{}> {}", priority, record.args())
43 | })
44 | .init();
45 | } else {
46 | env_logger::init();
47 | }
48 | }
49 |
/// Reads the entire request body into one contiguous buffer, erroring once
/// the accumulated length exceeds `max_len`.
/// NOTE(review): return type reads `Result>` — generics appear stripped in
/// this copy; presumably pingora's `Result<Bytes>`.
pub async fn read_full_server_request_body(
    session: &mut ServerSession,
    max_len: usize,
) -> Result> {
    let mut acc = BytesMut::with_capacity(4096);
    while let Some(bytes) = session.read_request_body().await? {
        acc.extend_from_slice(&bytes);
        // Checked after appending: the caller gets an error, never a
        // silently truncated body.
        if acc.len() > max_len {
            return Err(pingora::Error::new(pingora::ErrorType::ReadError));
        }
    }
    Ok(acc.freeze())
}
63 |
/// Reads an entire HTTP/1 client response body into one contiguous buffer.
/// Unbounded: callers are expected to trust the upstream's size.
/// NOTE(review): return type reads `Result>` — generics appear stripped in
/// this copy; presumably pingora's `Result<Bytes>`.
pub async fn read_full_client_response_body(
    session: &mut pingora::protocols::http::v1::client::HttpSession,
) -> Result> {
    let mut acc = BytesMut::with_capacity(4096);
    while let Some(bytes) = session.read_body_ref().await? {
        acc.extend_from_slice(bytes);
    }
    Ok(acc.freeze())
}
73 |
/// Returns the first 8 octets (upper 64 bits) of an IPv6 address.
fn ipv6_64(ip: &Ipv6Addr) -> [u8; 8] {
    // Slice-to-array conversion replaces the manual 8-element copy;
    // it cannot fail because the slice is exactly 8 bytes long.
    ip.octets()[..8].try_into().expect("8-byte slice")
}
78 |
/// Reduces the session's client address to a u64 identifier: IPv4 uses the
/// full 32-bit address, IPv6 its upper 64 bits. Native-endian conversion
/// means the value is only stable within one host.
pub fn session_ip_id(session: &Session) -> u64 {
    let ip = session
        .client_addr()
        .and_then(|x| x.as_inet())
        .map(|x| x.ip());
    match ip {
        Some(IpAddr::V4(ipv4)) => u32::from_ne_bytes(ipv4.octets()) as u64,
        Some(IpAddr::V6(ipv6)) => u64::from_ne_bytes(ipv6_64(&ipv6)),
        // Non-inet peer (e.g. unix socket) or no address: one fixed bucket.
        None => 42,
    }
}
90 |
/// Builds an empty-bodied response with an explicit Content-Length of 0.
/// NOTE(review): return type reads `Response>` — generic appears stripped;
/// presumably `Response<Vec<u8>>`.
pub fn response_no_body(status: StatusCode) -> Response> {
    Response::builder()
        .status(status)
        .header(http::header::CONTENT_LENGTH, 0)
        .body(vec![])
        .unwrap()
}
98 |
/// Builds a response carrying `body` as raw bytes with Content-Length set
/// (no Content-Type is added).
pub fn response_string(status: StatusCode, body: &str) -> Response> {
    let body = body.as_bytes().to_vec();
    Response::builder()
        .status(status)
        .header(http::header::CONTENT_LENGTH, body.len())
        .body(body)
        .unwrap()
}
107 |
/// Serializes `body` to json and wraps it via `response_json_vec`; the only
/// failure mode is serde_json serialization.
/// NOTE(review): signature appears to have lost its generic parameter
/// (presumably `<T: Serialize>`) in this copy.
pub fn response_json(
    status: StatusCode,
    body: T,
) -> serde_json::Result>> {
    Ok(response_json_vec(status, serde_json::to_vec(&body)?))
}
114 |
/// Wraps pre-serialized json bytes with application/json Content-Type and
/// Content-Length headers.
pub fn response_json_vec(status: StatusCode, body: Vec) -> Response> {
    // TODO presize headermap
    Response::builder()
        .status(status)
        .header(http::header::CONTENT_TYPE, APPLICATION_JSON)
        .header(http::header::CONTENT_LENGTH, body.len())
        .body(body)
        .unwrap()
}
124 |
/// Wraps an archive body with the custom pearchivev1 Content-Type and
/// Content-Length headers.
pub fn response_pearchivev1(status: StatusCode, body: Vec) -> Response> {
    // TODO presize headermap
    Response::builder()
        .status(status)
        .header(http::header::CONTENT_TYPE, APPLICATION_X_PE_ARCHIVEV1)
        .header(http::header::CONTENT_LENGTH, body.len())
        .body(body)
        .unwrap()
}
134 |
135 | pub fn etag(data: &[u8]) -> String {
136 | let hash = Sha256::digest(data);
137 | let mut ret = String::with_capacity(16);
138 | ret.push('W');
139 | ret.push('/');
140 | ret.push('"');
141 | BASE64_STANDARD.encode_string(hash, &mut ret);
142 | ret.push('"');
143 | ret
144 | }
145 |
/// Response headers for common error statuses, built once at first use and
/// shared thereafter so the hot path does no per-request construction.
/// NOTE(review): the `Lazy =` static types read with generics stripped in
/// this copy; presumably `Lazy<ResponseHeader>`.
pub mod premade_responses {
    use crate::api::MAX_REQ_PER_SEC;
    use http::StatusCode;
    use once_cell::sync::Lazy;
    use pingora::http::ResponseHeader;
    use pingora::protocols::http::error_resp;

    // annoyingly this doesn't work because status gets captured
    //fn e(status: StatusCode) -> Lazy {
    //    Lazy::new(move || error_resp::gen_error_response(status.into()))
    //}

    pub static NOT_FOUND: Lazy =
        Lazy::new(|| error_resp::gen_error_response(StatusCode::NOT_FOUND.into()));
    pub static INTERNAL_SERVER_ERROR: Lazy =
        Lazy::new(|| error_resp::gen_error_response(StatusCode::INTERNAL_SERVER_ERROR.into()));
    pub static SERVICE_UNAVAILABLE: Lazy =
        Lazy::new(|| error_resp::gen_error_response(StatusCode::SERVICE_UNAVAILABLE.into()));
    pub static PAYLOAD_TOO_LARGE: Lazy =
        Lazy::new(|| error_resp::gen_error_response(StatusCode::PAYLOAD_TOO_LARGE.into()));
    pub static BAD_REQUEST: Lazy =
        Lazy::new(|| error_resp::gen_error_response(StatusCode::BAD_REQUEST.into()));

    // 429 carries the rate-limit headers clients use for backoff.
    pub static TOO_MANY_REQUESTS: Lazy = Lazy::new(|| {
        let mut header = ResponseHeader::build(StatusCode::TOO_MANY_REQUESTS, Some(3)).unwrap();
        header
            .insert_header("X-Rate-Limit-Limit", MAX_REQ_PER_SEC.to_string())
            .unwrap();
        header.insert_header("X-Rate-Limit-Remaining", "0").unwrap();
        header.insert_header("X-Rate-Limit-Reset", "1").unwrap();
        header.insert_header("Content-Length", "0").unwrap();
        header
    });

    // 304 responses must not carry a body.
    pub static NOT_MODIFIED: Lazy = Lazy::new(|| {
        let mut header = ResponseHeader::build(StatusCode::NOT_MODIFIED, Some(1)).unwrap();
        header.insert_header("Content-Length", "0").unwrap();
        header
    });
}
186 |
--------------------------------------------------------------------------------
/attic/chperf.py:
--------------------------------------------------------------------------------
1 | from subprocess import run
2 | from pathlib import Path
3 | import itertools
4 | import sys
5 |
# Absolute path to the locally built cloud-hypervisor binary
# (x86_64 musl target, "profiling" build profile).
ch = str(Path('../cloud-hypervisor/target/x86_64-unknown-linux-musl/profiling/cloud-hypervisor').resolve())
# NOTE(review): presumably the perf probe/event group name for the mangled
# aml symbols listed below — confirm against the commands later in the script.
group = 'chaml'
8 |
9 | funcs = [
10 | '_ZN11acpi_tables3aml49_$LT$impl$u20$acpi_tables..Aml$u20$for$u20$u8$GT$12to_aml_bytes17hc7b5465092900902E',
11 | '_ZN11acpi_tables3aml50_$LT$impl$u20$acpi_tables..Aml$u20$for$u20$u16$GT$12to_aml_bytes17hcd512bf8974793fbE',
12 | '_ZN11acpi_tables3aml50_$LT$impl$u20$acpi_tables..Aml$u20$for$u20$u32$GT$12to_aml_bytes17hab6ce64640da050fE',
13 | '_ZN11acpi_tables3aml50_$LT$impl$u20$acpi_tables..Aml$u20$for$u20$u64$GT$12to_aml_bytes17h1b412693412cd2d1E',
14 | '_ZN11acpi_tables3aml52_$LT$impl$u20$acpi_tables..Aml$u20$for$u20$usize$GT$12to_aml_bytes17h670e93dd04f80979E',
15 | '_ZN11acpi_tables3aml54_$LT$impl$u20$acpi_tables..Aml$u20$for$u20$$RF$str$GT$12to_aml_bytes17h40fe958944e083d2E',
16 | '_ZN50_$LT$vmm..cpu..Cpu$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h10d5fe8da08c7890E',
17 | '_ZN56_$LT$vmm..cpu..CpuNotify$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hf4c7a776d090cc70E',
18 | '_ZN57_$LT$acpi_tables..aml..IO$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hf27e446c1d6ad7cfE',
19 | '_ZN57_$LT$acpi_tables..aml..If$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hd16f05b94ce164baE',
20 | '_ZN57_$LT$vmm..cpu..CpuMethods$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17ha51e9280697a2b11E',
21 | '_ZN58_$LT$acpi_tables..aml..Add$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h3ffdc35cc660dd18E',
22 | '_ZN58_$LT$acpi_tables..aml..And$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h7300ff09b617dd0cE',
23 | '_ZN58_$LT$acpi_tables..aml..Arg$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h1087d25a60a8d217E',
24 | '_ZN58_$LT$acpi_tables..aml..One$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hba87c10b9295918dE',
25 | '_ZN59_$LT$acpi_tables..aml..Name$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h61b4cccbc82c2730E',
26 | '_ZN59_$LT$acpi_tables..aml..Path$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h31553157280a48abE',
27 | '_ZN59_$LT$acpi_tables..aml..Zero$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17ha0ef9fb79c6b7d37E',
28 | '_ZN60_$LT$acpi_tables..aml..Equal$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h5ae19113535f3364E',
29 | '_ZN60_$LT$acpi_tables..aml..Field$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h402253a9338028f0E',
30 | '_ZN60_$LT$acpi_tables..aml..Local$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h41379ce915ebb08dE',
31 | '_ZN60_$LT$acpi_tables..aml..Mutex$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h48a9044430603f2aE',
32 | '_ZN60_$LT$acpi_tables..aml..Store$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hdb1c5d4fe4379d30E',
33 | '_ZN60_$LT$acpi_tables..aml..While$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hdae3bdbb90316e36E',
34 | '_ZN61_$LT$acpi_tables..aml..Device$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h9eaf94d63642df01E',
35 | '_ZN61_$LT$acpi_tables..aml..Method$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h11edb6e7a7164d73E',
36 | '_ZN61_$LT$acpi_tables..aml..Notify$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h12d63dd4ea614955E',
37 | '_ZN61_$LT$acpi_tables..aml..Return$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hb8fd31f7afbe11ccE',
38 | '_ZN62_$LT$acpi_tables..aml..Acquire$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h5af7829065df7a9bE',
39 | '_ZN62_$LT$acpi_tables..aml..Package$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h83e371f41e11a795E',
40 | '_ZN62_$LT$acpi_tables..aml..Release$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h3f80ee21d18be886E',
41 | '_ZN63_$LT$acpi_tables..aml..EISAName$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hc0b96c9715b3ac0aE',
42 | '_ZN63_$LT$acpi_tables..aml..LessThan$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h4e668738440e1517E',
43 | '_ZN63_$LT$acpi_tables..aml..OpRegion$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h980359c866b10a6cE',
44 | '_ZN63_$LT$acpi_tables..aml..Subtract$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h44e8015b5fbce456E',
45 | '_ZN64_$LT$acpi_tables..aml..Interrupt$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h74ee6eb14628c807E',
46 | '_ZN64_$LT$acpi_tables..aml..ShiftLeft$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17ha8952b756d4c0dcaE',
47 | '_ZN65_$LT$acpi_tables..aml..BufferData$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h5a3c694458fadd0bE',
48 | '_ZN65_$LT$acpi_tables..aml..MethodCall$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17ha9cff1c72e73bd4bE',
49 | '_ZN65_$LT$vmm..pci_segment..PciDevSlot$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hd0dfd53e2b107eabE',
50 | '_ZN67_$LT$vmm..pci_segment..PciDsmMethod$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h3270863991dee19bE',
51 | '_ZN68_$LT$acpi_tables..aml..Memory32Fixed$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h1e110be3c561dbf3E',
52 | '_ZN69_$LT$vmm..memory_manager..MemorySlots$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hb8bf6904652895a7E',
53 | '_ZN70_$LT$vmm..memory_manager..MemoryNotify$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hc4c44ab1dd96074bE',
54 | '_ZN71_$LT$acpi_tables..aml..CreateDWordField$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h88ea16c05d8017e5E',
55 | '_ZN71_$LT$acpi_tables..aml..CreateQWordField$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h64d1f79c364f49ceE',
56 | '_ZN71_$LT$acpi_tables..aml..ResourceTemplate$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hd68eace74fcecd60E',
57 | '_ZN71_$LT$vmm..device_manager..DeviceManager$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h145be8c8d50e637bE',
58 | '_ZN71_$LT$vmm..memory_manager..MemoryMethods$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hf67a97296bf2e2eeE',
59 | '_ZN71_$LT$vmm..pci_segment..PciDevSlotNotify$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h0399b29273414c76E',
60 | '_ZN72_$LT$vmm..pci_segment..PciDevSlotMethods$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h5d6a44ae4ed23365E',
61 | '_ZN78_$LT$acpi_tables..aml..AddressSpace$LT$u16$GT$$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17h83a600c229f6e246E',
62 | '_ZN78_$LT$acpi_tables..aml..AddressSpace$LT$u64$GT$$u20$as$u20$acpi_tables..Aml$GT$12to_aml_bytes17hfa8ad483e3c67c78E',
63 | ]
64 |
65 |
def delete():
    # Remove every probe previously registered under our probe group.
    # Uses f'{group}:*' so this stays in sync with add()/record(), which
    # register and reference probes via the module-level `group` variable
    # (previously hard-coded to 'chaml:*').
    run(['perf', 'probe', '-d', f'{group}:*'], check=True)
68 |
def add():
    # Register an entry probe and a matching return probe for every mangled
    # symbol in `funcs`, named aml_000, aml_001, ... (return probes get an
    # 'r' prefix). Probes are added against the `ch` binary.
    for i, func in enumerate(funcs):
        base = f'aml_{i:03d}'
        entry_spec = f'{group}:{base}={func}'
        return_spec = f'{group}:r{base}={func}%return'
        for spec in (entry_spec, return_spec):
            run(['perf', 'probe', '-x', ch, '--add', spec], check=True)
76 |
77 | def flatten(it): return list(itertools.chain.from_iterable(it))
def record():
    # Record a cloud-hypervisor boot under perf, enabling every entry probe
    # that add() registered (aml_000 ... aml_NNN in our probe group).
    kernel = '/home/andrew/Repos/linux/vmlinux'
    events = flatten([('-e', f'{group}:aml_{i:03d}') for i, _ in enumerate(funcs)])
    cmd = ['perf', 'record']
    cmd += events
    cmd += [
        ch,
        '--seccomp', 'log',
        '--kernel', kernel,
        '--initramfs', 'initramfs',
        '--cpus', 'boot=1',
        '--memory', 'size=1024M',
    ]
    run(cmd, check=True)
91 |
# Dispatch on the first CLI argument: add | record | delete.
# A missing/unknown argument raises (IndexError/KeyError) by design — this
# is a personal debugging script.
actions = {'add': add, 'record': record, 'delete': delete}
actions[sys.argv[1]]()
94 |
95 | # postmortem, this didn't work with the return probe, was getting a could not find symbol error
96 |
--------------------------------------------------------------------------------
/pefrontend/src/pearchive.ts:
--------------------------------------------------------------------------------
const MAX_NAME_LEN = 255; // tmpfs max name length

// Tag bytes introducing each record of a pearchive-v1 stream.
enum ArchiveFormat1Tag {
    File = 1, // file record: name, NUL, u32-LE data length, raw bytes
    Dir = 2,  // directory record: name, NUL, then child records follow
    Pop = 3,  // end of the current directory (return to parent)
}
8 |
9 | function stripLeadingJunk(x: string): string {
10 | return
11 | }
12 |
13 | export function makeHiearachy(files: {path: string, data: string|ArrayBuffer}[]) {
14 | let ret = new Map();
15 | for (let file of files) {
16 | let parts = file.path.split('/');
17 | if (parts.length === 0) {
18 | continue;
19 | }
20 | let cur = ret;
21 | for (let part of parts.slice(0, -1)) {
22 | // we just skip this junk
23 | if (part === '' || part === '.' || part === '..') {
24 | continue;
25 | }
26 | let x = cur.get(part);
27 | if (x === undefined) {
28 | x = new Map();
29 | cur.set(part, x);
30 | }
31 | cur = x;
32 | }
33 | let name = parts[parts.length - 1];
34 | if (name === '' || name === '.' || name === '..') {
35 | throw new Error('bad file name');
36 | }
37 | cur.set(name, file);
38 | }
39 | return ret;
40 | }
41 |
42 |
43 | // kinda gross, not sure
// Serializes a hierarchy Map (from makeHiearachy) into a pearchive-v1 Blob.
// Record grammar: Dir name NUL <children...> Pop | File name NUL u32len bytes.
// NOTE(review): bare `Map` type looks like a stripped generic — presumably
// Map<string, any>; confirm against the original source.
function encodeHierarchy(root: Map): Blob {
    // scratch buffer for the little-endian u32 file-length field
    let lenbuf = new ArrayBuffer(4);
    let lenbufview = new DataView(lenbuf);
    // new Blob([1]) == [49] because ord('1') == 49 (it calls toString()!)
    // so we have to make a u8 array for each tag (and null)
    let tagDir = new Uint8Array([ArchiveFormat1Tag.Dir]);
    let tagFile = new Uint8Array([ArchiveFormat1Tag.File]);
    let tagPop = new Uint8Array([ArchiveFormat1Tag.Pop]);
    let nullByte = new Uint8Array([0]);
    let te = new TextEncoder();

    // Depth-first walk yielding the byte pieces that Blob will concatenate.
    function* recur(cur) {
        for (let [name, v] of cur.entries()) {
            if (v instanceof Map) {
                // directory: tag, name, NUL, children, Pop
                yield tagDir;
                yield te.encode(name);
                yield nullByte; // null term
                yield* recur(v);
                yield tagPop;
            } else {
                // file: tag, name, NUL, u32-LE length, data
                yield tagFile;
                yield te.encode(name);
                yield nullByte; // null term
                if (typeof v.data === 'string') {
                    let data = te.encode(v.data);
                    lenbufview.setUint32(0, data.byteLength, /* LE */ true);
                    // slice() copies: lenbuf is reused for every file
                    yield lenbuf.slice();
                    yield data;
                } else {
                    lenbufview.setUint32(0, v.data.byteLength, /* LE */ true);
                    yield lenbuf.slice();
                    yield v.data;
                }
            }
        }
    }

    return new Blob(recur(root));
}
83 |
84 | // encodes files to pearchivev1
85 | export function packArchiveV1(files: {path: string, data: string|ArrayBuffer}[]): Blob {
86 | console.time('packArchiveV1');
87 | let hierachy = makeHiearachy(files);
88 | let ret = encodeHierarchy(hierachy);
89 | console.timeEnd('packArchiveV1');
90 | return ret;
91 | }
92 |
93 | function findZeroByte(buf: DataView, start: number): number {
94 | for (let i = start; i < Math.min(start + MAX_NAME_LEN, buf.byteLength); i++) {
95 | if (buf.getUint8(i) === 0) return i;
96 | }
97 | return -1;
98 | }
99 |
100 |
101 | // tries to decode as utf-8, if fails, returns as arraybuffer and you can retry with another encoding
102 | // okay we don't actually respect the byteLength of a DataView since we read the length from the archive and slice
103 | // a new one from the underlying buffer. But really we just need it for the offset
104 | export function unpackArchiveV1(data: ArrayBuffer|Uint8Array|DataView): {path: string, data: string|ArrayBuffer}[] {
105 | console.time('unpackArchiveV1');
106 | let i = (data instanceof DataView) ? data.byteOffset : 0;
107 | // note we recreate a view if given a view and always just work with the offset it gave
108 | let view = (data instanceof ArrayBuffer) ? new DataView(data) : new DataView(data.buffer);
109 | let buffer = (data instanceof ArrayBuffer) ? data : data.buffer;
110 | const n = view.byteLength;
111 |
112 | let lenbuf = new ArrayBuffer(4);
113 | let lenbufview = new DataView(lenbuf);
114 | let te = new TextDecoder('utf-8', {fatal: true});
115 | let acc = [];
116 | let pathBuf = [];
117 |
118 | // decode as utf-8 or copy the slice as a DataView (so that we can free the original blob eventually)
119 | function extractFile(view: DataView): string | ArrayBuffer {
120 | try {
121 | return te.decode(view);
122 | } catch {
123 | return view.buffer.slice(view.byteOffset, view.byteOffset + view.byteLength);
124 | }
125 | }
126 |
127 | while (i < n) {
128 | let tag = view.getUint8(i);
129 | i++;
130 | switch (tag) {
131 | case ArchiveFormat1Tag.File: {
132 | let zbi = findZeroByte(view, i);
133 | if (zbi === -1) { throw new Error("didnt get null byte"); } // TODO
134 | let nameLen = zbi - i;
135 | let name = te.decode(new DataView(buffer, i, nameLen));
136 | pathBuf.push(name);
137 | let path = pathBuf.join('/');
138 | pathBuf.pop();
139 | let len = view.getUint32(zbi+1, /* LE */ true);
140 | i = zbi + 1 + 4;
141 | let fileView = new DataView(buffer, i, len); // this is where we don't respect a DataView.byteLength
142 | let data = extractFile(fileView);
143 | i += len;
144 | acc.push({path, data});
145 | break;
146 | }
147 | case ArchiveFormat1Tag.Dir: {
148 | let zbi = findZeroByte(view, i);
149 | if (zbi === -1) { throw new Error("didnt get null byte"); } // TODO
150 | let nameLen = zbi - i;
151 | let name = te.decode(new DataView(buffer, i, nameLen));
152 | pathBuf.push(name);
153 | i = zbi + 1;
154 | break;
155 | }
156 | case ArchiveFormat1Tag.Pop:
157 | pathBuf.pop();
158 | break;
159 | default:
160 | return acc;
161 | }
162 | }
163 |
164 | console.timeEnd('unpackArchiveV1');
165 | return acc;
166 | }
167 |
168 | //
169 | export function combineRequestAndArchive(req, archive: Blob): Blob {
170 | let te = new TextEncoder();
171 | let reqbuf = te.encode(JSON.stringify(req));
172 | let lenbuf = new ArrayBuffer(4);
173 | new DataView(lenbuf).setUint32(0, reqbuf.byteLength, /* LE */ true);
174 |
175 | return new Blob([lenbuf, reqbuf, archive]);
176 | }
177 |
178 | // we use DataView as a standin for a ArrayBuffer slice
179 | export function splitResponseAndArchive(buf: ArrayBuffer): [any, DataView] {
180 | let lenview = new DataView(buf);
181 | let responseLen = lenview.getUint32(0, true);
182 | let responseView = new DataView(buf, 4, responseLen);
183 | let responseString = new TextDecoder().decode(responseView);
184 | let responseJson = JSON.parse(responseString);
185 | let archiveSlice = new DataView(buf, 4 + responseLen);
186 | return [responseJson, archiveSlice];
187 | }
188 |
--------------------------------------------------------------------------------
/peimage/src/index.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::fs::File;
3 | use std::io;
4 | use std::io::{Read, Seek, SeekFrom};
5 | use std::path::{Path, PathBuf};
6 |
7 | use byteorder::{ReadBytesExt, LE};
8 | use oci_spec::image as oci_image;
9 | use peinit::RootfsKind;
10 | use serde::{Deserialize, Serialize};
11 |
12 | const INDEX_JSON_MAGIC: u64 = 0x1db56abd7b82da38;
13 |
/// Identity of one container image: its manifest digest plus the
/// registry/repository:tag it was pulled as.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct PEImageId {
    /// Manifest digest, e.g. "sha256:abcd..." (colon form; see
    /// PEImageMultiIndexKeyType for the slash variant used in URLs).
    pub digest: String,
    /// Repository path within the registry, e.g. "library/busybox".
    pub repository: String,
    /// Registry host, e.g. "index.docker.io" or "quay.io".
    pub registry: String,
    /// Image tag, e.g. "1.37".
    pub tag: String,
}
21 |
22 | impl PEImageId {
23 | pub fn name(&self) -> String {
24 | format!("{}/{}:{}", self.registry, self.repository, self.tag)
25 | }
26 |
27 | pub fn upstream_link(&self) -> Option {
28 | match self.registry.as_str() {
29 | "index.docker.io" => {
30 | let tag = &self.tag;
31 | let repository = &self.repository;
32 | let digest = self.digest.replace(":", "-");
33 | Some(format!(
34 | "https://hub.docker.com/layers/{repository}/{tag}/images/{digest}"
35 | ))
36 | }
37 | "quay.io" => {
38 | let repository = &self.repository;
39 | let digest = &self.digest;
40 | Some(format!("https://quay.io/repository/{repository}/{digest}"))
41 | }
42 | _ => None,
43 | }
44 | }
45 | }
46 |
/// One image within an on-disk index: where its rootfs lives plus the OCI
/// config/manifest it was built from and its identity.
#[derive(Debug, Deserialize, Clone)]
pub struct PEImageIndexEntry {
    /// Path of the rootfs within the image file.
    pub rootfs: String,
    /// OCI image configuration for this image.
    pub config: oci_image::ImageConfiguration,
    /// OCI image manifest for this image.
    pub manifest: oci_image::ImageManifest,
    /// Registry/repository/tag/digest identity.
    pub id: PEImageId,
}
54 |
55 | #[derive(Debug, Deserialize)]
56 | pub struct PEImageIndex {
57 | pub images: Vec,
58 | }
59 |
60 | impl PEImageIndex {
61 | pub fn from_path>(p: P) -> io::Result {
62 | Self::from_file(&mut File::open(p)?)
63 | }
64 |
65 | pub fn from_file(f: &mut File) -> io::Result {
66 | let len = f.metadata()?.len();
67 | if len < (8 + 4) {
68 | return Err(io::Error::new(
69 | io::ErrorKind::InvalidData,
70 | "file too short to have magic",
71 | ));
72 | }
73 | f.seek(SeekFrom::End(-i64::from(8 + 4)))?;
74 | let data_size = f.read_u32::()?;
75 | let magic = f.read_u64::()?;
76 | if magic != INDEX_JSON_MAGIC {
77 | return Err(io::Error::new(
78 | io::ErrorKind::InvalidData,
79 | "file doesn't end with magic",
80 | ));
81 | }
82 | if u64::from(data_size) + 8 + 4 > len {
83 | return Err(io::Error::new(
84 | io::ErrorKind::InvalidData,
85 | "file too short to hold index.json",
86 | ));
87 | }
88 | f.seek(SeekFrom::End(-i64::from(8 + 4 + data_size)))?;
89 | let mut buf = vec![0; data_size as usize];
90 | f.read_exact(&mut buf)?;
91 | serde_json::from_slice(buf.as_slice()).map_err(|_| {
92 | io::Error::new(
93 | io::ErrorKind::InvalidData,
94 | "index.json not valid PEImageIndex",
95 | )
96 | })
97 | }
98 | }
99 |
/// An index entry joined with the image file it came from.
pub struct PEImageMultiIndexEntry {
    /// Path of the image file this entry was loaded from.
    pub path: PathBuf,
    /// The per-image index data.
    pub image: PEImageIndexEntry,
    /// Filesystem kind of the image file (derived from its file name).
    pub rootfs_kind: RootfsKind,
}
105 |
/// How entries in a `PEImageMultiIndex` are keyed for lookup.
pub enum PEImageMultiIndexKeyType {
    Name, // index.docker.io/library/busybox:1.37
    DigestWithSlash, // sha256/abcd1234 I wrongly thought the colon had to be escaped in urls
    Digest, // sha256:abcd1234
}
111 |
112 | pub struct PEImageMultiIndex {
113 | map: HashMap,
114 | key_type: PEImageMultiIndexKeyType,
115 | }
116 |
117 | impl PEImageMultiIndex {
118 | pub fn new(key_type: PEImageMultiIndexKeyType) -> PEImageMultiIndex {
119 | Self {
120 | key_type: key_type,
121 | map: HashMap::new(),
122 | }
123 | }
124 |
125 | pub fn from_paths>(
126 | key_type: PEImageMultiIndexKeyType,
127 | paths: &[P],
128 | ) -> io::Result {
129 | let mut ret = Self::new(key_type);
130 | for p in paths {
131 | ret.add_path(p)?;
132 | }
133 | Ok(ret)
134 | }
135 |
136 | pub fn from_paths_by_digest_with_colon>(paths: &[P]) -> io::Result {
137 | Self::from_paths(PEImageMultiIndexKeyType::Digest, paths)
138 | }
139 |
140 | pub fn add_dir>(&mut self, path: P) -> io::Result<()> {
141 | fn is_erofs_or_sqfs(p: &Path) -> bool {
142 | match p.extension() {
143 | // boo we can't match a static str against OsStr...
144 | //Some("erofs") | Some("sqfs") => true,
145 | Some(s) => s == "erofs" || s == "sqfs",
146 | _ => false,
147 | }
148 | }
149 |
150 | for entry in (path.as_ref().read_dir()?).flatten() {
151 | let p = entry.path();
152 | if p.is_file() && is_erofs_or_sqfs(&p) {
153 | self.add_path(p)?;
154 | }
155 | }
156 | Ok(())
157 | }
158 |
159 | pub fn add_path>(&mut self, path: P) -> io::Result<()> {
160 | let idx = PEImageIndex::from_path(&path)?;
161 | let rootfs_kind = RootfsKind::try_from_path_name(&path).ok_or_else(|| {
162 | io::Error::new(io::ErrorKind::InvalidData, "couldn't determine rootfs kind")
163 | })?;
164 | let pathbuf: PathBuf = path.as_ref().to_path_buf();
165 | for image in idx.images {
166 | let key = image.id.name();
167 | if self.map.contains_key(&key) {
168 | return Err(io::Error::new(
169 | io::ErrorKind::InvalidData,
170 | "duplicate image id name",
171 | ));
172 | }
173 | let entry = PEImageMultiIndexEntry {
174 | path: pathbuf.clone(),
175 | image: image.clone(),
176 | rootfs_kind: rootfs_kind,
177 | };
178 | self.insert(&image.id, entry);
179 | }
180 | Ok(())
181 | }
182 |
183 | fn insert(&mut self, id: &PEImageId, entry: PEImageMultiIndexEntry) {
184 | match self.key_type {
185 | PEImageMultiIndexKeyType::Name => {
186 | self.map.insert(id.name(), entry);
187 | }
188 | PEImageMultiIndexKeyType::DigestWithSlash => {
189 | self.map.insert(id.digest.replace(":", "/"), entry);
190 | }
191 | PEImageMultiIndexKeyType::Digest => {
192 | self.map.insert(id.digest.clone(), entry);
193 | }
194 | }
195 | }
196 |
197 | pub fn get<'a>(&'a self, key: &str) -> Option<&'a PEImageMultiIndexEntry> {
198 | self.map.get(key)
199 | }
200 |
201 | pub fn map(&self) -> &HashMap {
202 | &self.map
203 | }
204 |
205 | pub fn is_empty(&self) -> bool {
206 | self.map.is_empty()
207 | }
208 | }
209 |
210 | impl Default for PEImageMultiIndex {
211 | fn default() -> PEImageMultiIndex {
212 | PEImageMultiIndex::new(PEImageMultiIndexKeyType::Digest)
213 | }
214 | }
215 |
--------------------------------------------------------------------------------
/peerofs/src/dump.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashSet;
2 | use std::fs::File;
3 |
4 | use memmap2::MmapOptions;
5 | use rustix::fs::FileType;
6 |
7 | use peerofs::disk::{DirentFileType, Erofs, Error, Inode, Layout};
8 | use env_logger;
9 |
10 | #[allow(dead_code)]
11 | fn all_inodes<'a>(erofs: &Erofs<'a>) -> Result>, Error> {
12 | let mut seen = HashSet::new();
13 | let mut ret = vec![];
14 | let mut q = vec![erofs.get_root_inode()?.disk_id()];
15 |
16 | while let Some(cur) = q.pop() {
17 | if !seen.insert(cur) {
18 | continue;
19 | }
20 | let inode = erofs.get_inode(cur)?;
21 | if inode.file_type() == FileType::Directory {
22 | let dirents = erofs.get_dirents(&inode)?;
23 | //eprintln!("iterating dirent id {:?}", inode.disk_id());
24 | for item in dirents.iter()? {
25 | let item = item?;
26 | q.push(item.disk_id.try_into().expect("why is this u64"));
27 | }
28 | }
29 | ret.push(inode);
30 | }
31 | Ok(ret)
32 | }
33 |
34 | fn main() {
35 | env_logger::init();
36 | let args: Vec<_> = std::env::args().collect();
37 | let image = args.get(1).expect("give me an image name");
38 | let file = File::open(image).expect("file open failed");
39 | let mmap = unsafe { MmapOptions::new().map(&file).expect("mmap failed") };
40 |
41 | let erofs = Erofs::new(&mmap).expect("fail to create view");
42 |
43 | let inode: u32 = match args.get(2) {
44 | Some(s) => s.parse::().expect("bad int"),
45 | None => erofs
46 | .get_root_inode()
47 | .expect("root inode get failed")
48 | .disk_id(),
49 | };
50 |
51 | println!("{:?}", erofs.sb);
52 |
53 | let dir = erofs.get_inode(inode).expect("inode get failed");
54 | //println!("{:?}", root_dir);
55 | //let dir = erofs.get_inode(2427390).expect("inode get failed"); //
56 | //let dir = erofs.get_inode(39099352).expect("inode get failed"); // usr/share/doc
57 | println!("{:?}", dir);
58 | println!("layout={:?}", dir.layout());
59 | //if let Some(xattrs) = erofs.get_xattrs(&dir).unwrap() {
60 | // for xattr in xattrs.iter() {
61 | // if let Ok(xattr) = xattr {
62 | // println!("xattr key={} value={}", xattr.name.escape_ascii().to_string(), xattr.value.escape_ascii().to_string())
63 | // }
64 | // }
65 | //}
66 | let dirents = erofs.get_dirents(&dir).expect("get_dirents failed");
67 |
68 | for item in dirents.iter().expect("couldn't create iterator") {
69 | let item = item.expect("bad item");
70 | let inode = erofs.get_inode_from_dirent(&item).unwrap();
71 | print!(
72 | " {:>20} {:4} {:?} {}/{} {:o}",
73 | item.name.escape_ascii().to_string(),
74 | item.disk_id,
75 | item.file_type,
76 | inode.uid(),
77 | inode.gid(),
78 | inode.mode()
79 | );
80 | if let Some(xattrs) = erofs.get_xattrs(&inode).unwrap() {
81 | //println!("header {:?}", xattrs.header);
82 | print!(" {{");
83 | for xattr in xattrs.iter() {
84 | if let Ok(xattr) = xattr {
85 | let prefix = erofs
86 | .get_xattr_prefix(&xattr)
87 | .unwrap()
88 | .escape_ascii()
89 | .to_string();
90 | print!(
91 | "{}{}={}, ",
92 | prefix,
93 | xattr.name.escape_ascii(),
94 | xattr.value.escape_ascii(),
95 | );
96 | } else {
97 | eprintln!("error getting xattr {:?}", xattr);
98 | }
99 | }
100 | print!("}}");
101 | }
102 | //println!("{:?}", inode);
103 | match item.file_type {
104 | //DirentFileType::Directory => {
105 | // let child_inode = erofs.get_inode_from_dirent(&item).expect("fail to get child inode");
106 | // let dir_dirents = erofs.get_dirents(&child_inode).expect("fail to get child dirents");
107 | // for item in dirents.iter().expect("couldn't create iterator") {
108 | // println!(" {:?}", item);
109 | // }
110 | //}
111 | DirentFileType::Symlink => {
112 | let inode = erofs.get_inode_from_dirent(&item).unwrap();
113 | let link = erofs.get_symlink(&inode).unwrap();
114 | print!(" -> {}", link.escape_ascii());
115 | }
116 | DirentFileType::RegularFile => {
117 | let inode = erofs.get_inode_from_dirent(&item).unwrap();
118 | print!(
119 | " size={} ({:?} block={:x})",
120 | inode.data_size(),
121 | inode.layout(),
122 | inode.raw_block_addr()
123 | );
124 | }
125 | _ => {}
126 | }
127 | println!();
128 |
129 | match item.file_type {
130 | DirentFileType::RegularFile => {
131 | let inode = erofs.get_inode_from_dirent(&item).unwrap();
132 | match inode.layout() {
133 | Layout::CompressedFull => {
134 | let n_compressed_blocks = inode.raw_compressed_blocks();
135 | // weird thing is compressed_blocks isn't read during decompression or
136 | // anything
137 | println!(
138 | "size={} compressed_blocks={}",
139 | inode.data_size(),
140 | n_compressed_blocks
141 | );
142 | erofs.inspect(&inode, 64).unwrap();
143 | let header = erofs.get_map_header(&inode).unwrap();
144 | println!("{:?}", header);
145 | //for (i, lci) in erofs.get_logical_cluster_indices(&inode).unwrap().iter().enumerate() {
146 | // println!("{i} {:?}", lci);
147 | //}
148 | let mut f = File::create("/tmp/out").unwrap();
149 | erofs.get_compressed_data(&inode, &mut f).unwrap();
150 | //for (i, byte) in data.iter().enumerate() {
151 | // print!("{byte:02x}");
152 | // if i > 0 && i % 64 == 0 { println!(); }
153 | //}
154 | }
155 | _ => {}
156 | }
157 | }
158 | _ => {}
159 | }
160 | }
161 |
162 | //let inodes = all_inodes(&erofs).expect("inode gather fail");
163 | //if let Some(inode) = inodes
164 | // .iter()
165 | // //.find(|x| x.layout() == Layout::CompressedCompact)
166 | // .find(|x| x.xattr_count() > 0)
167 | //{
168 | // println!(
169 | // "inode disk_id={:?} {:?} {:?} size={:?} {:?}",
170 | // inode.disk_id(),
171 | // inode.file_type(),
172 | // inode.layout(),
173 | // inode.data_size(),
174 | // inode.raw_block_addr()
175 | // );
176 | // //let map = erofs
177 | // // .get_map_header(&inode)
178 | // // .expect("failed to get map header");
179 | // //println!("{:?}", map);
180 | // let xattr_header = erofs.get_xattr_header(inode).expect("should have inode header");
181 | // println!("{:?}", xattr_header);
182 | //}
183 | //
184 | //if let Some(inode) = find_with_xattr(&erofs).unwrap() {
185 | // println!("yo got inode with erofs {:?}", inode);
186 | //} else {
187 | // println!("didn't find anything with nonzero xattr size");
188 | //}
189 | }
190 |
--------------------------------------------------------------------------------