├── .cargo └── config.toml ├── .ci ├── .gitignore ├── build-and-push-images.sh ├── cache-shell.sh ├── common.sh └── run ├── .editorconfig ├── .envrc ├── .gitattributes ├── .github ├── install-attic-ci.sh └── workflows │ ├── book.yml │ ├── build.yml │ └── lint.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── attic ├── Cargo.toml ├── benches │ └── chunking.rs ├── build.rs └── src │ ├── api │ ├── binary_cache.rs │ ├── mod.rs │ └── v1 │ │ ├── cache_config.rs │ │ ├── get_missing_paths.rs │ │ ├── mod.rs │ │ └── upload_path.rs │ ├── cache.rs │ ├── chunking │ └── mod.rs │ ├── error.rs │ ├── hash │ ├── mod.rs │ └── tests │ │ ├── .gitattributes │ │ ├── blob │ │ └── mod.rs │ ├── lib.rs │ ├── mime.rs │ ├── nix_store │ ├── README.md │ ├── bindings │ │ ├── mod.rs │ │ ├── nix.cpp │ │ └── nix.hpp │ ├── mod.rs │ ├── nix_store.rs │ └── tests │ │ ├── .gitattributes │ │ ├── README.md │ │ ├── drv │ │ ├── no-deps.nix │ │ └── with-deps.nix │ │ ├── mod.rs │ │ ├── nar │ │ ├── 3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final │ │ ├── 3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.export │ │ ├── 3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.nar │ │ ├── 544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b │ │ ├── 544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.export │ │ ├── 544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.nar │ │ ├── n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a │ │ ├── n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.export │ │ ├── n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.nar │ │ ├── nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps │ │ ├── nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.export │ │ └── nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.nar │ │ └── test_nar.rs │ ├── signing │ ├── mod.rs │ └── tests.rs │ ├── stream.rs │ ├── testing │ ├── mod.rs │ └── shadow_store │ │ ├── mod.rs │ │ └── nix-wrapper.sh │ └── util.rs ├── book ├── 
.gitignore ├── book.toml ├── colorized-help.nix ├── default.nix ├── src │ ├── SUMMARY.md │ ├── admin-guide │ │ ├── README.md │ │ ├── chunking.md │ │ └── deployment │ │ │ └── nixos.md │ ├── faqs.md │ ├── introduction.md │ ├── reference │ │ ├── README.md │ │ ├── attic-cli.md │ │ ├── atticadm-cli.md │ │ └── atticd-cli.md │ ├── tutorial.md │ └── user-guide │ │ └── README.md └── theme │ └── highlight.js ├── ci-installer.nix ├── client ├── Cargo.toml └── src │ ├── api │ └── mod.rs │ ├── cache.rs │ ├── cli.rs │ ├── command │ ├── cache.rs │ ├── get_closure.rs │ ├── login.rs │ ├── mod.rs │ ├── push.rs │ ├── use.rs │ └── watch_store.rs │ ├── config.rs │ ├── main.rs │ ├── nix_config.rs │ ├── nix_netrc.rs │ ├── push.rs │ └── version.rs ├── crane.nix ├── default.nix ├── flake-compat.nix ├── flake.lock ├── flake.nix ├── flake ├── devshells.nix ├── distributor.nix ├── integration-tests.nix ├── nix-versions.nix ├── nixos.nix ├── overlays.nix └── packages.nix ├── garnix.yaml ├── integration-tests ├── README.md ├── basic │ └── default.nix ├── default.nix └── nixpkgs.nix ├── justfile ├── nixos └── atticd.nix ├── package.nix ├── renovate.json ├── server ├── Cargo.toml └── src │ ├── access │ ├── http.rs │ ├── mod.rs │ └── tests.rs │ ├── adm │ ├── command │ │ ├── make_token.rs │ │ └── mod.rs │ └── main.rs │ ├── api │ ├── binary_cache.rs │ ├── mod.rs │ ├── placeholder.html │ └── v1 │ │ ├── cache_config.rs │ │ ├── get_missing_paths.rs │ │ ├── mod.rs │ │ └── upload_path.rs │ ├── config-template.toml │ ├── config.rs │ ├── database │ ├── entity │ │ ├── cache.rs │ │ ├── chunk.rs │ │ ├── chunkref.rs │ │ ├── mod.rs │ │ ├── nar.rs │ │ └── object.rs │ ├── migration │ │ ├── m20221227_000001_create_cache_table.rs │ │ ├── m20221227_000002_create_nar_table.rs │ │ ├── m20221227_000003_create_object_table.rs │ │ ├── m20221227_000004_add_object_last_accessed.rs │ │ ├── m20221227_000005_add_cache_retention_period.rs │ │ ├── m20230103_000001_add_object_created_by.rs │ │ ├── 
m20230112_000001_add_chunk_table.rs │ │ ├── m20230112_000002_add_chunkref_table.rs │ │ ├── m20230112_000003_add_nar_num_chunks.rs │ │ ├── m20230112_000004_migrate_nar_remote_files_to_chunks.rs │ │ ├── m20230112_000005_drop_old_nar_columns.rs │ │ ├── m20230112_000006_add_nar_completeness_hint.rs │ │ └── mod.rs │ └── mod.rs │ ├── error.rs │ ├── gc.rs │ ├── lib.rs │ ├── main.rs │ ├── middleware.rs │ ├── narinfo │ ├── mod.rs │ └── tests.rs │ ├── nix_manifest │ ├── deserializer.rs │ ├── mod.rs │ ├── serializer.rs │ └── tests.rs │ ├── oobe.rs │ └── storage │ ├── local.rs │ ├── mod.rs │ └── s3.rs ├── shell.nix └── token ├── Cargo.toml └── src ├── lib.rs ├── tests.rs └── util.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = ["--cfg", "tokio_unstable"] 3 | rustdocflags = ["--cfg", "tokio_unstable"] 4 | -------------------------------------------------------------------------------- /.ci/.gitignore: -------------------------------------------------------------------------------- 1 | /cached-shell 2 | -------------------------------------------------------------------------------- /.ci/build-and-push-images.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | if [[ "$#" -lt "2" ]]; then 5 | >&2 echo "Usage: $0 ..." 
6 | >&2 echo "Example: $0 ghcr.io/zhaofengli/attic main abcd123" 7 | exit 1 8 | fi 9 | 10 | cleanup() { 11 | if [[ -f "${manifest_spec}" ]]; then 12 | rm "${manifest_spec}" 13 | fi 14 | } 15 | trap cleanup EXIT 16 | 17 | image_name="$1" 18 | tags=("${@:2}") 19 | 20 | manifest_spec="$(mktemp -t attic-manifest-spec.XXXXXXXXXX)" 21 | 22 | declare -a digests 23 | 24 | emit_header() { 25 | echo "image: ${image_name}" 26 | echo "tags:" 27 | for tag in "${tags[@]}"; do 28 | echo "- ${tag}" 29 | done 30 | echo "manifests:" 31 | } 32 | 33 | push_digest() { 34 | source_image="docker-archive:$1" 35 | digest="$(skopeo inspect "${source_image}" | jq -r .Digest)" 36 | target_image="docker://${image_name}@${digest}" 37 | 38 | >&2 echo "${source_image} ▸ ${target_image}" 39 | >&2 skopeo copy --insecure-policy "${source_image}" "${target_image}" 40 | 41 | echo -n "- " 42 | skopeo inspect "${source_image}" | \ 43 | jq '{platform: {architecture: .Architecture, os: .Os}, image: ($image_name + "@" + .Digest)}' \ 44 | --arg image_name "${image_name}" 45 | } 46 | 47 | >>"${manifest_spec}" emit_header 48 | 49 | nix build .#attic-server-image .#attic-server-image-aarch64 -L --print-out-paths | \ 50 | while read -r output; do 51 | >>"${manifest_spec}" push_digest "${output}" 52 | done 53 | 54 | >&2 echo "----------" 55 | >&2 echo "Generated manifest-tool spec:" 56 | >&2 echo "----------" 57 | cat "${manifest_spec}" 58 | >&2 echo "----------" 59 | 60 | manifest-tool push from-spec "${manifest_spec}" 61 | -------------------------------------------------------------------------------- /.ci/cache-shell.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | source "$(dirname "${BASH_SOURCE[0]}")/common.sh" 3 | 4 | >&2 echo "Caching dev shell" 5 | nix print-dev-env "${base}#" >"${cached_shell}" 6 | -------------------------------------------------------------------------------- /.ci/common.sh: 
-------------------------------------------------------------------------------- 1 | # Use as: 2 | # 3 | # source "$(dirname "${BASH_SOURCE[0]}")/common.sh" 4 | 5 | set -euo pipefail 6 | base="$(readlink -f $(dirname "${BASH_SOURCE[0]}")/..)" 7 | cached_shell="${base}/.ci/cached-shell" 8 | -------------------------------------------------------------------------------- /.ci/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | source "$(dirname "${BASH_SOURCE[0]}")/common.sh" 3 | 4 | if [[ ! -f "${cached_shell}" ]]; then 5 | >&2 echo "No cached shell in ${cached_shell}" 6 | exit 1 7 | fi 8 | 9 | . "${cached_shell}" 10 | exec "$@" 11 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig configuration for Attic 2 | 3 | # Top-most EditorConfig file 4 | root = true 5 | 6 | # Unix-style newlines with a newline ending every file, utf-8 charset 7 | [*] 8 | end_of_line = lf 9 | insert_final_newline = true 10 | trim_trailing_whitespace = true 11 | charset = utf-8 12 | 13 | # Rust 14 | [*.rs] 15 | indent_style = space 16 | indent_size = 4 17 | 18 | # Misc 19 | [*.{yaml,yml,md,nix}] 20 | indent_style = space 21 | indent_size = 2 22 | 23 | [attic/src/nix_store/tests/nar/**] 24 | charset = unset 25 | end_of_line = unset 26 | insert_final_newline = unset 27 | trim_trailing_whitespace = unset 28 | indent_style = unset 29 | indent_size = unset 30 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | if ! has nix_direnv_version || ! 
nix_direnv_version 2.2.0; then 2 | source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.2.0/direnvrc" "sha256-5EwyKnkJNQeXrRkYbwwRBcXbibosCJqyIUuz9Xq+LRc=" 3 | fi 4 | 5 | use_flake 6 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | /book/theme/highlight.js linguist-generated=true 2 | -------------------------------------------------------------------------------- /.github/install-attic-ci.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | expr=$(mktemp) 4 | 5 | cleanup() { 6 | rm -f "$expr" 7 | } 8 | 9 | cat >"$expr" <<'EOF' 10 | { system ? builtins.currentSystem }: 11 | let 12 | maybeStorePath = if builtins ? langVersion && builtins.lessThan 1 builtins.langVersion 13 | then builtins.storePath 14 | else x: x; 15 | mkFakeDerivation = attrs: outputs: 16 | let 17 | outputNames = builtins.attrNames outputs; 18 | common = attrs // outputsSet // 19 | { type = "derivation"; 20 | outputs = outputNames; 21 | all = outputsList; 22 | }; 23 | outputToAttrListElement = outputName: 24 | { name = outputName; 25 | value = common // { 26 | inherit outputName; 27 | outPath = maybeStorePath (builtins.getAttr outputName outputs); 28 | }; 29 | }; 30 | outputsList = map outputToAttrListElement outputNames; 31 | outputsSet = builtins.listToAttrs outputsList; 32 | in outputsSet; 33 | in 34 | 35 | { 36 | "x86_64-linux" = (mkFakeDerivation { 37 | name = "attic-static-x86_64-unknown-linux-musl-0.1.0"; 38 | system = "x86_64-linux"; 39 | } { 40 | out = "/nix/store/qi9cixkq0pj60yw1y5l28hid7f53310i-attic-static-x86_64-unknown-linux-musl-0.1.0"; 41 | }).out; 42 | 43 | "aarch64-linux" = (mkFakeDerivation { 44 | name = "attic-0.1.0"; 45 | system = "aarch64-linux"; 46 | } { 47 | out = "/nix/store/ng1aykmyk7ayzrn69hlv4n4qzbywj5rk-attic-0.1.0"; 48 | }).out; 49 | 50 
| "x86_64-darwin" = (mkFakeDerivation { 51 | name = "attic-0.1.0"; 52 | system = "x86_64-darwin"; 53 | } { 54 | out = "/nix/store/zh35fhdyxnxzp1mjyla95g2yv2ayzc3f-attic-0.1.0"; 55 | }).out; 56 | 57 | "aarch64-darwin" = (mkFakeDerivation { 58 | name = "attic-0.1.0"; 59 | system = "aarch64-darwin"; 60 | } { 61 | out = "/nix/store/z9i4grsf3h0wdlbhg95v6nanxiqx5xvp-attic-0.1.0"; 62 | }).out; 63 | 64 | }.${system} 65 | 66 | EOF 67 | 68 | nix-env --substituters "https://staging.attic.rs/attic-ci https://cache.nixos.org" --trusted-public-keys "attic-ci:U5Sey4mUxwBXM3iFapmP0/ogODXywKLRNgRPQpEXxbo= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" -if "$expr" 69 | -------------------------------------------------------------------------------- /.github/workflows/book.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Book 2 | 3 | on: 4 | push: 5 | 6 | permissions: 7 | contents: read 8 | pages: write 9 | id-token: write 10 | 11 | jobs: 12 | deploy-unstable: 13 | name: Deploy 14 | 15 | runs-on: ubuntu-latest 16 | if: github.repository == 'zhaofengli/attic' 17 | 18 | steps: 19 | - uses: actions/checkout@v4.1.7 20 | 21 | - uses: DeterminateSystems/nix-installer-action@v15 22 | continue-on-error: true # Self-hosted runners already have Nix installed 23 | 24 | - name: Install Attic 25 | run: ./.github/install-attic-ci.sh 26 | 27 | - name: Configure Attic 28 | run: | 29 | export PATH=$HOME/.nix-profile/bin:$PATH # FIXME 30 | attic login staging https://staging.attic.rs/ "$ATTIC_TOKEN" 31 | attic use attic-ci 32 | env: 33 | ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} 34 | 35 | - name: Build book 36 | run: nix build .#book -L 37 | 38 | - name: Copy book artifact 39 | run: | 40 | cp --recursive --dereference --no-preserve=mode,ownership result public 41 | 42 | - name: Upload book artifact 43 | uses: actions/upload-pages-artifact@v3.0.1 44 | with: 45 | path: public 46 | 47 | - name: Deploy book 48 | uses: 
actions/deploy-pages@v4.0.5 49 | 50 | # TODO: Just take a diff of the list of store paths, also abstract all of this out 51 | - name: Push build artifacts 52 | run: | 53 | export PATH=$HOME/.nix-profile/bin:$PATH # FIXME 54 | if [ -n "$ATTIC_TOKEN" ]; then 55 | nix build .#book --no-link --print-out-paths -L | \ 56 | xargs attic push attic-ci 57 | fi 58 | env: 59 | ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} 60 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | pull_request: 5 | push: 6 | jobs: 7 | lint: 8 | name: Lint 9 | 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v4.1.7 14 | 15 | - name: Install current Bash on macOS 16 | if: runner.os == 'macOS' 17 | run: | 18 | command -v brew && brew install bash || true 19 | 20 | - uses: DeterminateSystems/nix-installer-action@v15 21 | continue-on-error: true # Self-hosted runners already have Nix installed 22 | 23 | - name: Install Attic 24 | run: | 25 | if ! 
command -v attic &> /dev/null; then 26 | ./.github/install-attic-ci.sh 27 | fi 28 | 29 | - name: Configure Attic 30 | run: | 31 | : "${ATTIC_SERVER:=https://staging.attic.rs/}" 32 | : "${ATTIC_CACHE:=attic-ci}" 33 | echo ATTIC_CACHE=$ATTIC_CACHE >>$GITHUB_ENV 34 | export PATH=$HOME/.nix-profile/bin:$PATH # FIXME 35 | attic login --set-default ci "$ATTIC_SERVER" "$ATTIC_TOKEN" 36 | attic use "$ATTIC_CACHE" 37 | env: 38 | ATTIC_SERVER: ${{ secrets.ATTIC_SERVER }} 39 | ATTIC_CACHE: ${{ secrets.ATTIC_CACHE }} 40 | ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} 41 | 42 | - name: Cache dev shell 43 | run: | 44 | .ci/cache-shell.sh 45 | system=$(nix-instantiate --eval -E 'builtins.currentSystem') 46 | echo system=$system >>$GITHUB_ENV 47 | 48 | - name: Check rustfmt 49 | run: .ci/run just ci-rustfmt 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .direnv 2 | 3 | /target 4 | result 5 | 6 | fly.toml 7 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | 4 | members = [ 5 | "attic", 6 | "client", 7 | "server", 8 | "token", 9 | ] 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2022 Zhaofeng Li and the Attic contributors 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Attic 2 | 3 | **Attic** is a self-hostable Nix Binary Cache server backed by an S3-compatible storage provider. 4 | It has support for global deduplication and garbage collection. 5 | 6 | Attic is an early prototype. 7 | 8 | ``` 9 | ⚙️ Pushing 5 paths to "demo" on "local" (566 already cached, 2001 in upstream)... 10 | ✅ gnvi1x7r8kl3clzx0d266wi82fgyzidv-steam-run-fhs (29.69 MiB/s) 11 | ✅ rw7bx7ak2p02ljm3z4hhpkjlr8rzg6xz-steam-fhs (30.56 MiB/s) 12 | ✅ y92f9y7qhkpcvrqhzvf6k40j6iaxddq8-0p36ammvgyr55q9w75845kw4fw1c65ln-source (19.96 MiB/s) 13 | 🕒 vscode-1.74.2 ███████████████████████████████████████ 345.66 MiB (41.32 MiB/s) 14 | 🕓 zoom-5.12.9.367 ███████████████████████████ 329.36 MiB (39.47 MiB/s) 15 | ``` 16 | 17 | ## Try it out (15 minutes) 18 | 19 | Let's [spin up Attic](https://docs.attic.rs/tutorial.html) in just 15 minutes. 20 | And yes, it works on macOS too! 21 | 22 | ## Goals 23 | 24 | - **Multi-Tenancy**: Create a private cache for yourself, and one for friends and co-workers. Tenants are mutually untrusting and cannot pollute the views of other caches. 25 | - **Global Deduplication**: Individual caches (tenants) are simply restricted views of the content-addressed NAR Store and Chunk Store. When paths are uploaded, a mapping is created to grant the local cache access to the global NAR. 
26 | - **Managed Signing**: Signing is done on-the-fly by the server when store paths are fetched. The user pushing store paths does not have access to the signing key. 27 | - **Scalabilty**: Attic can be easily replicated. It's designed to be deployed to serverless platforms like fly.io but also works nicely in a single-machine setup. 28 | - **Garbage Collection**: Unused store paths can be garbage-collected in an LRU manner. 29 | 30 | ## Licensing 31 | 32 | Attic is available under the **Apache License, Version 2.0**. 33 | See `LICENSE` for details. 34 | 35 | By contributing to the project, you agree to license your work under the aforementioned license. 36 | -------------------------------------------------------------------------------- /attic/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "attic" 3 | version = "0.1.0" 4 | edition = "2021" 5 | publish = false 6 | 7 | [dependencies] 8 | async-stream = { version = "0.3.5", optional = true } 9 | base64 = "0.22.1" 10 | bytes = "1.4.0" 11 | displaydoc = "0.2.4" 12 | digest = "0.10.7" 13 | ed25519-compact = "2.0.4" 14 | fastcdc = "3.0.3" 15 | futures = "0.3.28" 16 | hex = "0.4.3" 17 | lazy_static = "1.4.0" 18 | log = "0.4.18" 19 | nix-base32 = "0.2.0" 20 | regex = "1.8.3" 21 | serde = { version = "1.0.163", features = ["derive"] } 22 | serde_yaml = "0.9.21" 23 | serde_with = "3.0.0" 24 | sha2 = "0.10.6" 25 | tempfile = "3" 26 | wildmatch = "2.1.1" 27 | xdg = "2.5.0" 28 | 29 | # Native libnixstore bindings. 
30 | cxx = { version = "1.0", optional = true } 31 | 32 | [dependencies.tokio] 33 | version = "1.28.2" 34 | optional = true 35 | features = [ 36 | "io-util", 37 | "macros", 38 | "sync", 39 | ] 40 | 41 | [dev-dependencies] 42 | criterion = { version = "0.5", features = ["html_reports", "async_tokio"] } 43 | fastcdc = { version = "*", features = ["tokio"] } 44 | serde_json = "1.0.96" 45 | 46 | [build-dependencies] 47 | cc = "1.1.13" 48 | cxx-build = { version = "1.0", optional = true } 49 | pkg-config = "0.3.27" 50 | tempfile = "3" 51 | version-compare = "0.2.0" 52 | 53 | [features] 54 | default = [ 55 | "chunking", 56 | "nix_store", 57 | "stream", 58 | "tokio", 59 | ] 60 | 61 | # Chunking. 62 | chunking = ["tokio", "stream", "dep:async-stream"] 63 | 64 | # Native libnixstore bindings. 65 | # 66 | # When disabled, the native Rust portions of nix_store can still be used. 67 | nix_store = [ 68 | "tokio", 69 | "tokio/fs", 70 | "tokio/process", 71 | "dep:cxx", 72 | "dep:cxx-build", 73 | ] 74 | 75 | # Stream utilities. 76 | stream = ["tokio", "dep:async-stream"] 77 | 78 | # Tokio runtime. 
79 | tokio = ["dep:tokio", "tokio/rt", "tokio/time"] 80 | 81 | [[bench]] 82 | name = "chunking" 83 | harness = false 84 | -------------------------------------------------------------------------------- /attic/benches/chunking.rs: -------------------------------------------------------------------------------- 1 | use std::io::Cursor; 2 | 3 | use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; 4 | use futures::StreamExt; 5 | 6 | use attic::chunking::chunk_stream; 7 | use attic::testing::{get_fake_data, get_runtime}; 8 | 9 | struct Parameters { 10 | min_size: u32, 11 | avg_size: u32, 12 | max_size: u32, 13 | } 14 | 15 | pub fn bench_chunking(c: &mut Criterion) { 16 | let rt = get_runtime(); 17 | let data = get_fake_data(128 * 1024 * 1024); // 128 MiB 18 | 19 | let cases = [ 20 | ( 21 | "2K,4K,8K", 22 | Parameters { 23 | min_size: 2 * 1024, 24 | avg_size: 4 * 1024, 25 | max_size: 8 * 1024, 26 | }, 27 | ), 28 | ( 29 | "8K,16K,32K", 30 | Parameters { 31 | min_size: 8 * 1024, 32 | avg_size: 16 * 1024, 33 | max_size: 32 * 1024, 34 | }, 35 | ), 36 | ( 37 | "1M,4M,16M", 38 | Parameters { 39 | min_size: 1024 * 1024, 40 | avg_size: 4 * 1024 * 1024, 41 | max_size: 16 * 1024 * 1024, 42 | }, 43 | ), 44 | ]; 45 | 46 | let mut group = c.benchmark_group("chunking"); 47 | group.throughput(Throughput::Bytes(data.len() as u64)); 48 | 49 | for (case, params) in cases { 50 | group.bench_with_input(BenchmarkId::new("ronomon", case), ¶ms, |b, params| { 51 | b.to_async(&rt).iter(|| async { 52 | let cursor = Cursor::new(&data); 53 | let mut chunks = chunk_stream( 54 | cursor, 55 | params.min_size as usize, 56 | params.avg_size as usize, 57 | params.max_size as usize, 58 | ); 59 | while let Some(chunk) = chunks.next().await { 60 | black_box(chunk).unwrap(); 61 | } 62 | }) 63 | }); 64 | group.bench_with_input(BenchmarkId::new("v2020", case), ¶ms, |b, params| { 65 | b.to_async(&rt).iter(|| async { 66 | let cursor = Cursor::new(&data); 67 | let mut 
chunks = fastcdc::v2020::AsyncStreamCDC::new( 68 | cursor, 69 | params.min_size, 70 | params.avg_size, 71 | params.max_size, 72 | ); 73 | let mut chunks = Box::pin(chunks.as_stream()); 74 | while let Some(chunk) = chunks.next().await { 75 | black_box(chunk).unwrap(); 76 | } 77 | }) 78 | }); 79 | } 80 | group.finish(); 81 | } 82 | 83 | criterion_group!(benches, bench_chunking); 84 | criterion_main!(benches); 85 | -------------------------------------------------------------------------------- /attic/build.rs: -------------------------------------------------------------------------------- 1 | //! Build script. 2 | //! 3 | //! We link against libnixstore to perform actions on the Nix Store. 4 | 5 | fn main() { 6 | #[cfg(feature = "nix_store")] 7 | nix_store::build_bridge(); 8 | } 9 | 10 | #[cfg(feature = "nix_store")] 11 | mod nix_store { 12 | use cc::Build; 13 | use version_compare::Version; 14 | 15 | struct NixDependency { 16 | version: String, 17 | } 18 | 19 | impl NixDependency { 20 | fn detect() -> Self { 21 | let library = pkg_config::Config::new() 22 | .cargo_metadata(false) 23 | .atleast_version("2.4") 24 | .probe("nix-main") 25 | .expect("Failed to find nix-main >=2.4 through pkg-config"); 26 | 27 | Self { 28 | version: library.version, 29 | } 30 | } 31 | 32 | fn apply_version_flags(&self, build: &mut Build) { 33 | let version = Version::from(&self.version).unwrap(); 34 | 35 | if version >= Version::from("2.20").unwrap() { 36 | build.flag("-DATTIC_NIX_2_20"); 37 | } 38 | } 39 | 40 | fn emit_cargo_metadata(&self) { 41 | pkg_config::Config::new() 42 | .atleast_version("2.4") 43 | .probe("nix-store") 44 | .unwrap(); 45 | 46 | pkg_config::Config::new() 47 | .atleast_version("2.4") 48 | .probe("nix-main") 49 | .unwrap(); 50 | } 51 | } 52 | 53 | pub fn build_bridge() { 54 | let nix_dep = NixDependency::detect(); 55 | 56 | // Temporary workaround for issue in 57 | let hacky_include = { 58 | let dir = 59 | tempfile::tempdir().expect("Failed to create temporary 
directory for workaround"); 60 | std::fs::write(dir.path().join("uds-remote-store.md"), "\"\"").unwrap(); 61 | dir 62 | }; 63 | 64 | let mut build = cxx_build::bridge("src/nix_store/bindings/mod.rs"); 65 | build 66 | .file("src/nix_store/bindings/nix.cpp") 67 | .flag("-std=c++2a") 68 | .flag("-O2") 69 | .flag("-include") 70 | .flag("nix/config.h") 71 | .flag("-idirafter") 72 | .flag(hacky_include.path().to_str().unwrap()) 73 | // In Nix 2.19+, nix/args/root.hh depends on being able to #include "args.hh" (which is in its parent directory), for some reason 74 | .flag("-I") 75 | .flag(concat!(env!("NIX_INCLUDE_PATH"), "/nix")); 76 | 77 | nix_dep.apply_version_flags(&mut build); 78 | 79 | build.compile("nixbinding"); 80 | 81 | println!("cargo:rerun-if-changed=src/nix_store/bindings"); 82 | 83 | // the -l flags must be after -lnixbinding 84 | nix_dep.emit_cargo_metadata(); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /attic/src/api/binary_cache.rs: -------------------------------------------------------------------------------- 1 | //! Nix Binary Cache server. 2 | //! 3 | //! This module contains Attic-specific extensions to the 4 | //! Nix Binary Cache API. 5 | 6 | /// Header indicating a cache's visibility. 7 | pub const ATTIC_CACHE_VISIBILITY: &str = "X-Attic-Cache-Visibility"; 8 | -------------------------------------------------------------------------------- /attic/src/api/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod binary_cache; 2 | pub mod v1; 3 | -------------------------------------------------------------------------------- /attic/src/api/v1/cache_config.rs: -------------------------------------------------------------------------------- 1 | //! Cache configuration endpoint. 
2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use crate::signing::NixKeypair; 6 | 7 | #[derive(Debug, Serialize, Deserialize)] 8 | pub struct CreateCacheRequest { 9 | /// The keypair of the cache. 10 | pub keypair: KeypairConfig, 11 | 12 | /// Whether the cache is public or not. 13 | /// 14 | /// Anonymous clients are implicitly granted the "pull" 15 | /// permission to public caches. 16 | pub is_public: bool, 17 | 18 | /// The Nix store path this binary cache uses. 19 | /// 20 | /// This is usually `/nix/store`. 21 | pub store_dir: String, 22 | 23 | /// The priority of the binary cache. 24 | /// 25 | /// A lower number denotes a higher priority. 26 | /// has a priority of 40. 27 | pub priority: i32, 28 | 29 | /// A list of signing key names of upstream caches. 30 | /// 31 | /// The list serves as a hint to clients to avoid uploading 32 | /// store paths signed with such keys. 33 | pub upstream_cache_key_names: Vec, 34 | } 35 | 36 | /// Configuration of a cache. 37 | /// 38 | /// Specifying `None` means using the default value or 39 | /// keeping the current value. 40 | #[derive(Debug, Serialize, Deserialize)] 41 | pub struct CacheConfig { 42 | /// The keypair of the cache. 43 | /// 44 | /// The keypair is never returned by the server, but can 45 | /// be configured by the client. 46 | #[serde(skip_serializing_if = "Option::is_none")] 47 | pub keypair: Option, 48 | 49 | /// The Nix binary cache endpoint of the cache. 50 | /// 51 | /// This is the endpoint that should be added to `nix.conf`. 52 | /// This is read-only and may not be available. 53 | #[serde(skip_serializing_if = "Option::is_none")] 54 | pub substituter_endpoint: Option, 55 | 56 | /// The Attic API endpoint. 57 | /// 58 | /// This is read-only and may not be available. 59 | #[serde(skip_serializing_if = "Option::is_none")] 60 | pub api_endpoint: Option, 61 | 62 | /// The public key of the cache, in the canonical format used by Nix. 63 | /// 64 | /// This is read-only and may not be available. 
65 | #[serde(skip_serializing_if = "Option::is_none")] 66 | pub public_key: Option, 67 | 68 | /// Whether the cache is public or not. 69 | /// 70 | /// Anonymous clients are implicitly granted the "pull" 71 | /// permission to public caches. 72 | #[serde(skip_serializing_if = "Option::is_none")] 73 | pub is_public: Option, 74 | 75 | /// The Nix store path this binary cache uses. 76 | /// 77 | /// This is usually `/nix/store`. 78 | #[serde(skip_serializing_if = "Option::is_none")] 79 | pub store_dir: Option, 80 | 81 | /// The priority of the binary cache. 82 | /// 83 | /// A lower number denotes a higher priority. 84 | /// has a priority of 40. 85 | #[serde(skip_serializing_if = "Option::is_none")] 86 | pub priority: Option, 87 | 88 | /// A list of signing key names of upstream caches. 89 | /// 90 | /// The list serves as a hint to clients to avoid uploading 91 | /// store paths signed with such keys. 92 | #[serde(skip_serializing_if = "Option::is_none")] 93 | pub upstream_cache_key_names: Option>, 94 | 95 | /// The retention period of the cache. 96 | #[serde(skip_serializing_if = "Option::is_none")] 97 | pub retention_period: Option, 98 | } 99 | 100 | /// Configuaration of a keypair. 101 | #[derive(Debug, Serialize, Deserialize)] 102 | pub enum KeypairConfig { 103 | /// Use a randomly-generated keypair. 104 | Generate, 105 | 106 | /// Use a client-specified keypair. 107 | Keypair(NixKeypair), 108 | } 109 | 110 | /// Configuration of retention period. 111 | #[derive(Debug, Serialize, Deserialize)] 112 | pub enum RetentionPeriodConfig { 113 | /// Use the global default. 114 | Global, 115 | 116 | /// Specify a retention period in seconds. 117 | /// 118 | /// If 0, then time-based garbage collection is disabled. 
119 | Period(u32), 120 | } 121 | 122 | impl CacheConfig { 123 | pub fn blank() -> Self { 124 | Self { 125 | keypair: None, 126 | substituter_endpoint: None, 127 | api_endpoint: None, 128 | public_key: None, 129 | is_public: None, 130 | store_dir: None, 131 | priority: None, 132 | upstream_cache_key_names: None, 133 | retention_period: None, 134 | } 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /attic/src/api/v1/get_missing_paths.rs: -------------------------------------------------------------------------------- 1 | //! get-missing-paths v1 2 | //! 3 | //! `POST /_api/v1/get-missing-paths` 4 | //! 5 | //! Requires "push" permission. 6 | 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use crate::cache::CacheName; 10 | use crate::nix_store::StorePathHash; 11 | 12 | #[derive(Debug, Serialize, Deserialize)] 13 | pub struct GetMissingPathsRequest { 14 | /// The name of the cache. 15 | pub cache: CacheName, 16 | 17 | /// The list of store paths. 18 | pub store_path_hashes: Vec, 19 | } 20 | 21 | #[derive(Debug, Serialize, Deserialize)] 22 | pub struct GetMissingPathsResponse { 23 | /// A list of paths that are not in the cache. 24 | pub missing_paths: Vec, 25 | } 26 | -------------------------------------------------------------------------------- /attic/src/api/v1/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cache_config; 2 | pub mod get_missing_paths; 3 | pub mod upload_path; 4 | -------------------------------------------------------------------------------- /attic/src/api/v1/upload_path.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use serde_with::{serde_as, DefaultOnError}; 3 | 4 | use crate::cache::CacheName; 5 | use crate::hash::Hash; 6 | use crate::nix_store::StorePathHash; 7 | 8 | /// Header containing the upload info. 
9 | pub const ATTIC_NAR_INFO: &str = "X-Attic-Nar-Info"; 10 | 11 | /// Header containing the size of the upload info at the beginning of the body. 12 | pub const ATTIC_NAR_INFO_PREAMBLE_SIZE: &str = "X-Attic-Nar-Info-Preamble-Size"; 13 | 14 | /// NAR information associated with a upload. 15 | /// 16 | /// There are two ways for the client to supply the NAR information: 17 | /// 18 | /// 1. At the beginning of the PUT body. The `X-Attic-Nar-Info-Preamble-Size` 19 | /// header must be set to the size of the JSON. 20 | /// 2. Through the `X-Attic-Nar-Info` header. 21 | /// 22 | /// The client is advised to use the first method if the serialized 23 | /// JSON is large (>4K). 24 | /// 25 | /// Regardless of client compression, the server will always decompress 26 | /// the NAR to validate the NAR hash before applying the server-configured 27 | /// compression again. 28 | #[derive(Debug, Serialize, Deserialize)] 29 | pub struct UploadPathNarInfo { 30 | /// The name of the binary cache to upload to. 31 | pub cache: CacheName, 32 | 33 | /// The hash portion of the store path. 34 | pub store_path_hash: StorePathHash, 35 | 36 | /// The full store path being cached, including the store directory. 37 | pub store_path: String, 38 | 39 | /// Other store paths this object directly refereces. 40 | pub references: Vec, 41 | 42 | /// The system this derivation is built for. 43 | pub system: Option, 44 | 45 | /// The derivation that produced this object. 46 | pub deriver: Option, 47 | 48 | /// The signatures of this object. 49 | pub sigs: Vec, 50 | 51 | /// The CA field of this object. 52 | pub ca: Option, 53 | 54 | /// The hash of the NAR. 55 | /// 56 | /// It must begin with `sha256:` with the SHA-256 hash in the 57 | /// hexadecimal format (64 hex characters). 58 | /// 59 | /// This is informational and the server always validates the supplied 60 | /// hash. 61 | pub nar_hash: Hash, 62 | 63 | /// The size of the NAR. 
64 | pub nar_size: usize, 65 | } 66 | 67 | #[serde_as] 68 | #[derive(Debug, Serialize, Deserialize)] 69 | pub struct UploadPathResult { 70 | #[serde_as(deserialize_as = "DefaultOnError")] 71 | pub kind: UploadPathResultKind, 72 | 73 | /// The compressed size of the NAR, in bytes. 74 | #[serde(skip_serializing_if = "Option::is_none")] 75 | pub file_size: Option, 76 | 77 | /// The fraction of data that was deduplicated, from 0 to 1. 78 | pub frac_deduplicated: Option, 79 | } 80 | 81 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 82 | #[non_exhaustive] 83 | pub enum UploadPathResultKind { 84 | /// The path was uploaded. 85 | /// 86 | /// This is purely informational and servers may return 87 | /// this variant even when the NAR is deduplicated. 88 | Uploaded, 89 | 90 | /// The path was globally deduplicated. 91 | /// 92 | /// The exact semantics of what counts as deduplicated 93 | /// is opaque to the client. 94 | Deduplicated, 95 | } 96 | 97 | impl Default for UploadPathResultKind { 98 | fn default() -> Self { 99 | Self::Uploaded 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /attic/src/chunking/mod.rs: -------------------------------------------------------------------------------- 1 | //! Chunking. 2 | //! 3 | //! We perform chunking on uncompressed NARs using the FastCDC 4 | //! algorithm. 5 | 6 | use async_stream::try_stream; 7 | use bytes::{BufMut, Bytes, BytesMut}; 8 | use fastcdc::ronomon::FastCDC; 9 | use futures::stream::Stream; 10 | use tokio::io::AsyncRead; 11 | 12 | use crate::stream::read_chunk_async; 13 | 14 | /// Splits a streams into content-defined chunks. 15 | /// 16 | /// This is a wrapper over fastcdc-rs that takes an `AsyncRead` and 17 | /// returns a `Stream` of chunks as `Bytes`s. 
pub fn chunk_stream<R>(
    mut stream: R,
    min_size: usize,
    avg_size: usize,
    max_size: usize,
) -> impl Stream<Item = std::io::Result<Bytes>>
where
    R: AsyncRead + Unpin + Send,
{
    let s = try_stream! {
        let mut buf = BytesMut::with_capacity(max_size);

        loop {
            let read = read_chunk_async(&mut stream, buf).await?;

            let mut eof = false;
            if read.is_empty() {
                // Already EOF
                break;
            } else if read.len() < max_size {
                // Last read
                eof = true;
            }

            let chunks = FastCDC::with_eof(&read, min_size, avg_size, max_size, eof);
            let mut consumed = 0;

            for chunk in chunks {
                consumed += chunk.length;

                let slice = read.slice(chunk.offset..chunk.offset + chunk.length);
                yield slice;
            }

            if eof {
                break;
            }

            buf = BytesMut::with_capacity(max_size);

            if consumed < read.len() {
                // remaining bytes for the next read
                buf.put_slice(&read[consumed..]);
            }
        }
    };

    Box::pin(s)
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::io::Cursor;

    use futures::StreamExt;

    use crate::testing::get_fake_data;

    /// Chunks and reconstructs a file.
    #[tokio::test]
    async fn test_chunking_basic() {
        async fn case(size: usize) {
            // `size` bytes of deterministic fake data
            let test_file = get_fake_data(size);
            let mut reconstructed_file = Vec::new();

            let cursor = Cursor::new(&test_file);
            let mut chunks = chunk_stream(cursor, 8 * 1024, 16 * 1024, 32 * 1024);

            while let Some(chunk) = chunks.next().await {
                let chunk = chunk.unwrap();
                eprintln!("Got a {}-byte chunk", chunk.len());
                reconstructed_file.extend(chunk);
            }

            assert_eq!(reconstructed_file, test_file);
        }

        // exercise sizes just below, at, and just above a multiple of max_size
        case(32 * 1024 * 1024 - 1).await;
        case(32 * 1024 * 1024).await;
        case(32 * 1024 * 1024 + 1).await;
    }
}
-------------------------------------------------------------------------------- /attic/src/error.rs: --------------------------------------------------------------------------------
//! Error handling.

use std::error::Error as StdError;
use std::io;
use std::path::PathBuf;

use displaydoc::Display;

pub type AtticResult<T> = Result<T, AtticError>;

/// An error.
#[derive(Debug, Display)]
pub enum AtticError {
    /// Invalid store path {path:?}: {reason}
    InvalidStorePath { path: PathBuf, reason: &'static str },

    /// Invalid store path base name {base_name:?}: {reason}
    InvalidStorePathName {
        base_name: PathBuf,
        reason: &'static str,
    },

    /// Invalid store path hash "{hash}": {reason}
    InvalidStorePathHash { hash: String, reason: &'static str },

    /// Invalid cache name "{name}"
    InvalidCacheName { name: String },

    /// Signing error: {0}
    SigningError(super::signing::Error),

    /// Hashing error: {0}
    HashError(super::hash::Error),

    /// I/O error: {error}.
    IoError { error: io::Error },

    /// Unknown C++ exception: {exception}.
39 | CxxError { exception: String }, 40 | } 41 | 42 | impl AtticError { 43 | pub fn name(&self) -> &'static str { 44 | match self { 45 | Self::InvalidStorePath { .. } => "InvalidStorePath", 46 | Self::InvalidStorePathName { .. } => "InvalidStorePathName", 47 | Self::InvalidStorePathHash { .. } => "InvalidStorePathHash", 48 | Self::InvalidCacheName { .. } => "InvalidCacheName", 49 | Self::SigningError(_) => "SigningError", 50 | Self::HashError(_) => "HashError", 51 | Self::IoError { .. } => "IoError", 52 | Self::CxxError { .. } => "CxxError", 53 | } 54 | } 55 | } 56 | 57 | impl StdError for AtticError {} 58 | 59 | #[cfg(feature = "nix_store")] 60 | impl From for AtticError { 61 | fn from(exception: cxx::Exception) -> Self { 62 | Self::CxxError { 63 | exception: exception.what().to_string(), 64 | } 65 | } 66 | } 67 | 68 | impl From for AtticError { 69 | fn from(error: io::Error) -> Self { 70 | Self::IoError { error } 71 | } 72 | } 73 | 74 | impl From for AtticError { 75 | fn from(error: super::signing::Error) -> Self { 76 | Self::SigningError(error) 77 | } 78 | } 79 | 80 | impl From for AtticError { 81 | fn from(error: super::hash::Error) -> Self { 82 | Self::HashError(error) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /attic/src/hash/mod.rs: -------------------------------------------------------------------------------- 1 | //! Hashing utilities. 2 | 3 | #[cfg(test)] 4 | mod tests; 5 | 6 | use displaydoc::Display; 7 | use serde::{de, ser, Deserialize, Serialize}; 8 | use sha2::{Digest, Sha256}; 9 | 10 | use crate::error::AtticResult; 11 | 12 | /// A hash. 13 | #[derive(Debug, Clone, PartialEq, Eq)] 14 | pub enum Hash { 15 | /// An SHA-256 hash. 16 | Sha256([u8; 32]), 17 | } 18 | 19 | /// A hashing error. 20 | #[derive(Debug, Display)] 21 | pub enum Error { 22 | /// The string lacks a colon separator. 23 | NoColonSeparator, 24 | 25 | /// Hash algorithm {0} is not supported. 
26 | UnsupportedHashAlgorithm(String), 27 | 28 | /// Invalid base16 hash: {0} 29 | InvalidBase16Hash(hex::FromHexError), 30 | 31 | /// Invalid base32 hash. 32 | InvalidBase32Hash, 33 | 34 | /// Invalid length for {typ} string: Must be either {base16_len} (hexadecimal) or {base32_len} (base32), got {actual}. 35 | InvalidHashStringLength { 36 | typ: &'static str, 37 | base16_len: usize, 38 | base32_len: usize, 39 | actual: usize, 40 | }, 41 | } 42 | 43 | impl Hash { 44 | /// Convenience function to generate a SHA-256 hash from a slice. 45 | pub fn sha256_from_bytes(bytes: &[u8]) -> Self { 46 | let mut hasher = Sha256::new(); 47 | hasher.update(bytes); 48 | Self::Sha256(hasher.finalize().into()) 49 | } 50 | 51 | /// Parses a typed representation of a hash. 52 | pub fn from_typed(s: &str) -> AtticResult { 53 | let colon = s.find(':').ok_or(Error::NoColonSeparator)?; 54 | 55 | let (typ, rest) = s.split_at(colon); 56 | let hash = &rest[1..]; 57 | 58 | match typ { 59 | "sha256" => { 60 | let v = decode_hash(hash, "SHA-256", 32)?; 61 | Ok(Self::Sha256(v.try_into().unwrap())) 62 | } 63 | _ => Err(Error::UnsupportedHashAlgorithm(typ.to_owned()).into()), 64 | } 65 | } 66 | 67 | /// Returns the hash in Nix-specific Base32 format, with the hash type prepended. 68 | pub fn to_typed_base32(&self) -> String { 69 | format!("{}:{}", self.hash_type(), self.to_base32()) 70 | } 71 | 72 | /// Returns the hash in hexadecimal format, with the hash type prepended. 73 | /// 74 | /// This is the canonical representation of hashes in the Attic database. 75 | pub fn to_typed_base16(&self) -> String { 76 | format!("{}:{}", self.hash_type(), hex::encode(self.data())) 77 | } 78 | 79 | fn data(&self) -> &[u8] { 80 | match self { 81 | Self::Sha256(d) => d, 82 | } 83 | } 84 | 85 | fn hash_type(&self) -> &'static str { 86 | match self { 87 | Self::Sha256(_) => "sha256", 88 | } 89 | } 90 | 91 | /// Returns the hash in Nix-specific Base32 format. 
92 | fn to_base32(&self) -> String { 93 | nix_base32::to_nix_base32(self.data()) 94 | } 95 | } 96 | 97 | impl<'de> Deserialize<'de> for Hash { 98 | /// Deserializes a typed hash string. 99 | fn deserialize(deserializer: D) -> Result 100 | where 101 | D: de::Deserializer<'de>, 102 | { 103 | use de::Error; 104 | 105 | String::deserialize(deserializer) 106 | .and_then(|s| Self::from_typed(&s).map_err(|e| Error::custom(e.to_string()))) 107 | } 108 | } 109 | 110 | impl Serialize for Hash { 111 | /// Serializes a hash into a hexadecimal hash string. 112 | fn serialize(&self, serializer: S) -> Result 113 | where 114 | S: ser::Serializer, 115 | { 116 | serializer.serialize_str(&self.to_typed_base16()) 117 | } 118 | } 119 | 120 | /// Decodes a base16 or base32 encoded hash containing a specified number of bytes. 121 | fn decode_hash(s: &str, typ: &'static str, expected_bytes: usize) -> AtticResult> { 122 | let base16_len = expected_bytes * 2; 123 | let base32_len = (expected_bytes * 8 - 1) / 5 + 1; 124 | 125 | let v = if s.len() == base16_len { 126 | hex::decode(s).map_err(Error::InvalidBase16Hash)? 127 | } else if s.len() == base32_len { 128 | nix_base32::from_nix_base32(s).ok_or(Error::InvalidBase32Hash)? 
129 | } else { 130 | return Err(Error::InvalidHashStringLength { 131 | typ, 132 | base16_len, 133 | base32_len, 134 | actual: s.len(), 135 | } 136 | .into()); 137 | }; 138 | 139 | assert!(v.len() == expected_bytes); 140 | 141 | Ok(v) 142 | } 143 | -------------------------------------------------------------------------------- /attic/src/hash/tests/.gitattributes: -------------------------------------------------------------------------------- 1 | blob -text 2 | -------------------------------------------------------------------------------- /attic/src/hash/tests/blob: -------------------------------------------------------------------------------- 1 | ⊂_ヽ 2 |   \\ _ 3 |    \( •_•) F 4 |     < ⌒ヽ A 5 |    /   へ\ B 6 |    /  / \\ U 7 |    レ ノ   ヽ_つ L 8 |   / / O 9 |   / /| U 10 |  ( (ヽ S 11 |  | |、\ 12 |  | 丿 \ ⌒) 13 |  | |  ) / 14 | `ノ )  Lノ 15 | (_/ 16 | -------------------------------------------------------------------------------- /attic/src/hash/tests/mod.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | use crate::error::AtticError; 4 | use crate::nix_store::tests::test_nar; 5 | 6 | const BLOB: &[u8] = include_bytes!("blob"); 7 | 8 | #[test] 9 | fn test_basic() { 10 | let hash = Hash::sha256_from_bytes(BLOB); 11 | 12 | let expected_base16 = "sha256:df3404eaf1481506db9ca155e0a871d5b4d22e62a96961e8bf4ad1a8ca525330"; 13 | assert_eq!(expected_base16, hash.to_typed_base16()); 14 | 15 | let expected_base32 = "sha256:0c2kab5ailaapzl62sd9c8pd5d6mf6lf0md1kkdhc5a8y7m08d6z"; 16 | assert_eq!(expected_base32, hash.to_typed_base32()); 17 | } 18 | 19 | #[test] 20 | fn test_nar_hash() { 21 | let nar = test_nar::NO_DEPS; 22 | let hash = Hash::sha256_from_bytes(nar.nar()); 23 | 24 | let expected_base32 = "sha256:0hjszid30ak3rkzvc3m94c3risg8wz2hayy100c1fg92bjvvvsms"; 25 | assert_eq!(expected_base32, hash.to_typed_base32()); 26 | } 27 | 28 | #[test] 29 | fn test_from_typed() { 30 | let base16 = 
"sha256:baeabdb75c223d171800c17b05c5e7e8e9980723a90eb6ffcc632a305afc5a42"; 31 | let base32 = "sha256:0hjszid30ak3rkzvc3m94c3risg8wz2hayy100c1fg92bjvvvsms"; 32 | 33 | assert_eq!( 34 | Hash::from_typed(base16).unwrap(), 35 | Hash::from_typed(base32).unwrap() 36 | ); 37 | 38 | assert!(matches!( 39 | Hash::from_typed("sha256"), 40 | Err(AtticError::HashError(Error::NoColonSeparator)) 41 | )); 42 | 43 | assert!(matches!( 44 | Hash::from_typed("sha256:"), 45 | Err(AtticError::HashError(Error::InvalidHashStringLength { .. })) 46 | )); 47 | 48 | assert!(matches!( 49 | Hash::from_typed("sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"), 50 | Err(AtticError::HashError(Error::InvalidBase32Hash)) 51 | )); 52 | 53 | assert!(matches!( 54 | Hash::from_typed("sha256:gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg"), 55 | Err(AtticError::HashError(Error::InvalidBase16Hash(_))) 56 | )); 57 | 58 | assert!(matches!( 59 | Hash::from_typed("md5:invalid"), 60 | Err(AtticError::HashError(Error::UnsupportedHashAlgorithm(alg))) if alg == "md5" 61 | )); 62 | } 63 | -------------------------------------------------------------------------------- /attic/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! The Attic Library. 
2 | 3 | #![deny( 4 | asm_sub_register, 5 | deprecated, 6 | missing_abi, 7 | unsafe_code, 8 | unused_macros, 9 | unused_must_use, 10 | unused_unsafe 11 | )] 12 | #![deny(clippy::from_over_into, clippy::needless_question_mark)] 13 | #![cfg_attr( 14 | not(debug_assertions), 15 | deny(unused_imports, unused_mut, unused_variables,) 16 | )] 17 | 18 | pub mod api; 19 | pub mod cache; 20 | #[cfg(feature = "chunking")] 21 | pub mod chunking; 22 | pub mod error; 23 | pub mod hash; 24 | pub mod mime; 25 | pub mod nix_store; 26 | pub mod signing; 27 | #[cfg(feature = "stream")] 28 | pub mod stream; 29 | #[cfg(target_family = "unix")] 30 | pub mod testing; 31 | #[cfg(feature = "tokio")] 32 | pub mod util; 33 | 34 | pub use error::{AtticError, AtticResult}; 35 | -------------------------------------------------------------------------------- /attic/src/mime.rs: -------------------------------------------------------------------------------- 1 | //! MIME types. 2 | 3 | /// /nix-cache-info 4 | pub const NIX_CACHE_INFO: &str = "text/x-nix-cache-info"; 5 | 6 | /// .narinfo 7 | pub const NARINFO: &str = "text/x-nix-narinfo"; 8 | 9 | /// .nar 10 | pub const NAR: &str = "application/x-nix-nar"; 11 | -------------------------------------------------------------------------------- /attic/src/nix_store/README.md: -------------------------------------------------------------------------------- 1 | # Nix Store Bindings 2 | 3 | This directory contains a set of high-level Rust bindings to `libnixstore`, compatible with `async`/`await` semantics. 4 | We currently target Nix 2.4+. 5 | 6 | ## Why? 
7 | 8 | With this wrapper, now you can do things like: 9 | 10 | ```rust 11 | let store = NixStore::connect()?; 12 | let store_path = store.parse_store_path("/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5")?; 13 | let nar_stream = store.nar_from_path(store_path); # AsyncWrite 14 | ``` 15 | -------------------------------------------------------------------------------- /attic/src/nix_store/bindings/nix.cpp: -------------------------------------------------------------------------------- 1 | // C++ side of the libnixstore glue. 2 | // 3 | // We implement a mid-level wrapper of the Nix Store interface, 4 | // which is then wrapped again in the Rust side to enable full 5 | // async-await operation. 6 | // 7 | // Here we stick with the naming conventions of Rust and handle 8 | // Rust types directly where possible, so that the interfaces are 9 | // satisfying to use from the Rust side via cxx.rs. 10 | 11 | #include "attic/src/nix_store/bindings/nix.hpp" 12 | 13 | static std::mutex g_init_nix_mutex; 14 | static bool g_init_nix_done = false; 15 | 16 | static nix::StorePath store_path_from_rust(RBasePathSlice base_name) { 17 | std::string_view sv((const char *)base_name.data(), base_name.size()); 18 | return nix::StorePath(sv); 19 | } 20 | 21 | static bool hash_is_sha256(const nix::Hash &hash) { 22 | #ifdef ATTIC_NIX_2_20 23 | return hash.algo == nix::HashAlgorithm::SHA256; 24 | #else 25 | return hash.type == nix::htSHA256; 26 | #endif 27 | } 28 | 29 | // ======== 30 | // RustSink 31 | // ======== 32 | 33 | RustSink::RustSink(RBox sender) : sender(std::move(sender)) {} 34 | 35 | void RustSink::operator () (std::string_view data) { 36 | RBasePathSlice s((const unsigned char *)data.data(), data.size()); 37 | 38 | this->sender->send(s); 39 | } 40 | 41 | void RustSink::eof() { 42 | this->sender->eof(); 43 | } 44 | 45 | 46 | // ========= 47 | // CPathInfo 48 | // ========= 49 | 50 | CPathInfo::CPathInfo(nix::ref pi) : pi(pi) {} 51 | 52 | RHashSlice 
CPathInfo::nar_sha256_hash() { 53 | auto &hash = this->pi->narHash; 54 | 55 | if (!hash_is_sha256(hash)) { 56 | throw nix::Error("Only SHA-256 hashes are supported at the moment"); 57 | } 58 | 59 | return RHashSlice(hash.hash, hash.hashSize); 60 | } 61 | 62 | uint64_t CPathInfo::nar_size() { 63 | return this->pi->narSize; 64 | } 65 | 66 | std::unique_ptr> CPathInfo::sigs() { 67 | std::vector result; 68 | for (auto&& elem : this->pi->sigs) { 69 | result.push_back(std::string(elem)); 70 | } 71 | return std::make_unique>(result); 72 | } 73 | 74 | std::unique_ptr> CPathInfo::references() { 75 | std::vector result; 76 | for (auto&& elem : this->pi->references) { 77 | result.push_back(std::string(elem.to_string())); 78 | } 79 | return std::make_unique>(result); 80 | } 81 | 82 | RString CPathInfo::ca() { 83 | if (this->pi->ca) { 84 | return RString(nix::renderContentAddress(this->pi->ca)); 85 | } else { 86 | return RString(""); 87 | } 88 | } 89 | 90 | // ========= 91 | // CNixStore 92 | // ========= 93 | 94 | CNixStore::CNixStore() { 95 | std::map params; 96 | std::lock_guard lock(g_init_nix_mutex); 97 | 98 | if (!g_init_nix_done) { 99 | nix::initNix(); 100 | g_init_nix_done = true; 101 | } 102 | 103 | this->store = nix::openStore(nix::settings.storeUri.get(), params); 104 | } 105 | 106 | RString CNixStore::store_dir() { 107 | return RString(this->store->storeDir); 108 | } 109 | 110 | std::unique_ptr CNixStore::query_path_info(RBasePathSlice base_name) { 111 | auto store_path = store_path_from_rust(base_name); 112 | 113 | auto r = this->store->queryPathInfo(store_path); 114 | return std::make_unique(r); 115 | } 116 | 117 | std::unique_ptr> CNixStore::compute_fs_closure(RBasePathSlice base_name, bool flip_direction, bool include_outputs, bool include_derivers) { 118 | std::set out; 119 | 120 | this->store->computeFSClosure(store_path_from_rust(base_name), out, flip_direction, include_outputs, include_derivers); 121 | 122 | std::vector result; 123 | for (auto&& elem : out) 
{ 124 | result.push_back(std::string(elem.to_string())); 125 | } 126 | return std::make_unique>(result); 127 | } 128 | 129 | std::unique_ptr> CNixStore::compute_fs_closure_multi(RSlice base_names, bool flip_direction, bool include_outputs, bool include_derivers) { 130 | std::set path_set, out; 131 | for (auto&& base_name : base_names) { 132 | path_set.insert(store_path_from_rust(base_name)); 133 | } 134 | 135 | this->store->computeFSClosure(path_set, out, flip_direction, include_outputs, include_derivers); 136 | 137 | std::vector result; 138 | for (auto&& elem : out) { 139 | result.push_back(std::string(elem.to_string())); 140 | } 141 | return std::make_unique>(result); 142 | } 143 | 144 | void CNixStore::nar_from_path(RVec base_name, RBox sender) { 145 | RustSink sink(std::move(sender)); 146 | 147 | std::string_view sv((const char *)base_name.data(), base_name.size()); 148 | nix::StorePath store_path(sv); 149 | 150 | // exceptions will be thrown into Rust 151 | this->store->narFromPath(store_path, sink); 152 | sink.eof(); 153 | } 154 | 155 | std::unique_ptr open_nix_store() { 156 | return std::make_unique(); 157 | } 158 | -------------------------------------------------------------------------------- /attic/src/nix_store/bindings/nix.hpp: -------------------------------------------------------------------------------- 1 | // C++ side of the libnixstore glue. 2 | // 3 | // We implement a mid-level wrapper of the Nix Store interface, 4 | // which is then wrapped again in the Rust side to enable full 5 | // async-await operation. 6 | // 7 | // Here we stick with the naming conventions of Rust and handle 8 | // Rust types directly where possible, so that the interfaces are 9 | // satisfying to use from the Rust side via cxx.rs. 
10 | 11 | #pragma once 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | template using RVec = rust::Vec; 27 | template using RBox = rust::Box; 28 | template using RSlice = rust::Slice; 29 | using RString = rust::String; 30 | using RStr = rust::Str; 31 | using RBasePathSlice = RSlice; 32 | using RHashSlice = RSlice; 33 | 34 | struct AsyncWriteSender; 35 | 36 | struct RustSink : nix::Sink 37 | { 38 | RBox sender; 39 | public: 40 | RustSink(RBox sender); 41 | void operator () (std::string_view data) override; 42 | void eof(); 43 | }; 44 | 45 | // Opaque wrapper for nix::ValidPathInfo 46 | class CPathInfo { 47 | nix::ref pi; 48 | public: 49 | CPathInfo(nix::ref pi); 50 | RHashSlice nar_sha256_hash(); 51 | uint64_t nar_size(); 52 | std::unique_ptr> sigs(); 53 | std::unique_ptr> references(); 54 | RString ca(); 55 | }; 56 | 57 | class CNixStore { 58 | std::shared_ptr store; 59 | public: 60 | CNixStore(); 61 | 62 | RString store_dir(); 63 | std::unique_ptr query_path_info(RBasePathSlice base_name); 64 | std::unique_ptr> compute_fs_closure( 65 | RBasePathSlice base_name, 66 | bool flip_direction, 67 | bool include_outputs, 68 | bool include_derivers); 69 | std::unique_ptr> compute_fs_closure_multi( 70 | RSlice base_names, 71 | bool flip_direction, 72 | bool include_outputs, 73 | bool include_derivers); 74 | void nar_from_path(RVec base_name, RBox sender); 75 | }; 76 | 77 | std::unique_ptr open_nix_store(); 78 | 79 | // Relies on our definitions 80 | #include "attic/src/nix_store/bindings/mod.rs.h" 81 | -------------------------------------------------------------------------------- /attic/src/nix_store/tests/.gitattributes: -------------------------------------------------------------------------------- 1 | * -text 2 | -------------------------------------------------------------------------------- 
/attic/src/nix_store/tests/README.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | The included tests require trusted user access to import the test NAR dumps. 4 | 5 | ## Test Derivations 6 | 7 | To keep things minimal, we have a couple of polyglot derivations that double as their builders in `drv`. 8 | They result in the following store paths when built: 9 | 10 | - `no-deps.nix` -> `/nix/store/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps` 11 | - `with-deps.nix` -> `/nix/store/7wp86qa87v2pwh6sr2a02qci0h71rs9z-attic-test-with-deps` 12 | 13 | NAR dumps for those store paths are included in `nar`. 14 | `.nar` files are produced by `nix-store --dump`, and `.export` files are produced by `nix-store --export`. 15 | -------------------------------------------------------------------------------- /attic/src/nix_store/tests/drv/no-deps.nix: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | /*/sh -c "echo Hi! I have no dependencies. > $out"; exit 0; */ 3 | derivation { 4 | name = "attic-test-no-deps"; 5 | builder = ./no-deps.nix; 6 | system = "x86_64-linux"; 7 | } 8 | -------------------------------------------------------------------------------- /attic/src/nix_store/tests/drv/with-deps.nix: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | /*/sh -c "echo Hi! I depend on $dep.
> $out"; exit 0; */ 3 | let 4 | a = derivation { 5 | name = "attic-test-with-deps-a"; 6 | builder = ./with-deps.nix; 7 | system = "x86_64-linux"; 8 | dep = b; 9 | }; 10 | b = derivation { 11 | name = "attic-test-with-deps-b"; 12 | builder = ./with-deps.nix; 13 | system = "x86_64-linux"; 14 | dep = c; 15 | }; 16 | c = derivation { 17 | name = "attic-test-with-deps-c-final"; 18 | builder = ./with-deps.nix; 19 | system = "x86_64-linux"; 20 | }; 21 | in a 22 | -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final: -------------------------------------------------------------------------------- 1 | Hi! I depend on . 2 | -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.export: -------------------------------------------------------------------------------- 1 |  nix-archive-1(typeregularcontentsHi! I depend on . 2 | )NIXEH/nix/store/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-finalL/nix/store/hhdfzrah5yfhfsspa3m5gnjmalmfpck8-attic-test-with-deps-c-final.drv -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.nar: -------------------------------------------------------------------------------- 1 | nix-archive-1(typeregularcontentsHi! I depend on . 2 | ) -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b: -------------------------------------------------------------------------------- 1 | Hi! I depend on /nix/store/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final. 
2 | -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.export: -------------------------------------------------------------------------------- 1 |  nix-archive-1(typeregularcontentsZHi! I depend on /nix/store/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final. 2 | )NIXEB/nix/store/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-bH/nix/store/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-finalF/nix/store/wnhmq1a370y17r7lpi9mwf4hbnw9slam-attic-test-with-deps-b.drv -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.nar: -------------------------------------------------------------------------------- 1 | nix-archive-1(typeregularcontentsZHi! I depend on /nix/store/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final. 2 | ) -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a: -------------------------------------------------------------------------------- 1 | Hi! I depend on /nix/store/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b. 2 | -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.export: -------------------------------------------------------------------------------- 1 |  nix-archive-1(typeregularcontentsTHi! I depend on /nix/store/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b. 
2 | )NIXEB/nix/store/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-aB/nix/store/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-bF/nix/store/f0jcxv00lr4vvvv9gb3mb23dkjcpks3z-attic-test-with-deps-a.drv -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.nar: -------------------------------------------------------------------------------- 1 | nix-archive-1(typeregularcontentsTHi! I depend on /nix/store/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b. 2 | ) -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps: -------------------------------------------------------------------------------- 1 | Hi! I have no dependencies. 2 | -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.export: -------------------------------------------------------------------------------- 1 |  nix-archive-1(typeregularcontentsHi! I have no dependencies. 2 | )NIXE>/nix/store/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-depsB/nix/store/dvz2n7iqlpa2rs9mqhx4mhkzrgrpca8v-attic-test-no-deps.drv -------------------------------------------------------------------------------- /attic/src/nix_store/tests/nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.nar: -------------------------------------------------------------------------------- 1 | nix-archive-1(typeregularcontentsHi! I have no dependencies. 
2 | ) -------------------------------------------------------------------------------- /attic/src/signing/tests.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | #[test] 4 | fn test_generate_key() { 5 | let keypair = NixKeypair::generate("attic-test").expect("Could not generate key"); 6 | 7 | let export_priv = keypair.export_keypair(); 8 | let export_pub = keypair.export_public_key(); 9 | 10 | eprintln!("Private key: {}", export_priv); 11 | eprintln!(" Public key: {}", export_pub); 12 | 13 | // re-import keypair 14 | let import = NixKeypair::from_str(&export_priv).expect("Could not re-import generated key"); 15 | 16 | assert_eq!(keypair.name, import.name); 17 | assert_eq!(keypair.keypair, import.keypair); 18 | 19 | // re-import public key 20 | let import_pub = NixPublicKey::from_str(&export_pub).expect("Could not re-import public key"); 21 | 22 | assert_eq!(keypair.name, import_pub.name); 23 | assert_eq!(keypair.keypair.pk, import_pub.public); 24 | 25 | // test the export functionality of NixPublicKey as well 26 | let export_pub2 = import_pub.export(); 27 | let import_pub2 = NixPublicKey::from_str(&export_pub2).expect("Could not re-import public key"); 28 | 29 | assert_eq!(keypair.name, import_pub2.name); 30 | assert_eq!(keypair.keypair.pk, import_pub2.public); 31 | } 32 | 33 | #[test] 34 | fn test_serde() { 35 | let json = "\"attic-test:x326WFy/JUl+MQnN1u9NPdWQPBbcVn2mwoIqSLS3DmQqZ8qT8rBSxxEnyhtl3jDouBqodlyfq6F+HsVhbTYPMA==\""; 36 | 37 | let keypair: NixKeypair = serde_json::from_str(json).expect("Could not deserialize keypair"); 38 | 39 | let export = serde_json::to_string(&keypair).expect("Could not serialize keypair"); 40 | 41 | eprintln!("Public Key: {}", keypair.export_public_key()); 42 | 43 | assert_eq!(json, &export); 44 | } 45 | 46 | #[test] 47 | fn test_import_public_key() { 48 | let cache_nixos_org = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="; 49 | let import = 
NixPublicKey::from_str(cache_nixos_org).expect("Could not import public key"); 50 | 51 | assert_eq!(cache_nixos_org, import.export()); 52 | } 53 | 54 | #[test] 55 | fn test_signing() { 56 | let keypair = NixKeypair::generate("attic-test").expect("Could not generate key"); 57 | 58 | let public = keypair.to_public_key(); 59 | 60 | let message = b"hello world"; 61 | 62 | let signature = keypair.sign(message); 63 | 64 | keypair.verify(message, &signature).unwrap(); 65 | public.verify(message, &signature).unwrap(); 66 | 67 | keypair.verify(message, "attic-test:lo9EfNIL4eGRuNh7DTbAAffWPpI2SlYC/8uP7JnhgmfRIUNGhSbFe8qEaKN0mFS02TuhPpXFPNtRkFcCp0hGAQ==").unwrap_err(); 68 | } 69 | -------------------------------------------------------------------------------- /attic/src/testing/mod.rs: -------------------------------------------------------------------------------- 1 | //! Utilities for testing. 2 | 3 | pub mod shadow_store; 4 | 5 | use tokio::runtime::Runtime; 6 | 7 | /// Returns a new Tokio runtime. 8 | pub fn get_runtime() -> Runtime { 9 | tokio::runtime::Builder::new_current_thread() 10 | .enable_time() 11 | .build() 12 | .unwrap() 13 | } 14 | 15 | /// Returns some fake data. 16 | pub fn get_fake_data(len: usize) -> Vec { 17 | let mut state = 42u32; 18 | let mut data = vec![0u8; len]; 19 | 20 | for (i, byte) in data.iter_mut().enumerate() { 21 | (state, _) = state.overflowing_mul(1664525u32); 22 | (state, _) = state.overflowing_add(1013904223u32); 23 | *byte = ((state >> (i % 24)) & 0xff) as u8; 24 | } 25 | 26 | data 27 | } 28 | -------------------------------------------------------------------------------- /attic/src/testing/shadow_store/mod.rs: -------------------------------------------------------------------------------- 1 | //! Shadow Nix store. 2 | //! 3 | //! Since Nix 2.0, Nix can use an alternative root for the store via 4 | //! `--store` while keeping the same `storeDir`. To test pulling from 5 | //! 
an Attic server with vanilla Nix, we create a temporary root 6 | //! for the store, as well as `nix.conf` and `netrc` configurations 7 | //! required to connect to an Attic server. 8 | //! 9 | //! ## Manual example 10 | //! 11 | //! ```bash 12 | //! NIX_CONF_DIR="$SHADOW/etc/nix" NIX_USER_CONF_FILES="" NIX_REMOTE="" \ 13 | //! nix-store --store "$SHADOW" -r /nix/store/h8fxhm945jlsfxlr4rvkkqlws771l07c-nix-2.7pre20220127_558c4ee -v 14 | //! ``` 15 | //! 16 | //! `nix.conf`: 17 | //! 18 | //! ```text 19 | //! substituters = http://localhost:8080/attic-test 20 | //! trusted-public-keys = attic-test:KmfKk/KwUscRJ8obZd4w6LgaqHZcn6uhfh7FYW02DzA= 21 | //! ``` 22 | //! 23 | //! `netrc`: 24 | //! 25 | //! ```text 26 | //! machine localhost password eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjQwNzA5MDg4MDAsImh0dHBzOi8vemhhb2ZlbmdsaS5naXRodWIuaW8vYXR0aWMiOnsieC1hdHRpYy1hY2Nlc3MiOnsiY2FjaGVzIjp7IioiOnsicHVzaCI6dHJ1ZSwicHVsbCI6dHJ1ZX19fX19.58WIuL8H_fQGEPmUG7U61FUHtAmsHXanYtQFSgqni6U 27 | //! ``` 28 | 29 | use std::ffi::OsString; 30 | use std::fs::{self, Permissions}; 31 | use std::os::unix::fs::PermissionsExt; 32 | use std::path::Path; 33 | 34 | use tempfile::{Builder as TempfileBuilder, TempDir}; 35 | 36 | const WRAPPER_TEMPLATE: &str = include_str!("nix-wrapper.sh"); 37 | 38 | /// A shadow Nix store. 39 | /// 40 | /// After creation, wrappers of common Nix executables will be 41 | /// available under `bin`, allowing you to easily interact with 42 | /// the shadow store. 
43 | pub struct ShadowStore { 44 | store_root: TempDir, 45 | } 46 | 47 | impl ShadowStore { 48 | pub fn new() -> Self { 49 | let store_root = TempfileBuilder::new() 50 | .prefix("shadow-store-") 51 | .tempdir() 52 | .expect("failed to create temporary root"); 53 | 54 | fs::create_dir_all(store_root.path().join("etc/nix")) 55 | .expect("failed to create temporary config dir"); 56 | 57 | fs::create_dir_all(store_root.path().join("bin")) 58 | .expect("failed to create temporary wrapper dir"); 59 | 60 | let store = Self { store_root }; 61 | store.create_wrapper("nix-store"); 62 | 63 | store 64 | } 65 | 66 | /// Returns the path to the store root. 67 | pub fn path(&self) -> &Path { 68 | self.store_root.path() 69 | } 70 | 71 | /// Returns the path to the `nix-store` wrapper. 72 | pub fn nix_store_cmd(&self) -> OsString { 73 | self.store_root 74 | .path() 75 | .join("bin/nix-store") 76 | .as_os_str() 77 | .to_owned() 78 | } 79 | 80 | /// Creates a wrapper script for a Nix command. 81 | fn create_wrapper(&self, command: &str) { 82 | let path = self.store_root.path().join("bin").join(command); 83 | let permissions = Permissions::from_mode(0o755); 84 | let wrapper = WRAPPER_TEMPLATE 85 | .replace("%command%", command) 86 | .replace("%store_root%", &self.store_root.path().to_string_lossy()); 87 | 88 | fs::write(&path, wrapper).expect("failed to write wrapper script"); 89 | 90 | fs::set_permissions(&path, permissions).expect("failed to set wrapper permissions"); 91 | } 92 | } 93 | 94 | impl Drop for ShadowStore { 95 | fn drop(&mut self) { 96 | // recursively set write permissions on directories so we can 97 | // cleanly delete the entire store 98 | 99 | fn walk(dir: &Path) { 100 | // excuse the unwraps 101 | let metadata = fs::metadata(dir).unwrap(); 102 | let mut permissions = metadata.permissions(); 103 | permissions.set_mode(permissions.mode() | 0o200); 104 | fs::set_permissions(dir, permissions).unwrap(); 105 | 106 | for entry in fs::read_dir(dir).unwrap() { 107 | let 
entry = entry.unwrap(); 108 | 109 | if entry.file_type().unwrap().is_dir() { 110 | walk(&entry.path()); 111 | } 112 | } 113 | } 114 | 115 | walk(self.store_root.path()); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /attic/src/testing/shadow_store/nix-wrapper.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export NIX_CONF_DIR="%store_root%/etc/nix" 3 | export NIX_USER_CONF_FILE="" 4 | export NIX_REMOTE="" 5 | 6 | exec %command% --store "%store_root%" "$@" 7 | -------------------------------------------------------------------------------- /attic/src/util.rs: -------------------------------------------------------------------------------- 1 | //! Misc utilities. 2 | 3 | use std::future::Future; 4 | use std::mem; 5 | 6 | use tokio::task; 7 | 8 | /// Runs a future when dropped. 9 | /// 10 | /// This is used to clean up external resources that are 11 | /// difficult to correctly model using ownerships. 
12 | pub struct Finally 13 | where 14 | F::Output: Send + 'static, 15 | { 16 | f: Option, 17 | } 18 | 19 | impl Finally 20 | where 21 | F::Output: Send + 'static, 22 | { 23 | #[must_use] 24 | pub fn new(f: F) -> Self { 25 | Self { f: Some(f) } 26 | } 27 | 28 | pub fn cancel(self) { 29 | mem::forget(self); 30 | } 31 | } 32 | 33 | impl Drop for Finally 34 | where 35 | F::Output: Send + 'static, 36 | { 37 | fn drop(&mut self) { 38 | task::spawn(self.f.take().unwrap()); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /book/.gitignore: -------------------------------------------------------------------------------- 1 | book 2 | -------------------------------------------------------------------------------- /book/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["Zhaofeng Li"] 3 | language = "en" 4 | multilingual = false 5 | src = "src" 6 | title = "Attic" 7 | 8 | [output.html] 9 | git-repository-url = "https://github.com/zhaofengli/attic" 10 | edit-url-template = "https://github.com/zhaofengli/attic/edit/main/book/{path}" 11 | -------------------------------------------------------------------------------- /book/colorized-help.nix: -------------------------------------------------------------------------------- 1 | { lib, stdenv, runCommand, attic, ansi2html }: 2 | 3 | with builtins; 4 | 5 | let 6 | commands = { 7 | attic = [ 8 | null 9 | "login" 10 | "use" 11 | "push" 12 | "watch-store" 13 | "cache" 14 | "cache create" 15 | "cache configure" 16 | "cache destroy" 17 | "cache info" 18 | ]; 19 | atticd = [ 20 | null 21 | ]; 22 | atticadm = [ 23 | null 24 | "make-token" 25 | ]; 26 | }; 27 | renderMarkdown = name: subcommands: '' 28 | mkdir -p $out 29 | ( 30 | ansi2html -H 31 | ${lib.concatMapStrings (subcommand: let 32 | fullCommand = "${name} ${if subcommand == null then "" else subcommand}"; 33 | in "${renderCommand fullCommand}\n") subcommands} 34 | ) 
>>$out/${name}.md 35 | ''; 36 | renderCommand = fullCommand: '' 37 | echo '## `${fullCommand}`' 38 | echo -n '
' 39 | TERM=xterm-256color CLICOLOR_FORCE=1 ${fullCommand} --help | ansi2html -p 40 | echo '
' 41 | ''; 42 | in runCommand "attic-colorized-help" { 43 | nativeBuildInputs = [ attic ansi2html ]; 44 | } (concatStringsSep "\n" (lib.mapAttrsToList renderMarkdown commands)) 45 | -------------------------------------------------------------------------------- /book/default.nix: -------------------------------------------------------------------------------- 1 | { lib, stdenv, nix-gitignore, mdbook, mdbook-linkcheck, python3, callPackage, writeScript 2 | , attic ? null 3 | }: 4 | 5 | let 6 | colorizedHelp = let 7 | help = callPackage ./colorized-help.nix { 8 | inherit attic; 9 | }; 10 | in if attic != null then help else null; 11 | in stdenv.mkDerivation { 12 | inherit colorizedHelp; 13 | 14 | name = "attic-book"; 15 | 16 | src = nix-gitignore.gitignoreSource [] ./.; 17 | 18 | nativeBuildInputs = [ mdbook ]; 19 | 20 | buildPhase = '' 21 | emitColorizedHelp() { 22 | command=$1 23 | 24 | if [[ -n "$colorizedHelp" ]]; then 25 | cat "$colorizedHelp/$command.md" >> src/reference/$command-cli.md 26 | else 27 | echo "Error: No attic executable passed to the builder" >> src/reference/$command-cli.md 28 | fi 29 | } 30 | 31 | emitColorizedHelp attic 32 | emitColorizedHelp atticd 33 | emitColorizedHelp atticadm 34 | 35 | mdbook build -d ./build 36 | cp -r ./build $out 37 | ''; 38 | 39 | installPhase = "true"; 40 | } 41 | -------------------------------------------------------------------------------- /book/src/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | - [Introduction](./introduction.md) 4 | - [Tutorial](./tutorial.md) 5 | - [User Guide](./user-guide/README.md) 6 | - [Admin Guide](./admin-guide/README.md) 7 | - [Deploying to NixOS](./admin-guide/deployment/nixos.md) 8 | - [Chunking](./admin-guide/chunking.md) 9 | - [FAQs](./faqs.md) 10 | - [Reference](./reference/README.md) 11 | - [attic](./reference/attic-cli.md) 12 | - [atticd](./reference/atticd-cli.md) 13 | - [atticadm](./reference/atticadm-cli.md) 
14 | -------------------------------------------------------------------------------- /book/src/admin-guide/README.md: -------------------------------------------------------------------------------- 1 | # Admin Guide 2 | 3 | > This section is under construction. 4 | 5 | This section describes how to set up and administer an Attic Server. 6 | For a quick start, read the [Tutorial](../tutorial.md). 7 | 8 | - **[Deploying to NixOS](./deployment/nixos.md)** - Deploying to a NixOS machine 9 | - **[Chunking](./chunking.md)** - Configuring Content-Defined Chunking data deduplication in Attic 10 | -------------------------------------------------------------------------------- /book/src/admin-guide/chunking.md: -------------------------------------------------------------------------------- 1 | # Chunking 2 | 3 | Attic uses the [FastCDC algorithm](https://www.usenix.org/conference/atc16/technical-sessions/presentation/xia) to split uploaded NARs into chunks for deduplication. 4 | There are four main parameters that control chunking in Attic: 5 | 6 | - `nar-size-threshold`: The minimum NAR size to trigger chunking 7 | - When set to 0, chunking is disabled entirely for newly-uploaded NARs 8 | - When set to 1, all newly-uploaded NARs are chunked 9 | - `min-size`: The preferred minimum size of a chunk, in bytes 10 | - `avg-size`: The preferred average size of a chunk, in bytes 11 | - `max-size`: The preferred maximum size of a chunk, in bytes 12 | 13 | ## Configuration 14 | 15 | When upgrading from an older version without support for chunking, you must include the new `[chunking]` section: 16 | 17 | ```toml 18 | # Data chunking 19 | # 20 | # Warning: If you change any of the values here, it will be 21 | # difficult to reuse existing chunks for newly-uploaded NARs 22 | # since the cutpoints will be different. As a result, the 23 | # deduplication ratio will suffer for a while after the change. 
24 | [chunking] 25 | # The minimum NAR size to trigger chunking 26 | # 27 | # If 0, chunking is disabled entirely for newly-uploaded NARs. 28 | # If 1, all newly-uploaded NARs are chunked. 29 | nar-size-threshold = 131072 # chunk files that are 128 KiB or larger 30 | 31 | # The preferred minimum size of a chunk, in bytes 32 | min-size = 65536 # 64 KiB 33 | 34 | # The preferred average size of a chunk, in bytes 35 | avg-size = 131072 # 128 KiB 36 | 37 | # The preferred maximum size of a chunk, in bytes 38 | max-size = 262144 # 256 KiB 39 | ``` 40 | -------------------------------------------------------------------------------- /book/src/admin-guide/deployment/nixos.md: -------------------------------------------------------------------------------- 1 | # Deploying to NixOS 2 | 3 | Attic provides [a NixOS module](https://github.com/zhaofengli/attic/blob/main/nixos/atticd.nix) that allows you to deploy the Attic Server on a NixOS machine. 4 | 5 | ## Prerequisites 6 | 7 | 1. A machine running NixOS 8 | 1. _(Optional)_ A dedicated bucket on S3 or a S3-compatible storage service 9 | - You can either [set up Minio](https://search.nixos.org/options?query=services.minio) or use a hosted service like [Backblaze B2](https://www.backblaze.com/b2/docs) and [Cloudflare R2](https://developers.cloudflare.com/r2). 10 | 1. _(Optional)_ A PostgreSQL database 11 | 12 | ## Generating the Credentials File 13 | 14 | The RS256 JWT secret can be generated with the `openssl` utility: 15 | 16 | ```bash 17 | nix run nixpkgs#openssl -- genrsa -traditional 4096 | base64 -w0 18 | ``` 19 | 20 | Create a file on the server containing the following contents: 21 | 22 | ``` 23 | ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64="output from above" 24 | ``` 25 | 26 | Ensure the file is only accessible by root. 27 | 28 | ## Importing the Module 29 | 30 | You can import the module in one of two ways: 31 | 32 | - Ad-hoc: Import the `nixos/atticd.nix` from [the repository](https://github.com/zhaofengli/attic). 
33 | - Flakes: Add `github:zhaofengli/attic` as an input, then import `attic.nixosModules.atticd`. 34 | 35 | ## Configuration 36 | 37 | > Note: These options are subject to change. 38 | 39 | ```nix 40 | { 41 | services.atticd = { 42 | enable = true; 43 | 44 | # Replace with absolute path to your environment file 45 | environmentFile = "/etc/atticd.env"; 46 | 47 | settings = { 48 | listen = "[::]:8080"; 49 | 50 | jwt = { }; 51 | 52 | # Data chunking 53 | # 54 | # Warning: If you change any of the values here, it will be 55 | # difficult to reuse existing chunks for newly-uploaded NARs 56 | # since the cutpoints will be different. As a result, the 57 | # deduplication ratio will suffer for a while after the change. 58 | chunking = { 59 | # The minimum NAR size to trigger chunking 60 | # 61 | # If 0, chunking is disabled entirely for newly-uploaded NARs. 62 | # If 1, all NARs are chunked. 63 | nar-size-threshold = 64 * 1024; # 64 KiB 64 | 65 | # The preferred minimum size of a chunk, in bytes 66 | min-size = 16 * 1024; # 16 KiB 67 | 68 | # The preferred average size of a chunk, in bytes 69 | avg-size = 64 * 1024; # 64 KiB 70 | 71 | # The preferred maximum size of a chunk, in bytes 72 | max-size = 256 * 1024; # 256 KiB 73 | }; 74 | }; 75 | }; 76 | } 77 | ``` 78 | 79 | After the new configuration is deployed, the Attic Server will be accessible on port 8080. 80 | It's highly recommended to place it behind a reverse proxy like [NGINX](https://nixos.wiki/wiki/Nginx) to provide HTTPS. 81 | 82 | ## Operations 83 | 84 | The NixOS module installs the `atticd-atticadm` wrapper which runs the `atticadm` command as the `atticd` user. 85 | Use this command to [generate new tokens](../../reference/atticadm-cli.md#atticadm-make-token) to be distributed to users. 
86 | -------------------------------------------------------------------------------- /book/src/faqs.md: -------------------------------------------------------------------------------- 1 | # FAQs 2 | 3 | 4 | 5 | ## Does it replace [Cachix](https://www.cachix.org)? 6 | 7 | No, it does not. 8 | Cachix is an awesome product and the direct inspiration for the user experience of Attic. 9 | It works at a much larger scale than Attic and is a proven solution. 10 | Numerous open-source projects in the Nix community (including mine!) use Cachix to share publicly-available binaries. 11 | 12 | Attic can be thought to provide a similar user experience at a much smaller scale (personal or team use). 13 | 14 | ## What happens if a user uploads a path that is already in the global cache? 15 | 16 | The user will still fully upload the path to the server because they have to prove possession of the file. 17 | The difference is that instead of having the upload streamed to the storage backend (e.g., S3), it's only run through a hash function and discarded. 18 | Once the NAR hash is confirmed, a mapping is created to grant the local cache access to the global NAR. 19 | The global deduplication behavior is transparent to the client. 20 | 21 | This requirement may be disabled by setting `require-proof-of-possession` to false in the configuration. 22 | When disabled, uploads of NARs that already exist in the Global NAR Store will immediately succeed. 23 | 24 | ## What happens if a user uploads a path with incorrect/malicious metadata? 25 | 26 | They will only pollute their own cache. 27 | Path metadata (store path, references, deriver, etc.) are associated with the local cache and the global cache only contains content-addressed NARs and chunks that are "context-free." 28 | 29 | ## How is authentication handled? 30 | 31 | Authentication is done via signed JWTs containing the allowed permissions. 32 | Each instance of `atticd --mode api-server` is stateless. 
This design may be revisited later, with the option of a more stateful method of authentication.
57 | 58 | ## What happens if a chunk is corrupt/missing? 59 | 60 | When a chunk is deleted from the database, all dependent `.nar` will become unavailable (503). 61 | However, this can be recovered from automatically when any NAR containing the chunk is uploaded. 62 | 63 | At the moment, Attic cannot automatically detect when a chunk is corrupt or missing. 64 | Correctly distinguishing between transient and persistent failures is difficult. 65 | The `atticadm` utility will have the functionality to kill/delete bad chunks. 66 | 67 | ## How is compression handled? 68 | 69 | Uploaded NARs are chunked then compressed on the server before being streamed to the storage backend. 70 | On the chunk level, we use the hash of the _uncompressed chunk_ to perform global deduplication. 71 | 72 | ``` 73 | ┌───────────────────────────────────►Chunk Hash 74 | │ 75 | │ 76 | ├───────────────────────────────────►Chunk Size 77 | │ 78 | ┌───────┴────┐ ┌──────────┐ ┌───────────┐ 79 | Chunk Stream──►│Chunk Hasher├─►│Compressor├─►│File Hasher├─►File Stream─►S3 80 | └────────────┘ └──────────┘ └─────┬─────┘ 81 | │ 82 | ├───────►File Hash 83 | │ 84 | │ 85 | └───────►File Size 86 | ``` 87 | -------------------------------------------------------------------------------- /book/src/introduction.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | **Attic** is a self-hostable Nix Binary Cache server backed by an S3-compatible storage provider. 4 | It has support for global deduplication and garbage collection. 5 | 6 | Attic is still an early prototype and is looking for more testers. Want to jump in? [Start your own Attic server](./tutorial.md) in 15 minutes. 7 | 8 | ``` 9 | ⚙️ Pushing 5 paths to "demo" on "local" (566 already cached, 2001 in upstream)... 
10 | ✅ gnvi1x7r8kl3clzx0d266wi82fgyzidv-steam-run-fhs (29.69 MiB/s) 11 | ✅ rw7bx7ak2p02ljm3z4hhpkjlr8rzg6xz-steam-fhs (30.56 MiB/s) 12 | ✅ y92f9y7qhkpcvrqhzvf6k40j6iaxddq8-0p36ammvgyr55q9w75845kw4fw1c65ln-source (19.96 MiB/s) 13 | 🕒 vscode-1.74.2 ███████████████████████████████████████ 345.66 MiB (41.32 MiB/s) 14 | 🕓 zoom-5.12.9.367 ███████████████████████████ 329.36 MiB (39.47 MiB/s) 15 | ``` 16 | 17 | ## Goals 18 | 19 | - **Multi-Tenancy**: Create a private cache for yourself, and one for friends and co-workers. Tenants are mutually untrusting and cannot pollute the views of other caches. 20 | - **Global Deduplication**: Individual caches (tenants) are simply restricted views of the content-addressed NAR Store and Chunk Store. When paths are uploaded, a mapping is created to grant the local cache access to the global NAR. 21 | - **Managed Signing**: Signing is done on-the-fly by the server when store paths are fetched. The user pushing store paths does not have access to the signing key. 22 | - **Scalability**: Attic can be easily replicated. It's designed to be deployed to serverless platforms like fly.io but also works nicely in a single-machine setup. 23 | - **Garbage Collection**: Unused store paths can be garbage-collected in an LRU manner. 
24 | -------------------------------------------------------------------------------- /book/src/reference/README.md: -------------------------------------------------------------------------------- 1 | # Reference 2 | 3 | This section contains detailed listings of options and parameters accepted by Attic: 4 | 5 | - [`attic` CLI](./attic-cli.md) 6 | - [`atticd` CLI](./atticd-cli.md) 7 | - [`atticadm` CLI](./atticadm-cli.md) 8 | -------------------------------------------------------------------------------- /book/src/reference/attic-cli.md: -------------------------------------------------------------------------------- 1 | # `attic` CLI 2 | 3 | The following are the help messages that will be printed when you invoke any sub-command with `--help`: 4 | 5 | 13 | -------------------------------------------------------------------------------- /book/src/reference/atticadm-cli.md: -------------------------------------------------------------------------------- 1 | # `atticadm` CLI 2 | 3 | The following are the help messages that will be printed when you invoke any sub-command with `--help`: 4 | 5 | 12 | -------------------------------------------------------------------------------- /book/src/reference/atticd-cli.md: -------------------------------------------------------------------------------- 1 | # `atticd` CLI 2 | 3 | The following are the help messages that will be printed when you invoke any sub-command with `--help`: 4 | 5 | 12 | -------------------------------------------------------------------------------- /book/src/user-guide/README.md: -------------------------------------------------------------------------------- 1 | # User Guide 2 | 3 | ## Logging in 4 | 5 | You should have received an `attic login` command from an admin like the following: 6 | 7 | ``` 8 | attic login central https://attic.domain.tld/ eyJ... 9 | ``` 10 | 11 | The `attic` client can work with multiple servers at the same time. 
12 | To select the `foo` cache from server `central`, use one of the following: 13 | 14 | - `foo`, if the `central` server is configured as the default 15 | - `central:foo` 16 | 17 | To configure the default server, set `default-server` in `~/.config/attic/config.toml`. 18 | 19 | ## Enabling a cache 20 | 21 | To configure Nix to automatically use cache `foo`: 22 | 23 | ``` 24 | attic use foo 25 | ``` 26 | 27 | ## Disabling a cache 28 | 29 | To configure Nix to no longer use a cache, remove the corresponding entries from the list of `substituters` and `trusted-public-keys` in `~/.config/nix/nix.conf` 30 | 31 | ## Pushing to the cache 32 | 33 | To push a store path to cache `foo`: 34 | 35 | ```bash 36 | attic push foo /nix/store/... 37 | ``` 38 | 39 | Other examples include: 40 | 41 | ```bash 42 | attic push foo ./result 43 | attic push foo /run/current-system 44 | ``` 45 | -------------------------------------------------------------------------------- /ci-installer.nix: -------------------------------------------------------------------------------- 1 | # Generates a script to bootstrap the Attic client with a substituter for 2 | # CI usage. Will be simplier when Attic is in cache.nixos.org. 3 | 4 | { self 5 | , writeText 6 | , writeScript 7 | 8 | , substituter ? "https://staging.attic.rs/attic-ci" 9 | , trustedPublicKey ? "attic-ci:U5Sey4mUxwBXM3iFapmP0/ogODXywKLRNgRPQpEXxbo=" 10 | }: 11 | 12 | let 13 | cacheNixosOrg = "https://cache.nixos.org"; 14 | cacheNixosOrgKey = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="; 15 | 16 | bootstrapHeader = '' 17 | let 18 | maybeStorePath = if builtins ? 
langVersion && builtins.lessThan 1 builtins.langVersion 19 | then builtins.storePath 20 | else x: x; 21 | mkFakeDerivation = attrs: outputs: 22 | let 23 | outputNames = builtins.attrNames outputs; 24 | common = attrs // outputsSet // 25 | { type = "derivation"; 26 | outputs = outputNames; 27 | all = outputsList; 28 | }; 29 | outputToAttrListElement = outputName: 30 | { name = outputName; 31 | value = common // { 32 | inherit outputName; 33 | outPath = maybeStorePath (builtins.getAttr outputName outputs); 34 | }; 35 | }; 36 | outputsList = map outputToAttrListElement outputNames; 37 | outputsSet = builtins.listToAttrs outputsList; 38 | in outputsSet; 39 | in 40 | ''; 41 | 42 | makeBootstrap = system: let 43 | package = 44 | if system == "x86_64-linux" then self.packages.${system}.attic-client-static 45 | else self.packages.${system}.attic-client; 46 | in '' 47 | "${system}" = (mkFakeDerivation { 48 | name = "${package.name}"; 49 | system = "${system}"; 50 | } { 51 | out = "${package.out}"; 52 | }).out; 53 | ''; 54 | 55 | bootstrapExpr = '' 56 | { system ? 
builtins.currentSystem }: 57 | ${bootstrapHeader} 58 | { 59 | ${makeBootstrap "x86_64-linux"} 60 | ${makeBootstrap "aarch64-linux"} 61 | ${makeBootstrap "x86_64-darwin"} 62 | ${makeBootstrap "aarch64-darwin"} 63 | }.''${system} 64 | ''; 65 | 66 | bootstrapScript = writeScript "install-attic-ci.sh" '' 67 | #!/usr/bin/env bash 68 | set -euo pipefail 69 | expr=$(mktemp) 70 | 71 | cleanup() { 72 | rm -f "$expr" 73 | } 74 | 75 | cat >"$expr" <<'EOF' 76 | ${bootstrapExpr} 77 | EOF 78 | 79 | nix-env --substituters "${substituter} ${cacheNixosOrg}" --trusted-public-keys "${trustedPublicKey} ${cacheNixosOrgKey}" -if "$expr" 80 | ''; 81 | in bootstrapScript 82 | -------------------------------------------------------------------------------- /client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "attic-client" 3 | version = "0.1.0" 4 | edition = "2021" 5 | publish = false 6 | 7 | [[bin]] 8 | name = "attic" 9 | path = "src/main.rs" 10 | 11 | [dependencies] 12 | attic = { path = "../attic" } 13 | 14 | anyhow = "1.0.71" 15 | async-channel = "2.3.1" 16 | bytes = "1.4.0" 17 | clap = { version = "4.3", features = ["derive"] } 18 | clap_complete = "4.3.0" 19 | const_format = "0.2.30" 20 | dialoguer = "0.11.0" 21 | displaydoc = "0.2.4" 22 | enum-as-inner = "0.6.0" 23 | futures = "0.3.28" 24 | humantime = "2.1.0" 25 | indicatif = "0.17.3" 26 | lazy_static = "1.4.0" 27 | notify = { version = "7.0.0", default-features = false, features = ["macos_kqueue"] } 28 | regex = "1.8.3" 29 | reqwest = { version = "0.12.4", default-features = false, features = ["json", "rustls-tls", "rustls-tls-native-roots", "stream"] } 30 | serde = { version = "1.0.163", features = ["derive"] } 31 | serde_json = "1.0.96" 32 | toml = "0.8.8" 33 | tracing = "0.1.37" 34 | tracing-subscriber = "0.3.17" 35 | xdg = "2.5.0" 36 | 37 | [dependencies.tokio] 38 | version = "1.28.2" 39 | features = [ 40 | "fs", 41 | "io-util", 42 | "macros", 43 | 
"process", 44 | "rt", 45 | "rt-multi-thread", 46 | "sync", 47 | ] 48 | -------------------------------------------------------------------------------- /client/src/cache.rs: -------------------------------------------------------------------------------- 1 | //! Client-specific cache references. 2 | //! 3 | //! The Attic client is designed to work with multiple servers. 4 | //! Therefore, users can refer to caches in the following forms: 5 | //! 6 | //! - `cachename`: Will use `cachename` on the default server 7 | //! - `servername:cachename`: Will use `cachename` on server `servername` 8 | //! - `https://cache.server.tld/username`: Will auto-detect 9 | //! - To be implemented 10 | 11 | use std::ops::Deref; 12 | use std::str::FromStr; 13 | 14 | use anyhow::{anyhow, Result}; 15 | use serde::{Deserialize, Serialize}; 16 | 17 | pub use attic::cache::CacheName; 18 | 19 | /// A reference to a cache. 20 | #[derive(Debug, Clone)] 21 | pub enum CacheRef { 22 | DefaultServer(CacheName), 23 | ServerQualified(ServerName, CacheName), 24 | } 25 | 26 | /// A server name. 27 | /// 28 | /// It has the same requirements as a cache name. 
29 | #[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] 30 | #[serde(transparent)] 31 | pub struct ServerName(CacheName); 32 | 33 | impl CacheRef { 34 | fn try_parse_cache(s: &str) -> Option { 35 | let name = CacheName::new(s.to_owned()).ok()?; 36 | Some(Self::DefaultServer(name)) 37 | } 38 | 39 | fn try_parse_server_qualified(s: &str) -> Option { 40 | let (server, cache) = s.split_once(':')?; 41 | let server = CacheName::new(server.to_owned()).ok()?; 42 | let cache = CacheName::new(cache.to_owned()).ok()?; 43 | Some(Self::ServerQualified(ServerName(server), cache)) 44 | } 45 | } 46 | 47 | impl FromStr for CacheRef { 48 | type Err = anyhow::Error; 49 | 50 | fn from_str(s: &str) -> Result { 51 | if let Some(r) = Self::try_parse_cache(s) { 52 | return Ok(r); 53 | } 54 | 55 | if let Some(r) = Self::try_parse_server_qualified(s) { 56 | return Ok(r); 57 | } 58 | 59 | Err(anyhow!("Invalid cache reference")) 60 | } 61 | } 62 | 63 | impl Deref for ServerName { 64 | type Target = CacheName; 65 | 66 | fn deref(&self) -> &Self::Target { 67 | &self.0 68 | } 69 | } 70 | 71 | impl FromStr for ServerName { 72 | type Err = anyhow::Error; 73 | 74 | fn from_str(s: &str) -> Result { 75 | Ok(Self(CacheName::from_str(s)?)) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /client/src/cli.rs: -------------------------------------------------------------------------------- 1 | //! Global CLI Setup. 2 | 3 | use std::env; 4 | 5 | use anyhow::{anyhow, Result}; 6 | use clap::{CommandFactory, Parser, Subcommand}; 7 | use clap_complete::Shell; 8 | use enum_as_inner::EnumAsInner; 9 | 10 | use crate::command::cache::{self, Cache}; 11 | use crate::command::get_closure::{self, GetClosure}; 12 | use crate::command::login::{self, Login}; 13 | use crate::command::push::{self, Push}; 14 | use crate::command::r#use::{self, Use}; 15 | use crate::command::watch_store::{self, WatchStore}; 16 | 17 | /// Attic binary cache client. 
18 | #[derive(Debug, Parser)] 19 | #[clap(version)] 20 | #[clap(propagate_version = true)] 21 | pub struct Opts { 22 | #[clap(subcommand)] 23 | pub command: Command, 24 | } 25 | 26 | #[derive(Debug, Subcommand, EnumAsInner)] 27 | pub enum Command { 28 | Login(Login), 29 | Use(Use), 30 | Push(Push), 31 | Cache(Cache), 32 | WatchStore(WatchStore), 33 | 34 | #[clap(hide = true)] 35 | GetClosure(GetClosure), 36 | } 37 | 38 | /// Generate shell autocompletion files. 39 | #[derive(Debug, Parser)] 40 | pub struct GenCompletions { 41 | /// The shell to generate autocompletion files for. 42 | shell: Shell, 43 | } 44 | 45 | pub async fn run() -> Result<()> { 46 | // https://github.com/clap-rs/clap/issues/1335 47 | if let Some("gen-completions") = env::args().nth(1).as_deref() { 48 | return gen_completions(env::args().nth(2)).await; 49 | } 50 | 51 | let opts = Opts::parse(); 52 | 53 | match opts.command { 54 | Command::Login(_) => login::run(opts).await, 55 | Command::Use(_) => r#use::run(opts).await, 56 | Command::Push(_) => push::run(opts).await, 57 | Command::Cache(_) => cache::run(opts).await, 58 | Command::WatchStore(_) => watch_store::run(opts).await, 59 | Command::GetClosure(_) => get_closure::run(opts).await, 60 | } 61 | } 62 | 63 | async fn gen_completions(shell: Option) -> Result<()> { 64 | let shell: Shell = shell 65 | .ok_or_else(|| anyhow!("Must specify a shell."))? 66 | .parse() 67 | .unwrap(); 68 | 69 | clap_complete::generate(shell, &mut Opts::command(), "attic", &mut std::io::stdout()); 70 | 71 | Ok(()) 72 | } 73 | -------------------------------------------------------------------------------- /client/src/command/get_closure.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use anyhow::Result; 4 | use clap::Parser; 5 | 6 | use crate::cli::Opts; 7 | use attic::nix_store::NixStore; 8 | 9 | /// Returns the closure of a store path (test). 10 | /// 11 | /// This is similar to `nix-store -qR`. 
12 | #[derive(Debug, Parser)] 13 | pub struct GetClosure { 14 | store_path: PathBuf, 15 | 16 | /// For derivations, include their outputs. 17 | #[clap(long)] 18 | include_outputs: bool, 19 | } 20 | 21 | pub async fn run(opts: Opts) -> Result<()> { 22 | let sub = opts.command.as_get_closure().unwrap(); 23 | 24 | let store = NixStore::connect()?; 25 | let store_path = store.follow_store_path(&sub.store_path)?; 26 | let closure = store 27 | .compute_fs_closure(store_path, false, sub.include_outputs, false) 28 | .await?; 29 | 30 | for path in &closure { 31 | println!("{}", store.get_full_path(path).to_str().unwrap()); 32 | } 33 | 34 | Ok(()) 35 | } 36 | -------------------------------------------------------------------------------- /client/src/command/login.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | 4 | use crate::cache::ServerName; 5 | use crate::cli::Opts; 6 | use crate::config::{Config, ServerConfig, ServerTokenConfig}; 7 | 8 | /// Log into an Attic server. 9 | #[derive(Debug, Parser)] 10 | pub struct Login { 11 | /// Name of the server. 12 | name: ServerName, 13 | 14 | /// Endpoint of the server. 15 | endpoint: String, 16 | 17 | /// Access token. 18 | token: Option, 19 | 20 | /// Set the server as the default. 
21 | #[clap(long)] 22 | set_default: bool, 23 | } 24 | 25 | pub async fn run(opts: Opts) -> Result<()> { 26 | let sub = opts.command.as_login().unwrap(); 27 | let mut config = Config::load()?; 28 | let mut config_m = config.as_mut(); 29 | 30 | if let Some(server) = config_m.servers.get_mut(&sub.name) { 31 | eprintln!("✍️ Overwriting server \"{}\"", sub.name.as_str()); 32 | 33 | server.endpoint = sub.endpoint.to_owned(); 34 | 35 | if let Some(token) = &sub.token { 36 | server.token = Some(ServerTokenConfig::Raw { 37 | token: token.clone(), 38 | }); 39 | } 40 | } else { 41 | eprintln!("✍️ Configuring server \"{}\"", sub.name.as_str()); 42 | 43 | config_m.servers.insert( 44 | sub.name.to_owned(), 45 | ServerConfig { 46 | endpoint: sub.endpoint.to_owned(), 47 | token: sub 48 | .token 49 | .to_owned() 50 | .map(|token| ServerTokenConfig::Raw { token }), 51 | }, 52 | ); 53 | } 54 | 55 | if sub.set_default || config_m.servers.len() == 1 { 56 | config_m.default_server = Some(sub.name.to_owned()); 57 | } 58 | 59 | Ok(()) 60 | } 61 | -------------------------------------------------------------------------------- /client/src/command/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cache; 2 | pub mod get_closure; 3 | pub mod login; 4 | pub mod push; 5 | pub mod r#use; 6 | pub mod watch_store; 7 | -------------------------------------------------------------------------------- /client/src/command/use.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Result}; 2 | use clap::Parser; 3 | use reqwest::Url; 4 | 5 | use crate::api::ApiClient; 6 | use crate::cache::CacheRef; 7 | use crate::cli::Opts; 8 | use crate::config::Config; 9 | use crate::nix_config::NixConfig; 10 | use crate::nix_netrc::NixNetrc; 11 | 12 | /// Configure Nix to use a binary cache. 13 | #[derive(Debug, Parser)] 14 | pub struct Use { 15 | /// The cache to configure. 
16 | /// 17 | /// This can be either `servername:cachename` or `cachename` 18 | /// when using the default server. 19 | cache: CacheRef, 20 | } 21 | 22 | pub async fn run(opts: Opts) -> Result<()> { 23 | let sub = opts.command.as_use().unwrap(); 24 | let config = Config::load()?; 25 | 26 | let (server_name, server, cache) = config.resolve_cache(&sub.cache)?; 27 | 28 | let api = ApiClient::from_server_config(server.clone())?; 29 | let cache_config = api.get_cache_config(cache).await?; 30 | 31 | let substituter = cache_config 32 | .substituter_endpoint 33 | .ok_or_else(|| anyhow!("The server did not tell us where the binary cache endpoint is."))?; 34 | let public_key = cache_config.public_key 35 | .ok_or_else(|| anyhow!("The server did not tell us which public key it uses. Is signing managed by the client?"))?; 36 | 37 | eprintln!( 38 | "Configuring Nix to use \"{cache}\" on \"{server_name}\":", 39 | cache = cache.as_str(), 40 | server_name = server_name.as_str(), 41 | ); 42 | 43 | // Modify nix.conf 44 | eprintln!("+ Substituter: {}", substituter); 45 | eprintln!("+ Trusted Public Key: {}", public_key); 46 | 47 | let mut nix_config = NixConfig::load().await?; 48 | nix_config.add_substituter(&substituter); 49 | nix_config.add_trusted_public_key(&public_key); 50 | 51 | // Modify netrc 52 | if let Some(token) = server.token()? { 53 | eprintln!("+ Access Token"); 54 | 55 | let mut nix_netrc = NixNetrc::load().await?; 56 | let host = Url::parse(&substituter)? 
57 | .host() 58 | .map(|h| h.to_string()) 59 | .unwrap(); 60 | nix_netrc.add_token(host, token.to_string()); 61 | nix_netrc.save().await?; 62 | 63 | let netrc_path = nix_netrc.path().unwrap().to_str().unwrap(); 64 | 65 | nix_config.set_netrc_file(netrc_path); 66 | } 67 | 68 | nix_config.save().await?; 69 | 70 | Ok(()) 71 | } 72 | -------------------------------------------------------------------------------- /client/src/command/watch_store.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | use std::sync::Arc; 3 | 4 | use anyhow::{anyhow, Result}; 5 | use clap::Parser; 6 | use indicatif::MultiProgress; 7 | use notify::{EventKind, RecursiveMode, Watcher}; 8 | use tokio::sync::mpsc; 9 | 10 | use crate::api::ApiClient; 11 | use crate::cache::CacheRef; 12 | use crate::cli::Opts; 13 | use crate::config::Config; 14 | use crate::push::{PushConfig, PushSessionConfig, Pusher}; 15 | use attic::nix_store::{NixStore, StorePath}; 16 | 17 | /// Watch the Nix Store for new paths and upload them to a binary cache. 18 | #[derive(Debug, Parser)] 19 | pub struct WatchStore { 20 | /// The cache to push to. 21 | /// 22 | /// This can be either `servername:cachename` or `cachename` 23 | /// when using the default server. 24 | cache: CacheRef, 25 | 26 | /// Push the new paths only and do not compute closures. 27 | #[clap(long, hide = true)] 28 | no_closure: bool, 29 | 30 | /// Ignore the upstream cache filter. 31 | #[clap(long)] 32 | ignore_upstream_cache_filter: bool, 33 | 34 | /// The maximum number of parallel upload processes. 35 | #[clap(short = 'j', long, default_value = "5")] 36 | jobs: usize, 37 | 38 | /// Always send the upload info as part of the payload. 
39 | #[clap(long, hide = true)] 40 | force_preamble: bool, 41 | } 42 | 43 | pub async fn run(opts: Opts) -> Result<()> { 44 | let sub = opts.command.as_watch_store().unwrap(); 45 | if sub.jobs == 0 { 46 | return Err(anyhow!("The number of jobs cannot be 0")); 47 | } 48 | 49 | let config = Config::load()?; 50 | 51 | let store = Arc::new(NixStore::connect()?); 52 | let store_dir = store.store_dir().to_owned(); 53 | 54 | let (server_name, server, cache) = config.resolve_cache(&sub.cache)?; 55 | let mut api = ApiClient::from_server_config(server.clone())?; 56 | 57 | // Confirm remote cache validity, query cache config 58 | let cache_config = api.get_cache_config(cache).await?; 59 | 60 | if let Some(api_endpoint) = &cache_config.api_endpoint { 61 | // Use delegated API endpoint 62 | api.set_endpoint(api_endpoint)?; 63 | } 64 | 65 | let push_config = PushConfig { 66 | num_workers: sub.jobs, 67 | force_preamble: sub.force_preamble, 68 | }; 69 | 70 | let push_session_config = PushSessionConfig { 71 | no_closure: sub.no_closure, 72 | ignore_upstream_cache_filter: sub.ignore_upstream_cache_filter, 73 | }; 74 | 75 | let mp = MultiProgress::new(); 76 | let session = Pusher::new( 77 | store.clone(), 78 | api, 79 | cache.to_owned(), 80 | cache_config, 81 | mp, 82 | push_config, 83 | ) 84 | .into_push_session(push_session_config); 85 | 86 | let (tx, mut rx) = mpsc::unbounded_channel(); 87 | 88 | let mut watcher = notify::recommended_watcher(move |res: notify::Result| { 89 | tx.send(res).unwrap(); 90 | })?; 91 | 92 | watcher.watch(&store_dir, RecursiveMode::NonRecursive)?; 93 | 94 | eprintln!( 95 | "👀 Pushing new store paths to \"{cache}\" on \"{server}\"", 96 | cache = cache.as_str(), 97 | server = server_name.as_str(), 98 | ); 99 | 100 | while let Some(res) = rx.recv().await { 101 | match res { 102 | Ok(event) => { 103 | // We watch the removals of lock files which signify 104 | // store paths becoming valid 105 | if let EventKind::Remove(_) = event.kind { 106 | let paths = 
event 107 | .paths 108 | .iter() 109 | .filter_map(|p| { 110 | let base = strip_lock_file(p)?; 111 | store.parse_store_path(base).ok() 112 | }) 113 | .collect::>(); 114 | 115 | if !paths.is_empty() { 116 | session.queue_many(paths).unwrap(); 117 | } 118 | } 119 | } 120 | Err(e) => eprintln!("Error during watch: {:?}", e), 121 | } 122 | } 123 | 124 | Ok(()) 125 | } 126 | 127 | fn strip_lock_file(p: &Path) -> Option { 128 | p.to_str() 129 | .and_then(|p| p.strip_suffix(".lock")) 130 | .filter(|t| !t.ends_with(".drv") && !t.ends_with("-source")) 131 | .map(PathBuf::from) 132 | } 133 | -------------------------------------------------------------------------------- /client/src/main.rs: -------------------------------------------------------------------------------- 1 | #![deny( 2 | asm_sub_register, 3 | deprecated, 4 | missing_abi, 5 | unsafe_code, 6 | unused_macros, 7 | unused_must_use, 8 | unused_unsafe 9 | )] 10 | #![deny(clippy::from_over_into, clippy::needless_question_mark)] 11 | #![cfg_attr( 12 | not(debug_assertions), 13 | deny(unused_imports, unused_mut, unused_variables,) 14 | )] 15 | 16 | mod api; 17 | mod cache; 18 | mod cli; 19 | mod command; 20 | mod config; 21 | mod nix_config; 22 | mod nix_netrc; 23 | mod push; 24 | mod version; 25 | 26 | use anyhow::Result; 27 | 28 | #[tokio::main] 29 | async fn main() -> Result<()> { 30 | init_logging()?; 31 | cli::run().await 32 | } 33 | 34 | fn init_logging() -> Result<()> { 35 | tracing_subscriber::fmt::init(); 36 | Ok(()) 37 | } 38 | -------------------------------------------------------------------------------- /client/src/version.rs: -------------------------------------------------------------------------------- 1 | /// The distributor of this Attic client. 2 | /// 3 | /// Common values include `nixpkgs`, `attic` and `dev`. 
/// The distributor of this Attic client, baked in at compile time from the
/// `ATTIC_DISTRIBUTOR` environment variable; "unknown" when it is unset.
pub const ATTIC_DISTRIBUTOR: &str = match option_env!("ATTIC_DISTRIBUTOR") {
    Some(distro) => distro,
    None => "unknown",
};
"0f9255e01c2351cc7d116c072cb317785dd33b33", 31 | "type": "github" 32 | }, 33 | "original": { 34 | "owner": "edolstra", 35 | "repo": "flake-compat", 36 | "type": "github" 37 | } 38 | }, 39 | "flake-parts": { 40 | "inputs": { 41 | "nixpkgs-lib": [ 42 | "nixpkgs" 43 | ] 44 | }, 45 | "locked": { 46 | "lastModified": 1722555600, 47 | "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", 48 | "owner": "hercules-ci", 49 | "repo": "flake-parts", 50 | "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", 51 | "type": "github" 52 | }, 53 | "original": { 54 | "owner": "hercules-ci", 55 | "repo": "flake-parts", 56 | "type": "github" 57 | } 58 | }, 59 | "nix-github-actions": { 60 | "inputs": { 61 | "nixpkgs": [ 62 | "nixpkgs" 63 | ] 64 | }, 65 | "locked": { 66 | "lastModified": 1729742964, 67 | "narHash": "sha256-B4mzTcQ0FZHdpeWcpDYPERtyjJd/NIuaQ9+BV1h+MpA=", 68 | "owner": "nix-community", 69 | "repo": "nix-github-actions", 70 | "rev": "e04df33f62cdcf93d73e9a04142464753a16db67", 71 | "type": "github" 72 | }, 73 | "original": { 74 | "owner": "nix-community", 75 | "repo": "nix-github-actions", 76 | "type": "github" 77 | } 78 | }, 79 | "nixpkgs": { 80 | "locked": { 81 | "lastModified": 1726042813, 82 | "narHash": "sha256-LnNKCCxnwgF+575y0pxUdlGZBO/ru1CtGHIqQVfvjlA=", 83 | "owner": "NixOS", 84 | "repo": "nixpkgs", 85 | "rev": "159be5db480d1df880a0135ca0bfed84c2f88353", 86 | "type": "github" 87 | }, 88 | "original": { 89 | "owner": "NixOS", 90 | "ref": "nixpkgs-unstable", 91 | "repo": "nixpkgs", 92 | "type": "github" 93 | } 94 | }, 95 | "nixpkgs-stable": { 96 | "locked": { 97 | "lastModified": 1724316499, 98 | "narHash": "sha256-Qb9MhKBUTCfWg/wqqaxt89Xfi6qTD3XpTzQ9eXi3JmE=", 99 | "owner": "NixOS", 100 | "repo": "nixpkgs", 101 | "rev": "797f7dc49e0bc7fab4b57c021cdf68f595e47841", 102 | "type": "github" 103 | }, 104 | "original": { 105 | "owner": "NixOS", 106 | "ref": "nixos-24.05", 107 | "repo": "nixpkgs", 108 | "type": "github" 109 | } 110 | }, 111 | "root": { 112 | 
"inputs": { 113 | "crane": "crane", 114 | "flake-compat": "flake-compat", 115 | "flake-parts": "flake-parts", 116 | "nix-github-actions": "nix-github-actions", 117 | "nixpkgs": "nixpkgs", 118 | "nixpkgs-stable": "nixpkgs-stable" 119 | } 120 | } 121 | }, 122 | "root": "root", 123 | "version": 7 124 | } 125 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "A Nix binary cache server"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; 6 | nixpkgs-stable.url = "github:NixOS/nixpkgs/nixos-24.05"; 7 | 8 | flake-parts = { 9 | url = "github:hercules-ci/flake-parts"; 10 | inputs.nixpkgs-lib.follows = "nixpkgs"; 11 | }; 12 | 13 | crane = { 14 | url = "github:ipetkov/crane"; 15 | inputs.nixpkgs.follows = "nixpkgs"; 16 | }; 17 | 18 | nix-github-actions = { 19 | url = "github:nix-community/nix-github-actions"; 20 | inputs.nixpkgs.follows = "nixpkgs"; 21 | }; 22 | 23 | flake-compat = { 24 | url = "github:edolstra/flake-compat"; 25 | flake = false; 26 | }; 27 | }; 28 | 29 | outputs = inputs @ { self, flake-parts, ... 
}: let 30 | supportedSystems = [ 31 | "x86_64-linux" 32 | "aarch64-linux" 33 | "riscv64-linux" 34 | "aarch64-darwin" 35 | "x86_64-darwin" 36 | ]; 37 | 38 | inherit (inputs.nixpkgs) lib; 39 | 40 | modules = builtins.foldl' (acc: f: f acc) ./flake [ 41 | builtins.readDir 42 | (lib.filterAttrs (name: type: 43 | type == "regular" && lib.hasSuffix ".nix" name 44 | )) 45 | (lib.mapAttrsToList (name: _: 46 | lib.path.append ./flake name 47 | )) 48 | ]; 49 | 50 | in flake-parts.lib.mkFlake { inherit inputs; } { 51 | imports = modules; 52 | systems = supportedSystems; 53 | 54 | debug = true; 55 | }; 56 | } 57 | -------------------------------------------------------------------------------- /flake/devshells.nix: -------------------------------------------------------------------------------- 1 | # Development shells 2 | 3 | toplevel @ { lib, flake-parts-lib, ... }: 4 | let 5 | inherit (lib) 6 | mkOption 7 | types 8 | ; 9 | inherit (flake-parts-lib) 10 | mkPerSystemOption 11 | ; 12 | in 13 | { 14 | options = { 15 | perSystem = mkPerSystemOption { 16 | options.attic.devshell = { 17 | packageSets = mkOption { 18 | type = types.attrsOf (types.listOf types.package); 19 | default = {}; 20 | }; 21 | extraPackages = mkOption { 22 | type = types.listOf types.package; 23 | default = []; 24 | }; 25 | extraArgs = mkOption { 26 | type = types.attrsOf types.unspecified; 27 | default = {}; 28 | }; 29 | }; 30 | }; 31 | }; 32 | 33 | config = { 34 | perSystem = { self', pkgs, config, ... 
}: let 35 | cfg = config.attic.devshell; 36 | in { 37 | attic.devshell.packageSets = with pkgs; { 38 | rustc = lib.optionals (config.attic.toolchain == null) [ 39 | rustc 40 | ]; 41 | 42 | rust = [ 43 | cargo-expand 44 | cargo-outdated 45 | cargo-edit 46 | tokio-console 47 | ]; 48 | 49 | linters = [ 50 | clippy 51 | rustfmt 52 | 53 | editorconfig-checker 54 | ]; 55 | 56 | utils = [ 57 | jq 58 | just 59 | ]; 60 | 61 | ops = [ 62 | postgresql 63 | sqlite-interactive 64 | 65 | flyctl 66 | skopeo 67 | manifest-tool 68 | ] ++ lib.optionals pkgs.stdenv.isLinux [ 69 | wrangler 70 | ]; 71 | 72 | bench = [ 73 | wrk 74 | ] ++ lib.optionals pkgs.stdenv.isLinux [ 75 | linuxPackages.perf 76 | ]; 77 | 78 | wasm = [ 79 | llvmPackages_latest.bintools 80 | worker-build wasm-pack wasm-bindgen-cli 81 | ]; 82 | }; 83 | 84 | devShells.default = pkgs.mkShell (lib.recursiveUpdate { 85 | inputsFrom = [ 86 | self'.packages.attic 87 | self'.packages.book 88 | ]; 89 | 90 | packages = lib.flatten (lib.attrValues cfg.packageSets); 91 | 92 | env = { 93 | ATTIC_DISTRIBUTOR = toplevel.config.attic.distributor; 94 | 95 | RUST_SRC_PATH = "${pkgs.rustPlatform.rustcSrc}/library"; 96 | 97 | NIX_PATH = "nixpkgs=${pkgs.path}"; 98 | 99 | # See comment in `attic/build.rs` 100 | NIX_INCLUDE_PATH = "${lib.getDev self'.packages.attic.passthru.nix}/include"; 101 | 102 | # Used by `just with-nix` to build/test with alternative Nix versions. 103 | NIX_VERSIONS = config.attic.nix-versions.manifestFile; 104 | }; 105 | } cfg.extraArgs); 106 | 107 | devShells.demo = pkgs.mkShell { 108 | packages = [ self'.packages.default ]; 109 | 110 | shellHook = '' 111 | >&2 echo 112 | >&2 echo '🚀 Run `atticd` to get started!' 113 | >&2 echo 114 | ''; 115 | }; 116 | }; 117 | }; 118 | } 119 | -------------------------------------------------------------------------------- /flake/distributor.nix: -------------------------------------------------------------------------------- 1 | { lib, flake-parts-lib, ... 
}: 2 | let 3 | inherit (lib) 4 | mkOption 5 | types 6 | ; 7 | in 8 | { 9 | options = { 10 | attic.distributor = mkOption { 11 | type = types.str; 12 | default = "dev"; 13 | }; 14 | }; 15 | } 16 | -------------------------------------------------------------------------------- /flake/integration-tests.nix: -------------------------------------------------------------------------------- 1 | { lib, flake-parts-lib, inputs, self, ... }: 2 | let 3 | inherit (lib) 4 | mkOption 5 | types 6 | ; 7 | inherit (flake-parts-lib) 8 | mkPerSystemOption 9 | ; 10 | in 11 | { 12 | options = { 13 | perSystem = mkPerSystemOption { 14 | options.attic.integration-tests = { 15 | nixpkgsArgs = mkOption { 16 | type = types.attrsOf types.anything; 17 | default = {}; 18 | }; 19 | tests = mkOption { 20 | type = types.attrsOf types.package; 21 | default = {}; 22 | }; 23 | stableTests = mkOption { 24 | type = types.attrsOf types.package; 25 | default = {}; 26 | }; 27 | }; 28 | }; 29 | }; 30 | 31 | config = { 32 | flake.githubActions = inputs.nix-github-actions.lib.mkGithubMatrix { 33 | checks = { 34 | inherit (self.checks) x86_64-linux; 35 | }; 36 | }; 37 | 38 | perSystem = { self', pkgs, config, system, ... 
}: let 39 | cfg = config.attic.integration-tests; 40 | 41 | vmPkgs = import inputs.nixpkgs ({ 42 | inherit system; 43 | overlays = [ self.overlays.default ]; 44 | } // cfg.nixpkgsArgs); 45 | vmPkgsStable = import inputs.nixpkgs-stable ({ 46 | inherit system; 47 | overlays = [ self.overlays.default ]; 48 | } // cfg.nixpkgsArgs); 49 | 50 | makeIntegrationTests = pkgs: import ../integration-tests { 51 | inherit pkgs; 52 | flake = self; 53 | }; 54 | in { 55 | attic.integration-tests = { 56 | tests = makeIntegrationTests vmPkgs; 57 | stableTests = makeIntegrationTests vmPkgsStable; 58 | }; 59 | 60 | checks = let 61 | tests = cfg.tests; 62 | stableTests = lib.mapAttrs' (name: lib.nameValuePair "stable-${name}") cfg.stableTests; 63 | in lib.optionalAttrs pkgs.stdenv.isLinux (tests // stableTests); 64 | }; 65 | }; 66 | } 67 | -------------------------------------------------------------------------------- /flake/nix-versions.nix: -------------------------------------------------------------------------------- 1 | { lib, flake-parts-lib, config, ... }: 2 | let 3 | inherit (lib) 4 | mkOption 5 | types 6 | ; 7 | inherit (flake-parts-lib) 8 | mkPerSystemOption 9 | ; 10 | in 11 | { 12 | options = { 13 | perSystem = mkPerSystemOption { 14 | options.attic.nix-versions = { 15 | versions = mkOption { 16 | type = types.attrsOf types.package; 17 | default = {}; 18 | }; 19 | manifestFile = mkOption { 20 | type = types.package; 21 | }; 22 | }; 23 | 24 | options.internalMatrix = mkOption { 25 | type = types.attrsOf (types.attrsOf types.package); 26 | }; 27 | }; 28 | }; 29 | 30 | config = { 31 | flake.internalMatrix = lib.mapAttrs (system: ps: ps.internalMatrix) config.allSystems; 32 | 33 | perSystem = { self', pkgs, config, cranePkgs, ... 
}: let 34 | cfg = config.attic.nix-versions; 35 | in { 36 | attic.nix-versions = { 37 | versions = { 38 | default = pkgs.nix; 39 | "2.20" = pkgs.nixVersions.nix_2_20; 40 | "2.24" = pkgs.nixVersions.nix_2_24; 41 | }; 42 | 43 | manifestFile = let 44 | manifest = lib.mapAttrs (_: nix: { 45 | inherit nix; 46 | shellHook = '' 47 | export NIX_INCLUDE_PATH="${lib.getDev nix}/include" 48 | export NIX_CFLAGS_COMPILE="-isystem $NIX_INCLUDE_PATH $NIX_CFLAGS_COMPILE" 49 | export NIX_LDFLAGS="-L${nix}/lib $NIX_LDFLAGS" 50 | export PKG_CONFIG_PATH="${lib.getDev nix}/lib/pkgconfig:$PKG_CONFIG_PATH" 51 | export PATH="${lib.getBin nix}/bin:$PATH" 52 | ''; 53 | }) cfg.versions; 54 | in pkgs.writeText "nix-versions.json" (builtins.toJSON manifest); 55 | }; 56 | 57 | internalMatrix = lib.mapAttrs (_: nix: let 58 | cranePkgs' = cranePkgs.override { inherit nix; }; 59 | in { 60 | inherit (cranePkgs') attic-tests cargoArtifacts; 61 | }) cfg.versions; 62 | }; 63 | }; 64 | } 65 | -------------------------------------------------------------------------------- /flake/nixos.nix: -------------------------------------------------------------------------------- 1 | { config, ... }: 2 | { 3 | flake.nixosModules = { 4 | atticd = { 5 | imports = [ 6 | ../nixos/atticd.nix 7 | ]; 8 | 9 | services.atticd.useFlakeCompatOverlay = false; 10 | 11 | nixpkgs.overlays = [ 12 | config.flake.overlays.default 13 | ]; 14 | }; 15 | }; 16 | } 17 | -------------------------------------------------------------------------------- /flake/overlays.nix: -------------------------------------------------------------------------------- 1 | { makeCranePkgs, ... 
}: 2 | { 3 | flake.overlays = { 4 | default = final: prev: let 5 | cranePkgs = makeCranePkgs final; 6 | in { 7 | inherit (cranePkgs) 8 | attic 9 | attic-client 10 | attic-server 11 | ; 12 | }; 13 | }; 14 | } 15 | -------------------------------------------------------------------------------- /flake/packages.nix: -------------------------------------------------------------------------------- 1 | { self 2 | , lib 3 | , flake-parts-lib 4 | , inputs 5 | , config 6 | , makeCranePkgs 7 | , getSystem 8 | , ... 9 | }: 10 | 11 | let 12 | inherit (lib) 13 | mkOption 14 | types 15 | ; 16 | inherit (flake-parts-lib) 17 | mkPerSystemOption 18 | ; 19 | 20 | # Re-evaluate perSystem with cross nixpkgs 21 | # HACK before https://github.com/hercules-ci/flake-parts/issues/95 is solved 22 | evalCross = { system, pkgs }: config.allSystems.${system}.debug.extendModules { 23 | modules = [ 24 | ({ config, lib, ... }: { 25 | _module.args.pkgs = pkgs; 26 | _module.args.self' = lib.mkForce config; 27 | }) 28 | ]; 29 | }; 30 | in 31 | { 32 | options = { 33 | perSystem = mkPerSystemOption { 34 | options.attic = { 35 | toolchain = mkOption { 36 | type = types.nullOr types.package; 37 | default = null; 38 | }; 39 | extraPackageArgs = mkOption { 40 | type = types.attrsOf types.anything; 41 | default = {}; 42 | }; 43 | }; 44 | }; 45 | }; 46 | 47 | config = { 48 | _module.args.makeCranePkgs = lib.mkDefault (pkgs: let 49 | perSystemConfig = getSystem pkgs.system; 50 | craneLib = builtins.foldl' (acc: f: f acc) pkgs [ 51 | inputs.crane.mkLib 52 | (craneLib: 53 | if perSystemConfig.attic.toolchain == null then craneLib 54 | else craneLib.overrideToolchain config.attic.toolchain 55 | ) 56 | ]; 57 | in pkgs.callPackage ../crane.nix { 58 | inherit craneLib; 59 | inherit (perSystemConfig.attic) extraPackageArgs; 60 | }); 61 | 62 | perSystem = { self', pkgs, config, cranePkgs, ... 
}: (lib.mkMerge [ 63 | { 64 | _module.args.cranePkgs = makeCranePkgs pkgs; 65 | 66 | packages = { 67 | default = self'.packages.attic; 68 | 69 | inherit (cranePkgs) 70 | attic 71 | attic-client 72 | attic-server 73 | ; 74 | 75 | attic-nixpkgs = pkgs.callPackage ../package.nix { }; 76 | 77 | attic-ci-installer = pkgs.callPackage ../ci-installer.nix { 78 | inherit self; 79 | }; 80 | 81 | book = pkgs.callPackage ../book { 82 | attic = self'.packages.attic; 83 | }; 84 | }; 85 | } 86 | 87 | (lib.mkIf pkgs.stdenv.isLinux { 88 | packages = { 89 | attic-server-image = pkgs.dockerTools.buildImage { 90 | name = "attic-server"; 91 | tag = "main"; 92 | copyToRoot = [ 93 | self'.packages.attic-server 94 | 95 | # Debugging utilities for `fly ssh console` 96 | pkgs.busybox 97 | 98 | # Now required by the fly.io sshd 99 | pkgs.dockerTools.fakeNss 100 | ]; 101 | config = { 102 | Entrypoint = [ "${self'.packages.attic-server}/bin/atticd" ]; 103 | Env = [ 104 | "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" 105 | ]; 106 | }; 107 | }; 108 | }; 109 | }) 110 | 111 | (lib.mkIf (pkgs.system == "x86_64-linux") { 112 | packages = { 113 | attic-server-image-aarch64 = let 114 | eval = evalCross { 115 | system = "aarch64-linux"; 116 | pkgs = pkgs.pkgsCross.aarch64-multiplatform; 117 | }; 118 | 119 | in eval.config.packages.attic-server-image; 120 | }; 121 | }) 122 | 123 | # Unfortunately, x86_64-darwin fails to evaluate static builds 124 | (lib.mkIf (pkgs.system != "x86_64-darwin") { 125 | packages = { 126 | # TODO: Make this work with Crane 127 | attic-static = (pkgs.pkgsStatic.callPackage ../package.nix { 128 | nix = pkgs.pkgsStatic.nixVersions.nix_2_18.overrideAttrs (old: { 129 | patches = (old.patches or []) ++ [ 130 | # Diff: https://github.com/zhaofengli/nix/compare/501a805fcd4a90e2bc112e9547417cfc4e04ca66...1dbe9899a8acb695f5f08197f1ff51c14bcc7f42 131 | (pkgs.fetchpatch { 132 | url = 
"https://github.com/zhaofengli/nix/compare/501a805fcd4a90e2bc112e9547417cfc4e04ca66...1dbe9899a8acb695f5f08197f1ff51c14bcc7f42.diff"; 133 | hash = "sha256-bxBZDUUNTBUz6F4pwxx1ZnPcOKG3EhV+kDBt8BrFh6k="; 134 | }) 135 | ]; 136 | }); 137 | }).overrideAttrs (old: { 138 | nativeBuildInputs = (old.nativeBuildInputs or []) ++ [ 139 | pkgs.nukeReferences 140 | ]; 141 | 142 | # Read by pkg_config crate (do some autodetection in build.rs?) 143 | PKG_CONFIG_ALL_STATIC = "1"; 144 | 145 | "NIX_CFLAGS_LINK_${pkgs.pkgsStatic.stdenv.cc.suffixSalt}" = "-lc"; 146 | RUSTFLAGS = "-C relocation-model=static"; 147 | 148 | postFixup = (old.postFixup or "") + '' 149 | rm -f $out/nix-support/propagated-build-inputs 150 | nuke-refs $out/bin/attic 151 | ''; 152 | }); 153 | 154 | attic-client-static = self'.packages.attic-static.override { 155 | clientOnly = true; 156 | }; 157 | }; 158 | }) 159 | ]); 160 | }; 161 | } 162 | -------------------------------------------------------------------------------- /garnix.yaml: -------------------------------------------------------------------------------- 1 | builds: 2 | include: 3 | - 'checks.x86_64-linux.*' 4 | -------------------------------------------------------------------------------- /integration-tests/README.md: -------------------------------------------------------------------------------- 1 | # End-to-End Tests 2 | 3 | This directory contains some end-to-end tests for Attic. 4 | -------------------------------------------------------------------------------- /integration-tests/default.nix: -------------------------------------------------------------------------------- 1 | { pkgs ? import ./nixpkgs.nix 2 | , flake ? (import ../flake-compat.nix).defaultNix 3 | }: 4 | 5 | let 6 | inherit (pkgs) lib; 7 | 8 | nixosLib = import (pkgs.path + "/nixos/lib") { }; 9 | 10 | runTest = module: (nixosLib.evalTest ({ config, ... 
}: { 11 | imports = [ 12 | module 13 | { 14 | hostPkgs = pkgs; 15 | _module.args.flake = flake; 16 | } 17 | ]; 18 | result = config.test; 19 | })).config.result; 20 | 21 | basicTests = let 22 | matrix = { 23 | database = [ "sqlite" "postgres" ]; 24 | storage = [ "local" "minio" ]; 25 | }; 26 | in builtins.listToAttrs (map (e: { 27 | name = "basic-${e.database}-${e.storage}"; 28 | value = runTest { 29 | imports = [ 30 | ./basic 31 | { 32 | inherit (e) database storage; 33 | } 34 | ]; 35 | }; 36 | }) (lib.cartesianProduct matrix)); 37 | in { 38 | } // basicTests 39 | -------------------------------------------------------------------------------- /integration-tests/nixpkgs.nix: -------------------------------------------------------------------------------- 1 | let 2 | flake = (import ../flake-compat.nix).defaultNix; 3 | in import flake.inputs.nixpkgs.outPath { 4 | overlays = [ 5 | flake.overlays.default 6 | ]; 7 | } 8 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | set positional-arguments 2 | 3 | here := env_var_or_default("JUST_INVOCATION_DIR", invocation_directory()) 4 | base := `pwd` 5 | 6 | #@echo "here: {{ here }}" 7 | #@echo "base: {{ base }}" 8 | 9 | # List available targets 10 | list: 11 | @just --list --unsorted 12 | 13 | # Run a command with an alternative Nix version 14 | with-nix version *command: 15 | set -e; \ 16 | hook="$(jq -e -r '.[$version].shellHook' --arg version "{{ version }}" < "$NIX_VERSIONS" || (>&2 echo "Version {{ version }} doesn't exist"; exit 1))"; \ 17 | eval "$hook"; \ 18 | CARGO_TARGET_DIR="{{ base }}/target/nix-{{ version }}" \ 19 | {{ command }} 20 | 21 | # (CI) Build WebAssembly crates 22 | ci-build-wasm: 23 | #!/usr/bin/env bash 24 | set -euxo pipefail 25 | 26 | # https://github.com/rust-lang/rust/issues/122357 27 | export RUST_MIN_STACK=16777216 28 | 29 | pushd attic 30 | cargo build --target 
wasm32-unknown-unknown --no-default-features -F chunking -F stream 31 | popd 32 | pushd token 33 | cargo build --target wasm32-unknown-unknown 34 | popd 35 | 36 | # (CI) Run unit tests 37 | ci-unit-tests matrix: 38 | #!/usr/bin/env bash 39 | set -euxo pipefail 40 | 41 | system=$(nix-instantiate --eval -E 'builtins.currentSystem') 42 | tests=$(nix build .#internalMatrix."$system".\"{{ matrix }}\".attic-tests --no-link --print-out-paths -L) 43 | find "$tests/bin" -exec {} \; 44 | 45 | # (CI) Run rustfmt check 46 | ci-rustfmt: 47 | cargo fmt --check 48 | 49 | # (CI) Build and push images 50 | ci-build-and-push-images *args: 51 | .ci/build-and-push-images.sh {{ args }} 52 | -------------------------------------------------------------------------------- /package.nix: -------------------------------------------------------------------------------- 1 | # This is an alternative package expression of Attic in a nixpkgs-acceptable 2 | # form. It will be submitted when the Attic API is considered stable. 3 | # 4 | # For the expression used for CI as well as distribution from this repo, see 5 | # `crane.nix`. 6 | 7 | { lib, stdenv, rustPlatform 8 | , pkg-config 9 | , installShellFiles 10 | , nix 11 | , boost 12 | , darwin 13 | 14 | # Only build the client 15 | , clientOnly ? false 16 | 17 | # Only build certain crates 18 | , crates ? 
if clientOnly then [ "attic-client" ] else [ "attic-client" "attic-server" ] 19 | }: 20 | 21 | let 22 | ignoredPaths = [ ".github" "target" "book" ]; 23 | 24 | in rustPlatform.buildRustPackage rec { 25 | pname = "attic"; 26 | version = "0.1.0"; 27 | 28 | src = lib.cleanSourceWith { 29 | filter = name: type: !(type == "directory" && builtins.elem (baseNameOf name) ignoredPaths); 30 | src = lib.cleanSource ./.; 31 | }; 32 | 33 | nativeBuildInputs = [ 34 | pkg-config 35 | installShellFiles 36 | ]; 37 | 38 | buildInputs = [ 39 | nix boost 40 | ] ++ lib.optionals stdenv.isDarwin (with darwin.apple_sdk.frameworks; [ 41 | SystemConfiguration 42 | ]); 43 | 44 | cargoLock = { 45 | lockFile = ./Cargo.lock; 46 | allowBuiltinFetchGit = true; 47 | }; 48 | cargoBuildFlags = lib.concatMapStrings (c: "-p ${c} ") crates; 49 | 50 | ATTIC_DISTRIBUTOR = "attic"; 51 | 52 | # See comment in `attic/build.rs` 53 | NIX_INCLUDE_PATH = "${lib.getDev nix}/include"; 54 | 55 | # Recursive Nix is not stable yet 56 | doCheck = false; 57 | 58 | postInstall = lib.optionalString (stdenv.hostPlatform == stdenv.buildPlatform) '' 59 | if [[ -f $out/bin/attic ]]; then 60 | installShellCompletion --cmd attic \ 61 | --bash <($out/bin/attic gen-completions bash) \ 62 | --zsh <($out/bin/attic gen-completions zsh) \ 63 | --fish <($out/bin/attic gen-completions fish) 64 | fi 65 | ''; 66 | 67 | meta = with lib; { 68 | description = "Multi-tenant Nix binary cache system"; 69 | homepage = "https://github.com/zhaofengli/attic"; 70 | license = licenses.asl20; 71 | maintainers = with maintainers; [ zhaofengli ]; 72 | platforms = platforms.linux ++ platforms.darwin; 73 | mainProgram = "attic"; 74 | }; 75 | } 76 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended", 5 | 
"group:allNonMajor" 6 | ], 7 | "lockFileMaintenance": { 8 | "enabled": true, 9 | "extends": ["schedule:weekly"] 10 | }, 11 | "nix": { 12 | "enabled": true 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "attic-server" 3 | version = "0.1.0" 4 | edition = "2021" 5 | publish = false 6 | 7 | [lib] 8 | name = "attic_server" 9 | path = "src/lib.rs" 10 | 11 | [[bin]] 12 | name = "atticd" 13 | path = "src/main.rs" 14 | doc = false 15 | 16 | [[bin]] 17 | name = "atticadm" 18 | path = "src/adm/main.rs" 19 | doc = false 20 | 21 | [dependencies] 22 | attic = { path = "../attic", default-features = false, features = ["chunking", "stream", "tokio"] } 23 | attic-token = { path = "../token" } 24 | 25 | anyhow = "1.0.71" 26 | async-stream = "0.3.5" 27 | async-trait = "0.1.68" 28 | aws-config = "1.5.0" 29 | aws-sdk-s3 = "1.32.0" 30 | axum = "0.7.5" 31 | axum-macros = "0.4.1" 32 | base64 = "0.22.1" 33 | bytes = "1.4.0" 34 | chrono = "0.4.24" 35 | clap = { version = "4.3", features = ["derive"] } 36 | derivative = "2.2.0" 37 | digest = "0.10.7" 38 | displaydoc = "0.2.4" 39 | enum-as-inner = "0.6.0" 40 | futures = "0.3.28" 41 | hex = "0.4.3" 42 | http-body-util = "0.1.1" 43 | humantime = "2.1.0" 44 | humantime-serde = "1.1.1" 45 | itoa = "=1.0.5" 46 | maybe-owned = "0.3.4" 47 | rand = "0.8.5" 48 | regex = "1.8.3" 49 | ryu = "1.0.13" 50 | sha2 = { version = "0.10.6", features = ["asm"] } 51 | serde = "1.0.163" 52 | serde_json = "1.0.96" 53 | serde_with = "3.0.0" 54 | tokio-util = { version = "0.7.8", features = [ "io" ] } 55 | toml = "0.8.8" 56 | tower-http = { version = "0.5.2", features = [ "catch-panic", "trace" ] } 57 | tracing = "0.1.37" 58 | tracing-error = "0.2.0" 59 | tracing-subscriber = { version = "0.3.17", features = [ "json" ] } 60 | uuid = { version = "1.3.3", features = ["v4"] } 61 | 
console-subscriber = "0.2.0" 62 | xdg = "2.5.0" 63 | rsa = "0.9.3" 64 | 65 | [dependencies.async-compression] 66 | version = "0.4.0" 67 | features = [ 68 | "tokio", 69 | "xz", 70 | "zstd", 71 | "brotli", 72 | ] 73 | 74 | [dependencies.sea-orm] 75 | version = "0.12.10" 76 | features = [ 77 | "runtime-tokio-rustls", 78 | "macros", 79 | "sqlx-postgres", 80 | "sqlx-sqlite", 81 | "debug-print", 82 | ] 83 | 84 | [dependencies.sea-orm-migration] 85 | version = "0.12.10" 86 | 87 | [dependencies.tokio] 88 | version = "1.28.2" 89 | features = [ 90 | "fs", 91 | "io-util", 92 | "macros", 93 | "process", 94 | "rt", 95 | "rt-multi-thread", 96 | "sync", 97 | ] 98 | -------------------------------------------------------------------------------- /server/src/access/http.rs: -------------------------------------------------------------------------------- 1 | //! HTTP middlewares for access control. 2 | 3 | use attic::cache::CacheName; 4 | use attic_token::util::parse_authorization_header; 5 | use axum::{extract::Request, middleware::Next, response::Response}; 6 | use sea_orm::DatabaseConnection; 7 | use tokio::sync::OnceCell; 8 | 9 | use crate::access::{CachePermission, Token}; 10 | use crate::database::{entity::cache::CacheModel, AtticDatabase}; 11 | use crate::error::ServerResult; 12 | use crate::{RequestState, State}; 13 | 14 | /// Auth state. 15 | #[derive(Debug)] 16 | pub struct AuthState { 17 | /// The JWT token. 18 | pub token: OnceCell, 19 | } 20 | 21 | impl AuthState { 22 | /// Returns an auth state with no authenticated user and no permissions. 23 | pub fn new() -> Self { 24 | Self { 25 | token: OnceCell::new(), 26 | } 27 | } 28 | 29 | /// Returns the username if it exists. 30 | /// 31 | /// Currently it's the `sub` claim of the JWT. 32 | pub fn username(&self) -> Option<&str> { 33 | self.token.get().and_then(|token| token.sub()) 34 | } 35 | 36 | /// Finds and performs authorization for a cache. 
37 | pub async fn auth_cache( 38 | &self, 39 | database: &DatabaseConnection, 40 | cache_name: &CacheName, 41 | f: F, 42 | ) -> ServerResult 43 | where 44 | F: FnOnce(CacheModel, &mut CachePermission) -> ServerResult, 45 | { 46 | let mut permission = if let Some(token) = self.token.get() { 47 | token.get_permission_for_cache(cache_name) 48 | } else { 49 | CachePermission::default() 50 | }; 51 | 52 | let cache = match database.find_cache(cache_name).await { 53 | Ok(d) => { 54 | if d.is_public { 55 | permission.add_public_permissions(); 56 | } 57 | 58 | d 59 | } 60 | Err(mut e) => { 61 | e.set_discovery_permission(permission.can_discover()); 62 | return Err(e); 63 | } 64 | }; 65 | 66 | match f(cache, &mut permission) { 67 | Ok(t) => Ok(t), 68 | Err(mut e) => { 69 | e.set_discovery_permission(permission.can_discover()); 70 | Err(e) 71 | } 72 | } 73 | } 74 | 75 | /// Returns permission granted for a cache. 76 | pub fn get_permission_for_cache( 77 | &self, 78 | cache: &CacheName, 79 | grant_public_permissions: bool, 80 | ) -> CachePermission { 81 | let mut permission = if let Some(token) = self.token.get() { 82 | token.get_permission_for_cache(cache) 83 | } else { 84 | CachePermission::default() 85 | }; 86 | 87 | if grant_public_permissions { 88 | permission.add_public_permissions(); 89 | } 90 | 91 | permission 92 | } 93 | } 94 | 95 | /// Performs auth. 
96 | pub async fn apply_auth(req: Request, next: Next) -> Response { 97 | let token: Option = req 98 | .headers() 99 | .get("Authorization") 100 | .and_then(|bytes| bytes.to_str().ok()) 101 | .and_then(parse_authorization_header) 102 | .and_then(|jwt| { 103 | let state = req.extensions().get::().unwrap(); 104 | let signature_type = state.config.jwt.signing_config.clone().into(); 105 | 106 | let res_token = Token::from_jwt( 107 | &jwt, 108 | &signature_type, 109 | &state.config.jwt.token_bound_issuer, 110 | &state.config.jwt.token_bound_audiences, 111 | ); 112 | 113 | if let Err(e) = &res_token { 114 | tracing::debug!("Ignoring bad JWT token: {}", e); 115 | } 116 | 117 | res_token.ok() 118 | }); 119 | 120 | if let Some(token) = token { 121 | let req_state = req.extensions().get::().unwrap(); 122 | req_state.auth.token.set(token).unwrap(); 123 | tracing::trace!("Added valid token"); 124 | } 125 | 126 | next.run(req).await 127 | } 128 | -------------------------------------------------------------------------------- /server/src/access/mod.rs: -------------------------------------------------------------------------------- 1 | //! Access control. 2 | //! 3 | //! See [attic_token] for more details. 4 | 5 | pub mod http; 6 | 7 | pub use attic_token::*; 8 | -------------------------------------------------------------------------------- /server/src/access/tests.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | use attic::cache::CacheName; 4 | 5 | macro_rules! 
cache { 6 | ($n:expr) => { 7 | CacheName::new($n.to_string()).unwrap() 8 | }; 9 | } 10 | 11 | #[test] 12 | fn test_basic() { 13 | // "very secure secret" 14 | let base64_secret = "dmVyeSBzZWN1cmUgc2VjcmV0"; 15 | 16 | let dec_key = 17 | JwtDecodingKey::from_base64_secret(base64_secret).expect("Could not import decoding key"); 18 | 19 | /* 20 | { 21 | "sub": "meow", 22 | "exp": 4102324986, 23 | "https://jwt.attic.rs/v1": { 24 | "caches": { 25 | "cache-rw": {"r":1,"w":1}, 26 | "cache-ro": {"r":1}, 27 | "team-*": {"r":1,"w":1,"cc":1} 28 | } 29 | } 30 | } 31 | */ 32 | 33 | let token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJtZW93IiwiZXhwIjo0MTAyMzI0OTg2LCJodHRwczovL2p3dC5hdHRpYy5ycy92MSI6eyJjYWNoZXMiOnsiY2FjaGUtcnciOnsiciI6MSwidyI6MX0sImNhY2hlLXJvIjp7InIiOjF9LCJ0ZWFtLSoiOnsiciI6MSwidyI6MSwiY2MiOjF9fX19.UlsIM9bQHr9SXGAcSQcoVPo9No8Zhh6Y5xfX8vCmKmA"; 34 | 35 | let decoded = Token::from_jwt(token, &dec_key).unwrap(); 36 | 37 | let perm_rw = decoded.get_permission_for_cache(&cache! { "cache-rw" }); 38 | 39 | assert!(perm_rw.pull); 40 | assert!(perm_rw.push); 41 | assert!(!perm_rw.delete); 42 | assert!(!perm_rw.create_cache); 43 | 44 | assert!(perm_rw.require_pull().is_ok()); 45 | assert!(perm_rw.require_push().is_ok()); 46 | assert!(perm_rw.require_delete().is_err()); 47 | assert!(perm_rw.require_create_cache().is_err()); 48 | 49 | let perm_ro = decoded.get_permission_for_cache(&cache! { "cache-ro" }); 50 | 51 | assert!(perm_ro.pull); 52 | assert!(!perm_ro.push); 53 | assert!(!perm_ro.delete); 54 | assert!(!perm_ro.create_cache); 55 | 56 | assert!(perm_ro.require_pull().is_ok()); 57 | assert!(perm_ro.require_push().is_err()); 58 | assert!(perm_ro.require_delete().is_err()); 59 | assert!(perm_ro.require_create_cache().is_err()); 60 | 61 | let perm_team = decoded.get_permission_for_cache(&cache! 
{ "team-xyz" }); 62 | 63 | assert!(perm_team.pull); 64 | assert!(perm_team.push); 65 | assert!(!perm_team.delete); 66 | assert!(perm_team.create_cache); 67 | 68 | assert!(perm_team.require_pull().is_ok()); 69 | assert!(perm_team.require_push().is_ok()); 70 | assert!(perm_team.require_delete().is_err()); 71 | assert!(perm_team.require_create_cache().is_ok()); 72 | 73 | assert!(!decoded 74 | .get_permission_for_cache(&cache! { "forbidden-cache" }) 75 | .can_discover()); 76 | } 77 | -------------------------------------------------------------------------------- /server/src/adm/command/make_token.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Result}; 2 | use chrono::{Duration as ChronoDuration, Utc}; 3 | use clap::Parser; 4 | use humantime::Duration; 5 | 6 | use crate::Opts; 7 | use attic::cache::CacheNamePattern; 8 | use attic_server::access::Token; 9 | use attic_server::config::Config; 10 | 11 | /// Generate a new token. 12 | /// 13 | /// For example, to generate a token for Alice with read-write access 14 | /// to any cache starting with `dev-` and read-only access to `prod`, 15 | /// expiring in 2 years: 16 | /// 17 | /// $ atticadm make-token --sub "alice" --validity "2y" --pull "dev-*" --push "dev-*" --pull "prod" 18 | #[derive(Debug, Parser)] 19 | pub struct MakeToken { 20 | /// The subject of the JWT token. 21 | #[clap(long)] 22 | sub: String, 23 | 24 | /// The validity period of the JWT token. 25 | /// 26 | /// You can use expressions like "2 years", "3 months" 27 | /// and "1y". 28 | #[clap(long)] 29 | validity: Duration, 30 | 31 | /// Dump the claims without signing and encoding it. 32 | #[clap(long)] 33 | dump_claims: bool, 34 | 35 | /// A cache that the token may pull from. 36 | /// 37 | /// The value may contain wildcards. Specify this flag multiple 38 | /// times to allow multiple patterns. 
39 | #[clap(long = "pull", value_name = "PATTERN")] 40 | pull_patterns: Vec, 41 | 42 | /// A cache that the token may push to. 43 | /// 44 | /// The value may contain wildcards. Specify this flag multiple 45 | /// times to allow multiple patterns. 46 | #[clap(long = "push", value_name = "PATTERN")] 47 | push_patterns: Vec, 48 | 49 | /// A cache that the token may delete store paths from. 50 | /// 51 | /// The value may contain wildcards. Specify this flag multiple 52 | /// times to allow multiple patterns. 53 | #[clap(long = "delete", value_name = "PATTERN")] 54 | delete_patterns: Vec, 55 | 56 | /// A cache that the token may create. 57 | /// 58 | /// The value may contain wildcards. Specify this flag multiple 59 | /// times to allow multiple patterns. 60 | #[clap(long = "create-cache", value_name = "PATTERN")] 61 | create_cache_patterns: Vec, 62 | 63 | /// A cache that the token may configure. 64 | /// 65 | /// The value may contain wildcards. Specify this flag multiple 66 | /// times to allow multiple patterns. 67 | #[clap(long = "configure-cache", value_name = "PATTERN")] 68 | configure_cache_patterns: Vec, 69 | 70 | /// A cache that the token may configure retention/quota for. 71 | /// 72 | /// The value may contain wildcards. Specify this flag multiple 73 | /// times to allow multiple patterns. 74 | #[clap(long = "configure-cache-retention", value_name = "PATTERN")] 75 | configure_cache_retention_patterns: Vec, 76 | 77 | /// A cache that the token may destroy. 78 | /// 79 | /// The value may contain wildcards. Specify this flag multiple 80 | /// times to allow multiple patterns. 81 | #[clap(long = "destroy-cache", value_name = "PATTERN")] 82 | destroy_cache_patterns: Vec, 83 | } 84 | 85 | macro_rules! 
grant_permissions { 86 | ($token:ident, $list:expr, $perm:ident) => { 87 | for pattern in $list { 88 | let perm = $token.get_or_insert_permission_mut(pattern.to_owned()); 89 | perm.$perm = true; 90 | } 91 | }; 92 | } 93 | 94 | pub async fn run(config: Config, opts: Opts) -> Result<()> { 95 | let sub = opts.command.as_make_token().unwrap(); 96 | let duration = ChronoDuration::from_std(sub.validity.into())?; 97 | let exp = Utc::now() 98 | .checked_add_signed(duration) 99 | .ok_or_else(|| anyhow!("Expiry timestamp overflowed"))?; 100 | 101 | let mut token = Token::new(sub.sub.to_owned(), &exp); 102 | 103 | grant_permissions!(token, &sub.pull_patterns, pull); 104 | grant_permissions!(token, &sub.push_patterns, push); 105 | grant_permissions!(token, &sub.delete_patterns, delete); 106 | grant_permissions!(token, &sub.create_cache_patterns, create_cache); 107 | grant_permissions!(token, &sub.configure_cache_patterns, configure_cache); 108 | grant_permissions!( 109 | token, 110 | &sub.configure_cache_retention_patterns, 111 | configure_cache_retention 112 | ); 113 | grant_permissions!(token, &sub.destroy_cache_patterns, destroy_cache); 114 | 115 | if sub.dump_claims { 116 | println!("{}", serde_json::to_string(token.opaque_claims())?); 117 | } else { 118 | let signature_type = config.jwt.signing_config.into(); 119 | 120 | let encoded_token = token.encode( 121 | &signature_type, 122 | &config.jwt.token_bound_issuer, 123 | &config.jwt.token_bound_audiences, 124 | )?; 125 | println!("{}", encoded_token); 126 | } 127 | 128 | Ok(()) 129 | } 130 | -------------------------------------------------------------------------------- /server/src/adm/command/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod make_token; 2 | -------------------------------------------------------------------------------- /server/src/adm/main.rs: -------------------------------------------------------------------------------- 1 | mod command; 2 | 3 | use 
std::path::PathBuf; 4 | 5 | use anyhow::Result; 6 | use clap::{Parser, Subcommand}; 7 | use enum_as_inner::EnumAsInner; 8 | 9 | use attic_server::config; 10 | use command::make_token::{self, MakeToken}; 11 | 12 | /// Attic server administration utilities. 13 | #[derive(Debug, Parser)] 14 | #[clap(version, author = "Zhaofeng Li ")] 15 | #[clap(propagate_version = true)] 16 | pub struct Opts { 17 | /// Path to the config file. 18 | #[clap(short = 'f', long, global = true)] 19 | config: Option, 20 | 21 | /// The sub-command. 22 | #[clap(subcommand)] 23 | pub command: Command, 24 | } 25 | 26 | #[derive(Debug, Subcommand, EnumAsInner)] 27 | pub enum Command { 28 | MakeToken(MakeToken), 29 | } 30 | 31 | #[tokio::main] 32 | async fn main() -> Result<()> { 33 | let opts = Opts::parse(); 34 | let config = config::load_config(opts.config.as_deref(), false).await?; 35 | 36 | match opts.command { 37 | Command::MakeToken(_) => make_token::run(config, opts).await?, 38 | } 39 | 40 | Ok(()) 41 | } 42 | -------------------------------------------------------------------------------- /server/src/api/mod.rs: -------------------------------------------------------------------------------- 1 | //! HTTP API. 2 | 3 | mod binary_cache; 4 | mod v1; 5 | 6 | use axum::{response::Html, routing::get, Router}; 7 | 8 | async fn placeholder() -> Html<&'static str> { 9 | Html(include_str!("placeholder.html")) 10 | } 11 | 12 | pub(crate) fn get_router() -> Router { 13 | Router::new() 14 | .route("/", get(placeholder)) 15 | .merge(binary_cache::get_router()) 16 | .merge(v1::get_router()) 17 | } 18 | -------------------------------------------------------------------------------- /server/src/api/placeholder.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Attic Binary Cache 5 | 21 | 22 | 23 |
24 | ┏━━━━━━━━━━━━━━━━┓
25 | ┃┏━━━ @ ━━━ @ ━━┓┃
26 | ┃┃              ┃┃
27 | ┃┃$ attic push 
┃┃ 28 | ┃┃ ┃┃ 29 | ┃┗━━━ ╰─────╯ ━━┛┃ 30 | ┗━━━━━━━━━━━━━━━━┛ 31 | ╲ ############### ╲ 32 | ╲ ############### ╲ 33 | ╲ ############### ╲ 34 | ━━━━━━━━━━━━━━━━━━ 35 |
36 | 37 | 38 | -------------------------------------------------------------------------------- /server/src/api/v1/get_missing_paths.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | 3 | use axum::extract::{Extension, Json}; 4 | use sea_orm::entity::prelude::*; 5 | use sea_orm::{FromQueryResult, QuerySelect}; 6 | use tracing::instrument; 7 | 8 | use crate::database::entity::cache; 9 | use crate::database::entity::nar; 10 | use crate::database::entity::object::{self, Entity as Object}; 11 | use crate::error::{ServerError, ServerResult}; 12 | use crate::{RequestState, State}; 13 | use attic::api::v1::get_missing_paths::{GetMissingPathsRequest, GetMissingPathsResponse}; 14 | use attic::nix_store::StorePathHash; 15 | 16 | #[derive(FromQueryResult)] 17 | struct StorePathHashOnly { 18 | store_path_hash: String, 19 | } 20 | 21 | /// Gets information on missing paths in a cache. 22 | /// 23 | /// Requires "push" permission as it essentially allows probing 24 | /// of cache contents. 
#[instrument(skip_all, fields(payload))]
pub(crate) async fn get_missing_paths(
    Extension(state): Extension<State>,
    Extension(req_state): Extension<RequestState>,
    Json(payload): Json<GetMissingPathsRequest>,
) -> ServerResult<Json<GetMissingPathsResponse>> {
    let database = state.database().await?;

    // Requires the "push" permission on the target cache.
    req_state
        .auth
        .auth_cache(database, &payload.cache, |_, permission| {
            permission.require_push()?;
            Ok(())
        })
        .await?;

    let requested_hashes: HashSet<String> = payload
        .store_path_hashes
        .iter()
        .map(|h| h.as_str().to_owned())
        .collect();

    let query_in = requested_hashes.iter().map(|h| Value::from(h.to_owned()));

    // Select only the store-path hashes of objects in this cache whose
    // NARs are hinted to be complete (all chunks available).
    let result: Vec<StorePathHashOnly> = Object::find()
        .select_only()
        .column_as(object::Column::StorePathHash, "store_path_hash")
        .join(sea_orm::JoinType::InnerJoin, object::Relation::Cache.def())
        .join(sea_orm::JoinType::InnerJoin, object::Relation::Nar.def())
        .filter(cache::Column::Name.eq(payload.cache.as_str()))
        .filter(object::Column::StorePathHash.is_in(query_in))
        .filter(nar::Column::CompletenessHint.eq(true))
        .into_model::<StorePathHashOnly>()
        .all(database)
        .await
        .map_err(ServerError::database_error)?;

    let found_hashes: HashSet<String> =
        result.into_iter().map(|row| row.store_path_hash).collect();

    // Safety: All requested_hashes are validated `StorePathHash`es.
64 | // No need to pay the cost of checking again 65 | #[allow(unsafe_code)] 66 | let missing_paths = requested_hashes 67 | .difference(&found_hashes) 68 | .map(|h| unsafe { StorePathHash::new_unchecked(h.to_string()) }) 69 | .collect(); 70 | 71 | Ok(Json(GetMissingPathsResponse { missing_paths })) 72 | } 73 | -------------------------------------------------------------------------------- /server/src/api/v1/mod.rs: -------------------------------------------------------------------------------- 1 | mod cache_config; 2 | mod get_missing_paths; 3 | mod upload_path; 4 | 5 | use axum::{ 6 | routing::{delete, get, patch, post, put}, 7 | Router, 8 | }; 9 | 10 | pub(crate) fn get_router() -> Router { 11 | Router::new() 12 | .route( 13 | "/_api/v1/get-missing-paths", 14 | post(get_missing_paths::get_missing_paths), 15 | ) 16 | .route("/_api/v1/upload-path", put(upload_path::upload_path)) 17 | .route( 18 | "/:cache/attic-cache-info", 19 | get(cache_config::get_cache_config), 20 | ) 21 | .route( 22 | "/_api/v1/cache-config/:cache", 23 | get(cache_config::get_cache_config), 24 | ) 25 | .route( 26 | "/_api/v1/cache-config/:cache", 27 | post(cache_config::create_cache), 28 | ) 29 | .route( 30 | "/_api/v1/cache-config/:cache", 31 | patch(cache_config::configure_cache), 32 | ) 33 | .route( 34 | "/_api/v1/cache-config/:cache", 35 | delete(cache_config::destroy_cache), 36 | ) 37 | } 38 | -------------------------------------------------------------------------------- /server/src/database/entity/cache.rs: -------------------------------------------------------------------------------- 1 | //! A binary cache. 2 | 3 | use sea_orm::entity::prelude::*; 4 | 5 | use super::Json; 6 | use attic::error::AtticResult; 7 | use attic::signing::NixKeypair; 8 | 9 | pub type CacheModel = Model; 10 | 11 | /// A binary cache. 12 | #[derive(Debug, Clone, PartialEq, Eq, DeriveEntityModel)] 13 | #[sea_orm(table_name = "cache")] 14 | pub struct Model { 15 | /// Unique numeric ID of the cache. 
16 | #[sea_orm(primary_key)] 17 | pub id: i64, 18 | 19 | /// Unique name of the cache. 20 | #[sea_orm(column_type = "String(Some(50))", unique, indexed)] 21 | pub name: String, 22 | 23 | /// Signing keypair for the cache. 24 | pub keypair: String, 25 | 26 | /// Whether the cache is public or not. 27 | /// 28 | /// Anonymous clients are implicitly granted the "pull" 29 | /// permission to public caches. 30 | pub is_public: bool, 31 | 32 | /// The Nix store path this binary cache uses. 33 | pub store_dir: String, 34 | 35 | /// The priority of the binary cache. 36 | /// 37 | /// A lower number denotes a higher priority. 38 | /// has a priority of 40. 39 | pub priority: i32, 40 | 41 | /// A list of signing key names for upstream caches. 42 | pub upstream_cache_key_names: Json>, 43 | 44 | /// Timestamp when the binary cache is created. 45 | pub created_at: ChronoDateTimeUtc, 46 | 47 | /// Timestamp when the binary cache is deleted. 48 | pub deleted_at: Option, 49 | 50 | /// The retention period of the cache, in seconds. 51 | pub retention_period: Option, 52 | } 53 | 54 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 55 | pub enum Relation { 56 | #[sea_orm(has_many = "super::object::Entity")] 57 | Object, 58 | } 59 | 60 | impl Model { 61 | pub fn keypair(&self) -> AtticResult { 62 | NixKeypair::from_str(&self.keypair) 63 | } 64 | } 65 | 66 | impl Related for Entity { 67 | fn to() -> RelationDef { 68 | Relation::Object.def() 69 | } 70 | } 71 | 72 | impl ActiveModelBehavior for ActiveModel {} 73 | -------------------------------------------------------------------------------- /server/src/database/entity/chunk.rs: -------------------------------------------------------------------------------- 1 | //! A content-addressed chunk in the global chunk store. 2 | 3 | use sea_orm::entity::prelude::*; 4 | 5 | use super::Json; 6 | use crate::storage::RemoteFile; 7 | 8 | pub type ChunkModel = Model; 9 | 10 | /// The state of a chunk. 
11 | #[derive(EnumIter, DeriveActiveEnum, Debug, Clone, PartialEq, Eq)] 12 | #[sea_orm(rs_type = "String", db_type = "String(Some(1))")] 13 | pub enum ChunkState { 14 | /// The chunk can be used. 15 | /// 16 | /// The raw and compressed hashes are available. 17 | #[sea_orm(string_value = "V")] 18 | Valid, 19 | 20 | /// The chunk is a pending upload. 21 | /// 22 | /// The raw and compressed hashes may not be available. 23 | #[sea_orm(string_value = "P")] 24 | PendingUpload, 25 | 26 | /// The chunk can be deleted because it already exists. 27 | /// 28 | /// This state can be transitioned into from `PendingUpload` 29 | /// if some other client completes uploading the same chunk 30 | /// faster. 31 | #[sea_orm(string_value = "C")] 32 | ConfirmedDeduplicated, 33 | 34 | /// The chunk is being deleted. 35 | /// 36 | /// This row will be deleted shortly. 37 | #[sea_orm(string_value = "D")] 38 | Deleted, 39 | } 40 | 41 | /// A content-addressed chunk in the global cache. 42 | #[derive(Debug, Clone, PartialEq, Eq, DeriveEntityModel)] 43 | #[sea_orm(table_name = "chunk")] 44 | pub struct Model { 45 | /// Unique numeric ID of the chunk. 46 | #[sea_orm(primary_key)] 47 | pub id: i64, 48 | 49 | /// The state of the chunk. 50 | state: ChunkState, 51 | 52 | /// The hash of the uncompressed chunk. 53 | /// 54 | /// This always begins with "sha256:" with the hash in the 55 | /// hexadecimal format. 56 | /// 57 | /// The global chunk store may have several chunks with the same 58 | /// hash: 59 | /// 60 | /// - Racing uploads from different clients 61 | /// - Different compression methods 62 | #[sea_orm(indexed)] 63 | pub chunk_hash: String, 64 | 65 | /// The size of the uncompressed chunk. 66 | pub chunk_size: i64, 67 | 68 | /// The hash of the compressed chunk. 69 | /// 70 | /// This always begins with "sha256:" with the hash in the 71 | /// hexadecimal format. 72 | /// 73 | /// This field may not be available if the file hashes aren't 74 | /// confirmed. 
75 | pub file_hash: Option, 76 | 77 | /// The size of the compressed chunk. 78 | /// 79 | /// This field may not be available if the file hashes aren't 80 | /// confirmed. 81 | pub file_size: Option, 82 | 83 | /// The type of compression in use. 84 | #[sea_orm(column_type = "String(Some(10))")] 85 | pub compression: String, 86 | 87 | /// The remote file backing this chunk. 88 | pub remote_file: Json, 89 | 90 | /// Unique string identifying the remote file. 91 | #[sea_orm(unique)] 92 | pub remote_file_id: String, 93 | 94 | /// Number of processes holding this chunk. 95 | /// 96 | /// This is for preventing garbage collection of chunks when 97 | /// there is a pending upload that can be deduplicated and 98 | /// there are no existing NAR references. 99 | pub holders_count: i32, 100 | 101 | /// Timestamp when the chunk is created. 102 | pub created_at: ChronoDateTimeUtc, 103 | } 104 | 105 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 106 | pub enum Relation { 107 | #[sea_orm(has_many = "super::chunkref::Entity")] 108 | ChunkRef, 109 | } 110 | 111 | impl ActiveModelBehavior for ActiveModel {} 112 | -------------------------------------------------------------------------------- /server/src/database/entity/chunkref.rs: -------------------------------------------------------------------------------- 1 | //! A reference binding a NAR and a chunk. 2 | //! 3 | //! A NAR is backed by a sequence of chunks. 4 | //! 5 | //! A chunk may become unavailable (e.g., disk corruption) and 6 | //! removed from the database, in which case all dependent NARs 7 | //! will become unavailable. 8 | //! 9 | //! Such scenario can be recovered from by reuploading any object 10 | //! that has the missing chunk. `atticadm` will have the functionality 11 | //! to kill/delete a corrupted chunk from the database and to find 12 | //! objects with missing chunks so they can be repaired. 
13 | 14 | use sea_orm::entity::prelude::*; 15 | 16 | pub type ChunkRefModel = Model; 17 | 18 | /// A reference binding a NAR to a chunk. 19 | #[derive(Debug, Clone, PartialEq, Eq, DeriveEntityModel)] 20 | #[sea_orm(table_name = "chunkref")] 21 | pub struct Model { 22 | /// Unique numeric ID of the link. 23 | #[sea_orm(primary_key)] 24 | pub id: i64, 25 | 26 | /// ID of the NAR. 27 | #[sea_orm(indexed)] 28 | pub nar_id: i64, 29 | 30 | /// The zero-indexed sequence number of the chunk. 31 | pub seq: i32, 32 | 33 | /// ID of the chunk. 34 | /// 35 | /// This may be NULL when the chunk is missing from the 36 | /// database. 37 | #[sea_orm(indexed)] 38 | pub chunk_id: Option, 39 | 40 | /// The hash of the uncompressed chunk. 41 | /// 42 | /// This always begins with "sha256:" with the hash in the 43 | /// hexadecimal format. 44 | /// 45 | /// This is used for recovering from a missing chunk. 46 | #[sea_orm(indexed)] 47 | pub chunk_hash: String, 48 | 49 | /// The compression of the compressed chunk. 50 | /// 51 | /// This is used for recovering from a missing chunk. 52 | pub compression: String, 53 | } 54 | 55 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 56 | pub enum Relation { 57 | #[sea_orm( 58 | belongs_to = "super::chunk::Entity", 59 | from = "Column::ChunkId", 60 | to = "super::chunk::Column::Id" 61 | )] 62 | Chunk, 63 | 64 | #[sea_orm( 65 | belongs_to = "super::nar::Entity", 66 | from = "Column::NarId", 67 | to = "super::nar::Column::Id" 68 | )] 69 | Nar, 70 | } 71 | 72 | impl Related for Entity { 73 | fn to() -> RelationDef { 74 | Relation::Chunk.def() 75 | } 76 | } 77 | 78 | impl Related for Entity { 79 | fn to() -> RelationDef { 80 | Relation::Nar.def() 81 | } 82 | } 83 | 84 | impl ActiveModelBehavior for ActiveModel {} 85 | -------------------------------------------------------------------------------- /server/src/database/entity/mod.rs: -------------------------------------------------------------------------------- 1 | //! Database entities. 
2 | //! 3 | //! We use SeaORM and target PostgreSQL (production) and SQLite (development). 4 | 5 | pub mod cache; 6 | pub mod chunk; 7 | pub mod chunkref; 8 | pub mod nar; 9 | pub mod object; 10 | 11 | use sea_orm::entity::Value; 12 | use sea_orm::sea_query::{ArrayType, ColumnType, ValueType, ValueTypeErr}; 13 | use sea_orm::{DbErr, QueryResult, TryGetError, TryGetable}; 14 | use serde::{de::DeserializeOwned, Deserialize, Serialize}; 15 | 16 | // A more generic version of https://github.com/SeaQL/sea-orm/pull/783 17 | 18 | /// A value that is stored in the database as JSON. 19 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 20 | #[serde(transparent)] 21 | pub struct Json(pub T); 22 | 23 | impl From> for Value { 24 | fn from(value: Json) -> Self { 25 | let opt = serde_json::to_string(&value).ok().map(Box::new); 26 | 27 | Value::String(opt) 28 | } 29 | } 30 | 31 | impl TryGetable for Json { 32 | fn try_get_by(res: &QueryResult, idx: I) -> Result { 33 | let json_str: String = res.try_get_by(idx).map_err(TryGetError::DbErr)?; 34 | 35 | serde_json::from_str(&json_str).map_err(|e| TryGetError::DbErr(DbErr::Json(e.to_string()))) 36 | } 37 | } 38 | 39 | impl ValueType for Json { 40 | fn try_from(v: Value) -> Result { 41 | match v { 42 | Value::String(Some(x)) => Ok(Json(serde_json::from_str(&x).map_err(|_| ValueTypeErr)?)), 43 | _ => Err(ValueTypeErr), 44 | } 45 | } 46 | 47 | fn type_name() -> String { 48 | stringify!(Json).to_owned() 49 | } 50 | 51 | fn column_type() -> ColumnType { 52 | ColumnType::String(None) 53 | } 54 | 55 | fn array_type() -> ArrayType { 56 | ArrayType::String 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /server/src/database/entity/nar.rs: -------------------------------------------------------------------------------- 1 | //! A content-addressed NAR in the global cache. 2 | 3 | use sea_orm::entity::prelude::*; 4 | 5 | pub type NarModel = Model; 6 | 7 | /// The state of a NAR. 
#[derive(EnumIter, DeriveActiveEnum, Debug, Clone, PartialEq, Eq)]
#[sea_orm(rs_type = "String", db_type = "String(Some(1))")]
pub enum NarState {
    /// The NAR can be used.
    ///
    /// The NAR and file hashes have been confirmed.
    #[sea_orm(string_value = "V")]
    Valid,

    /// The NAR is a pending upload.
    ///
    /// The NAR and file hashes aren't trusted and may
    /// not be available.
    #[sea_orm(string_value = "P")]
    PendingUpload,

    /// The NAR can be deleted because it already exists.
    ///
    /// This state can be transitioned into from `PendingUpload`
    /// if some other client completes uploading the same NAR
    /// faster.
    #[sea_orm(string_value = "C")]
    ConfirmedDeduplicated,

    /// The NAR is being deleted.
    ///
    /// This row will be deleted shortly.
    /// This variant is no longer used since the actual storage is managed as chunks.
    #[sea_orm(string_value = "D")]
    Deleted,
}

/// A content-addressed NAR in the global cache.
///
/// A NAR without `nix-store --export` metadata is context-free,
/// meaning that it's not associated with a store path and only
/// depends on its contents.
///
/// ## NAR Repair
///
/// After a NAR is transitioned into the `Valid` state, its list
/// of constituent chunks in `chunkref` is immutable. When a client
/// uploads an existing NAR and the NAR has unavailable chunks,
/// a new `nar` entry is created and all dependent `object` rows
/// will have the `nar_id` updated. The old `nar` entry will
/// be garbage-collected.
///
/// Why don't we just fill in the missing chunks in the existing
/// `nar`? Because the NAR stream from the client _might_ be chunked
/// differently. This is not supposed to happen since FastCDC
/// has a deterministic lookup table for cut-point judgment, however
/// we want the system to tolerate different chunking behavior because
/// of table changes, for example.
///
/// However, when a chunk is added, all broken `chunkref`s with
/// the same `chunk_hash` _are_ repaired. In other words, by
/// re-uploading a broken NAR you are helping other NARs with
/// the same broken chunk.
#[derive(Debug, Clone, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "nar")]
pub struct Model {
    /// Unique numeric ID of the NAR.
    #[sea_orm(primary_key)]
    pub id: i64,

    /// The state of the NAR archive.
    ///
    /// Made `pub` for consistency: every other column here is public and
    /// the `NarModel` alias is exported for use elsewhere in the crate.
    pub state: NarState,

    /// The hash of the NAR archive.
    ///
    /// This always begins with "sha256:" with the hash in the
    /// hexadecimal format.
    ///
    /// The global cache may have several NARs with the same NAR
    /// hash:
    ///
    /// - Unconfirmed uploads from clients
    /// - Global deduplication is turned off
    #[sea_orm(indexed)]
    pub nar_hash: String,

    /// The size of the NAR archive.
    pub nar_size: i64,

    /// The type of compression in use.
    #[sea_orm(column_type = "String(Some(10))")]
    pub compression: String,

    /// Number of chunks that make up this NAR.
    pub num_chunks: i32,

    /// Hint indicating whether all chunks making up this NAR are available.
    ///
    /// This is used by the `get-missing-paths` endpoint to
    /// also return store paths that are inaccessible due to
    /// missing chunks in the associated NARs. They can then be
    /// repaired by any client uploading.
    ///
    /// This flag may be outdated, but it's okay since when a client
    /// tries to upload the same NAR, it will be immediately deduplicated
    /// if all chunks are present and the flag will be updated.
    pub completeness_hint: bool,

    /// Number of processes holding this NAR.
    ///
    /// This is for preventing garbage collection of NARs when
    /// there is a pending upload that can be deduplicated and
    /// there are no existing object references.
    pub holders_count: i32,

    /// Timestamp when the NAR is created.
    pub created_at: ChronoDateTimeUtc,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_many = "super::object::Entity")]
    Object,

    #[sea_orm(has_many = "super::chunkref::Entity")]
    ChunkRef,
}

impl ActiveModelBehavior for ActiveModel {}

--------------------------------------------------------------------------------
/server/src/database/entity/object.rs:
--------------------------------------------------------------------------------

//! An object in a local cache.
//!
//! It's backed by a NAR in the global cache.

use std::path::PathBuf;
use std::str::FromStr;

use sea_orm::entity::prelude::*;
use sea_orm::sea_query::OnConflict;
use sea_orm::Insert;

use super::nar::NarModel;
use super::Json;
use crate::error::{ServerError, ServerResult};
use crate::narinfo::{Compression, NarInfo};
use attic::hash::Hash;

pub type ObjectModel = Model;

/// Upsert support for object inserts (see the impl further below).
pub trait InsertExt {
    fn on_conflict_do_update(self) -> Self;
}

/// An object in a binary cache.
#[derive(Debug, Clone, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "object")]
pub struct Model {
    /// Unique numeric ID of the object.
    #[sea_orm(primary_key)]
    pub id: i64,

    /// ID of the binary cache the object belongs to.
    #[sea_orm(indexed)]
    pub cache_id: i64,

    /// ID of the NAR this object points to.
    pub nar_id: i64,

    /// The hash portion of the store path.
40 | #[sea_orm(column_type = "String(Some(32))", indexed)] 41 | pub store_path_hash: String, 42 | 43 | /// The full store path being cached, including the store directory. 44 | pub store_path: String, 45 | 46 | /// Other store paths this object directly refereces. 47 | pub references: Json>, 48 | 49 | /// The system this derivation is built for. 50 | pub system: Option, 51 | 52 | /// The derivation that produced this object. 53 | pub deriver: Option, 54 | 55 | /// Client-supplied signatures of this object. 56 | pub sigs: Json>, 57 | 58 | /// The content address of this object. 59 | /// 60 | /// Technically this might belong to the NAR table since it's 61 | /// an inherent property of the content, but there are multiple 62 | /// formats for the CA and the feature isn't stable. So now we 63 | /// simply treat it as a untrusted string. 64 | pub ca: Option, 65 | 66 | /// Timestamp when the object is created. 67 | pub created_at: ChronoDateTimeUtc, 68 | 69 | /// Timestamp when the object is last accessed. 70 | pub last_accessed_at: Option, 71 | 72 | /// The uploader of the object. 73 | /// 74 | /// This is a "username." Currently, it's set to the `sub` claim in 75 | /// the client's JWT. 
76 | pub created_by: Option, 77 | } 78 | 79 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 80 | pub enum Relation { 81 | #[sea_orm( 82 | belongs_to = "super::cache::Entity", 83 | from = "Column::CacheId", 84 | to = "super::cache::Column::Id" 85 | )] 86 | Cache, 87 | 88 | #[sea_orm( 89 | belongs_to = "super::nar::Entity", 90 | from = "Column::NarId", 91 | to = "super::nar::Column::Id" 92 | )] 93 | Nar, 94 | } 95 | 96 | impl InsertExt for Insert { 97 | fn on_conflict_do_update(self) -> Self { 98 | self.on_conflict( 99 | OnConflict::columns([Column::CacheId, Column::StorePathHash]) 100 | .update_columns([ 101 | Column::NarId, 102 | Column::StorePath, 103 | Column::References, 104 | Column::System, 105 | Column::Deriver, 106 | Column::Sigs, 107 | Column::Ca, 108 | Column::CreatedAt, 109 | Column::LastAccessedAt, 110 | Column::CreatedBy, 111 | ]) 112 | .to_owned(), 113 | ) 114 | } 115 | } 116 | 117 | impl Model { 118 | /// Converts this object to a NarInfo. 119 | pub fn to_nar_info(&self, nar: &NarModel) -> ServerResult { 120 | let nar_size = nar 121 | .nar_size 122 | .try_into() 123 | .map_err(ServerError::database_error)?; 124 | 125 | Ok(NarInfo { 126 | store_path: PathBuf::from(self.store_path.to_owned()), 127 | url: format!("nar/{}.nar", self.store_path_hash.as_str()), 128 | 129 | compression: Compression::from_str(&nar.compression)?, 130 | file_hash: None, // FIXME 131 | file_size: None, // FIXME 132 | nar_hash: Hash::from_typed(&nar.nar_hash)?, 133 | nar_size, 134 | system: self.system.to_owned(), 135 | references: self.references.0.to_owned(), 136 | deriver: self.deriver.to_owned(), 137 | signature: None, 138 | ca: self.ca.to_owned(), 139 | }) 140 | } 141 | } 142 | 143 | impl Related for Entity { 144 | fn to() -> RelationDef { 145 | Relation::Cache.def() 146 | } 147 | } 148 | 149 | impl Related for Entity { 150 | fn to() -> RelationDef { 151 | Relation::Nar.def() 152 | } 153 | } 154 | 155 | impl ActiveModelBehavior for ActiveModel {} 156 | 
--------------------------------------------------------------------------------
/server/src/database/migration/m20221227_000001_create_cache_table.rs:
--------------------------------------------------------------------------------

use sea_orm_migration::prelude::*;

use crate::database::entity::cache::*;

pub struct Migration;

impl MigrationName for Migration {
    fn name(&self) -> &str {
        // This string is recorded in `seaql_migrations`; it must never
        // change once a deployment has applied the migration.
        "m20221227_000001_create_cache_table"
    }
}

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    // Creates the `cache` table and a (non-unique) index on the cache name.
    // No `down()` is provided; migrations here are forward-only.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Entity)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Column::Id)
                            .big_integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Column::Name)
                            .string_len(50)
                            .not_null()
                            .unique_key(),
                    )
                    .col(ColumnDef::new(Column::Keypair).string().not_null())
                    .col(ColumnDef::new(Column::IsPublic).boolean().not_null())
                    .col(ColumnDef::new(Column::StoreDir).string().not_null())
                    .col(ColumnDef::new(Column::Priority).integer().not_null())
                    .col(
                        ColumnDef::new(Column::UpstreamCacheKeyNames)
                            .string()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(Column::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .col(
                        // Soft-deletion marker; NULL means the cache is live.
                        ColumnDef::new(Column::DeletedAt)
                            .timestamp_with_time_zone()
                            .null(),
                    )
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx-cache-name")
                    .table(Entity)
                    .col(Column::Name)
                    .to_owned(),
            )
            .await
    }
}

--------------------------------------------------------------------------------
/server/src/database/migration/m20221227_000002_create_nar_table.rs:
--------------------------------------------------------------------------------
use sea_orm_migration::prelude::*; 2 | 3 | use crate::database::entity::nar::*; 4 | 5 | pub struct Migration; 6 | 7 | impl MigrationName for Migration { 8 | fn name(&self) -> &str { 9 | "m20221227_000003_create_nar_table" 10 | } 11 | } 12 | 13 | #[async_trait::async_trait] 14 | impl MigrationTrait for Migration { 15 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 16 | manager 17 | .create_table( 18 | Table::create() 19 | .table(Entity) 20 | .if_not_exists() 21 | .col( 22 | ColumnDef::new(Column::Id) 23 | .big_integer() 24 | .not_null() 25 | .auto_increment() 26 | .primary_key(), 27 | ) 28 | .col( 29 | ColumnDef::new(Column::State) 30 | .r#char() 31 | .char_len(1) 32 | .not_null(), 33 | ) 34 | .col(ColumnDef::new(Column::NarHash).string().not_null()) 35 | .col(ColumnDef::new(Column::NarSize).big_integer().not_null()) 36 | .col(ColumnDef::new(Alias::new("file_hash")).string().null()) 37 | .col(ColumnDef::new(Alias::new("file_size")).big_integer().null()) 38 | .col(ColumnDef::new(Column::Compression).string().not_null()) 39 | .col( 40 | ColumnDef::new(Alias::new("remote_file")) 41 | .string() 42 | .not_null(), 43 | ) 44 | .col( 45 | ColumnDef::new(Alias::new("remote_file_id")) 46 | .string() 47 | .not_null() 48 | .unique_key(), 49 | ) 50 | .col( 51 | ColumnDef::new(Column::HoldersCount) 52 | .integer() 53 | .not_null() 54 | .default(0), 55 | ) 56 | .col( 57 | ColumnDef::new(Column::CreatedAt) 58 | .timestamp_with_time_zone() 59 | .not_null(), 60 | ) 61 | .to_owned(), 62 | ) 63 | .await?; 64 | 65 | manager 66 | .create_index( 67 | Index::create() 68 | .name("idx-nar-nar-hash") 69 | .table(Entity) 70 | .col(Column::NarHash) 71 | .to_owned(), 72 | ) 73 | .await 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /server/src/database/migration/m20221227_000003_create_object_table.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::prelude::*; 2 
| 3 | use crate::database::entity::cache; 4 | use crate::database::entity::nar; 5 | use crate::database::entity::object::*; 6 | 7 | pub struct Migration; 8 | 9 | impl MigrationName for Migration { 10 | fn name(&self) -> &str { 11 | "m20221227_000002_create_object_table" 12 | } 13 | } 14 | 15 | #[async_trait::async_trait] 16 | impl MigrationTrait for Migration { 17 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 18 | manager 19 | .create_table( 20 | Table::create() 21 | .table(Entity) 22 | .if_not_exists() 23 | .col( 24 | ColumnDef::new(Column::Id) 25 | .big_integer() 26 | .not_null() 27 | .auto_increment() 28 | .primary_key(), 29 | ) 30 | .col(ColumnDef::new(Column::CacheId).big_integer().not_null()) 31 | .col(ColumnDef::new(Column::NarId).big_integer().not_null()) 32 | .col( 33 | ColumnDef::new(Column::StorePathHash) 34 | .string_len(32) 35 | .not_null(), 36 | ) 37 | .col(ColumnDef::new(Column::StorePath).string().not_null()) 38 | .col(ColumnDef::new(Column::References).string().not_null()) 39 | .col(ColumnDef::new(Column::System).string()) 40 | .col(ColumnDef::new(Column::Deriver).string()) 41 | .col(ColumnDef::new(Column::Sigs).string().not_null()) 42 | .col(ColumnDef::new(Column::Ca).string()) 43 | .col( 44 | ColumnDef::new(Column::CreatedAt) 45 | .timestamp_with_time_zone() 46 | .not_null(), 47 | ) 48 | .foreign_key( 49 | ForeignKeyCreateStatement::new() 50 | .name("fk_object_cache") 51 | .from_tbl(Entity) 52 | .from_col(Column::CacheId) 53 | .to_tbl(cache::Entity) 54 | .to_col(cache::Column::Id) 55 | .on_delete(ForeignKeyAction::Cascade), 56 | ) 57 | .foreign_key( 58 | ForeignKeyCreateStatement::new() 59 | .name("fk_object_nar") 60 | .from_tbl(Entity) 61 | .from_col(Column::NarId) 62 | .to_tbl(nar::Entity) 63 | .to_col(nar::Column::Id) 64 | .on_delete(ForeignKeyAction::Cascade), 65 | ) 66 | .to_owned(), 67 | ) 68 | .await?; 69 | 70 | manager 71 | .create_index( 72 | Index::create() 73 | .name("idx-object-cache-hash") 74 | .table(Entity) 
75 | .col(Column::CacheId) 76 | .col(Column::StorePathHash) 77 | .unique() 78 | .to_owned(), 79 | ) 80 | .await 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /server/src/database/migration/m20221227_000004_add_object_last_accessed.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::prelude::*; 2 | 3 | use crate::database::entity::object::*; 4 | 5 | pub struct Migration; 6 | 7 | impl MigrationName for Migration { 8 | fn name(&self) -> &str { 9 | "m20221227_000004_add_object_last_accessed" 10 | } 11 | } 12 | 13 | #[async_trait::async_trait] 14 | impl MigrationTrait for Migration { 15 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 16 | manager 17 | .alter_table( 18 | Table::alter() 19 | .table(Entity) 20 | .add_column( 21 | ColumnDef::new(Column::LastAccessedAt) 22 | .timestamp_with_time_zone() 23 | .null(), 24 | ) 25 | .to_owned(), 26 | ) 27 | .await?; 28 | 29 | Ok(()) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /server/src/database/migration/m20221227_000005_add_cache_retention_period.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::prelude::*; 2 | 3 | use crate::database::entity::cache::*; 4 | 5 | pub struct Migration; 6 | 7 | impl MigrationName for Migration { 8 | fn name(&self) -> &str { 9 | "m20221227_000005_add_cache_retention_period" 10 | } 11 | } 12 | 13 | #[async_trait::async_trait] 14 | impl MigrationTrait for Migration { 15 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 16 | manager 17 | .alter_table( 18 | Table::alter() 19 | .table(Entity) 20 | .add_column(ColumnDef::new(Column::RetentionPeriod).integer().null()) 21 | .to_owned(), 22 | ) 23 | .await?; 24 | 25 | Ok(()) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- 
/server/src/database/migration/m20230103_000001_add_object_created_by.rs:
--------------------------------------------------------------------------------

use sea_orm_migration::prelude::*;

use crate::database::entity::object::*;

pub struct Migration;

impl MigrationName for Migration {
    fn name(&self) -> &str {
        "m20230103_000001_add_object_created_by"
    }
}

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    // Adds the nullable `created_by` column (uploader username from the JWT
    // `sub` claim).
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .alter_table(
                Table::alter()
                    .table(Entity)
                    .add_column(ColumnDef::new(Column::CreatedBy).string().null())
                    .to_owned(),
            )
            .await?;

        Ok(())
    }
}

--------------------------------------------------------------------------------
/server/src/database/migration/m20230112_000001_add_chunk_table.rs:
--------------------------------------------------------------------------------

use sea_orm_migration::prelude::*;

use crate::database::entity::chunk::*;

pub struct Migration;

impl MigrationName for Migration {
    fn name(&self) -> &str {
        "m20230112_000001_add_chunk_table"
    }
}

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    // Creates the `chunk` table, mirroring the original `nar` table layout
    // (state, hashes, remote file reference, holders count).
    // NOTE(review): unlike the earlier create-table migrations, this one has
    // no `.if_not_exists()` — confirm whether that is intentional before
    // changing a shipped migration.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Entity)
                    .col(
                        ColumnDef::new(Column::Id)
                            .big_integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Column::State)
                            .r#char()
                            .char_len(1)
                            .not_null(),
                    )
                    .col(ColumnDef::new(Column::ChunkHash).string().not_null())
                    .col(ColumnDef::new(Column::ChunkSize).big_integer().not_null())
                    .col(ColumnDef::new(Alias::new("file_hash")).string().null())
                    .col(ColumnDef::new(Alias::new("file_size")).big_integer().null())
                    .col(ColumnDef::new(Column::Compression).string().not_null())
                    .col(ColumnDef::new(Column::RemoteFile).string().not_null())
                    .col(
                        ColumnDef::new(Column::RemoteFileId)
                            .string()
                            .not_null()
                            .unique_key(),
                    )
                    .col(
                        ColumnDef::new(Column::HoldersCount)
                            .integer()
                            .not_null()
                            .default(0),
                    )
                    .col(
                        ColumnDef::new(Column::CreatedAt)
                            .timestamp_with_time_zone()
                            .not_null(),
                    )
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx-chunk-chunk-hash")
                    .table(Entity)
                    .col(Column::ChunkHash)
                    .to_owned(),
            )
            .await
    }
}

--------------------------------------------------------------------------------
/server/src/database/migration/m20230112_000002_add_chunkref_table.rs:
--------------------------------------------------------------------------------

use sea_orm_migration::prelude::*;

use crate::database::entity::chunk;
use crate::database::entity::chunkref::*;
use crate::database::entity::nar;

pub struct Migration;

impl MigrationName for Migration {
    fn name(&self) -> &str {
        "m20230112_000002_add_chunkref_table"
    }
}

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    // Creates the `chunkref` join table: deleting a chunk nulls out
    // `chunk_id` (SetNull) so the NAR can later be repaired, while deleting
    // a NAR removes its chunkrefs (Cascade).
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Entity)
                    .col(
                        ColumnDef::new(Column::Id)
                            .big_integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Column::NarId).big_integer().not_null())
                    .col(ColumnDef::new(Column::Seq).integer().not_null())
                    .col(ColumnDef::new(Column::ChunkId).big_integer().null())
                    .col(ColumnDef::new(Column::ChunkHash).string().not_null())
                    .col(ColumnDef::new(Column::Compression).string().not_null())
                    .foreign_key(
                        ForeignKeyCreateStatement::new()
                            .name("fk_chunkref_chunk")
                            .from_tbl(Entity)
                            .from_col(Column::ChunkId)
                            .to_tbl(chunk::Entity)
                            .to_col(chunk::Column::Id)
                            .on_delete(ForeignKeyAction::SetNull),
                    )
                    .foreign_key(
                        ForeignKeyCreateStatement::new()
                            .name("fk_chunkref_nar")
                            .from_tbl(Entity)
                            .from_col(Column::NarId)
                            .to_tbl(nar::Entity)
                            .to_col(nar::Column::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // NOTE(review): these index names read "idx-chunk-*" although they
        // are on the `chunkref` table — presumably a copy-paste from the
        // chunk-table migration. They are shipped, so they must stay.
        manager
            .create_index(
                Index::create()
                    .name("idx-chunk-nar-id")
                    .table(Entity)
                    .col(Column::NarId)
                    .to_owned(),
            )
            .await?;

        manager
            .create_index(
                Index::create()
                    .name("idx-chunk-chunk-id")
                    .table(Entity)
                    .col(Column::ChunkId)
                    .to_owned(),
            )
            .await
    }
}

--------------------------------------------------------------------------------
/server/src/database/migration/m20230112_000003_add_nar_num_chunks.rs:
--------------------------------------------------------------------------------

use sea_orm_migration::prelude::*;

use crate::database::entity::nar::*;

pub struct Migration;

impl MigrationName for Migration {
    fn name(&self) -> &str {
        "m20230112_000003_add_nar_num_chunks"
    }
}

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    // Adds `num_chunks`; default 1 because every pre-chunking NAR was stored
    // as a single blob.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .alter_table(
                Table::alter()
                    .table(Entity)
                    .add_column(
                        ColumnDef::new(Column::NumChunks)
                            .integer()
                            .not_null()
                            .default(1),
                    )
                    .to_owned(),
            )
            .await?;

        Ok(())
    }
}

--------------------------------------------------------------------------------
/server/src/database/migration/m20230112_000006_add_nar_completeness_hint.rs:
--------------------------------------------------------------------------------

use
sea_orm_migration::prelude::*; 2 | 3 | use crate::database::entity::nar::*; 4 | 5 | pub struct Migration; 6 | 7 | impl MigrationName for Migration { 8 | fn name(&self) -> &str { 9 | "m20230112_000006_add_nar_completeness_hint" 10 | } 11 | } 12 | 13 | #[async_trait::async_trait] 14 | impl MigrationTrait for Migration { 15 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 16 | manager 17 | .alter_table( 18 | Table::alter() 19 | .table(Entity) 20 | .add_column( 21 | ColumnDef::new(Column::CompletenessHint) 22 | .boolean() 23 | .not_null() 24 | .default(true), 25 | ) 26 | .to_owned(), 27 | ) 28 | .await?; 29 | 30 | Ok(()) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /server/src/database/migration/mod.rs: -------------------------------------------------------------------------------- 1 | //! Database migrations. 2 | 3 | pub use sea_orm_migration::*; 4 | 5 | mod m20221227_000001_create_cache_table; 6 | mod m20221227_000002_create_nar_table; 7 | mod m20221227_000003_create_object_table; 8 | mod m20221227_000004_add_object_last_accessed; 9 | mod m20221227_000005_add_cache_retention_period; 10 | mod m20230103_000001_add_object_created_by; 11 | mod m20230112_000001_add_chunk_table; 12 | mod m20230112_000002_add_chunkref_table; 13 | mod m20230112_000003_add_nar_num_chunks; 14 | mod m20230112_000004_migrate_nar_remote_files_to_chunks; 15 | mod m20230112_000005_drop_old_nar_columns; 16 | mod m20230112_000006_add_nar_completeness_hint; 17 | 18 | pub struct Migrator; 19 | 20 | #[async_trait::async_trait] 21 | impl MigratorTrait for Migrator { 22 | fn migrations() -> Vec> { 23 | vec![ 24 | Box::new(m20221227_000001_create_cache_table::Migration), 25 | Box::new(m20221227_000002_create_nar_table::Migration), 26 | Box::new(m20221227_000003_create_object_table::Migration), 27 | Box::new(m20221227_000004_add_object_last_accessed::Migration), 28 | Box::new(m20221227_000005_add_cache_retention_period::Migration), 
29 | Box::new(m20230103_000001_add_object_created_by::Migration), 30 | Box::new(m20230112_000001_add_chunk_table::Migration), 31 | Box::new(m20230112_000002_add_chunkref_table::Migration), 32 | Box::new(m20230112_000003_add_nar_num_chunks::Migration), 33 | Box::new(m20230112_000004_migrate_nar_remote_files_to_chunks::Migration), 34 | Box::new(m20230112_000005_drop_old_nar_columns::Migration), 35 | Box::new(m20230112_000006_add_nar_completeness_hint::Migration), 36 | ] 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /server/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::net::SocketAddr; 3 | use std::path::PathBuf; 4 | 5 | use anyhow::Result; 6 | use clap::{Parser, ValueEnum}; 7 | use tokio::join; 8 | use tokio::task::spawn; 9 | use tracing_error::ErrorLayer; 10 | use tracing_subscriber::prelude::*; 11 | use tracing_subscriber::EnvFilter; 12 | 13 | use attic_server::config; 14 | 15 | /// Nix binary cache server. 16 | #[derive(Debug, Parser)] 17 | #[clap(version, author = "Zhaofeng Li ")] 18 | #[clap(propagate_version = true)] 19 | struct Opts { 20 | /// Path to the config file. 21 | #[clap(short = 'f', long)] 22 | config: Option, 23 | 24 | /// Socket address to listen on. 25 | /// 26 | /// This overrides `listen` in the config. 27 | #[clap(short = 'l', long)] 28 | listen: Option, 29 | 30 | /// Mode to run. 31 | #[clap(long, default_value = "monolithic")] 32 | mode: ServerMode, 33 | 34 | /// Whether to enable tokio-console. 35 | /// 36 | /// The console server will listen on its default port. 37 | #[clap(long)] 38 | tokio_console: bool, 39 | } 40 | 41 | #[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] 42 | enum ServerMode { 43 | /// Run all components. 44 | Monolithic, 45 | 46 | /// Run the API server. 47 | ApiServer, 48 | 49 | /// Run the garbage collector periodically. 
50 | GarbageCollector, 51 | 52 | /// Run the database migrations then exit. 53 | DbMigrations, 54 | 55 | /// Run garbage collection then exit. 56 | GarbageCollectorOnce, 57 | 58 | /// Check the configuration then exit. 59 | CheckConfig, 60 | } 61 | 62 | #[tokio::main] 63 | async fn main() -> Result<()> { 64 | let opts = Opts::parse(); 65 | 66 | init_logging(opts.tokio_console); 67 | dump_version(); 68 | 69 | let config = 70 | config::load_config(opts.config.as_deref(), opts.mode == ServerMode::Monolithic).await?; 71 | 72 | match opts.mode { 73 | ServerMode::Monolithic => { 74 | attic_server::run_migrations(config.clone()).await?; 75 | 76 | let (api_server, _) = join!( 77 | attic_server::run_api_server(opts.listen, config.clone()), 78 | attic_server::gc::run_garbage_collection(config.clone()), 79 | ); 80 | 81 | api_server?; 82 | } 83 | ServerMode::ApiServer => { 84 | attic_server::run_api_server(opts.listen, config).await?; 85 | } 86 | ServerMode::GarbageCollector => { 87 | attic_server::gc::run_garbage_collection(config.clone()).await; 88 | } 89 | ServerMode::DbMigrations => { 90 | attic_server::run_migrations(config).await?; 91 | } 92 | ServerMode::GarbageCollectorOnce => { 93 | attic_server::gc::run_garbage_collection_once(config).await?; 94 | } 95 | ServerMode::CheckConfig => { 96 | // config is valid, let's just exit :) 97 | } 98 | } 99 | 100 | Ok(()) 101 | } 102 | 103 | fn init_logging(tokio_console: bool) { 104 | let env_filter = EnvFilter::from_default_env(); 105 | let fmt_layer = tracing_subscriber::fmt::layer().with_filter(env_filter); 106 | 107 | let error_layer = ErrorLayer::default(); 108 | 109 | let console_layer = if tokio_console { 110 | let (layer, server) = console_subscriber::ConsoleLayer::new(); 111 | spawn(server.serve()); 112 | Some(layer) 113 | } else { 114 | None 115 | }; 116 | 117 | tracing_subscriber::registry() 118 | .with(fmt_layer) 119 | .with(error_layer) 120 | .with(console_layer) 121 | .init(); 122 | 123 | if tokio_console { 124 | 
eprintln!("Note: tokio-console is enabled"); 125 | } 126 | } 127 | 128 | fn dump_version() { 129 | #[cfg(debug_assertions)] 130 | eprintln!("Attic Server {} (debug)", env!("CARGO_PKG_VERSION")); 131 | 132 | #[cfg(not(debug_assertions))] 133 | eprintln!("Attic Server {} (release)", env!("CARGO_PKG_VERSION")); 134 | } 135 | -------------------------------------------------------------------------------- /server/src/middleware.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicBool, Ordering}; 2 | use std::sync::Arc; 3 | 4 | use anyhow::anyhow; 5 | use axum::{ 6 | extract::{Extension, Host, Request}, 7 | http::HeaderValue, 8 | middleware::Next, 9 | response::Response, 10 | }; 11 | 12 | use super::{AuthState, RequestState, RequestStateInner, State}; 13 | use crate::error::{ErrorKind, ServerResult}; 14 | use attic::api::binary_cache::ATTIC_CACHE_VISIBILITY; 15 | 16 | /// Initializes per-request state. 17 | pub async fn init_request_state( 18 | Extension(state): Extension, 19 | Host(host): Host, 20 | mut req: Request, 21 | next: Next, 22 | ) -> Response { 23 | // X-Forwarded-Proto is an untrusted header 24 | let client_claims_https = 25 | if let Some(x_forwarded_proto) = req.headers().get("x-forwarded-proto") { 26 | x_forwarded_proto.as_bytes() == b"https" 27 | } else { 28 | false 29 | }; 30 | 31 | let req_state = Arc::new(RequestStateInner { 32 | auth: AuthState::new(), 33 | api_endpoint: state.config.api_endpoint.to_owned(), 34 | substituter_endpoint: state.config.substituter_endpoint.to_owned(), 35 | host, 36 | client_claims_https, 37 | public_cache: AtomicBool::new(false), 38 | }); 39 | 40 | req.extensions_mut().insert(req_state); 41 | next.run(req).await 42 | } 43 | 44 | /// Restricts valid Host headers. 45 | /// 46 | /// We also require that all request have a Host header in 47 | /// the first place. 
48 | pub async fn restrict_host( 49 | Extension(state): Extension, 50 | Host(host): Host, 51 | req: Request, 52 | next: Next, 53 | ) -> ServerResult { 54 | let allowed_hosts = &state.config.allowed_hosts; 55 | 56 | if !allowed_hosts.is_empty() && !allowed_hosts.iter().any(|h| h.as_str() == host) { 57 | return Err(ErrorKind::RequestError(anyhow!("Bad Host")).into()); 58 | } 59 | 60 | Ok(next.run(req).await) 61 | } 62 | 63 | /// Sets the `X-Attic-Cache-Visibility` header in responses. 64 | pub(crate) async fn set_visibility_header( 65 | Extension(req_state): Extension, 66 | req: Request, 67 | next: Next, 68 | ) -> ServerResult { 69 | let mut response = next.run(req).await; 70 | 71 | if req_state.public_cache.load(Ordering::Relaxed) { 72 | response 73 | .headers_mut() 74 | .append(ATTIC_CACHE_VISIBILITY, HeaderValue::from_static("public")); 75 | } 76 | 77 | Ok(response) 78 | } 79 | -------------------------------------------------------------------------------- /server/src/narinfo/tests.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | use std::path::Path; 4 | 5 | use attic::signing::NixPublicKey; 6 | 7 | #[test] 8 | fn test_basic() { 9 | let s = r#" 10 | StorePath: /nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10 11 | URL: nar/0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9.nar.xz 12 | Compression: xz 13 | FileHash: sha256:0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9 14 | FileSize: 41104 15 | NarHash: sha256:16mvl7v0ylzcg2n3xzjn41qhzbmgcn5iyarx16nn5l2r36n2kqci 16 | NarSize: 206104 17 | References: 563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56 xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10 18 | Deriver: vvb4wxmnjixmrkhmj2xb75z62hrr41i7-hello-2.10.drv 19 | Sig: cache.nixos.org-1:lo9EfNIL4eGRuNh7DTbAAffWPpI2SlYC/8uP7JnhgmfRIUNGhSbFe8qEaKN0mFS02TuhPpXFPNtRkFcCp0hGAQ== 20 | "#; 21 | 22 | let narinfo = NarInfo::from_str(s).expect("Could not parse narinfo"); 23 | 24 | fn verify_narinfo(narinfo: 
&NarInfo) { 25 | assert_eq!( 26 | Path::new("/nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10"), 27 | narinfo.store_path 28 | ); 29 | assert_eq!(Path::new("/nix/store"), narinfo.store_dir()); 30 | assert_eq!( 31 | "nar/0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9.nar.xz", 32 | narinfo.url 33 | ); 34 | assert_eq!(Compression::Xz, narinfo.compression); 35 | assert_eq!( 36 | "sha256:0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9", 37 | narinfo.file_hash.as_ref().unwrap().to_typed_base32() 38 | ); 39 | assert_eq!(Some(41104), narinfo.file_size); 40 | assert_eq!( 41 | "sha256:16mvl7v0ylzcg2n3xzjn41qhzbmgcn5iyarx16nn5l2r36n2kqci", 42 | narinfo.nar_hash.to_typed_base32() 43 | ); 44 | assert_eq!(206104, narinfo.nar_size); 45 | assert_eq!( 46 | vec![ 47 | "563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56", 48 | "xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10", 49 | ], 50 | narinfo.references 51 | ); 52 | assert_eq!( 53 | Some("vvb4wxmnjixmrkhmj2xb75z62hrr41i7-hello-2.10.drv".to_string()), 54 | narinfo.deriver 55 | ); 56 | assert_eq!(Some("cache.nixos.org-1:lo9EfNIL4eGRuNh7DTbAAffWPpI2SlYC/8uP7JnhgmfRIUNGhSbFe8qEaKN0mFS02TuhPpXFPNtRkFcCp0hGAQ==".to_string()), narinfo.signature); 57 | } 58 | 59 | verify_narinfo(&narinfo); 60 | 61 | let round_trip = narinfo.to_string().expect("Could not serialize narinfo"); 62 | 63 | eprintln!("{}", round_trip); 64 | 65 | let reparse = NarInfo::from_str(&round_trip).expect("Could not re-parse serialized narinfo"); 66 | 67 | verify_narinfo(&reparse); 68 | } 69 | 70 | #[test] 71 | fn test_deriver() { 72 | let s = r#" 73 | StorePath: /nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10 74 | URL: nar/0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9.nar.xz 75 | Compression: xz 76 | FileHash: sha256:0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9 77 | FileSize: 41104 78 | NarHash: sha256:16mvl7v0ylzcg2n3xzjn41qhzbmgcn5iyarx16nn5l2r36n2kqci 79 | NarSize: 206104 80 | References: 563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56 
xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10 81 | Deriver: unknown-deriver 82 | "#; 83 | 84 | let narinfo = NarInfo::from_str(s).expect("Could not parse narinfo"); 85 | 86 | assert_eq!(None, narinfo.deriver); 87 | } 88 | 89 | #[test] 90 | fn test_fingerprint() { 91 | let s = r#" 92 | StorePath: /nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10 93 | URL: nar/0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9.nar.xz 94 | Compression: xz 95 | FileHash: sha256:0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9 96 | FileSize: 41104 97 | NarHash: sha256:91e129ac1959d062ad093d2b1f8b65afae0f712056fe3eac78ec530ff6a1bb9a 98 | NarSize: 206104 99 | References: 563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56 xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10 100 | Deriver: vvb4wxmnjixmrkhmj2xb75z62hrr41i7-hello-2.10.drv 101 | Sig: cache.nixos.org-1:lo9EfNIL4eGRuNh7DTbAAffWPpI2SlYC/8uP7JnhgmfRIUNGhSbFe8qEaKN0mFS02TuhPpXFPNtRkFcCp0hGAQ== 102 | "#; 103 | 104 | let correct_fingerprint = b"1;/nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10;sha256:16mvl7v0ylzcg2n3xzjn41qhzbmgcn5iyarx16nn5l2r36n2kqci;206104;/nix/store/563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56,/nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10"; 105 | 106 | let public_key = 107 | NixPublicKey::from_str("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=") 108 | .expect("Could not import cache.nixos.org public key"); 109 | 110 | let narinfo = NarInfo::from_str(s).expect("Could not parse narinfo"); 111 | 112 | let fingerprint = narinfo.fingerprint(); 113 | 114 | eprintln!( 115 | "Expected: {}", 116 | String::from_utf8(correct_fingerprint.to_vec()).unwrap() 117 | ); 118 | eprintln!( 119 | " Actual: {}", 120 | String::from_utf8(fingerprint.clone()).unwrap() 121 | ); 122 | 123 | assert_eq!(correct_fingerprint, fingerprint.as_slice()); 124 | 125 | public_key 126 | .verify(&narinfo.fingerprint(), narinfo.signature().unwrap()) 127 | .expect("Could not verify signature"); 128 | } 129 | 
-------------------------------------------------------------------------------- /server/src/nix_manifest/mod.rs: -------------------------------------------------------------------------------- 1 | //! The Nix manifest format. 2 | //! 3 | //! Nix uses a simple format in binary cache manifests (`.narinfo`, 4 | //! `/nix-cache-info`). It consists of a single, flat KV map with 5 | //! colon (`:`) as the delimiter. 6 | //! 7 | //! It's not well-defined and the official implementation performs 8 | //! serialization and deserialization by hand [1]. Here we implement 9 | //! a deserializer and a serializer using the serde framework. 10 | //! 11 | //! An example of a `/nix-cache-info` file: 12 | //! 13 | //! ```text 14 | //! StoreDir: /nix/store 15 | //! WantMassQuery: 1 16 | //! Priority: 40 17 | //! ``` 18 | //! 19 | //! [1] 20 | 21 | mod deserializer; 22 | mod serializer; 23 | 24 | #[cfg(test)] 25 | mod tests; 26 | 27 | use std::fmt::Display; 28 | use std::result::Result as StdResult; 29 | 30 | use displaydoc::Display; 31 | use serde::{de, ser, Deserialize, Serialize}; 32 | use serde_with::{formats::SpaceSeparator, StringWithSeparator}; 33 | 34 | use crate::error::{ErrorKind, ServerResult}; 35 | use deserializer::Deserializer; 36 | use serializer::Serializer; 37 | 38 | type Result = StdResult; 39 | 40 | pub fn from_str(s: &str) -> ServerResult 41 | where 42 | T: for<'de> Deserialize<'de>, 43 | { 44 | let mut deserializer = Deserializer::from_str(s); 45 | T::deserialize(&mut deserializer).map_err(|e| ErrorKind::ManifestSerializationError(e).into()) 46 | 47 | // FIXME: Reject extra output?? 48 | } 49 | 50 | pub fn to_string(value: &T) -> ServerResult 51 | where 52 | T: Serialize, 53 | { 54 | let mut serializer = Serializer::new(); 55 | value 56 | .serialize(&mut serializer) 57 | .map_err(ErrorKind::ManifestSerializationError)?; 58 | 59 | Ok(serializer.into_output()) 60 | } 61 | 62 | /// An error during (de)serialization. 
63 | #[derive(Debug, Display)] 64 | pub enum Error { 65 | /// Unexpected {0}. 66 | Unexpected(&'static str), 67 | 68 | /// Unexpected EOF. 69 | UnexpectedEof, 70 | 71 | /// Expected a colon. 72 | ExpectedColon, 73 | 74 | /// Expected a boolean. 75 | ExpectedBoolean, 76 | 77 | /// Expected an integer. 78 | ExpectedInteger, 79 | 80 | /// "{0}" values are unsupported. 81 | Unsupported(&'static str), 82 | 83 | /// Not possible to auto-determine the type. 84 | AnyUnsupported, 85 | 86 | /// None is unsupported. Add #[serde(skip_serializing_if = "Option::is_none")] 87 | NoneUnsupported, 88 | 89 | /// Nested maps are unsupported. 90 | NestedMapUnsupported, 91 | 92 | /// Floating point numbers are unsupported. 93 | FloatUnsupported, 94 | 95 | /// Custom error: {0} 96 | Custom(String), 97 | } 98 | 99 | /// Custom (de)serializer for a space-delimited list. 100 | /// 101 | /// Example usage: 102 | /// 103 | /// ``` 104 | /// use serde::Deserialize; 105 | /// use serde_with::serde_as; 106 | /// # use attic_server::nix_manifest::{self, SpaceDelimitedList}; 107 | /// 108 | /// #[serde_as] 109 | /// #[derive(Debug, Deserialize)] 110 | /// struct MyManifest { 111 | /// #[serde_as(as = "SpaceDelimitedList")] 112 | /// some_list: Vec, 113 | /// } 114 | /// 115 | /// let s = "some_list: item-a item-b"; 116 | /// let parsed: MyManifest = nix_manifest::from_str(s).unwrap(); 117 | /// 118 | /// assert_eq!(vec![ "item-a", "item-b" ], parsed.some_list); 119 | /// ``` 120 | pub type SpaceDelimitedList = StringWithSeparator; 121 | 122 | impl std::error::Error for Error {} 123 | 124 | impl de::Error for Error { 125 | fn custom(msg: T) -> Self { 126 | let f = format!("{}", msg); 127 | Self::Custom(f) 128 | } 129 | } 130 | 131 | impl ser::Error for Error { 132 | fn custom(msg: T) -> Self { 133 | let f = format!("{}", msg); 134 | Self::Custom(f) 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /server/src/nix_manifest/tests.rs: 
--------------------------------------------------------------------------------

use std::path::PathBuf;

use serde::{Deserialize, Serialize};

/// A hypothetical manifest.
#[derive(Debug, PartialEq, Deserialize, Serialize)]
struct HypotheticalManifest {
    #[serde(rename = "StoreDir")]
    store_dir: PathBuf,

    #[serde(rename = "WantMassQuery")]
    want_mass_query: bool,
}

#[test]
fn test_basic() {
    let manifest = r#"
StoreDir: /nix/store
WantMassQuery: 1
"#;

    let expected = HypotheticalManifest {
        store_dir: PathBuf::from("/nix/store"),
        want_mass_query: true,
    };

    let parsed = super::from_str::<HypotheticalManifest>(manifest).unwrap();
    assert_eq!(parsed, expected);

    // TODO: Use the actual Nix parser to reparse the resulting manifest?
    let round_trip = super::to_string(&parsed).unwrap();

    // FIXME: This is pretty fragile. Just testing that it can be parsed again should
    // be enough.
    assert_eq!(manifest.trim(), round_trip.trim());

    let parsed2 = super::from_str::<HypotheticalManifest>(&round_trip).unwrap();
    assert_eq!(parsed2, expected);
}

#[test]
fn test_unquoted_number() {
    // Numeric-looking values must still deserialize into string-like
    // fields (PathBuf here) without quoting.
    let manifest = r#"
StoreDir: 12345
WantMassQuery: 1
"#;

    let expected = HypotheticalManifest {
        store_dir: PathBuf::from("12345"),
        want_mass_query: true,
    };

    let parsed = super::from_str::<HypotheticalManifest>(manifest).unwrap();
    assert_eq!(parsed, expected);
}

--------------------------------------------------------------------------------
/server/src/oobe.rs:
--------------------------------------------------------------------------------

//! Guided out-of-box experience.
//!
//! This performs automatic setup for people running `atticd`
//! directly without specifying any configurations. The goal is
//! to let them quickly have a taste of Attic with a config
//! 
template that provide guidance for them to achieve a more 7 | //! permanent setup. 8 | //! 9 | //! Paths: 10 | //! - Config: `~/.config/attic/server.yaml` 11 | //! - SQLite: `~/.local/share/attic/server.db` 12 | //! - NARs: `~/.local/share/attic/storage` 13 | 14 | use anyhow::Result; 15 | use base64::{engine::general_purpose::STANDARD as BASE64_STANDARD, Engine}; 16 | use chrono::{Months, Utc}; 17 | use rsa::pkcs1::EncodeRsaPrivateKey; 18 | use tokio::fs::{self, OpenOptions}; 19 | 20 | use crate::access::{decode_token_rs256_secret_base64, SignatureType, Token}; 21 | use crate::config; 22 | use attic::cache::CacheNamePattern; 23 | 24 | const CONFIG_TEMPLATE: &str = include_str!("config-template.toml"); 25 | 26 | pub async fn run_oobe() -> Result<()> { 27 | let config_path = config::get_xdg_config_path()?; 28 | 29 | if config_path.exists() { 30 | return Ok(()); 31 | } 32 | 33 | let data_path = config::get_xdg_data_path()?; 34 | 35 | // Generate a simple config 36 | let database_path = data_path.join("server.db"); 37 | let database_url = format!("sqlite://{}", database_path.to_str().unwrap()); 38 | OpenOptions::new() 39 | .create(true) 40 | .write(true) 41 | .open(&database_path) 42 | .await?; 43 | 44 | let storage_path = data_path.join("storage"); 45 | fs::create_dir_all(&storage_path).await?; 46 | 47 | let rs256_secret_base64 = { 48 | let mut rng = rand::thread_rng(); 49 | let private_key = rsa::RsaPrivateKey::new(&mut rng, 4096)?; 50 | let pkcs1_pem = private_key.to_pkcs1_pem(rsa::pkcs1::LineEnding::LF)?; 51 | 52 | BASE64_STANDARD.encode(pkcs1_pem.as_bytes()) 53 | }; 54 | 55 | let config_content = CONFIG_TEMPLATE 56 | .replace("%database_url%", &database_url) 57 | .replace("%storage_path%", storage_path.to_str().unwrap()) 58 | .replace("%token_rs256_secret_base64%", &rs256_secret_base64); 59 | 60 | fs::write(&config_path, config_content.as_bytes()).await?; 61 | 62 | // Generate a JWT token 63 | let root_token = { 64 | let in_two_years = 
Utc::now().checked_add_months(Months::new(24)).unwrap(); 65 | let mut token = Token::new("root".to_string(), &in_two_years); 66 | let any_cache = CacheNamePattern::new("*".to_string()).unwrap(); 67 | let perm = token.get_or_insert_permission_mut(any_cache); 68 | perm.pull = true; 69 | perm.push = true; 70 | perm.delete = true; 71 | perm.create_cache = true; 72 | perm.configure_cache = true; 73 | perm.configure_cache_retention = true; 74 | perm.destroy_cache = true; 75 | 76 | let key = decode_token_rs256_secret_base64(&rs256_secret_base64).unwrap(); 77 | token.encode(&SignatureType::RS256(key), &None, &None)? 78 | }; 79 | 80 | eprintln!(); 81 | eprintln!("-----------------"); 82 | eprintln!("Welcome to Attic!"); 83 | eprintln!(); 84 | eprintln!("A simple setup using SQLite and local storage has been configured for you in:"); 85 | eprintln!(); 86 | eprintln!(" {}", config_path.to_str().unwrap()); 87 | eprintln!(); 88 | eprintln!("Run the following command to log into this server:"); 89 | eprintln!(); 90 | eprintln!(" attic login local http://localhost:8080 {root_token}"); 91 | eprintln!(); 92 | eprintln!("Documentations and guides:"); 93 | eprintln!(); 94 | eprintln!(" https://docs.attic.rs"); 95 | eprintln!(); 96 | eprintln!("Enjoy!"); 97 | eprintln!("-----------------"); 98 | eprintln!(); 99 | 100 | Ok(()) 101 | } 102 | -------------------------------------------------------------------------------- /server/src/storage/mod.rs: -------------------------------------------------------------------------------- 1 | //! Remote file storage. 2 | 3 | mod local; 4 | mod s3; 5 | 6 | use serde::{Deserialize, Serialize}; 7 | use tokio::io::AsyncRead; 8 | 9 | use crate::error::ServerResult; 10 | 11 | pub(crate) use self::local::{LocalBackend, LocalRemoteFile, LocalStorageConfig}; 12 | pub(crate) use self::s3::{S3Backend, S3RemoteFile, S3StorageConfig}; 13 | 14 | /// Reference to a location where a NAR is stored. 
15 | /// 16 | /// To be compatible with the Nix Binary Cache API, the reference 17 | /// must be able to be converted to a (time-limited) direct link 18 | /// to the file that the client will be redirected to when they 19 | /// request the NAR. 20 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 21 | pub enum RemoteFile { 22 | /// File in an S3-compatible storage bucket. 23 | S3(S3RemoteFile), 24 | 25 | /// File in local storage. 26 | Local(LocalRemoteFile), 27 | 28 | /// A direct HTTP link. 29 | /// 30 | /// This is mostly here to facilitate testing. 31 | Http(HttpRemoteFile), 32 | } 33 | 34 | /// Way to download a file. 35 | pub enum Download { 36 | /// A possibly ephemeral URL. 37 | Url(String), 38 | 39 | /// An AsyncRead. 40 | AsyncRead(Box), 41 | } 42 | 43 | // TODO: Maybe make RemoteFile the one true reference instead of having two sets of APIs? 44 | /// A storage backend. 45 | #[async_trait::async_trait] 46 | pub trait StorageBackend: Send + Sync + std::fmt::Debug { 47 | /// Uploads a file. 48 | async fn upload_file( 49 | &self, 50 | name: String, 51 | stream: &mut (dyn AsyncRead + Unpin + Send), 52 | ) -> ServerResult; 53 | 54 | /// Deletes a file. 55 | async fn delete_file(&self, name: String) -> ServerResult<()>; 56 | 57 | /// Deletes a file using a database reference. 58 | async fn delete_file_db(&self, file: &RemoteFile) -> ServerResult<()>; 59 | 60 | /// Downloads a file using the current configuration. 61 | async fn download_file(&self, name: String, prefer_stream: bool) -> ServerResult; 62 | 63 | /// Downloads a file using a database reference. 64 | async fn download_file_db( 65 | &self, 66 | file: &RemoteFile, 67 | prefer_stream: bool, 68 | ) -> ServerResult; 69 | 70 | /// Creates a database reference for a file. 71 | async fn make_db_reference(&self, name: String) -> ServerResult; 72 | } 73 | 74 | /// Reference to an HTTP link from which the file can be downloaded. 
75 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 76 | pub struct HttpRemoteFile { 77 | /// URL of the file. 78 | pub url: String, 79 | } 80 | 81 | impl RemoteFile { 82 | /// Returns the remote file ID. 83 | pub fn remote_file_id(&self) -> String { 84 | match self { 85 | Self::S3(f) => format!("s3:{}/{}/{}", f.region, f.bucket, f.key), 86 | Self::Http(f) => format!("http:{}", f.url), 87 | Self::Local(f) => format!("local:{}", f.name), 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let 2 | flake = import ./flake-compat.nix; 3 | in flake.shellNix 4 | -------------------------------------------------------------------------------- /token/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "attic-token" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | attic = { path = "../attic", default-features = false } 10 | 11 | base64 = "0.22.1" 12 | chrono = "0.4.31" 13 | displaydoc = "0.2.4" 14 | indexmap = { version = "2.2.6", features = ["serde"] } 15 | jwt-simple = "0.11.5" 16 | lazy_static = "1.4.0" 17 | regex = "1.8.3" 18 | serde = "1.0.163" 19 | serde_with = "3.0.0" 20 | tracing = "0.1.37" 21 | rsa = "0.9.3" 22 | -------------------------------------------------------------------------------- /token/src/util.rs: -------------------------------------------------------------------------------- 1 | use std::str; 2 | 3 | use base64::{engine::general_purpose::STANDARD as BASE64_STANDARD, Engine}; 4 | use lazy_static::lazy_static; 5 | use regex::Regex; 6 | 7 | lazy_static! 
{
    // Named groups were stripped by the export; reconstructed so that the
    // scheme is matched case-insensitively ("Basic", "baSIC", "bearer" all
    // work — see the tests below) while "rest" captures the raw credential.
    static ref AUTHORIZATION_REGEX: Regex =
        Regex::new(r"^(?i)((?P<bearer>bearer)|(?P<basic>basic))(?-i) (?P<rest>(.*))$").unwrap();
}

/// Extracts the JWT from an Authorization header.
///
/// Accepts either `Bearer <token>` (token returned verbatim) or
/// `Basic <base64(user:pass)>` (the password part is returned).
/// Returns `None` for malformed headers or invalid base64/UTF-8.
pub fn parse_authorization_header(authorization: &str) -> Option<String> {
    let captures = AUTHORIZATION_REGEX.captures(authorization)?;
    let rest = captures.name("rest").unwrap().as_str();

    if captures.name("bearer").is_some() {
        // Bearer token
        Some(rest.to_string())
    } else {
        // Basic auth
        let bytes = BASE64_STANDARD.decode(rest).ok()?;

        let user_pass = str::from_utf8(&bytes).ok()?;
        // Only the part after the first colon (the password) carries the JWT.
        let colon = user_pass.find(':')?;
        let pass = &user_pass[colon + 1..];

        Some(pass.to_string())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_authorization_header() {
        assert_eq!(
            "somepass",
            parse_authorization_header("Basic c29tZXVzZXI6c29tZXBhc3M=").unwrap(),
        );

        assert_eq!(
            "somepass",
            parse_authorization_header("baSIC c29tZXVzZXI6c29tZXBhc3M=").unwrap(),
        );

        assert_eq!(
            "some-token",
            parse_authorization_header("bearer some-token").unwrap(),
        );
    }
}

--------------------------------------------------------------------------------