├── .editorconfig ├── .envrc ├── .github └── workflows │ ├── build.yaml │ ├── check-and-test.yaml │ ├── flakehub.yaml │ ├── keygen.yaml │ ├── release-branches.yml │ ├── release-prs.yml │ ├── release-tags.yml │ ├── update-flake-lock.yaml │ └── upload_s3.sh ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── flake.lock ├── flake.nix ├── gha-cache ├── Cargo.toml ├── README.md └── src │ ├── api.rs │ ├── credentials.rs │ ├── lib.rs │ └── util.rs ├── magic-nix-cache ├── Cargo.toml └── src │ ├── api.rs │ ├── binary_cache.rs │ ├── env.rs │ ├── error.rs │ ├── flakehub.rs │ ├── gha.rs │ ├── github.rs │ ├── main.rs │ ├── pbh.rs │ ├── telemetry.rs │ └── util.rs └── shell.nix /.editorconfig: -------------------------------------------------------------------------------- 1 | # https://editorconfig.org 2 | root = true 3 | 4 | [*] 5 | indent_style = space 6 | indent_size = 2 7 | end_of_line = lf 8 | charset = utf-8 9 | trim_trailing_whitespace = true 10 | insert_final_newline = true 11 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | use flake 2 | -------------------------------------------------------------------------------- /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: Build artifacts 2 | 3 | on: 4 | workflow_dispatch: 5 | workflow_call: 6 | 7 | jobs: 8 | build-artifacts: 9 | runs-on: ${{ matrix.systems.runner }} 10 | permissions: 11 | contents: read 12 | id-token: write 13 | env: 14 | ARTIFACT_KEY: magic-nix-cache-${{ matrix.systems.system }} 15 | ARCHIVE_NAME: magic-nix-cache.closure.xz 16 | strategy: 17 | matrix: 18 | systems: 19 | - nix-system: x86_64-linux 20 | system: X64-Linux 21 | runner: ubuntu-22.04 22 | - nix-system: aarch64-linux 23 | system: ARM64-Linux 24 | runner: namespace-profile-default-arm64 25 | - nix-system: x86_64-darwin 26 | system: X64-macOS 27 | runner: macos-14-large 28 | - nix-system: aarch64-darwin 29 | system: ARM64-macOS 30 | runner: macos-latest-xlarge 31 | steps: 32 | - uses: actions/checkout@v4 33 | - name: Install Nix on ${{ matrix.systems.system }} 34 | uses: DeterminateSystems/determinate-nix-action@v3 35 | - name: Set up FlakeHub Cache 36 | uses: DeterminateSystems/flakehub-cache-action@main 37 | 38 | - name: Build and cache dev shell for ${{ matrix.systems.nix-system }} 39 | run: | 40 | nix build ".#devShells.${{ matrix.systems.nix-system }}.default" 41 | 42 | - name: Build package and create closure for ${{ matrix.systems.system }} 43 | run: | 44 | nix build .# -L --fallback && \ 45 | nix-store --export $(nix-store -qR ./result) | xz -9 > "${{ env.ARCHIVE_NAME }}" 46 | 47 | - name: Upload magic-nix-cache closure for ${{ matrix.systems.system }} 48 | uses: actions/upload-artifact@v4.6.0 49 | with: 50 | # Artifact name 51 | name: ${{ env.ARTIFACT_KEY }} 52 | path: ${{ env.ARCHIVE_NAME }} 53 | retention-days: 1 54 | -------------------------------------------------------------------------------- /.github/workflows/check-and-test.yaml: -------------------------------------------------------------------------------- 1 | name: Run checks and integration test 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [main] 7 | 8 | jobs: 9 | checks: 10 | name: Nix and Rust checks 11 | runs-on: ubuntu-22.04 12 | permissions: 13 | contents: read 14 | id-token: write 15 | steps: 16 | - uses: actions/checkout@v4 17 | 18 | - name: Check health of flake.lock 
19 | uses: DeterminateSystems/flake-checker-action@main 20 | with: 21 | fail-mode: true 22 | 23 | - name: Install Nix 24 | uses: DeterminateSystems/determinate-nix-action@v3 25 | 26 | - uses: DeterminateSystems/flakehub-cache-action@main 27 | 28 | - name: Check Rust formatting 29 | run: nix develop --command cargo fmt --check 30 | 31 | - name: Clippy 32 | run: nix develop --command cargo clippy 33 | 34 | build: 35 | name: Build artifacts 36 | needs: checks 37 | uses: ./.github/workflows/build.yaml 38 | secrets: inherit 39 | 40 | action-integration-test: 41 | name: Integration test for magic-nix-cache-action 42 | runs-on: ${{ matrix.systems.runner }} 43 | needs: build 44 | env: 45 | ARTIFACT_KEY: magic-nix-cache-${{ matrix.systems.system }} 46 | ARCHIVE_NAME: magic-nix-cache.closure.xz 47 | strategy: 48 | matrix: 49 | systems: 50 | - system: X64-Linux 51 | runner: ubuntu-22.04 52 | - system: ARM64-Linux 53 | runner: namespace-profile-default-arm64 54 | - system: X64-macOS 55 | runner: macos-14-large 56 | - system: ARM64-macOS 57 | runner: macos-latest-xlarge 58 | permissions: 59 | contents: read 60 | id-token: write 61 | steps: 62 | - uses: actions/checkout@v4 63 | 64 | - name: Download closure for ${{ matrix.systems.system }} 65 | uses: actions/download-artifact@v4.1.8 66 | with: 67 | name: ${{ env.ARTIFACT_KEY }} 68 | path: ${{ env.ARTIFACT_KEY }} 69 | 70 | - name: Install Nix on ${{ matrix.systems.system }} 71 | uses: DeterminateSystems/determinate-nix-action@v3 72 | 73 | - name: Test magic-nix-cache-action@main on ${{ matrix.systems.runner }} 74 | uses: DeterminateSystems/flakehub-cache-action@main 75 | with: 76 | source-binary: "${{ env.ARTIFACT_KEY }}/${{ env.ARCHIVE_NAME }}" 77 | _internal-strict-mode: true 78 | 79 | - name: Run nix to test magic-nix-cache-action 80 | run: | 81 | nix develop --command echo "just testing" 82 | - name: Exhaust our GitHub Actions Cache tokens 83 | # Generally skip this step since it is so intensive 84 | if: ${{ false }} 85 | run: | 86 | date >> README.md 87 | nix build .#veryLongChain -v 88 | -------------------------------------------------------------------------------- /.github/workflows/flakehub.yaml: -------------------------------------------------------------------------------- 1 | name: "Publish every Git push to main to FlakeHub" 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | 8 | jobs: 9 | flakehub-publish: 10 | runs-on: "ubuntu-latest" 11 | permissions: 12 | id-token: "write" 13 | contents: "read" 14 | steps: 15 | - uses: "actions/checkout@v4" 16 | - uses: "DeterminateSystems/determinate-nix-action@v3" 17 | - uses: "DeterminateSystems/flakehub-push@main" 18 | with: 19 | name: "DeterminateSystems/magic-nix-cache" 20 | rolling: true 21 | visibility: "public" 22 | -------------------------------------------------------------------------------- /.github/workflows/keygen.yaml: -------------------------------------------------------------------------------- 1 | name: Generate Credentials 2 | on: 3 | - workflow_dispatch 4 | jobs: 5 | build: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@v4 9 | - name: Install Nix 10 | uses: DeterminateSystems/determinate-nix-action@v3 11 | - uses: DeterminateSystems/flakehub-cache-action@main 12 | - name: Expose GitHub Runtime 13 | uses: crazy-max/ghaction-github-runtime@v2 14 | - name: Dump credentials 15 | run: | 16 | if [[ -z "$AGE_PUBLIC_KEY" ]]; then 17 | >&2 echo 'The AGE_PUBLIC_KEY secret must be present.' 18 | >&2 echo 'You can generate one with `age-keygen -o key.txt`.' 
19 | exit 1 20 | fi 21 | 22 | cat >creds.json <creds.json" 33 | env: 34 | AGE_PUBLIC_KEY: ${{ secrets.AGE_PUBLIC_KEY }} 35 | -------------------------------------------------------------------------------- /.github/workflows/release-branches.yml: -------------------------------------------------------------------------------- 1 | name: Release Branch 2 | 3 | on: 4 | push: 5 | branches: 6 | # NOTE: make sure any branches here are also valid directory names, 7 | # otherwise creating the directory and uploading to s3 will fail 8 | - "main" 9 | 10 | jobs: 11 | build: 12 | uses: ./.github/workflows/build.yaml 13 | secrets: inherit 14 | 15 | release: 16 | needs: build 17 | 18 | concurrency: release 19 | runs-on: ubuntu-latest 20 | permissions: 21 | contents: read 22 | id-token: write # In order to request a JWT for AWS auth 23 | steps: 24 | - name: Checkout 25 | uses: actions/checkout@v4 26 | - name: Configure AWS Credentials 27 | uses: aws-actions/configure-aws-credentials@v2 28 | with: 29 | role-to-assume: ${{ secrets.AWS_S3_UPLOAD_ROLE }} 30 | aws-region: us-east-2 31 | 32 | - name: Create the artifacts directory 33 | run: rm -rf ./artifacts && mkdir ./artifacts 34 | 35 | - uses: actions/download-artifact@v4.1.8 36 | with: 37 | name: magic-nix-cache-ARM64-macOS 38 | path: cache-binary-ARM64-macOS 39 | - name: Persist the cache binary 40 | run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS 41 | 42 | - uses: actions/download-artifact@v4.1.8 43 | with: 44 | name: magic-nix-cache-X64-macOS 45 | path: cache-binary-X64-macOS 46 | - name: Persist the cache binary 47 | run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS 48 | 49 | - uses: actions/download-artifact@v4.1.8 50 | with: 51 | name: magic-nix-cache-X64-Linux 52 | path: cache-binary-X64-Linux 53 | - name: Persist the cache binary 54 | run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux 55 | 56 | - uses: actions/download-artifact@v4.1.8 57 | with: 58 | name: magic-nix-cache-ARM64-Linux 59 | path: cache-binary-ARM64-Linux 60 | - name: Persist the cache binary 61 | run: cp ./cache-binary-ARM64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-Linux 62 | 63 | - name: Publish Release (Branch) 64 | env: 65 | AWS_BUCKET: ${{ secrets.AWS_S3_UPLOAD_BUCKET }} 66 | run: | 67 | .github/workflows/upload_s3.sh branch "${{ github.ref_name }}" "$GITHUB_SHA" 68 | -------------------------------------------------------------------------------- /.github/workflows/release-prs.yml: -------------------------------------------------------------------------------- 1 | name: Release PR 2 | 3 | on: 4 | pull_request: 5 | types: 6 | - opened 7 | - reopened 8 | - synchronize 9 | - labeled 10 | 11 | jobs: 12 | build: 13 | # We want to build artifacts only if the `upload to s3` label is applied 14 | # Only intra-repo PRs are allowed to have PR artifacts uploaded 15 | # We only want to trigger once the upload once in the case the upload label is added, not when any label is added 16 | if: | 17 | github.event.pull_request.head.repo.full_name == 'DeterminateSystems/magic-nix-cache' 18 | && ( 19 | (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') 20 | || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) 21 | ) 22 | uses: ./.github/workflows/build.yaml 23 | secrets: inherit 24 | 25 | release: 26 | needs: build 27 | concurrency: release 28 
| runs-on: ubuntu-latest 29 | permissions: 30 | id-token: write # In order to request a JWT for AWS auth 31 | contents: read 32 | steps: 33 | - name: Checkout 34 | uses: actions/checkout@v4 35 | 36 | - name: Create the artifacts directory 37 | run: rm -rf ./artifacts && mkdir ./artifacts 38 | 39 | - uses: actions/download-artifact@v4.1.8 40 | with: 41 | name: magic-nix-cache-ARM64-macOS 42 | path: cache-binary-ARM64-macOS 43 | - name: Persist the cache binary 44 | run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS 45 | 46 | - uses: actions/download-artifact@v4.1.8 47 | with: 48 | name: magic-nix-cache-X64-macOS 49 | path: cache-binary-X64-macOS 50 | - name: Persist the cache binary 51 | run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS 52 | 53 | - uses: actions/download-artifact@v4.1.8 54 | with: 55 | name: magic-nix-cache-X64-Linux 56 | path: cache-binary-X64-Linux 57 | - name: Persist the cache binary 58 | run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux 59 | 60 | - uses: actions/download-artifact@v4.1.8 61 | with: 62 | name: magic-nix-cache-ARM64-Linux 63 | path: cache-binary-ARM64-Linux 64 | - name: Persist the cache binary 65 | run: cp ./cache-binary-ARM64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-Linux 66 | 67 | - name: Configure AWS Credentials 68 | uses: aws-actions/configure-aws-credentials@v2 69 | with: 70 | role-to-assume: ${{ secrets.AWS_S3_UPLOAD_ROLE }} 71 | aws-region: us-east-2 72 | - name: Publish Release (PR) 73 | env: 74 | AWS_BUCKET: ${{ secrets.AWS_S3_UPLOAD_BUCKET }} 75 | run: | 76 | .github/workflows/upload_s3.sh pr "${{ github.event.pull_request.number }}" "${{ github.event.pull_request.head.sha }}" 77 | -------------------------------------------------------------------------------- /.github/workflows/release-tags.yml: -------------------------------------------------------------------------------- 1 | name: Release Tags 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*.*.*" 7 | 8 | jobs: 9 | build: 10 | uses: ./.github/workflows/build.yaml 11 | 12 | release: 13 | needs: build 14 | 15 | concurrency: release 16 | runs-on: ubuntu-latest 17 | permissions: 18 | contents: write # In order to upload artifacts to GitHub releases 19 | id-token: write # In order to request a JWT for AWS auth 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v4 23 | 24 | - name: Create the artifacts directory 25 | run: rm -rf ./artifacts && mkdir ./artifacts 26 | 27 | - uses: actions/download-artifact@v4.1.8 28 | with: 29 | name: magic-nix-cache-ARM64-macOS 30 | path: cache-binary-ARM64-macOS 31 | - name: Persist the cache binary 32 | run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS 33 | 34 | - uses: actions/download-artifact@v4.1.8 35 | with: 36 | name: magic-nix-cache-X64-macOS 37 | path: cache-binary-X64-macOS 38 | - name: Persist the cache binary 39 | run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS 40 | 41 | - uses: actions/download-artifact@v4.1.8 42 | with: 43 | name: magic-nix-cache-X64-Linux 44 | path: cache-binary-X64-Linux 45 | - name: Persist the cache binary 46 | run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux 47 | 48 | - uses: actions/download-artifact@v4.1.8 49 | with: 50 | name: magic-nix-cache-ARM64-Linux 51 | path: cache-binary-ARM64-Linux 52 | - 
name: Persist the cache binary 53 | run: cp ./cache-binary-ARM64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-Linux 54 | 55 | - name: Configure AWS Credentials 56 | uses: aws-actions/configure-aws-credentials@v2 57 | with: 58 | role-to-assume: ${{ secrets.AWS_S3_UPLOAD_ROLE }} 59 | aws-region: us-east-2 60 | - name: Publish Release to S3 (Tag) 61 | env: 62 | AWS_BUCKET: ${{ secrets.AWS_S3_UPLOAD_BUCKET }} 63 | run: | 64 | .github/workflows/upload_s3.sh "tag" "$GITHUB_REF_NAME" "$GITHUB_SHA" 65 | - name: Publish Release to GitHub (Tag) 66 | uses: softprops/action-gh-release@v1 67 | with: 68 | fail_on_unmatched_files: true 69 | draft: true 70 | files: | 71 | artifacts/** 72 | -------------------------------------------------------------------------------- /.github/workflows/update-flake-lock.yaml: -------------------------------------------------------------------------------- 1 | name: update-flake-lock 2 | 3 | on: 4 | workflow_dispatch: # enable manual triggering 5 | schedule: 6 | - cron: "0 0 * * 0" # every Sunday at midnight 7 | 8 | jobs: 9 | lockfile: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: DeterminateSystems/determinate-nix-action@v3 14 | - uses: DeterminateSystems/flakehub-cache-action@main 15 | - uses: DeterminateSystems/update-flake-lock@main 16 | with: 17 | pr-title: Update flake.lock 18 | pr-labels: | 19 | dependencies 20 | automated 21 | -------------------------------------------------------------------------------- /.github/workflows/upload_s3.sh: -------------------------------------------------------------------------------- 1 | set -eu 2 | 3 | TYPE="$1" 4 | TYPE_ID="$2" 5 | GIT_ISH="$3" 6 | 7 | if [ "$TYPE" == "tag" ]; then 8 | DEST="${TYPE_ID}" 9 | else 10 | DEST="${TYPE}_${TYPE_ID}" 11 | fi 12 | 13 | is_tag() { 14 | if [[ "$GITHUB_REF_TYPE" == "tag" ]]; then 15 | return 0 16 | else 17 | return 1 18 | fi 19 | } 20 | 21 | # If the revision directory has already been created in S3 somehow, we don't want to reupload 22 | if aws s3 ls "$AWS_BUCKET"/"$GIT_ISH"/; then 23 | # Only exit if it's not a tag (since we're tagging a commit previously pushed to main) 24 | if ! is_tag; then 25 | echo "Revision $GIT_ISH was already uploaded; exiting" 26 | exit 1 27 | fi 28 | fi 29 | 30 | mkdir "$DEST" 31 | mkdir "$GIT_ISH" 32 | 33 | for artifact in $(find artifacts/ -type f); do 34 | chmod +x "$artifact" 35 | cp "$artifact" "$DEST"/ 36 | cp "$artifact" "$GIT_ISH"/ 37 | done 38 | 39 | # If any artifact already exists in S3 and the hash is the same, we don't want to reupload 40 | check_reupload() { 41 | dest="$1" 42 | 43 | for file in $(find "$dest" -type f); do 44 | artifact_path="$dest"/"$(basename "$artifact")" 45 | md5="$(md5sum "$artifact" | cut -d' ' -f1)" 46 | obj="$(aws s3api head-object --bucket "$AWS_BUCKET" --key "$artifact_path" || echo '{}')" 47 | obj_md5="$(jq -r .ETag <<<"$obj" | jq -r)" # head-object call returns ETag quoted, so `jq -r` again to unquote it 48 | 49 | if [[ "$md5" == "$obj_md5" ]]; then 50 | echo "Artifact $artifact was already uploaded; exiting" 51 | # If we already uploaded to a tag, that's probably bad 52 | is_tag && exit 1 || exit 0 53 | fi 54 | done 55 | } 56 | 57 | check_reupload "$DEST" 58 | if ! is_tag; then 59 | check_reupload "$GIT_ISH" 60 | fi 61 | 62 | aws s3 sync "$DEST"/ s3://"$AWS_BUCKET"/"$DEST"/ --acl public-read 63 | if ! 
is_tag; then 64 | aws s3 sync "$GIT_ISH"/ s3://"$AWS_BUCKET"/"$GIT_ISH"/ --acl public-read 65 | fi 66 | 67 | 68 | cat <<-EOF >> $GITHUB_STEP_SUMMARY 69 | This commit's magic-nix-cache binaries can be fetched from: 70 | 71 | Intel macOS: 72 | 73 | \`\`\` 74 | curl --output magic-nix-cache --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/magic-nix-cache/rev/$GIT_ISH/X64-macOS 75 | \`\`\` 76 | 77 | x86_64 Linux: 78 | 79 | \`\`\` 80 | curl --output magic-nix-cache --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/magic-nix-cache/rev/$GIT_ISH/X64-Linux 81 | \`\`\` 82 | 83 | Or generally from this ${TYPE}: 84 | 85 | Intel macOS: 86 | 87 | \`\`\` 88 | curl --output magic-nix-cache --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/magic-nix-cache/${TYPE}/${TYPE_ID}/X64-macOS 89 | \`\`\` 90 | 91 | x86_64 Linux: 92 | 93 | \`\`\` 94 | curl --output magic-nix-cache --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/magic-nix-cache/${TYPE}/${TYPE_ID}/X64-Linux 95 | \`\`\` 96 | EOF 97 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .direnv 2 | 3 | result* 4 | /target 5 | 6 | key.txt 7 | creds.json 8 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "gha-cache", 4 | "magic-nix-cache", 5 | ] 6 | resolver = "2" 7 | 8 | [profile.release] 9 | opt-level = 'z' 10 | strip = true 11 | lto = true 12 | panic = "abort" 13 | incremental = false 14 | codegen-units = 1 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2023 Determinate Systems, Inc., Zhaofeng Li 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | 204 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Magic Nix Cache 2 | 3 | > [!WARNING] 4 | > The [Magic Nix Cache will will stop working](https://determinate.systems/posts/magic-nix-cache-free-tier-eol) on **February 1st, 2025** unless you're on [GitHub Enterprise Server](https://github.com/enterprise). 
5 | > 6 | > You can upgrade to [FlakeHub Cache](https://flakehub.com/cache) and get **one month free** using the coupon code **`FHC`**. 7 | > 8 | > For more information, read [this blog post](https://determinate.systems/posts/magic-nix-cache-free-tier-eol/). 9 | 10 | Save 30-50%+ of CI time without any effort or cost. 11 | Use Magic Nix Cache, a totally free and zero-configuration binary cache for Nix on GitHub Actions. 12 | 13 | Add our [GitHub Action][action] after installing Nix, in your workflow, like this: 14 | 15 | ```yaml 16 | permissions: 17 | contents: read 18 | id-token: write 19 | steps: 20 | - uses: actions/checkout@v4 21 | - uses: DeterminateSystems/determinate-nix-action@v3 22 | - uses: DeterminateSystems/flakehub-cache-action@main 23 | - run: nix flake check 24 | ``` 25 | 26 | See [Usage](#usage) for a detailed example. 27 | 28 | ## Why use the Magic Nix Cache? 29 | 30 | Magic Nix Cache uses the GitHub Actions [built-in cache][ghacache] to share builds between Workflow runs, and has many advantages over alternatives. 31 | 32 | 1. Totally free: backed by GitHub Actions' cache, there is no additional service to pay for. 33 | 1. Zero configuration: add our action to your workflow. 34 | That's it. 35 | Everything built in your workflow will be cached. 36 | 1. No secrets: Forks and pull requests benefit from the cache, too. 37 | 1. Secure: Magic Nix Cache follows the [same semantics as the GitHub Actions cache][semantics], and malicious pull requests cannot pollute your project. 38 | 1. Private: The cache is stored in the GitHub Actions cache, not with an additional third party. 39 | 40 | > **Note:** the Magic Nix Cache doesn't offer a publicly available cache. 41 | > This means the cache is only usable in CI. 42 | > [Zero to Nix][z2n] has an article on binary caching if you want to [share Nix builds][z2ncache] with users outside of CI. 43 | 44 | ## Usage 45 | 46 | Add it to your Linux and macOS GitHub Actions workflows, like this: 47 | 48 | ```yaml 49 | name: CI 50 | 51 | on: 52 | push: 53 | pull_request: 54 | 55 | jobs: 56 | check: 57 | runs-on: ubuntu-22.04 58 | permissions: 59 | contents: read 60 | id-token: write 61 | steps: 62 | - uses: actions/checkout@v4 63 | - uses: DeterminateSystems/determinate-nix-action@v3 64 | - uses: DeterminateSystems/flakehub-cache-action@main 65 | - run: nix flake check 66 | ``` 67 | 68 | That's it. 69 | Everything built in your workflow will be cached. 70 | 71 | ## Usage Notes 72 | 73 | The GitHub Actions Cache has a rate limit on reads and writes. 74 | Occasionally, large projects or large rebuilds may exceed those rate-limits, and you'll see evidence of that in your logs. 75 | The error looks like this: 76 | 77 | ``` 78 | error: unable to download 'http://127.0.0.1:37515/<...>': HTTP error 418 79 | response body: 80 | GitHub API error: API error (429 Too Many Requests): StructuredApiError { message: "Request was blocked due to exceeding usage of resource 'Count' in namespace ''." } 81 | ``` 82 | 83 | The caching daemon and Nix both handle this gracefully, and won't cause your CI to fail. 84 | When the rate limit is exceeded while pulling dependencies, your workflow may perform more builds than usual. 85 | When the rate limit is exceeded while uploading to the cache, the remainder of those store paths will be uploaded on the next run of the workflow. 86 | 87 | ## Development 88 | 89 | This project depends on the GitHub Actions Cache API. 
90 | For local development, see `gha-cache/README.md` for more details on how to obtain the required tokens. 91 | 92 | ```shell 93 | cargo run -- -c creds.json --upstream https://cache.nixos.org 94 | cargo build --release --target x86_64-unknown-linux-gnu 95 | cargo build --release --target aarch64-unknown-linux-gnu 96 | nix copy --to 'http://127.0.0.1:3000' $(which bash) 97 | nix-store --store $PWD/test-root --extra-substituters 'http://localhost:3000' --option require-sigs false -r $(which bash) 98 | ``` 99 | 100 | ## Acknowledgement 101 | 102 | Magic Nix Cache is a collaboration with [Zhaofeng Li][zhaofeng]. 103 | Zhaofeng is a major contributor to the Nix community, having authored [Attic][attic] and [Colmena][colmena]. 104 | We'd like to express our deep gratitude to Zhaofeng for his tremendous work on this project. 105 | 106 | ## Telemetry 107 | 108 | The goal of Magic Nix Cache is to help teams save time in CI. 109 | The cache daemon collects a little bit of telemetry information to help us make that true. 110 | 111 | Here is a table of the [telemetry data we collect][telemetry]: 112 | 113 | | Field | Use | 114 | | -------------------------------- | ---------------------------------------------------------------------------------------------------------------- | 115 | | `distinct_id` | An opaque string that represents your project, anonymized by sha256 hashing repository and organization details. | 116 | | `version` | The version of Magic Nix Cache. | 117 | | `is_ci` | Whether the Magic Nix Cache is being used in CI (i.e.: GitHub Actions). | 118 | | `elapsed_seconds` | How long the cache daemon was running. | 119 | | `narinfos_served` | Number of narinfos served from the cache daemon. | 120 | | `narinfos_sent_upstream` | Number of narinfo requests forwarded to the upstream cache. | 121 | | `narinfos_negative_cache_hits` | Effectiveness of an internal data structure which minimizes cache requests. | 122 | | `narinfos_negative_cache_misses` | Effectiveness of an internal data structure which minimizes cache requests. | 123 | | `narinfos_uploaded` | Number of new narinfo files cached during this run. | 124 | | `nars_served` | Number of nars served from the cache daemon. | 125 | | `nars_sent_upstream` | Number of nar requests forwarded to the upstream cache. | 126 | | `nars_uploaded` | Number of nars uploaded during this run. | 127 | | `num_original_paths` | Number of store paths that existed on startup. | 128 | | `num_final_paths` | Number of store paths that existed on shutdown. | 129 | | `num_new_paths` | The difference between `num_original_paths` and `num_final_paths`. | 130 | 131 | To disable diagnostic reporting, set the diagnostics URL to an empty string by passing `--diagnostic-endpoint=""`. 132 | 133 | You can read the full privacy policy for [Determinate Systems][detsys], the creators of this tool and the [Determinate Nix Installer][installer], [here][privacy]. 
134 | 135 | [detsys]: https://determinate.systems/ 136 | [action]: https://github.com/DeterminateSystems/magic-nix-cache-action/ 137 | [installer]: https://github.com/DeterminateSystems/nix-installer/ 138 | [ghacache]: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows 139 | [privacy]: https://determinate.systems/policies/privacy 140 | [telemetry]: https://github.com/DeterminateSystems/magic-nix-cache/blob/main/magic-nix-cache/src/telemetry.rs 141 | [semantics]: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache 142 | [z2ncache]: https://zero-to-nix.com/concepts/caching#binary-caches 143 | [zhaofeng]: https://github.com/zhaofengli/ 144 | [attic]: https://github.com/zhaofengli/attic 145 | [colmena]: https://github.com/zhaofengli/colmena 146 | [z2n]: https://zero-to-nix.com 147 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "crane": { 4 | "locked": { 5 | "lastModified": 1741479724, 6 | "narHash": "sha256-fnyETBKSVRa5abjOiRG/IAzKZq5yX8U6oRrHstPl4VM=", 7 | "rev": "60202a2e3597a3d91f5e791aab03f45470a738b5", 8 | "revCount": 709, 9 | "type": "tarball", 10 | "url": "https://api.flakehub.com/f/pinned/ipetkov/crane/0.20.2/0195784b-915b-7d2d-915d-ab02d1112ef9/source.tar.gz" 11 | }, 12 | "original": { 13 | "type": "tarball", 14 | "url": "https://flakehub.com/f/ipetkov/crane/%2A" 15 | } 16 | }, 17 | "flake-compat": { 18 | "flake": false, 19 | "locked": { 20 | "lastModified": 1733328505, 21 | "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", 22 | "owner": "edolstra", 23 | "repo": "flake-compat", 24 | "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", 25 | "type": "github" 26 | }, 27 | "original": { 28 | "owner": "edolstra", 29 | "repo": "flake-compat", 30 | "type": "github" 31 | } 32 | }, 33 | "flake-parts": { 34 | "inputs": { 35 | "nixpkgs-lib": [ 36 | "nix", 37 | "nixpkgs" 38 | ] 39 | }, 40 | "locked": { 41 | "lastModified": 1733312601, 42 | "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", 43 | "owner": "hercules-ci", 44 | "repo": "flake-parts", 45 | "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", 46 | "type": "github" 47 | }, 48 | "original": { 49 | "owner": "hercules-ci", 50 | "repo": "flake-parts", 51 | "type": "github" 52 | } 53 | }, 54 | "git-hooks-nix": { 55 | "inputs": { 56 | "flake-compat": [ 57 | "nix" 58 | ], 59 | "gitignore": [ 60 | "nix" 61 | ], 62 | "nixpkgs": [ 63 | "nix", 64 | "nixpkgs" 65 | ], 66 | "nixpkgs-stable": [ 67 | "nix", 68 | "nixpkgs" 69 | ] 70 | }, 71 | "locked": { 72 | "lastModified": 1734279981, 73 | "narHash": "sha256-NdaCraHPp8iYMWzdXAt5Nv6sA3MUzlCiGiR586TCwo0=", 74 | "owner": "cachix", 75 | "repo": "git-hooks.nix", 76 | "rev": "aa9f40c906904ebd83da78e7f328cd8aeaeae785", 77 | "type": "github" 78 | }, 79 | "original": { 80 | "owner": "cachix", 81 | "repo": "git-hooks.nix", 82 | "type": "github" 83 | } 84 | }, 85 | "nix": { 86 | "inputs": { 87 | "flake-compat": "flake-compat", 88 | "flake-parts": "flake-parts", 89 | "git-hooks-nix": "git-hooks-nix", 90 | "nixpkgs": "nixpkgs", 91 | "nixpkgs-23-11": "nixpkgs-23-11", 92 | "nixpkgs-regression": "nixpkgs-regression" 93 | }, 94 | "locked": { 95 | "lastModified": 1742824067, 96 | "narHash": "sha256-rBPulEBpn4IiqkPsetuh7BRzT2iGCzZYnogTAsbrvhU=", 97 | "rev": "9cb662df7442a1e2c4600fb8ecb2ad613ebc5a95", 98 | "revCount": 19496, 
99 | "type": "tarball", 100 | "url": "https://api.flakehub.com/f/pinned/NixOS/nix/2.27.1/0195c8c5-1964-7a31-b025-ebf9bfeef991/source.tar.gz" 101 | }, 102 | "original": { 103 | "type": "tarball", 104 | "url": "https://flakehub.com/f/NixOS/nix/2" 105 | } 106 | }, 107 | "nixpkgs": { 108 | "locked": { 109 | "lastModified": 1734359947, 110 | "narHash": "sha256-1Noao/H+N8nFB4Beoy8fgwrcOQLVm9o4zKW1ODaqK9E=", 111 | "owner": "NixOS", 112 | "repo": "nixpkgs", 113 | "rev": "48d12d5e70ee91fe8481378e540433a7303dbf6a", 114 | "type": "github" 115 | }, 116 | "original": { 117 | "owner": "NixOS", 118 | "ref": "release-24.11", 119 | "repo": "nixpkgs", 120 | "type": "github" 121 | } 122 | }, 123 | "nixpkgs-23-11": { 124 | "locked": { 125 | "lastModified": 1717159533, 126 | "narHash": "sha256-oamiKNfr2MS6yH64rUn99mIZjc45nGJlj9eGth/3Xuw=", 127 | "owner": "NixOS", 128 | "repo": "nixpkgs", 129 | "rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446", 130 | "type": "github" 131 | }, 132 | "original": { 133 | "owner": "NixOS", 134 | "repo": "nixpkgs", 135 | "rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446", 136 | "type": "github" 137 | } 138 | }, 139 | "nixpkgs-regression": { 140 | "locked": { 141 | "lastModified": 1643052045, 142 | "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", 143 | "owner": "NixOS", 144 | "repo": "nixpkgs", 145 | "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", 146 | "type": "github" 147 | }, 148 | "original": { 149 | "owner": "NixOS", 150 | "repo": "nixpkgs", 151 | "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", 152 | "type": "github" 153 | } 154 | }, 155 | "nixpkgs_2": { 156 | "locked": { 157 | "lastModified": 1745234285, 158 | "narHash": "sha256-GfpyMzxwkfgRVN0cTGQSkTC0OHhEkv3Jf6Tcjm//qZ0=", 159 | "rev": "c11863f1e964833214b767f4a369c6e6a7aba141", 160 | "revCount": 787278, 161 | "type": "tarball", 162 | "url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.787278%2Brev-c11863f1e964833214b767f4a369c6e6a7aba141/01965f13-2a6b-76bb-86a2-9cdd58892f68/source.tar.gz" 163 | }, 164 | "original": { 165 | "type": "tarball", 166 | "url": "https://flakehub.com/f/NixOS/nixpkgs/0.1" 167 | } 168 | }, 169 | "root": { 170 | "inputs": { 171 | "crane": "crane", 172 | "nix": "nix", 173 | "nixpkgs": "nixpkgs_2" 174 | } 175 | } 176 | }, 177 | "root": "root", 178 | "version": 7 179 | } 180 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "GitHub Actions-powered Nix binary cache"; 3 | 4 | inputs = { 5 | nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1"; 6 | 7 | crane.url = "https://flakehub.com/f/ipetkov/crane/*"; 8 | 9 | nix.url = "https://flakehub.com/f/NixOS/nix/2"; 10 | }; 11 | 12 | outputs = inputs: 13 | let 14 | supportedSystems = [ 15 | "aarch64-linux" 16 | "x86_64-linux" 17 | "aarch64-darwin" 18 | "x86_64-darwin" 19 | ]; 20 | 21 | forEachSupportedSystem = f: inputs.nixpkgs.lib.genAttrs supportedSystems (system: f rec { 22 | pkgs = import inputs.nixpkgs { 23 | inherit system; 24 | overlays = [ 25 | inputs.self.overlays.default 26 | ]; 27 | }; 28 | inherit system; 29 | }); 30 | in 31 | { 32 | 33 | overlays.default = final: prev: 34 | let 35 | craneLib = inputs.crane.mkLib final; 36 | crateName = craneLib.crateNameFromCargoToml { 37 | cargoToml = ./magic-nix-cache/Cargo.toml; 38 | }; 39 | 40 | commonArgs = { 41 | inherit (crateName) pname version; 42 | src = inputs.self; 43 | 44 | nativeBuildInputs = with final; [ 45 | pkg-config 
46 | ]; 47 | 48 | buildInputs = [ 49 | inputs.nix.packages.${final.stdenv.system}.default 50 | final.boost 51 | ]; 52 | }; 53 | 54 | cargoArtifacts = craneLib.buildDepsOnly commonArgs; 55 | in 56 | { 57 | magic-nix-cache = craneLib.buildPackage (commonArgs // { 58 | inherit cargoArtifacts; 59 | }); 60 | }; 61 | 62 | packages = forEachSupportedSystem ({ pkgs, ... }: rec { 63 | magic-nix-cache = pkgs.magic-nix-cache; 64 | default = magic-nix-cache; 65 | 66 | veryLongChain = 67 | let 68 | ctx = ./README.md; 69 | 70 | # Function to write the current date to a file 71 | startFile = 72 | pkgs.stdenv.mkDerivation { 73 | name = "start-file"; 74 | buildCommand = '' 75 | cat ${ctx} > $out 76 | ''; 77 | }; 78 | 79 | # Recursive function to create a chain of derivations 80 | createChain = n: startFile: 81 | pkgs.stdenv.mkDerivation { 82 | name = "chain-${toString n}"; 83 | src = 84 | if n == 0 then 85 | startFile 86 | else createChain (n - 1) startFile; 87 | buildCommand = '' 88 | echo $src > $out 89 | ''; 90 | }; 91 | 92 | in 93 | # Starting point of the chain 94 | createChain 200 startFile; 95 | }); 96 | 97 | devShells = forEachSupportedSystem ({ system, pkgs }: { 98 | default = pkgs.mkShell { 99 | packages = with pkgs; [ 100 | rustc 101 | cargo 102 | clippy 103 | rustfmt 104 | rust-analyzer 105 | 106 | inputs.nix.packages.${stdenv.system}.default # for linking attic 107 | boost # for linking attic 108 | bashInteractive 109 | pkg-config 110 | 111 | cargo-bloat 112 | cargo-edit 113 | cargo-udeps 114 | cargo-watch 115 | bacon 116 | 117 | age 118 | ]; 119 | 120 | RUST_SRC_PATH = "${pkgs.rustPlatform.rustcSrc}/library"; 121 | }; 122 | }); 123 | }; 124 | } 125 | -------------------------------------------------------------------------------- /gha-cache/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "gha-cache" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "Apache-2.0" 6 | 7 | [dependencies] 8 | async-trait = "0.1.68" 9 | bytes = { version = "1.4.0", default-features = false } 10 | derivative = { version = "2.2.0", default-features = false } 11 | futures = { version = "0.3.28", default-features = false, features = ["alloc"] } 12 | hex = "0.4.3" 13 | rand = { version = "0.8.5", default-features = false, features = ["std", "std_rng"] } 14 | reqwest = { version = "0.12.5", default-features = false, features = ["json", "rustls-tls-native-roots", "stream", "trust-dns"] } 15 | serde = { version = "1.0.162", default-features = false, features = ["derive"] } 16 | serde_json = { version = "1.0.96", default-features = false } 17 | sha2 = { version = "0.10.6", default-features = false } 18 | thiserror = "1.0.40" 19 | tokio = { version = "1.44.2", default-features = false, features = ["io-util"] } 20 | tracing = { version = "0.1.37", default-features = false } 21 | unicode-bom = "2.0.2" 22 | 23 | [dev-dependencies] 24 | anyhow = "1.0.71" 25 | -------------------------------------------------------------------------------- /gha-cache/README.md: -------------------------------------------------------------------------------- 1 | # gha-cache 2 | 3 | `gha-cache` provides an async API to the GitHub Actions Cache API. 4 | You can upload blobs with `AsyncRead` streams and obtain presigned URLs to download them. 
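To give a feel for the flow before the details below, here is a minimal sketch of a round trip through the client in `src/api.rs`. It assumes that `Api`, `Credentials`, and the `api` module are exposed from `src/lib.rs`, that the throttling-callback type is `Arc<Box<dyn Fn() + Send + Sync>>`, and that a `Credentials` value has already been obtained (see `src/credentials.rs`); the key and version bytes are purely illustrative.

```rust
use std::sync::Arc;

use gha_cache::{api::CircuitBreakerTrippedCallback, Api, Credentials};

async fn round_trip(credentials: Credentials) -> anyhow::Result<()> {
    // Invoked once if the GitHub Actions Cache starts returning 429s and the
    // client trips its circuit breaker.
    let on_throttle: CircuitBreakerTrippedCallback =
        Arc::new(Box::new(|| eprintln!("GitHub Actions Cache throttled us")));

    let mut api = Api::new(credentials, on_throttle)?;

    // Mix extra bytes into the cache version (the namespace) so that
    // incompatible blobs never collide.
    api.mutate_version(b"x86_64-linux");

    // Reserve a key (with a random suffix so re-uploads don't conflict) and
    // stream the payload up; anything implementing `AsyncRead` works, a byte
    // slice is used here for brevity.
    let allocation = api.allocate_file_with_random_suffix("example-blob").await?;
    let uploaded = api.upload_file(allocation, &b"hello, cache"[..]).await?;
    println!("uploaded {uploaded} bytes");

    // Later, possibly from another workflow run: resolve a key prefix to a
    // presigned download URL.
    if let Some(url) = api.get_file_url(&["example-blob"]).await? {
        println!("fetch it from {url}");
    }

    Ok(())
}
```

Uploads are chunked (32 MiB per chunk, with at most four chunks in flight at once), and every request path short-circuits with an error once the 429 circuit breaker has tripped, so a throttled run fails fast instead of hammering the API.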
5 | 6 | ## Introduction 7 | 8 | The GitHub Actions Cache (hereinafter GHAC) service stores binary blobs [identified](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#matching-a-cache-key) by the following 3-tuple: 9 | 10 | - **Cache Key**: The developer-specified name of the blob. 11 | - **Cache Version**: A string identifying conditions that affect compatibility of the blob. It works like a namespace. 12 | - The official implementation uses a SHA256 hash of the paths and the compression method, but it can be anything. 13 | - In this crate, we let the user feed in arbitrary bytes to mutate the hash. 14 | - **Cache Scope**: The branch containing the workflow run that uploaded the blob 15 | 16 | ### APIs 17 | 18 | Two sets of APIs are in use: 19 | 20 | - [GitHub Actions Cache API](https://github.com/actions/toolkit/blob/457303960f03375db6f033e214b9f90d79c3fe5c/packages/cache/src/internal/cacheHttpClient.ts#L38): Private API used by GHAC. This API allows uploading and downloading blobs. 21 | - Endpoint: `$ACTIONS_CACHE_URL` 22 | - Token: `$ACTIONS_RUNTIME_TOKEN` 23 | - [GitHub REST API](https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#delete-github-actions-caches-for-a-repository-using-a-cache-key): Public API. This API allows listing and deleting blobs. 24 | - Endpoint: `$GITHUB_API_URL` / `https://api.github.com` 25 | - Token: `${{ secrets.GITHUB_TOKEN }}` 26 | 27 | This crate supports only the former API. 28 | We should contribute support for the latter to [Octocrab](https://github.com/XAMPPRocky/octocrab). 29 | 30 | ## Quick Start 31 | 32 | Since GHAC uses private APIs that use special tokens for authentication, we need to get them from a workflow run. 33 | 34 | The easiest way is with the `keygen` workflow in this repo. 35 | Generate an `age` encryption key with `nix shell nixpkgs#age --command age-keygen -o key.txt`, and add the Public Key as a repository secret named `AGE_PUBLIC_KEY`. 36 | Then, trigger the `keygen` workflow which will print out a command that will let you decrypt the credentials. 37 | -------------------------------------------------------------------------------- /gha-cache/src/api.rs: -------------------------------------------------------------------------------- 1 | //! GitHub Actions Cache API client. 2 | //! 3 | //! We expose a high-level API that deals with "files." 4 | 5 | use std::fmt; 6 | #[cfg(debug_assertions)] 7 | use std::sync::atomic::AtomicUsize; 8 | use std::sync::atomic::{AtomicBool, Ordering}; 9 | use std::sync::Arc; 10 | 11 | use async_trait::async_trait; 12 | use bytes::{Bytes, BytesMut}; 13 | use futures::future; 14 | use rand::{distributions::Alphanumeric, Rng}; 15 | use reqwest::{ 16 | header::{HeaderMap, HeaderValue, CONTENT_RANGE, CONTENT_TYPE}, 17 | Client, StatusCode, 18 | }; 19 | use serde::{de::DeserializeOwned, Deserialize, Serialize}; 20 | use sha2::{Digest, Sha256}; 21 | use thiserror::Error; 22 | use tokio::{io::AsyncRead, sync::Semaphore}; 23 | use unicode_bom::Bom; 24 | 25 | use crate::credentials::Credentials; 26 | use crate::util::read_chunk_async; 27 | 28 | /// The API version we implement. 29 | /// 30 | /// 31 | const API_VERSION: &str = "6.0-preview.1"; 32 | 33 | /// The User-Agent string for the client. 34 | /// 35 | /// We want to be polite :) 36 | const USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")); 37 | 38 | /// The default cache version/namespace. 
39 | const DEFAULT_VERSION: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")); 40 | 41 | /// The chunk size in bytes. 42 | /// 43 | /// We greedily read this much from the input stream at a time. 44 | const CHUNK_SIZE: usize = 32 * 1024 * 1024; 45 | 46 | /// The number of chunks to upload at the same time. 47 | const MAX_CONCURRENCY: usize = 4; 48 | 49 | type Result = std::result::Result; 50 | 51 | pub type CircuitBreakerTrippedCallback = Arc>; 52 | 53 | /// An API error. 54 | #[derive(Error, Debug)] 55 | pub enum Error { 56 | #[error("Failed to initialize the client: {0}")] 57 | InitError(Box), 58 | 59 | #[error( 60 | "GitHub Actions Cache throttled Magic Nix Cache. Not trying to use it again on this run." 61 | )] 62 | CircuitBreakerTripped, 63 | 64 | #[error("Request error: {0}")] 65 | RequestError(#[from] reqwest::Error), // TODO: Better errors 66 | 67 | #[error("Failed to decode response ({status}): {error}")] 68 | DecodeError { 69 | status: StatusCode, 70 | bytes: Bytes, 71 | error: serde_json::Error, 72 | }, 73 | 74 | #[error("API error ({status}): {info}")] 75 | ApiError { 76 | status: StatusCode, 77 | info: ApiErrorInfo, 78 | }, 79 | 80 | #[error("I/O error: {0}, context: {1}")] 81 | IoError(std::io::Error, String), 82 | 83 | #[error("Too many collisions")] 84 | TooManyCollisions, 85 | } 86 | 87 | pub struct Api { 88 | /// Credentials to access the cache. 89 | credentials: Credentials, 90 | 91 | /// The version used for all caches. 92 | /// 93 | /// This value should be tied to everything that affects 94 | /// the compatibility of the cached objects. 95 | version: String, 96 | 97 | /// The hasher of the version. 98 | version_hasher: Sha256, 99 | 100 | /// The HTTP client for authenticated requests. 101 | client: Client, 102 | 103 | /// The concurrent upload limit. 104 | concurrency_limit: Arc, 105 | 106 | circuit_breaker_429_tripped: Arc, 107 | 108 | circuit_breaker_429_tripped_callback: CircuitBreakerTrippedCallback, 109 | 110 | /// Backend request statistics. 111 | #[cfg(debug_assertions)] 112 | stats: RequestStats, 113 | } 114 | 115 | /// A file allocation. 116 | #[derive(Debug, Clone, Copy)] 117 | pub struct FileAllocation(CacheId); 118 | 119 | /// The ID of a cache. 120 | #[derive(Debug, Clone, Copy, Serialize, Deserialize)] 121 | #[serde(transparent)] 122 | struct CacheId(pub i64); 123 | 124 | /// An API error. 125 | #[derive(Debug, Clone)] 126 | pub enum ApiErrorInfo { 127 | /// An error that we couldn't decode. 128 | Unstructured(Bytes), 129 | 130 | /// A structured API error. 131 | Structured(StructuredApiError), 132 | } 133 | 134 | /// A structured API error. 135 | #[derive(Debug, Clone, Deserialize)] 136 | #[allow(dead_code)] 137 | pub struct StructuredApiError { 138 | /// A human-readable error message. 139 | message: String, 140 | } 141 | 142 | /// A cache entry. 143 | /// 144 | /// A valid entry looks like: 145 | /// 146 | /// ```text 147 | /// ArtifactCacheEntry { 148 | /// cache_key: Some("hello-224".to_string()), 149 | /// scope: Some("refs/heads/main".to_string()), 150 | /// cache_version: Some("gha-cache/0.1.0".to_string()), 151 | /// creation_time: Some("2023-01-01T00:00:00.0000000Z".to_string()), 152 | /// archive_location: Some( 153 | /// "https://[...].blob.core.windows.net/[...]/[...]?sv=2019-07-07&sr=b&sig=[...]".to_string() 154 | /// ), 155 | /// } 156 | /// ``` 157 | #[derive(Debug, Clone, Deserialize)] 158 | #[allow(dead_code)] 159 | struct ArtifactCacheEntry { 160 | /// The cache key. 
161 | #[serde(rename = "cacheKey")] 162 | cache_key: Option, 163 | 164 | /// The scope of the cache. 165 | /// 166 | /// It appears to be the branch name. 167 | scope: Option, 168 | 169 | /// The version of the cache. 170 | #[serde(rename = "cacheVersion")] 171 | cache_version: Option, 172 | 173 | /// The creation timestamp. 174 | #[serde(rename = "creationTime")] 175 | creation_time: Option, 176 | 177 | /// The archive location. 178 | #[serde(rename = "archiveLocation")] 179 | archive_location: String, 180 | } 181 | 182 | #[derive(Debug, Clone, Serialize)] 183 | struct ReserveCacheRequest<'a> { 184 | /// The cache key. 185 | key: &'a str, 186 | 187 | /// The cache version. 188 | /// 189 | /// This value should be tied to everything that affects 190 | /// the compatibility of the cached objects. 191 | version: &'a str, 192 | 193 | /// The size of the cache, in bytes. 194 | #[serde(rename = "cacheSize")] 195 | #[serde(skip_serializing_if = "Option::is_none")] 196 | cache_size: Option, 197 | } 198 | 199 | #[derive(Debug, Clone, Deserialize)] 200 | struct ReserveCacheResponse { 201 | /// The reserved cache ID. 202 | #[serde(rename = "cacheId")] 203 | cache_id: CacheId, 204 | } 205 | 206 | #[derive(Debug, Clone, Serialize)] 207 | struct CommitCacheRequest { 208 | size: usize, 209 | } 210 | 211 | #[cfg(debug_assertions)] 212 | #[derive(Default, Debug)] 213 | struct RequestStats { 214 | get: AtomicUsize, 215 | post: AtomicUsize, 216 | patch: AtomicUsize, 217 | } 218 | 219 | #[async_trait] 220 | trait ResponseExt { 221 | async fn check(self) -> Result<()>; 222 | async fn check_json(self) -> Result; 223 | } 224 | 225 | impl Error { 226 | fn init_error(e: E) -> Self 227 | where 228 | E: std::error::Error + Send + Sync + 'static, 229 | { 230 | Self::InitError(Box::new(e)) 231 | } 232 | } 233 | 234 | impl fmt::Display for ApiErrorInfo { 235 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 236 | match self { 237 | Self::Unstructured(bytes) => { 238 | write!(f, "[Unstructured] {}", String::from_utf8_lossy(bytes)) 239 | } 240 | Self::Structured(e) => { 241 | write!(f, "{:?}", e) 242 | } 243 | } 244 | } 245 | } 246 | 247 | impl Api { 248 | pub fn new( 249 | credentials: Credentials, 250 | circuit_breaker_429_tripped_callback: CircuitBreakerTrippedCallback, 251 | ) -> Result { 252 | let mut headers = HeaderMap::new(); 253 | let auth_header = { 254 | let mut h = HeaderValue::from_str(&format!("Bearer {}", credentials.runtime_token)) 255 | .map_err(Error::init_error)?; 256 | h.set_sensitive(true); 257 | h 258 | }; 259 | headers.insert("Authorization", auth_header); 260 | headers.insert( 261 | "Accept", 262 | HeaderValue::from_str(&format!("application/json;api-version={}", API_VERSION)) 263 | .map_err(Error::init_error)?, 264 | ); 265 | 266 | let client = Client::builder() 267 | .user_agent(USER_AGENT) 268 | .default_headers(headers) 269 | .build() 270 | .map_err(Error::init_error)?; 271 | 272 | let version_hasher = Sha256::new_with_prefix(DEFAULT_VERSION.as_bytes()); 273 | let initial_version = hex::encode(version_hasher.clone().finalize()); 274 | 275 | Ok(Self { 276 | credentials, 277 | version: initial_version, 278 | version_hasher, 279 | client, 280 | concurrency_limit: Arc::new(Semaphore::new(MAX_CONCURRENCY)), 281 | circuit_breaker_429_tripped: Arc::new(AtomicBool::from(false)), 282 | circuit_breaker_429_tripped_callback, 283 | #[cfg(debug_assertions)] 284 | stats: Default::default(), 285 | }) 286 | } 287 | 288 | pub fn circuit_breaker_tripped(&self) -> bool { 289 | 
self.circuit_breaker_429_tripped.load(Ordering::Relaxed) 290 | } 291 | 292 | /// Mutates the cache version/namespace. 293 | pub fn mutate_version(&mut self, data: &[u8]) { 294 | self.version_hasher.update(data); 295 | self.version = hex::encode(self.version_hasher.clone().finalize()); 296 | } 297 | 298 | // Public 299 | 300 | /// Allocates a file. 301 | pub async fn allocate_file(&self, key: &str) -> Result { 302 | let reservation = self.reserve_cache(key, None).await?; 303 | Ok(FileAllocation(reservation.cache_id)) 304 | } 305 | 306 | /// Allocates a file with a random suffix. 307 | /// 308 | /// This is a hack to allow for easy "overwriting" without 309 | /// deleting the original cache. 310 | pub async fn allocate_file_with_random_suffix(&self, key: &str) -> Result { 311 | for _ in 0..5 { 312 | let nonce: String = rand::thread_rng() 313 | .sample_iter(&Alphanumeric) 314 | .take(4) 315 | .map(char::from) 316 | .collect(); 317 | 318 | let full_key = format!("{}-{}", key, nonce); 319 | 320 | match self.allocate_file(&full_key).await { 321 | Ok(allocation) => { 322 | return Ok(allocation); 323 | } 324 | Err(e) => { 325 | if let Error::ApiError { 326 | info: ApiErrorInfo::Structured(structured), 327 | .. 328 | } = &e 329 | { 330 | if structured.message.contains("Cache already exists") { 331 | continue; 332 | } 333 | } 334 | return Err(e); 335 | } 336 | } 337 | } 338 | 339 | Err(Error::TooManyCollisions) 340 | } 341 | 342 | /// Uploads a file. Returns the size of the file. 343 | pub async fn upload_file(&self, allocation: FileAllocation, mut stream: S) -> Result 344 | where 345 | S: AsyncRead + Unpin + Send, 346 | { 347 | if self.circuit_breaker_tripped() { 348 | return Err(Error::CircuitBreakerTripped); 349 | } 350 | 351 | let mut offset = 0; 352 | let mut futures = Vec::new(); 353 | loop { 354 | let buf = BytesMut::with_capacity(CHUNK_SIZE); 355 | let chunk = read_chunk_async(&mut stream, buf) 356 | .await 357 | .map_err(|e| Error::IoError(e, "Reading a chunk during upload".to_string()))?; 358 | if chunk.is_empty() { 359 | offset += chunk.len(); 360 | break; 361 | } 362 | 363 | if offset == chunk.len() { 364 | tracing::trace!("Received first chunk for cache {:?}", allocation.0); 365 | } 366 | 367 | let chunk_len = chunk.len(); 368 | 369 | #[cfg(debug_assertions)] 370 | self.stats.patch.fetch_add(1, Ordering::SeqCst); 371 | 372 | futures.push({ 373 | let client = self.client.clone(); 374 | let concurrency_limit = self.concurrency_limit.clone(); 375 | let circuit_breaker_429_tripped = self.circuit_breaker_429_tripped.clone(); 376 | let circuit_breaker_429_tripped_callback = 377 | self.circuit_breaker_429_tripped_callback.clone(); 378 | let url = self.construct_url(&format!("caches/{}", allocation.0 .0)); 379 | 380 | tokio::task::spawn(async move { 381 | let permit = concurrency_limit 382 | .acquire() 383 | .await 384 | .expect("failed to acquire concurrency semaphore permit"); 385 | 386 | tracing::trace!( 387 | "Starting uploading chunk {}-{}", 388 | offset, 389 | offset + chunk_len - 1 390 | ); 391 | 392 | let r = client 393 | .patch(url) 394 | .header(CONTENT_TYPE, "application/octet-stream") 395 | .header( 396 | CONTENT_RANGE, 397 | format!("bytes {}-{}/*", offset, offset + chunk.len() - 1), 398 | ) 399 | .body(chunk) 400 | .send() 401 | .await? 
402 | .check() 403 | .await; 404 | 405 | tracing::trace!( 406 | "Finished uploading chunk {}-{}: {:?}", 407 | offset, 408 | offset + chunk_len - 1, 409 | r 410 | ); 411 | 412 | drop(permit); 413 | 414 | circuit_breaker_429_tripped 415 | .check_result(&r, &circuit_breaker_429_tripped_callback); 416 | 417 | r 418 | }) 419 | }); 420 | 421 | offset += chunk_len; 422 | } 423 | 424 | future::join_all(futures) 425 | .await 426 | .into_iter() 427 | .try_for_each(|join_result| { 428 | join_result.expect("failed collecting a join result during parallel upload") 429 | })?; 430 | 431 | tracing::debug!("Received all chunks for cache {:?}", allocation.0); 432 | 433 | self.commit_cache(allocation.0, offset).await?; 434 | 435 | Ok(offset) 436 | } 437 | 438 | /// Downloads a file based on a list of key prefixes. 439 | pub async fn get_file_url(&self, keys: &[&str]) -> Result> { 440 | if self.circuit_breaker_tripped() { 441 | return Err(Error::CircuitBreakerTripped); 442 | } 443 | 444 | Ok(self 445 | .get_cache_entry(keys) 446 | .await? 447 | .map(|entry| entry.archive_location)) 448 | } 449 | 450 | /// Dumps statistics. 451 | /// 452 | /// This is for debugging only. 453 | pub fn dump_stats(&self) { 454 | #[cfg(debug_assertions)] 455 | tracing::trace!("Request stats: {:?}", self.stats); 456 | } 457 | 458 | // Private 459 | 460 | /// Retrieves a cache based on a list of key prefixes. 461 | async fn get_cache_entry(&self, keys: &[&str]) -> Result> { 462 | if self.circuit_breaker_tripped() { 463 | return Err(Error::CircuitBreakerTripped); 464 | } 465 | 466 | #[cfg(debug_assertions)] 467 | self.stats.get.fetch_add(1, Ordering::SeqCst); 468 | 469 | let res = self 470 | .client 471 | .get(self.construct_url("cache")) 472 | .query(&[("version", &self.version), ("keys", &keys.join(","))]) 473 | .send() 474 | .await? 475 | .check_json() 476 | .await; 477 | 478 | self.circuit_breaker_429_tripped 479 | .check_result(&res, &self.circuit_breaker_429_tripped_callback); 480 | 481 | match res { 482 | Ok(entry) => Ok(Some(entry)), 483 | Err(Error::DecodeError { status, .. }) if status == StatusCode::NO_CONTENT => Ok(None), 484 | Err(e) => Err(e), 485 | } 486 | } 487 | 488 | /// Reserves a new cache. 489 | /// 490 | /// The cache key should be unique. A cache cannot be created 491 | /// again if the same (cache_name, cache_version) pair already 492 | /// exists. 493 | async fn reserve_cache( 494 | &self, 495 | key: &str, 496 | cache_size: Option, 497 | ) -> Result { 498 | if self.circuit_breaker_tripped() { 499 | return Err(Error::CircuitBreakerTripped); 500 | } 501 | 502 | tracing::debug!("Reserving cache for {}", key); 503 | 504 | let req = ReserveCacheRequest { 505 | key, 506 | version: &self.version, 507 | cache_size, 508 | }; 509 | 510 | #[cfg(debug_assertions)] 511 | self.stats.post.fetch_add(1, Ordering::SeqCst); 512 | 513 | let res = self 514 | .client 515 | .post(self.construct_url("caches")) 516 | .json(&req) 517 | .send() 518 | .await? 519 | .check_json() 520 | .await; 521 | 522 | self.circuit_breaker_429_tripped 523 | .check_result(&res, &self.circuit_breaker_429_tripped_callback); 524 | 525 | res 526 | } 527 | 528 | /// Finalizes uploading to a cache. 
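///
/// Together with [`Api::reserve_cache`] and [`Api::upload_file`] above, this is the last
/// step of the three-step upload flow against the GitHub Actions cache API:
///
/// 1. `POST {cache_url}/_apis/artifactcache/caches` reserves a cache ID for a
///    (key, version) pair.
/// 2. `PATCH {cache_url}/_apis/artifactcache/caches/{id}` uploads each chunk with a
///    `Content-Range: bytes <start>-<end>/*` header.
/// 3. `POST {cache_url}/_apis/artifactcache/caches/{id}` with the total size (this
///    function) finalizes the entry so it becomes visible to lookups.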
529 | async fn commit_cache(&self, cache_id: CacheId, size: usize) -> Result<()> { 530 | if self.circuit_breaker_tripped() { 531 | return Err(Error::CircuitBreakerTripped); 532 | } 533 | 534 | tracing::debug!("Commiting cache {:?}", cache_id); 535 | 536 | let req = CommitCacheRequest { size }; 537 | 538 | #[cfg(debug_assertions)] 539 | self.stats.post.fetch_add(1, Ordering::SeqCst); 540 | 541 | if let Err(e) = self 542 | .client 543 | .post(self.construct_url(&format!("caches/{}", cache_id.0))) 544 | .json(&req) 545 | .send() 546 | .await? 547 | .check() 548 | .await 549 | { 550 | self.circuit_breaker_429_tripped 551 | .check_err(&e, &self.circuit_breaker_429_tripped_callback); 552 | return Err(e); 553 | } 554 | 555 | Ok(()) 556 | } 557 | 558 | fn construct_url(&self, resource: &str) -> String { 559 | let mut url = self.credentials.cache_url.clone(); 560 | if !url.ends_with('/') { 561 | url.push('/'); 562 | } 563 | url.push_str("_apis/artifactcache/"); 564 | url.push_str(resource); 565 | url 566 | } 567 | } 568 | 569 | #[async_trait] 570 | impl ResponseExt for reqwest::Response { 571 | async fn check(self) -> Result<()> { 572 | let status = self.status(); 573 | 574 | if !status.is_success() { 575 | return Err(handle_error(self).await); 576 | } 577 | 578 | Ok(()) 579 | } 580 | 581 | async fn check_json(self) -> Result { 582 | let status = self.status(); 583 | 584 | if !status.is_success() { 585 | return Err(handle_error(self).await); 586 | } 587 | 588 | // We don't do `Response::json()` directly to preserve 589 | // the original response payload for troubleshooting. 590 | let bytes = self.bytes().await?; 591 | match serde_json::from_slice(&bytes) { 592 | Ok(decoded) => Ok(decoded), 593 | Err(error) => Err(Error::DecodeError { 594 | status, 595 | error, 596 | bytes, 597 | }), 598 | } 599 | } 600 | } 601 | 602 | async fn handle_error(res: reqwest::Response) -> Error { 603 | let status = res.status(); 604 | let bytes = match res.bytes().await { 605 | Ok(bytes) => { 606 | let bom = Bom::from(bytes.as_ref()); 607 | bytes.slice(bom.len()..) 608 | } 609 | Err(e) => { 610 | return e.into(); 611 | } 612 | }; 613 | 614 | let info = match serde_json::from_slice(&bytes) { 615 | Ok(structured) => ApiErrorInfo::Structured(structured), 616 | Err(e) => { 617 | tracing::info!("failed to decode error: {}", e); 618 | ApiErrorInfo::Unstructured(bytes) 619 | } 620 | }; 621 | 622 | Error::ApiError { status, info } 623 | } 624 | 625 | trait AtomicCircuitBreaker { 626 | fn check_err(&self, e: &Error, callback: &CircuitBreakerTrippedCallback); 627 | fn check_result( 628 | &self, 629 | r: &std::result::Result, 630 | callback: &CircuitBreakerTrippedCallback, 631 | ); 632 | } 633 | 634 | impl AtomicCircuitBreaker for AtomicBool { 635 | fn check_result( 636 | &self, 637 | r: &std::result::Result, 638 | callback: &CircuitBreakerTrippedCallback, 639 | ) { 640 | if let Err(ref e) = r { 641 | self.check_err(e, callback) 642 | } 643 | } 644 | 645 | fn check_err(&self, e: &Error, callback: &CircuitBreakerTrippedCallback) { 646 | if let Error::ApiError { 647 | status: reqwest::StatusCode::TOO_MANY_REQUESTS, 648 | .. 649 | } = e 650 | { 651 | tracing::info!("Disabling GitHub Actions Cache due to 429: Too Many Requests"); 652 | self.store(true, Ordering::Relaxed); 653 | callback(); 654 | } 655 | } 656 | } 657 | -------------------------------------------------------------------------------- /gha-cache/src/credentials.rs: -------------------------------------------------------------------------------- 1 | //! Access credentials. 
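//!
//! A minimal end-to-end sketch of how these credentials feed the rest of the crate
//! (illustrative only: the callback, key, and payload below are made up, while
//! `Credentials::load_from_env`, `Api::new`, `allocate_file_with_random_suffix`, and
//! `upload_file` are the crate's real entry points):
//!
//! ```ignore
//! use std::sync::Arc;
//! use gha_cache::{Api, Credentials};
//!
//! // Inside an async context:
//! let creds = Credentials::load_from_env().expect("not running under GitHub Actions");
//! let api = Api::new(creds, Arc::new(Box::new(|| eprintln!("GHA cache throttled"))))?;
//! let allocation = api.allocate_file_with_random_suffix("example.narinfo").await?;
//! let size = api.upload_file(allocation, &b"illustrative contents"[..]).await?;
//! ```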
2 | 3 | use std::env; 4 | 5 | use derivative::Derivative; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | /// Credentials to access the GitHub Actions Cache. 9 | #[derive(Clone, Derivative, Deserialize, Serialize)] 10 | #[derivative(Debug)] 11 | pub struct Credentials { 12 | /// The base URL of the cache. 13 | /// 14 | /// This is the `ACTIONS_CACHE_URL` environment variable. 15 | #[serde(alias = "ACTIONS_CACHE_URL")] 16 | pub(crate) cache_url: String, 17 | 18 | /// The token. 19 | /// 20 | /// This is the `ACTIONS_RUNTIME_TOKEN` environment variable. 21 | #[derivative(Debug = "ignore")] 22 | #[serde(alias = "ACTIONS_RUNTIME_TOKEN")] 23 | pub(crate) runtime_token: String, 24 | } 25 | 26 | impl Credentials { 27 | /// Tries to load credentials from the environment. 28 | pub fn load_from_env() -> Option { 29 | let cache_url = env::var("ACTIONS_CACHE_URL").ok()?; 30 | let runtime_token = env::var("ACTIONS_RUNTIME_TOKEN").ok()?; 31 | 32 | Some(Self { 33 | cache_url, 34 | runtime_token, 35 | }) 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /gha-cache/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny( 2 | asm_sub_register, 3 | deprecated, 4 | missing_abi, 5 | unsafe_code, 6 | unused_macros, 7 | unused_must_use, 8 | unused_unsafe 9 | )] 10 | #![deny(clippy::from_over_into, clippy::needless_question_mark)] 11 | #![cfg_attr( 12 | not(debug_assertions), 13 | deny(unused_imports, unused_mut, unused_variables,) 14 | )] 15 | 16 | pub mod api; 17 | pub mod credentials; 18 | mod util; 19 | 20 | pub use api::Api; 21 | pub use credentials::Credentials; 22 | -------------------------------------------------------------------------------- /gha-cache/src/util.rs: -------------------------------------------------------------------------------- 1 | //! Utilities. 2 | //! 3 | //! Taken from . 4 | 5 | use bytes::{Bytes, BytesMut}; 6 | use tokio::io::{AsyncRead, AsyncReadExt}; 7 | 8 | /// Greedily reads from a stream to fill a buffer. 
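///
/// Returns once the buffer is full or the stream reaches end-of-file, so the returned
/// `Bytes` is only shorter than the requested capacity on the final chunk.
///
/// A sketch of how the uploader in `api.rs` drives it (the chunk size mirrors
/// `CHUNK_SIZE` there; `stream` is any `AsyncRead`):
///
/// ```ignore
/// let buf = BytesMut::with_capacity(32 * 1024 * 1024);
/// let chunk = read_chunk_async(&mut stream, buf).await?;
/// if chunk.is_empty() {
///     // End of stream: nothing left to upload.
/// }
/// ```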
9 | pub async fn read_chunk_async( 10 | stream: &mut S, 11 | mut chunk: BytesMut, 12 | ) -> std::io::Result { 13 | while chunk.len() < chunk.capacity() { 14 | let read = stream.read_buf(&mut chunk).await?; 15 | 16 | if read == 0 { 17 | break; 18 | } 19 | } 20 | 21 | Ok(chunk.freeze()) 22 | } 23 | -------------------------------------------------------------------------------- /magic-nix-cache/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "magic-nix-cache" 3 | version = "0.2.0" 4 | edition = "2021" 5 | license = "Apache-2.0" 6 | 7 | [dependencies] 8 | gha-cache = { path = "../gha-cache" } 9 | 10 | axum = { version = "0.7.5", default-features = false, features = [ 11 | "json", 12 | "tokio", 13 | "http2", 14 | "macros" 15 | ] } 16 | clap = { version = "4.2.7", default-features = false, features = [ 17 | "std", 18 | "derive", 19 | "error-context", 20 | "wrap_help", 21 | ] } 22 | tracing = "0.1.37" 23 | tracing-subscriber = { version = "0.3.17", default-features = false, features = [ 24 | "ansi", 25 | "env-filter", 26 | "fmt", 27 | "tracing-log", 28 | "smallvec", 29 | ] } 30 | tower-http = { version = "0.5.2", features = ["trace"] } 31 | serde = { version = "1.0.162", features = ["derive"] } 32 | serde_json = { version = "1.0.96", default-features = false } 33 | thiserror = "1.0.40" 34 | tokio-stream = { version = "0.1.15", default-features = false } 35 | tokio-util = { version = "0.7.11", features = ["io", "compat"] } 36 | daemonize = "0.5.0" 37 | is_ci = "1.1.1" 38 | sha2 = { version = "0.10.6", default-features = false } 39 | reqwest = { version = "0.12.5", default-features = false, features = [ 40 | "blocking", 41 | "rustls-tls-native-roots", 42 | "trust-dns", 43 | "json" 44 | ] } 45 | netrc-rs = "0.1.2" 46 | attic = { git = "https://github.com/DeterminateSystems/attic", branch = "fixups-for-magic-nix-cache" } 47 | attic-client = { git = "https://github.com/DeterminateSystems/attic", branch = "fixups-for-magic-nix-cache" } 48 | attic-server = { git = "https://github.com/DeterminateSystems/attic", branch = "fixups-for-magic-nix-cache" } 49 | indicatif = "0.17" 50 | anyhow = "1.0.71" 51 | tempfile = "3.9" 52 | uuid = { version = "1.4.0", features = ["serde", "v7", "rand", "std"] } 53 | futures = "0.3" 54 | async-compression = "0.4" 55 | tracing-appender = "0.2.3" 56 | http = "1.0" 57 | http-body-util = "0.1" 58 | hyper = { version = "1.0.0", features = ["full"] } 59 | hyper-util = { version = "0.1", features = ["tokio", "server-auto", "http1"] } 60 | xdg = { version = "2.5.2" } 61 | color-eyre = { version = "0.6.3" } 62 | 63 | [dependencies.tokio] 64 | version = "1.44.2" 65 | default-features = false 66 | features = ["fs", "macros", "process", "rt", "rt-multi-thread", "sync"] 67 | -------------------------------------------------------------------------------- /magic-nix-cache/src/api.rs: -------------------------------------------------------------------------------- 1 | //! Action API. 2 | //! 3 | //! This API is intended to be used by determinate-nix-action. 
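//!
//! Three endpoints are exposed (see [`get_router`]): `/api/workflow-start`,
//! `/api/workflow-finish`, and `/api/enqueue-paths`.
//!
//! A hypothetical client call against the default listen address (the store path is made
//! up for illustration; `store_paths` matches [`EnqueuePathsRequest`]):
//!
//! ```ignore
//! let body = serde_json::json!({
//!     "store_paths": ["/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-hello-2.12"]
//! });
//! reqwest::Client::new()
//!     .post("http://127.0.0.1:3000/api/enqueue-paths")
//!     .json(&body)
//!     .send()
//!     .await?
//!     .error_for_status()?;
//! ```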
4 | 5 | use attic::nix_store::StorePath; 6 | use axum::{extract::Extension, routing::post, Json, Router}; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use super::State; 10 | use crate::error::{Error, Result}; 11 | 12 | #[derive(Debug, Clone, Serialize)] 13 | struct WorkflowStartResponse { 14 | num_original_paths: Option, 15 | } 16 | 17 | #[derive(Debug, Clone, Serialize)] 18 | struct WorkflowFinishResponse { 19 | num_original_paths: Option, 20 | num_final_paths: Option, 21 | num_new_paths: Option, 22 | } 23 | 24 | pub fn get_router() -> Router { 25 | Router::new() 26 | .route("/api/workflow-start", post(workflow_start)) 27 | .route("/api/workflow-finish", post(workflow_finish)) 28 | .route("/api/enqueue-paths", post(post_enqueue_paths)) 29 | } 30 | 31 | /// Record existing paths. 32 | async fn workflow_start(Extension(state): Extension) -> Result> { 33 | tracing::info!("Workflow started"); 34 | let reply = if let Some(original_paths) = &state.original_paths { 35 | let mut original_paths = original_paths.lock().await; 36 | *original_paths = crate::util::get_store_paths(&state.store).await?; 37 | 38 | let reply = WorkflowStartResponse { 39 | num_original_paths: Some(original_paths.len()), 40 | }; 41 | 42 | state.metrics.num_original_paths.set(original_paths.len()); 43 | 44 | reply 45 | } else { 46 | WorkflowStartResponse { 47 | num_original_paths: None, 48 | } 49 | }; 50 | 51 | Ok(Json(reply)) 52 | } 53 | 54 | /// Push new paths and shut down. 55 | async fn workflow_finish( 56 | Extension(state): Extension, 57 | ) -> Result> { 58 | tracing::info!("Workflow finished"); 59 | 60 | let response = if let Some(original_paths) = &state.original_paths { 61 | let original_paths = original_paths.lock().await; 62 | let final_paths = crate::util::get_store_paths(&state.store).await?; 63 | let new_paths = final_paths 64 | .difference(&original_paths) 65 | .cloned() 66 | .map(|path| state.store.follow_store_path(path).map_err(Error::Attic)) 67 | .collect::>>()?; 68 | 69 | let num_original_paths = original_paths.len(); 70 | let num_final_paths = final_paths.len(); 71 | let num_new_paths = new_paths.len(); 72 | 73 | let reply = WorkflowFinishResponse { 74 | num_original_paths: Some(num_original_paths), 75 | num_final_paths: Some(num_final_paths), 76 | num_new_paths: Some(num_new_paths), 77 | }; 78 | 79 | state.metrics.num_original_paths.set(num_original_paths); 80 | state.metrics.num_final_paths.set(num_final_paths); 81 | state.metrics.num_new_paths.set(num_new_paths); 82 | 83 | // NOTE(cole-h): If we're substituting from an upstream cache, those paths won't have the 84 | // post-build-hook run on it, so we diff the store to ensure we cache everything we can. 
85 | tracing::info!("Diffing the store and uploading any new paths before we shut down"); 86 | enqueue_paths(&state, new_paths).await?; 87 | 88 | reply 89 | } else { 90 | WorkflowFinishResponse { 91 | num_original_paths: None, 92 | num_final_paths: None, 93 | num_new_paths: None, 94 | } 95 | }; 96 | 97 | if let Some(gha_cache) = &state.gha_cache { 98 | tracing::info!("Waiting for GitHub action cache uploads to finish"); 99 | gha_cache.shutdown().await?; 100 | } 101 | 102 | if let Some(attic_state) = state.flakehub_state.write().await.take() { 103 | tracing::info!("Waiting for FlakeHub cache uploads to finish"); 104 | let paths = attic_state.push_session.wait().await?; 105 | 106 | let paths = paths.keys().map(|s| s.name()).collect::>(); 107 | 108 | tracing::info!(?paths, "FlakeHub Cache uploads completed"); 109 | } else { 110 | tracing::info!("FlakeHub cache is not enabled, not uploading anything to it"); 111 | } 112 | 113 | if let Some(sender) = state.shutdown_sender.lock().await.take() { 114 | sender 115 | .send(()) 116 | .map_err(|_| Error::Internal("Sending shutdown server message".to_owned()))?; 117 | } 118 | 119 | // NOTE(cole-h): see `init_logging` 120 | if let Some(logfile) = &state.logfile { 121 | let logfile_contents = std::fs::read_to_string(logfile) 122 | .map_err(|e| crate::error::Error::Io(e, format!("Reading {}", logfile.display())))?; 123 | println!("Every log line throughout the lifetime of the program:"); 124 | println!("\n{logfile_contents}\n"); 125 | } 126 | 127 | Ok(Json(response)) 128 | } 129 | 130 | #[derive(Debug, Clone, Serialize, Deserialize)] 131 | pub struct EnqueuePathsRequest { 132 | pub store_paths: Vec, 133 | } 134 | 135 | #[derive(Debug, Clone, Serialize, Deserialize)] 136 | pub struct EnqueuePathsResponse {} 137 | 138 | /// Schedule paths in the local Nix store for uploading. 139 | #[tracing::instrument(skip_all)] 140 | async fn post_enqueue_paths( 141 | Extension(state): Extension, 142 | Json(req): Json, 143 | ) -> Result> { 144 | tracing::info!("Enqueueing {:?}", req.store_paths); 145 | 146 | let store_paths = req 147 | .store_paths 148 | .iter() 149 | .map(|path| state.store.follow_store_path(path).map_err(Error::Attic)) 150 | .collect::>>()?; 151 | 152 | enqueue_paths(&state, store_paths).await?; 153 | 154 | Ok(Json(EnqueuePathsResponse {})) 155 | } 156 | 157 | pub async fn enqueue_paths(state: &State, store_paths: Vec) -> Result<()> { 158 | if let Some(gha_cache) = &state.gha_cache { 159 | gha_cache 160 | .enqueue_paths(state.store.clone(), store_paths.clone()) 161 | .await?; 162 | } 163 | 164 | if let Some(flakehub_state) = &*state.flakehub_state.read().await { 165 | crate::flakehub::enqueue_paths(flakehub_state, store_paths).await?; 166 | } 167 | 168 | Ok(()) 169 | } 170 | -------------------------------------------------------------------------------- /magic-nix-cache/src/binary_cache.rs: -------------------------------------------------------------------------------- 1 | //! Binary Cache API. 
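//!
//! This implements just enough of the Nix HTTP binary cache protocol for Nix to use the
//! daemon as a substituter: `GET /nix-cache-info`, `GET`/`PUT` of `{hash}.narinfo`, and
//! `GET`/`PUT` of `nar/{name}`. Successful lookups redirect to a GitHub Actions cache
//! URL; misses fall through to the configured upstream, or return 404 if none is set.
//!
//! Nix is pointed at this server by the `extra-substituters` line that `main.rs` appends
//! to `nix.conf`, e.g. (with the default listen address):
//!
//! ```text
//! extra-substituters = http://127.0.0.1:3000?trusted=1&compression=zstd&parallel-compression=true&priority=1
//! ```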
2 | 3 | use axum::{ 4 | extract::{Extension, Path}, 5 | response::Redirect, 6 | routing::{get, put}, 7 | Router, 8 | }; 9 | use futures::StreamExt as _; 10 | use tokio_util::io::StreamReader; 11 | 12 | use super::State; 13 | use crate::error::{Error, Result}; 14 | 15 | pub fn get_router() -> Router { 16 | Router::new() 17 | .route("/nix-cache-info", get(get_nix_cache_info)) 18 | // .narinfo 19 | .route("/:path", get(get_narinfo)) 20 | .route("/:path", put(put_narinfo)) 21 | // .nar 22 | .route("/nar/:path", get(get_nar)) 23 | .route("/nar/:path", put(put_nar)) 24 | } 25 | 26 | async fn get_nix_cache_info() -> &'static str { 27 | // TODO: Make StoreDir configurable 28 | r#"WantMassQuery: 1 29 | StoreDir: /nix/store 30 | Priority: 41 31 | "# 32 | } 33 | 34 | async fn get_narinfo( 35 | Extension(state): Extension, 36 | Path(path): Path, 37 | ) -> Result { 38 | let components: Vec<&str> = path.splitn(2, '.').collect(); 39 | 40 | if components.len() != 2 { 41 | return Err(Error::NotFound); 42 | } 43 | 44 | if components[1] != "narinfo" { 45 | return Err(Error::NotFound); 46 | } 47 | 48 | let store_path_hash = components[0].to_string(); 49 | let key = format!("{}.narinfo", store_path_hash); 50 | 51 | if state 52 | .narinfo_negative_cache 53 | .read() 54 | .await 55 | .contains(&store_path_hash) 56 | { 57 | state.metrics.narinfos_sent_upstream.incr(); 58 | state.metrics.narinfos_negative_cache_hits.incr(); 59 | return pull_through(&state, &path); 60 | } 61 | 62 | if let Some(gha_cache) = &state.gha_cache { 63 | if let Some(url) = gha_cache.api.get_file_url(&[&key]).await? { 64 | state.metrics.narinfos_served.incr(); 65 | return Ok(Redirect::temporary(&url)); 66 | } 67 | } 68 | 69 | let mut negative_cache = state.narinfo_negative_cache.write().await; 70 | negative_cache.insert(store_path_hash); 71 | 72 | state.metrics.narinfos_sent_upstream.incr(); 73 | state.metrics.narinfos_negative_cache_misses.incr(); 74 | pull_through(&state, &path) 75 | } 76 | 77 | async fn put_narinfo( 78 | Extension(state): Extension, 79 | Path(path): Path, 80 | body: axum::body::Body, 81 | ) -> Result<()> { 82 | let components: Vec<&str> = path.splitn(2, '.').collect(); 83 | 84 | if components.len() != 2 { 85 | return Err(Error::BadRequest); 86 | } 87 | 88 | if components[1] != "narinfo" { 89 | return Err(Error::BadRequest); 90 | } 91 | 92 | let gha_cache = state.gha_cache.as_ref().ok_or(Error::GHADisabled)?; 93 | 94 | let store_path_hash = components[0].to_string(); 95 | let key = format!("{}.narinfo", store_path_hash); 96 | let allocation = gha_cache.api.allocate_file_with_random_suffix(&key).await?; 97 | 98 | let body_stream = body.into_data_stream(); 99 | let stream = StreamReader::new( 100 | body_stream 101 | .map(|r| r.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))), 102 | ); 103 | 104 | gha_cache.api.upload_file(allocation, stream).await?; 105 | state.metrics.narinfos_uploaded.incr(); 106 | 107 | state 108 | .narinfo_negative_cache 109 | .write() 110 | .await 111 | .remove(&store_path_hash); 112 | 113 | Ok(()) 114 | } 115 | 116 | async fn get_nar(Extension(state): Extension, Path(path): Path) -> Result { 117 | if let Some(url) = state 118 | .gha_cache 119 | .as_ref() 120 | .ok_or(Error::GHADisabled)? 121 | .api 122 | .get_file_url(&[&path]) 123 | .await? 
124 | { 125 | state.metrics.nars_served.incr(); 126 | return Ok(Redirect::temporary(&url)); 127 | } 128 | 129 | if let Some(upstream) = &state.upstream { 130 | state.metrics.nars_sent_upstream.incr(); 131 | Ok(Redirect::temporary(&format!("{}/nar/{}", upstream, path))) 132 | } else { 133 | Err(Error::NotFound) 134 | } 135 | } 136 | 137 | async fn put_nar( 138 | Extension(state): Extension, 139 | Path(path): Path, 140 | body: axum::body::Body, 141 | ) -> Result<()> { 142 | let gha_cache = state.gha_cache.as_ref().ok_or(Error::GHADisabled)?; 143 | 144 | let allocation = gha_cache 145 | .api 146 | .allocate_file_with_random_suffix(&path) 147 | .await?; 148 | 149 | let body_stream = body.into_data_stream(); 150 | let stream = StreamReader::new( 151 | body_stream 152 | .map(|r| r.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))), 153 | ); 154 | 155 | gha_cache.api.upload_file(allocation, stream).await?; 156 | state.metrics.nars_uploaded.incr(); 157 | 158 | Ok(()) 159 | } 160 | 161 | fn pull_through(state: &State, path: &str) -> Result { 162 | if let Some(upstream) = &state.upstream { 163 | Ok(Redirect::temporary(&format!("{}/{}", upstream, path))) 164 | } else { 165 | Err(Error::NotFound) 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /magic-nix-cache/src/env.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{self, Display}; 2 | 3 | #[derive(Clone, Copy)] 4 | pub enum Environment { 5 | GitHubActions, 6 | GitLabCI, 7 | Other, 8 | } 9 | 10 | impl Environment { 11 | pub fn determine() -> Self { 12 | if env_var_is_true("GITHUB_ACTIONS") { 13 | return Environment::GitHubActions; 14 | } 15 | 16 | if env_var_is_true("GITLAB_CI") { 17 | return Environment::GitLabCI; 18 | } 19 | 20 | Environment::Other 21 | } 22 | 23 | pub fn is_github_actions(&self) -> bool { 24 | matches!(self, Self::GitHubActions) 25 | } 26 | 27 | pub fn is_gitlab_ci(&self) -> bool { 28 | matches!(self, Self::GitLabCI) 29 | } 30 | } 31 | 32 | impl Display for Environment { 33 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 34 | use Environment::*; 35 | 36 | write!( 37 | f, 38 | "{}", 39 | match self { 40 | GitHubActions => "GitHub Actions", 41 | GitLabCI => "GitLab CI", 42 | Other => "an unspecified environment", 43 | } 44 | ) 45 | } 46 | } 47 | 48 | fn env_var_is_true(e: &str) -> bool { 49 | std::env::var(e).is_ok_and(|v| v == "true") 50 | } 51 | -------------------------------------------------------------------------------- /magic-nix-cache/src/error.rs: -------------------------------------------------------------------------------- 1 | //! Errors. 2 | 3 | use axum::{ 4 | http::StatusCode, 5 | response::{IntoResponse, Response}, 6 | }; 7 | use thiserror::Error; 8 | 9 | pub type Result = std::result::Result; 10 | 11 | #[derive(Error, Debug)] 12 | pub enum Error { 13 | #[error("GitHub API error: {0}")] 14 | Api(#[from] gha_cache::api::Error), 15 | 16 | #[error("Not Found")] 17 | NotFound, 18 | 19 | #[error("Bad Request")] 20 | BadRequest, 21 | 22 | #[error("I/O error: {0}. 
Context: {1}")] 23 | Io(std::io::Error, String), 24 | 25 | #[error("GHA cache is disabled")] 26 | GHADisabled, 27 | 28 | #[error("FlakeHub cache error: {0}")] 29 | FlakeHub(#[from] anyhow::Error), 30 | 31 | #[error("FlakeHub HTTP error: {0}")] 32 | FlakeHubHttp(#[from] reqwest::Error), 33 | 34 | #[error("Got HTTP response {0} getting the cache name from FlakeHub: {1}")] 35 | GetCacheName(reqwest::StatusCode, String), 36 | 37 | #[error("netrc parse error: {0}")] 38 | Netrc(netrc_rs::Error), 39 | 40 | #[error("Cannot find netrc credentials for {0}")] 41 | MissingCreds(String), 42 | 43 | #[error("Attic error: {0}")] 44 | Attic(#[from] attic::AtticError), 45 | 46 | #[error("Bad URL")] 47 | BadUrl(reqwest::Url), 48 | 49 | #[error("Configuration error: {0}")] 50 | Config(String), 51 | 52 | #[error("Internal error: {0}")] 53 | Internal(String), 54 | } 55 | 56 | impl IntoResponse for Error { 57 | fn into_response(self) -> Response { 58 | let code = match &self { 59 | Self::Api(gha_cache::api::Error::ApiError { 60 | status: StatusCode::TOO_MANY_REQUESTS, 61 | .. 62 | }) => StatusCode::TOO_MANY_REQUESTS, 63 | // HACK: HTTP 418 makes Nix throw a visible error but not retry 64 | Self::Api(_) => StatusCode::IM_A_TEAPOT, 65 | Self::NotFound => StatusCode::NOT_FOUND, 66 | Self::BadRequest => StatusCode::BAD_REQUEST, 67 | _ => StatusCode::INTERNAL_SERVER_ERROR, 68 | }; 69 | 70 | (code, format!("{}", self)).into_response() 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /magic-nix-cache/src/flakehub.rs: -------------------------------------------------------------------------------- 1 | use crate::env::Environment; 2 | use crate::error::{Error, Result}; 3 | use crate::DETERMINATE_NETRC_PATH; 4 | use anyhow::Context; 5 | use attic::cache::CacheName; 6 | use attic::nix_store::{NixStore, StorePath}; 7 | use attic_client::push::{PushSession, PushSessionConfig}; 8 | use attic_client::{ 9 | api::ApiClient, 10 | config::ServerConfig, 11 | push::{PushConfig, Pusher}, 12 | }; 13 | 14 | use reqwest::header::HeaderValue; 15 | use reqwest::Url; 16 | use serde::Deserialize; 17 | use std::os::unix::fs::MetadataExt; 18 | use std::path::{Path, PathBuf}; 19 | use std::sync::Arc; 20 | use tokio::fs::File; 21 | use tokio::io::{AsyncReadExt, AsyncWriteExt}; 22 | use tokio::sync::RwLock; 23 | use uuid::Uuid; 24 | 25 | const USER_AGENT: &str = "magic-nix-cache"; 26 | 27 | pub struct State { 28 | #[allow(dead_code)] 29 | pub substituter: Url, 30 | 31 | pub push_session: PushSession, 32 | } 33 | 34 | pub async fn init_cache( 35 | environment: Environment, 36 | flakehub_api_server: &Url, 37 | flakehub_cache_server: &Url, 38 | flakehub_flake_name: &Option, 39 | store: Arc, 40 | auth_method: &super::FlakeHubAuthSource, 41 | ) -> Result { 42 | // Parse netrc to get the credentials for api.flakehub.com. 43 | let netrc_path = auth_method.as_path_buf(); 44 | let NetrcInfo { 45 | netrc, 46 | flakehub_cache_server_hostname, 47 | flakehub_login, 48 | flakehub_password, 49 | } = extract_info_from_netrc(&netrc_path, flakehub_api_server, flakehub_cache_server).await?; 50 | 51 | if let super::FlakeHubAuthSource::Netrc(netrc_path) = auth_method { 52 | // Append an entry for the FlakeHub cache server to netrc. 
53 | if !netrc 54 | .machines 55 | .iter() 56 | .any(|machine| machine.name.as_ref() == Some(&flakehub_cache_server_hostname)) 57 | { 58 | let mut netrc_file = tokio::fs::OpenOptions::new() 59 | .create(false) 60 | .append(true) 61 | .open(netrc_path) 62 | .await 63 | .map_err(|e| { 64 | Error::Internal(format!( 65 | "Failed to open {} for appending: {}", 66 | netrc_path.display(), 67 | e 68 | )) 69 | })?; 70 | 71 | netrc_file 72 | .write_all( 73 | format!( 74 | "\nmachine {} login {} password {}\n\n", 75 | flakehub_cache_server_hostname, flakehub_login, flakehub_password, 76 | ) 77 | .as_bytes(), 78 | ) 79 | .await 80 | .map_err(|e| { 81 | Error::Internal(format!( 82 | "Failed to write credentials to {}: {}", 83 | netrc_path.display(), 84 | e 85 | )) 86 | })?; 87 | } 88 | } 89 | 90 | let server_config = ServerConfig { 91 | endpoint: flakehub_cache_server.to_string(), 92 | token: Some(attic_client::config::ServerTokenConfig::Raw { 93 | token: flakehub_password.clone(), 94 | }), 95 | }; 96 | let api_inner = ApiClient::from_server_config(server_config)?; 97 | let api = Arc::new(RwLock::new(api_inner)); 98 | 99 | // Periodically refresh JWT in GitHub Actions environment 100 | if environment.is_github_actions() { 101 | match auth_method { 102 | super::FlakeHubAuthSource::Netrc(path) => { 103 | let netrc_path_clone = path.to_path_buf(); 104 | let initial_github_jwt_clone = flakehub_password.clone(); 105 | let flakehub_cache_server_clone = flakehub_cache_server.to_string(); 106 | let api_clone = api.clone(); 107 | 108 | tokio::task::spawn(refresh_github_actions_jwt_worker( 109 | netrc_path_clone, 110 | initial_github_jwt_clone, 111 | flakehub_cache_server_clone, 112 | api_clone, 113 | )); 114 | } 115 | crate::FlakeHubAuthSource::DeterminateNixd => { 116 | let api_clone = api.clone(); 117 | let netrc_file = PathBuf::from(DETERMINATE_NETRC_PATH); 118 | let flakehub_api_server_clone = flakehub_api_server.clone(); 119 | let flakehub_cache_server_clone = flakehub_cache_server.clone(); 120 | 121 | let initial_meta = tokio::fs::metadata(&netrc_file).await.map_err(|e| { 122 | Error::Io(e, format!("getting metadata of {}", netrc_file.display())) 123 | })?; 124 | let initial_inode = initial_meta.ino(); 125 | 126 | tokio::task::spawn(refresh_determinate_token_worker( 127 | netrc_file, 128 | initial_inode, 129 | flakehub_api_server_clone, 130 | flakehub_cache_server_clone, 131 | api_clone, 132 | )); 133 | } 134 | } 135 | } 136 | 137 | // Get the cache UUID for this project. 
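// The project endpoint responds with the organization and project UUIDv7s, e.g.
// (illustrative values, field names per the `ProjectInfo` struct below):
//
//     { "organization_uuid_v7": "01902f90-...", "project_uuid_v7": "01902f91-..." }
//
// and the cache name used for pushing is "<organization_uuid_v7>:<project_uuid_v7>".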
138 | let cache_name = { 139 | let mut url = flakehub_api_server 140 | .join("project") 141 | .map_err(|_| Error::Config(format!("bad URL '{}'", flakehub_api_server)))?; 142 | 143 | if let Some(flakehub_flake_name) = flakehub_flake_name { 144 | if !flakehub_flake_name.is_empty() { 145 | url = flakehub_api_server 146 | .join(&format!("project/{}", flakehub_flake_name)) 147 | .map_err(|_| Error::Config(format!("bad URL '{}'", flakehub_api_server)))?; 148 | } 149 | } 150 | 151 | let response = reqwest::Client::new() 152 | .get(url.to_owned()) 153 | .header("User-Agent", USER_AGENT) 154 | .basic_auth(flakehub_login, Some(&flakehub_password)) 155 | .send() 156 | .await?; 157 | 158 | if !response.status().is_success() { 159 | return Err(Error::GetCacheName( 160 | response.status(), 161 | response.text().await?, 162 | )); 163 | } 164 | 165 | #[derive(Deserialize)] 166 | struct ProjectInfo { 167 | organization_uuid_v7: Uuid, 168 | project_uuid_v7: Uuid, 169 | } 170 | 171 | let project_info = response.json::().await?; 172 | 173 | format!( 174 | "{}:{}", 175 | project_info.organization_uuid_v7, project_info.project_uuid_v7, 176 | ) 177 | }; 178 | 179 | tracing::info!("Using cache {:?}", cache_name); 180 | 181 | let cache = unsafe { CacheName::new_unchecked(cache_name) }; 182 | 183 | let cache_config = api.read().await.get_cache_config(&cache).await?; 184 | 185 | let push_config = PushConfig { 186 | num_workers: 5, // FIXME: use number of CPUs? 187 | force_preamble: false, 188 | }; 189 | 190 | let mp = indicatif::MultiProgress::new(); 191 | 192 | let push_session = Pusher::new( 193 | store.clone(), 194 | api.clone(), 195 | cache.to_owned(), 196 | cache_config, 197 | mp, 198 | push_config, 199 | ) 200 | .into_push_session(PushSessionConfig { 201 | no_closure: false, 202 | ignore_upstream_cache_filter: false, 203 | }); 204 | 205 | let state = State { 206 | substituter: flakehub_cache_server.to_owned(), 207 | push_session, 208 | }; 209 | 210 | Ok(state) 211 | } 212 | 213 | #[derive(Debug)] 214 | struct NetrcInfo { 215 | netrc: netrc_rs::Netrc, 216 | flakehub_cache_server_hostname: String, 217 | flakehub_login: String, 218 | flakehub_password: String, 219 | } 220 | 221 | #[tracing::instrument] 222 | async fn extract_info_from_netrc( 223 | netrc_path: &Path, 224 | flakehub_api_server: &Url, 225 | flakehub_cache_server: &Url, 226 | ) -> Result { 227 | let netrc = { 228 | let mut netrc_file = File::open(netrc_path).await.map_err(|e| { 229 | Error::Internal(format!("Failed to open {}: {}", netrc_path.display(), e)) 230 | })?; 231 | let mut netrc_contents = String::new(); 232 | netrc_file 233 | .read_to_string(&mut netrc_contents) 234 | .await 235 | .map_err(|e| { 236 | Error::Internal(format!( 237 | "Failed to read {} contents: {}", 238 | netrc_path.display(), 239 | e 240 | )) 241 | })?; 242 | netrc_rs::Netrc::parse(netrc_contents, false).map_err(Error::Netrc)? 243 | }; 244 | 245 | let flakehub_netrc_entry = netrc 246 | .machines 247 | .iter() 248 | .find(|machine| { 249 | machine.name.as_ref() == flakehub_api_server.host().map(|x| x.to_string()).as_ref() 250 | }) 251 | .ok_or_else(|| Error::MissingCreds(flakehub_api_server.to_string()))? 252 | .to_owned(); 253 | 254 | let flakehub_cache_server_hostname = flakehub_cache_server 255 | .host() 256 | .ok_or_else(|| Error::BadUrl(flakehub_cache_server.to_owned()))? 
257 | .to_string(); 258 | let flakehub_login = flakehub_netrc_entry.login.ok_or_else(|| { 259 | Error::Config(format!( 260 | "netrc file does not contain a login for '{}'", 261 | flakehub_api_server 262 | )) 263 | })?; 264 | let flakehub_password = flakehub_netrc_entry.password.ok_or_else(|| { 265 | Error::Config(format!( 266 | "netrc file does not contain a password for '{}'", 267 | flakehub_api_server 268 | )) 269 | })?; 270 | 271 | Ok(NetrcInfo { 272 | netrc, 273 | flakehub_cache_server_hostname, 274 | flakehub_login, 275 | flakehub_password, 276 | }) 277 | } 278 | 279 | pub async fn enqueue_paths(state: &State, store_paths: Vec) -> Result<()> { 280 | state.push_session.queue_many(store_paths)?; 281 | 282 | Ok(()) 283 | } 284 | 285 | /// Refresh the GitHub Actions JWT every 2 minutes (slightly less than half of the default validity 286 | /// period) to ensure pushing / pulling doesn't stop working. 287 | #[tracing::instrument(skip_all)] 288 | async fn refresh_github_actions_jwt_worker( 289 | netrc_path: std::path::PathBuf, 290 | mut github_jwt: String, 291 | flakehub_cache_server_clone: String, 292 | api: Arc>, 293 | ) -> Result<()> { 294 | // NOTE(cole-h): This is a workaround -- at the time of writing, GitHub Actions JWTs are only 295 | // valid for 5 minutes after being issued. FlakeHub uses these JWTs for authentication, which 296 | // means that after those 5 minutes have passed and the token is expired, FlakeHub (and by 297 | // extension FlakeHub Cache) will no longer allow requests using this token. However, GitHub 298 | // gives us a way to repeatedly request new tokens, so we utilize that and refresh the token 299 | // every 2 minutes (less than half of the lifetime of the token). 300 | 301 | // TODO(cole-h): this should probably be half of the token's lifetime ((exp - iat) / 2), but 302 | // getting this is nontrivial so I'm not going to do it until GitHub changes the lifetime and 303 | // breaks this. 304 | let next_refresh = std::time::Duration::from_secs(2 * 60); 305 | 306 | // NOTE(cole-h): we sleep until the next refresh at first because we already got a token from 307 | // GitHub recently, don't need to try again until we actually might need to get a new one. 
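// One possible shape for the TODO above: derive the refresh interval from the token
// itself rather than hard-coding two minutes. This is only a sketch; `base64` is not
// currently a dependency of this crate, and `jwt_half_lifetime` is a hypothetical helper.
//
//     use base64::Engine as _;
//
//     fn jwt_half_lifetime(jwt: &str) -> Option<std::time::Duration> {
//         // JWTs are three base64url segments; the claims live in the second one.
//         let payload = jwt.split('.').nth(1)?;
//         let bytes = base64::engine::general_purpose::URL_SAFE_NO_PAD
//             .decode(payload)
//             .ok()?;
//         let claims: serde_json::Value = serde_json::from_slice(&bytes).ok()?;
//         let iat = claims.get("iat")?.as_u64()?;
//         let exp = claims.get("exp")?.as_u64()?;
//         Some(std::time::Duration::from_secs(exp.saturating_sub(iat) / 2))
//     }
//
//     let next_refresh =
//         jwt_half_lifetime(&github_jwt).unwrap_or(std::time::Duration::from_secs(2 * 60));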
308 | tokio::time::sleep(next_refresh).await; 309 | 310 | // NOTE(cole-h): https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-cloud-providers#requesting-the-jwt-using-environment-variables 311 | let mut headers = reqwest::header::HeaderMap::new(); 312 | headers.insert( 313 | reqwest::header::ACCEPT, 314 | HeaderValue::from_static("application/json;api-version=2.0"), 315 | ); 316 | headers.insert( 317 | reqwest::header::CONTENT_TYPE, 318 | HeaderValue::from_static("application/json"), 319 | ); 320 | 321 | let github_client = reqwest::Client::builder() 322 | .user_agent(USER_AGENT) 323 | .default_headers(headers) 324 | .build()?; 325 | 326 | loop { 327 | match rewrite_github_actions_token(&github_client, &netrc_path, &github_jwt).await { 328 | Ok(new_github_jwt) => { 329 | github_jwt = new_github_jwt; 330 | 331 | let server_config = ServerConfig { 332 | endpoint: flakehub_cache_server_clone.clone(), 333 | token: Some(attic_client::config::ServerTokenConfig::Raw { 334 | token: github_jwt.clone(), 335 | }), 336 | }; 337 | let new_api = ApiClient::from_server_config(server_config)?; 338 | 339 | { 340 | let mut api_client = api.write().await; 341 | *api_client = new_api; 342 | } 343 | 344 | tracing::debug!( 345 | "Stored new token in netrc and API client, sleeping for {next_refresh:?}" 346 | ); 347 | tokio::time::sleep(next_refresh).await; 348 | } 349 | Err(e) => { 350 | tracing::error!( 351 | ?e, 352 | "Failed to get a new JWT from GitHub, trying again in 10 seconds" 353 | ); 354 | tokio::time::sleep(std::time::Duration::from_secs(10)).await; 355 | } 356 | } 357 | } 358 | } 359 | 360 | #[tracing::instrument(skip_all)] 361 | async fn rewrite_github_actions_token( 362 | client: &reqwest::Client, 363 | netrc_path: &Path, 364 | old_github_jwt: &str, 365 | ) -> Result { 366 | // NOTE(cole-h): https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-cloud-providers#requesting-the-jwt-using-environment-variables 367 | let runtime_token = std::env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN").map_err(|e| { 368 | Error::Internal(format!( 369 | "ACTIONS_ID_TOKEN_REQUEST_TOKEN was invalid unicode: {e}" 370 | )) 371 | })?; 372 | let runtime_url = std::env::var("ACTIONS_ID_TOKEN_REQUEST_URL").map_err(|e| { 373 | Error::Internal(format!( 374 | "ACTIONS_ID_TOKEN_REQUEST_URL was invalid unicode: {e}" 375 | )) 376 | })?; 377 | 378 | let token_request_url = format!("{runtime_url}&audience=api.flakehub.com"); 379 | let token_response = client 380 | .request(reqwest::Method::GET, &token_request_url) 381 | .bearer_auth(runtime_token) 382 | .send() 383 | .await 384 | .with_context(|| format!("sending request to {token_request_url}"))?; 385 | 386 | if let Err(e) = token_response.error_for_status_ref() { 387 | tracing::error!(?e, "Got error response when requesting token"); 388 | return Err(e)?; 389 | } 390 | 391 | #[derive(serde::Deserialize)] 392 | struct TokenResponse { 393 | value: String, 394 | } 395 | 396 | let token_response: TokenResponse = token_response 397 | .json() 398 | .await 399 | .with_context(|| "converting response into json")?; 400 | 401 | let new_github_jwt_string = token_response.value; 402 | let netrc_contents = tokio::fs::read_to_string(netrc_path) 403 | .await 404 | .with_context(|| format!("failed to read {netrc_path:?} to string"))?; 405 | let new_netrc_contents = netrc_contents.replace(old_github_jwt, &new_github_jwt_string); 406 | 407 | // NOTE(cole-h): create the temporary file 
right next to the real one so we don't run into 408 | // cross-device linking issues when renaming 409 | let netrc_path_tmp = netrc_path.with_extension("tmp"); 410 | tokio::fs::write(&netrc_path_tmp, new_netrc_contents) 411 | .await 412 | .with_context(|| format!("writing new JWT to {netrc_path_tmp:?}"))?; 413 | tokio::fs::rename(&netrc_path_tmp, &netrc_path) 414 | .await 415 | .with_context(|| format!("renaming {netrc_path_tmp:?} to {netrc_path:?}"))?; 416 | 417 | Ok(new_github_jwt_string) 418 | } 419 | 420 | #[tracing::instrument(skip_all)] 421 | async fn refresh_determinate_token_worker( 422 | netrc_file: PathBuf, 423 | mut inode: u64, 424 | flakehub_api_server: Url, 425 | flakehub_cache_server: Url, 426 | api_clone: Arc>, 427 | ) { 428 | // NOTE(cole-h): This is a workaround -- at the time of writing, determinate-nixd handles the 429 | // GitHub Actions JWT refreshing for us, which means we don't know when this will happen. At the 430 | // moment, it does it roughly every 2 minutes (less than half of the total lifetime of the 431 | // issued token). 432 | 433 | loop { 434 | tokio::time::sleep(std::time::Duration::from_secs(3)).await; 435 | 436 | let meta = tokio::fs::metadata(&netrc_file) 437 | .await 438 | .map_err(|e| Error::Io(e, format!("getting metadata of {}", netrc_file.display()))); 439 | 440 | let Ok(meta) = meta else { 441 | tracing::error!(e = ?meta); 442 | continue; 443 | }; 444 | 445 | let current_inode = meta.ino(); 446 | 447 | if current_inode == inode { 448 | tracing::debug!("current inode is the same, file didn't change"); 449 | continue; 450 | } 451 | 452 | tracing::debug!("current inode is different, file changed"); 453 | inode = current_inode; 454 | 455 | let flakehub_password = match extract_info_from_netrc( 456 | &netrc_file, 457 | &flakehub_api_server, 458 | &flakehub_cache_server, 459 | ) 460 | .await 461 | { 462 | Ok(NetrcInfo { 463 | flakehub_password, .. 464 | }) => flakehub_password, 465 | Err(e) => { 466 | tracing::error!(?e, "Failed to extract auth info from netrc"); 467 | continue; 468 | } 469 | }; 470 | 471 | let server_config = ServerConfig { 472 | endpoint: flakehub_cache_server.to_string(), 473 | token: Some(attic_client::config::ServerTokenConfig::Raw { 474 | token: flakehub_password, 475 | }), 476 | }; 477 | 478 | let new_api = ApiClient::from_server_config(server_config.clone()); 479 | 480 | let Ok(new_api) = new_api else { 481 | tracing::error!(e = ?new_api, "Failed to construct new ApiClient"); 482 | continue; 483 | }; 484 | 485 | { 486 | let mut api_client = api_clone.write().await; 487 | *api_client = new_api; 488 | } 489 | 490 | tracing::debug!("Stored new token in API client, sleeping for 30s"); 491 | } 492 | } 493 | -------------------------------------------------------------------------------- /magic-nix-cache/src/gha.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashSet, sync::Arc}; 2 | 3 | use crate::error::{Error, Result}; 4 | use crate::telemetry; 5 | use async_compression::tokio::bufread::ZstdEncoder; 6 | use attic::nix_store::{NixStore, StorePath, ValidPathInfo}; 7 | use attic_server::narinfo::{Compression, NarInfo}; 8 | use futures::stream::TryStreamExt; 9 | use gha_cache::{Api, Credentials}; 10 | use tokio::sync::{ 11 | mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, 12 | RwLock, 13 | }; 14 | use tokio_util::compat::FuturesAsyncReadCompatExt; 15 | 16 | pub struct GhaCache { 17 | /// The GitHub Actions Cache API. 
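///
/// Exposed publicly so the binary cache HTTP handlers in `binary_cache.rs` can look up
/// and upload entries directly, without going through the background upload worker.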
18 | pub api: Arc, 19 | 20 | /// The future from the completion of the worker. 21 | worker_result: RwLock>>>, 22 | 23 | channel_tx: UnboundedSender, 24 | } 25 | 26 | #[derive(Debug)] 27 | enum Request { 28 | Shutdown, 29 | Upload(StorePath), 30 | } 31 | 32 | impl GhaCache { 33 | pub fn new( 34 | credentials: Credentials, 35 | cache_version: Option, 36 | store: Arc, 37 | metrics: Arc, 38 | narinfo_negative_cache: Arc>>, 39 | ) -> Result { 40 | let cb_metrics = metrics.clone(); 41 | let mut api = Api::new( 42 | credentials, 43 | Arc::new(Box::new(move || { 44 | cb_metrics 45 | .tripped_429 46 | .store(true, std::sync::atomic::Ordering::Relaxed); 47 | })), 48 | )?; 49 | 50 | if let Some(cache_version) = &cache_version { 51 | api.mutate_version(cache_version.as_bytes()); 52 | } 53 | 54 | let (channel_tx, channel_rx) = unbounded_channel(); 55 | 56 | let api = Arc::new(api); 57 | 58 | let api2 = api.clone(); 59 | 60 | let worker_result = tokio::task::spawn(async move { 61 | worker( 62 | &api2, 63 | store, 64 | channel_rx, 65 | metrics, 66 | narinfo_negative_cache.clone(), 67 | ) 68 | .await 69 | }); 70 | 71 | Ok(GhaCache { 72 | api, 73 | worker_result: RwLock::new(Some(worker_result)), 74 | channel_tx, 75 | }) 76 | } 77 | 78 | pub async fn shutdown(&self) -> Result<()> { 79 | if let Some(worker_result) = self.worker_result.write().await.take() { 80 | self.channel_tx 81 | .send(Request::Shutdown) 82 | .expect("Cannot send shutdown message"); 83 | worker_result 84 | .await 85 | .expect("failed to read result from gha worker") 86 | } else { 87 | Ok(()) 88 | } 89 | } 90 | 91 | pub async fn enqueue_paths( 92 | &self, 93 | store: Arc, 94 | store_paths: Vec, 95 | ) -> Result<()> { 96 | // FIXME: make sending the closure optional. We might want to 97 | // only send the paths that have been built by the user, under 98 | // the assumption that everything else is already in a binary 99 | // cache. 100 | // FIXME: compute_fs_closure_multi doesn't return a 101 | // toposort, though it doesn't really matter for the GHA 102 | // cache. 
103 | let closure = store 104 | .compute_fs_closure_multi(store_paths, false, false, false) 105 | .await?; 106 | 107 | for p in closure { 108 | self.channel_tx 109 | .send(Request::Upload(p)) 110 | .map_err(|_| Error::Internal("Cannot send upload message".to_owned()))?; 111 | } 112 | 113 | Ok(()) 114 | } 115 | } 116 | 117 | async fn worker( 118 | api: &Api, 119 | store: Arc, 120 | mut channel_rx: UnboundedReceiver, 121 | metrics: Arc, 122 | narinfo_negative_cache: Arc>>, 123 | ) -> Result<()> { 124 | let mut done = HashSet::new(); 125 | 126 | while let Some(req) = channel_rx.recv().await { 127 | match req { 128 | Request::Shutdown => { 129 | break; 130 | } 131 | Request::Upload(path) => { 132 | if api.circuit_breaker_tripped() { 133 | tracing::trace!("GitHub Actions gave us a 429, so we're done.",); 134 | continue; 135 | } 136 | 137 | if !done.insert(path.clone()) { 138 | continue; 139 | } 140 | 141 | if let Err(err) = upload_path( 142 | api, 143 | store.clone(), 144 | &path, 145 | metrics.clone(), 146 | narinfo_negative_cache.clone(), 147 | ) 148 | .await 149 | { 150 | tracing::error!( 151 | "Upload of path '{}' failed: {}", 152 | store.get_full_path(&path).display(), 153 | err 154 | ); 155 | } 156 | } 157 | } 158 | } 159 | 160 | Ok(()) 161 | } 162 | 163 | async fn upload_path( 164 | api: &Api, 165 | store: Arc, 166 | path: &StorePath, 167 | metrics: Arc, 168 | narinfo_negative_cache: Arc>>, 169 | ) -> Result<()> { 170 | let path_info = store.query_path_info(path.clone()).await?; 171 | 172 | // Upload the NAR. 173 | let nar_path = format!("{}.nar.zstd", path_info.nar_hash.to_base32()); 174 | 175 | let nar_allocation = api.allocate_file_with_random_suffix(&nar_path).await?; 176 | 177 | let nar_stream = store.nar_from_path(path.clone()); 178 | 179 | let nar_reader = nar_stream 180 | .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err)) 181 | .into_async_read(); 182 | 183 | let nar_compressor = ZstdEncoder::new(nar_reader.compat()); 184 | 185 | let compressed_nar_size = api.upload_file(nar_allocation, nar_compressor).await?; 186 | metrics.nars_uploaded.incr(); 187 | 188 | tracing::debug!( 189 | "Uploaded '{}' (size {} -> {})", 190 | nar_path, 191 | path_info.nar_size, 192 | compressed_nar_size 193 | ); 194 | 195 | // Upload the narinfo. 196 | let narinfo_path = format!("{}.narinfo", path.to_hash().as_str()); 197 | 198 | let narinfo_allocation = api.allocate_file_with_random_suffix(&narinfo_path).await?; 199 | 200 | let narinfo = path_info_to_nar_info(store.clone(), &path_info, format!("nar/{}", nar_path)) 201 | .to_string() 202 | .expect("failed to convert path into to nar info"); 203 | 204 | tracing::debug!("Uploading '{}'", narinfo_path); 205 | 206 | api.upload_file(narinfo_allocation, narinfo.as_bytes()) 207 | .await?; 208 | 209 | metrics.narinfos_uploaded.incr(); 210 | 211 | narinfo_negative_cache 212 | .write() 213 | .await 214 | .remove(&path.to_hash().to_string()); 215 | 216 | tracing::info!( 217 | "Uploaded '{}' to the GitHub Action Cache", 218 | store.get_full_path(path).display() 219 | ); 220 | 221 | Ok(()) 222 | } 223 | 224 | // FIXME: move to attic. 
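// For reference, the narinfo rendered from the struct returned below ends up looking
// roughly like this when served to Nix (hashes, sizes, and names are illustrative):
//
//     StorePath: /nix/store/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-hello-2.12.1
//     URL: nar/<nar-hash-base32>.nar.zstd
//     Compression: zstd
//     NarHash: sha256:<nar-hash-base32>
//     NarSize: 226560
//     References: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy-glibc-2.38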
225 | fn path_info_to_nar_info(store: Arc, path_info: &ValidPathInfo, url: String) -> NarInfo { 226 | NarInfo { 227 | store_path: store.get_full_path(&path_info.path), 228 | url, 229 | compression: Compression::Zstd, 230 | file_hash: None, 231 | file_size: None, 232 | nar_hash: path_info.nar_hash.clone(), 233 | nar_size: path_info.nar_size as usize, 234 | references: path_info 235 | .references 236 | .iter() 237 | .map(|r| { 238 | r.file_name() 239 | .and_then(|n| n.to_str()) 240 | .unwrap_or_else(|| { 241 | panic!( 242 | "failed to convert nar_info reference to string: {}", 243 | r.display() 244 | ) 245 | }) 246 | .to_owned() 247 | }) 248 | .collect(), 249 | system: None, 250 | deriver: None, 251 | signature: None, 252 | ca: path_info.ca.clone(), 253 | } 254 | } 255 | -------------------------------------------------------------------------------- /magic-nix-cache/src/github.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | const GITHUB_ACTOR_TYPE_USER: &str = "User"; 4 | const GITHUB_ACTOR_TYPE_ORGANIZATION: &str = "Organization"; 5 | 6 | #[derive(Serialize, Deserialize)] 7 | pub struct WorkflowData { 8 | event: WorkflowDataEvent, 9 | } 10 | 11 | #[derive(Serialize, Deserialize)] 12 | pub struct WorkflowDataEvent { 13 | repository: WorkflowDataEventRepo, 14 | } 15 | 16 | #[derive(Serialize, Deserialize)] 17 | pub struct WorkflowDataEventRepo { 18 | owner: WorkflowDataEventRepoOwner, 19 | } 20 | 21 | #[derive(Serialize, Deserialize)] 22 | pub struct WorkflowDataEventRepoOwner { 23 | login: String, 24 | #[serde(rename = "type")] 25 | kind: String, 26 | } 27 | 28 | pub(crate) fn get_actions_event_data() -> color_eyre::Result { 29 | let github_context = std::env::var("GITHUB_CONTEXT")?; 30 | let workflow_data: WorkflowData = serde_json::from_str::(&github_context)?; 31 | 32 | Ok(workflow_data) 33 | } 34 | 35 | pub(crate) fn print_unauthenticated_error() { 36 | let mut msg = "::error title=FlakeHub registration required.::Unable to authenticate to FlakeHub. 
Individuals must register at FlakeHub.com; Organizations must create an organization at FlakeHub.com.".to_string(); 37 | if let Ok(workflow_data) = get_actions_event_data() { 38 | let owner = workflow_data.event.repository.owner; 39 | if owner.kind == GITHUB_ACTOR_TYPE_USER { 40 | msg = format!( 41 | "::error title=FlakeHub registration required.::Please create an account for {} on FlakeHub.com to publish flakes.", 42 | &owner.login 43 | ); 44 | } else if owner.kind == GITHUB_ACTOR_TYPE_ORGANIZATION { 45 | msg = format!( 46 | "::error title=FlakeHub registration required.::Please create an organization for {} on FlakeHub.com to publish flakes.", 47 | &owner.login 48 | ); 49 | } 50 | }; 51 | println!("{}", msg); 52 | } 53 | -------------------------------------------------------------------------------- /magic-nix-cache/src/main.rs: -------------------------------------------------------------------------------- 1 | #![deny( 2 | asm_sub_register, 3 | deprecated, 4 | missing_abi, 5 | unused_macros, 6 | unused_must_use, 7 | unused_unsafe 8 | )] 9 | #![deny(clippy::from_over_into, clippy::needless_question_mark)] 10 | #![cfg_attr( 11 | not(debug_assertions), 12 | deny(unused_imports, unused_mut, unused_variables,) 13 | )] 14 | 15 | mod api; 16 | mod binary_cache; 17 | mod env; 18 | mod error; 19 | mod flakehub; 20 | mod gha; 21 | mod github; 22 | mod pbh; 23 | mod telemetry; 24 | mod util; 25 | 26 | use std::collections::HashSet; 27 | use std::fs::create_dir_all; 28 | use std::io::Write; 29 | use std::net::SocketAddr; 30 | use std::path::{Path, PathBuf}; 31 | use std::sync::Arc; 32 | 33 | use ::attic::nix_store::NixStore; 34 | use anyhow::{anyhow, Context, Result}; 35 | use axum::{extract::Extension, routing::get, Router}; 36 | use clap::Parser; 37 | use serde::{Deserialize, Serialize}; 38 | use tokio::fs::File; 39 | use tokio::io::AsyncWriteExt; 40 | use tokio::sync::{oneshot, Mutex, RwLock}; 41 | use tracing_subscriber::filter::EnvFilter; 42 | use tracing_subscriber::layer::SubscriberExt; 43 | use tracing_subscriber::util::SubscriberInitExt; 44 | 45 | use gha_cache::Credentials; 46 | 47 | const DETERMINATE_STATE_DIR: &str = "/nix/var/determinate"; 48 | const DETERMINATE_NIXD_SOCKET_NAME: &str = "determinate-nixd.socket"; 49 | const DETERMINATE_NETRC_PATH: &str = "/nix/var/determinate/netrc"; 50 | 51 | // TODO(colemickens): refactor, move with other UDS stuff (or all PBH stuff) to new file 52 | #[derive(Clone, Debug, Serialize, Deserialize)] 53 | #[serde(tag = "c", rename_all = "kebab-case")] 54 | pub struct BuiltPathResponseEventV1 { 55 | pub drv: PathBuf, 56 | pub outputs: Vec, 57 | } 58 | 59 | type State = Arc; 60 | 61 | /// GitHub Actions-powered Nix binary cache 62 | #[derive(Parser, Debug)] 63 | struct Args { 64 | /// Address to listen on. 65 | /// 66 | /// FIXME: IPv6 67 | #[arg(short = 'l', long, default_value = "127.0.0.1:3000")] 68 | listen: SocketAddr, 69 | 70 | /// The cache version. 71 | /// 72 | /// Only caches with the same version string are visible. 73 | /// Using another version string allows you to "bust" the cache. 74 | #[arg(long)] 75 | cache_version: Option, 76 | 77 | /// The upstream cache. 78 | /// 79 | /// Requests for unknown NARs are redirected to this cache 80 | /// instead. 81 | #[arg(long)] 82 | upstream: Option, 83 | 84 | /// Diagnostic endpoint to send diagnostics and performance data. 85 | /// 86 | /// Set it to an empty string to disable reporting. 87 | /// See the README for details. 
88 | #[arg( 89 | long, 90 | default_value = "https://install.determinate.systems/magic-nix-cache/perf" 91 | )] 92 | diagnostic_endpoint: String, 93 | 94 | /// The FlakeHub API server. 95 | #[arg(long, default_value = "https://api.flakehub.com")] 96 | flakehub_api_server: reqwest::Url, 97 | 98 | /// The path of the `netrc` file that contains the FlakeHub JWT token. 99 | #[arg(long)] 100 | flakehub_api_server_netrc: Option, 101 | 102 | /// The FlakeHub binary cache server. 103 | #[arg(long, default_value = "https://cache.flakehub.com")] 104 | flakehub_cache_server: reqwest::Url, 105 | 106 | #[arg(long)] 107 | flakehub_flake_name: Option, 108 | 109 | /// The location of `nix.conf`. 110 | #[arg(long, default_value_os_t = default_nix_conf())] 111 | nix_conf: PathBuf, 112 | 113 | /// Whether to use the GHA cache. 114 | #[arg(long)] 115 | use_gha_cache: Option>, 116 | 117 | /// Whether to use the FlakeHub binary cache. 118 | #[arg(long)] 119 | use_flakehub: Option>, 120 | 121 | /// URL to which to post startup notification. 122 | #[arg(long)] 123 | startup_notification_url: Option, 124 | 125 | /// File to write to when indicating startup. 126 | #[arg(long)] 127 | startup_notification_file: Option, 128 | 129 | /// Whether or not to diff the store before and after Magic Nix Cache runs 130 | #[arg(long, default_value_t = false)] 131 | diff_store: bool, 132 | } 133 | 134 | #[derive(Debug, Clone, Copy, PartialEq, clap::ValueEnum)] 135 | pub enum CacheTrinary { 136 | NoPreference, 137 | Enabled, 138 | Disabled, 139 | } 140 | 141 | impl From>> for CacheTrinary { 142 | fn from(b: Option>) -> Self { 143 | match b { 144 | None => CacheTrinary::NoPreference, 145 | Some(None) => CacheTrinary::Enabled, 146 | Some(Some(v)) => v, 147 | } 148 | } 149 | } 150 | 151 | #[derive(PartialEq, Clone, Copy)] 152 | pub enum Dnixd { 153 | Available, 154 | Missing, 155 | } 156 | 157 | impl From for Dnixd { 158 | fn from(b: bool) -> Self { 159 | if b { 160 | Dnixd::Available 161 | } else { 162 | Dnixd::Missing 163 | } 164 | } 165 | } 166 | 167 | impl Args { 168 | fn validate(&self, environment: env::Environment) -> Result<(), error::Error> { 169 | if environment.is_gitlab_ci() && self.github_cache_preference() == CacheTrinary::Enabled { 170 | return Err(error::Error::Config(String::from( 171 | "the --use-gha-cache flag should not be applied in GitLab CI", 172 | ))); 173 | } 174 | 175 | if environment.is_gitlab_ci() && self.flakehub_preference() != CacheTrinary::Enabled { 176 | return Err(error::Error::Config(String::from( 177 | "you must set --use-flakehub in GitLab CI", 178 | ))); 179 | } 180 | 181 | Ok(()) 182 | } 183 | 184 | fn github_cache_preference(&self) -> CacheTrinary { 185 | self.use_gha_cache.into() 186 | } 187 | 188 | fn flakehub_preference(&self) -> CacheTrinary { 189 | self.use_flakehub.into() 190 | } 191 | } 192 | 193 | fn default_nix_conf() -> PathBuf { 194 | xdg::BaseDirectories::new() 195 | .with_context(|| "identifying XDG base directories") 196 | .expect( 197 | "Could not identify your home directory. Try setting the HOME environment variable.", 198 | ) 199 | .get_config_file("nix/nix.conf") 200 | } 201 | 202 | /// The global server state. 203 | struct StateInner { 204 | /// State for uploading to the GHA cache. 205 | gha_cache: Option, 206 | 207 | /// The upstream cache. 208 | upstream: Option, 209 | 210 | /// The sender half of the oneshot channel to trigger a shutdown. 211 | shutdown_sender: Mutex>>, 212 | 213 | /// Set of store path hashes that are not present in GHAC. 
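///
/// This acts as a negative lookup cache: once a `.narinfo` request misses, later requests
/// for the same store path hash are redirected straight upstream instead of querying the
/// GitHub Actions cache API again. An entry is removed when a narinfo for that path is
/// uploaded.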
214 |     narinfo_negative_cache: Arc<RwLock<HashSet<String>>>,
215 |
216 |     /// Metrics for sending to perf at shutdown
217 |     metrics: Arc<telemetry::TelemetryReport>,
218 |
219 |     /// Connection to the local Nix store.
220 |     store: Arc<NixStore>,
221 |
222 |     /// FlakeHub cache state.
223 |     flakehub_state: RwLock<Option<flakehub::State>>,
224 |
225 |     /// Where all of tracing will log to when GitHub Actions is run in debug mode
226 |     logfile: Option<PathBuf>,
227 |
228 |     /// The paths in the Nix store when Magic Nix Cache started, if store diffing is enabled.
229 |     original_paths: Option<Mutex<HashSet<PathBuf>>>,
230 | }
231 |
232 | #[derive(Debug, Clone)]
233 | pub(crate) enum FlakeHubAuthSource {
234 |     DeterminateNixd,
235 |     Netrc(PathBuf),
236 | }
237 |
238 | impl FlakeHubAuthSource {
239 |     pub(crate) fn as_path_buf(&self) -> PathBuf {
240 |         match &self {
241 |             Self::Netrc(path) => path.clone(),
242 |             Self::DeterminateNixd => {
243 |                 let mut path = PathBuf::from(DETERMINATE_STATE_DIR);
244 |                 path.push("netrc");
245 |
246 |                 path
247 |             }
248 |         }
249 |     }
250 | }
251 |
252 | async fn main_cli() -> Result<()> {
253 |     let guard = init_logging()?;
254 |     let _tracing_guard = guard.appender_guard;
255 |
256 |     let args = Args::parse();
257 |     let environment = env::Environment::determine();
258 |     tracing::debug!("Running in {}", environment.to_string());
259 |     args.validate(environment)?;
260 |
261 |     let metrics = Arc::new(telemetry::TelemetryReport::new());
262 |
263 |     let dnixd_uds_socket_dir: &Path = Path::new(&DETERMINATE_STATE_DIR);
264 |     let dnixd_uds_socket_path = dnixd_uds_socket_dir.join(DETERMINATE_NIXD_SOCKET_NAME);
265 |     let dnixd_available: Dnixd = dnixd_uds_socket_path.exists().into();
266 |
267 |     let nix_conf_path: PathBuf = args.nix_conf.clone();
268 |
269 |     // NOTE: we expect this to point to a user nix.conf
270 |     // we always open/append to it to be able to append the extra-substituter for github-actions cache
271 |     // but we don't write to it for initializing flakehub_cache unless dnixd is unavailable
272 |     if let Some(parent) = Path::new(&nix_conf_path).parent() {
273 |         create_dir_all(parent).with_context(|| "Creating parent directories of nix.conf")?;
274 |     }
275 |     let mut nix_conf = std::fs::OpenOptions::new()
276 |         .create(true)
277 |         .append(true)
278 |         .open(&nix_conf_path)
279 |         .with_context(|| "Creating nix.conf")?;
280 |
281 |     // always enable fallback, first
282 |     nix_conf
283 |         .write_all(b"fallback = true\n")
284 |         .with_context(|| "Setting fallback in nix.conf")?;
285 |
286 |     let store = Arc::new(NixStore::connect()?);
287 |
288 |     let narinfo_negative_cache = Arc::new(RwLock::new(HashSet::new()));
289 |
290 |     let flakehub_auth_method: Option<FlakeHubAuthSource> = match (
291 |         args.flakehub_preference(),
292 |         &args.flakehub_api_server_netrc,
293 |         dnixd_available,
294 |     ) {
295 |         // User has explicitly passed --use-flakehub=disabled, so just straight up don't
296 |         (CacheTrinary::Disabled, _, _) => {
297 |             tracing::info!("Disabling FlakeHub cache.");
298 |             None
299 |         }
300 |
301 |         // User has no preference, did not pass a netrc, and determinate-nixd is not available
302 |         (CacheTrinary::NoPreference, None, Dnixd::Missing) => None,
303 |
304 |         // Use it when determinate-nixd is available, and let the user know what's going on
305 |         (pref, user_netrc_path, Dnixd::Available) => {
306 |             if pref == CacheTrinary::NoPreference {
307 |                 tracing::info!("Enabling FlakeHub cache because determinate-nixd is available.");
308 |             }
309 |
310 |             if user_netrc_path.is_some() {
311 |                 tracing::info!("Ignoring the user-specified --flakehub-api-server-netrc, in favor of the determinate-nixd netrc");
312 |             }
313 |
314 |             Some(FlakeHubAuthSource::DeterminateNixd)
315 |         }
316 |
317 |         // When determinate-nixd is not available, but the user specified a netrc
318 |         (_, Some(path), Dnixd::Missing) => {
319 |             if path.exists() {
320 |                 Some(FlakeHubAuthSource::Netrc(path.to_owned()))
321 |             } else {
322 |                 tracing::debug!(path = %path.display(), "User-provided netrc does not exist");
323 |                 None
324 |             }
325 |         }
326 |
327 |         // User explicitly turned on flakehub cache, but we have no netrc and determinate-nixd is not present
328 |         (CacheTrinary::Enabled, None, Dnixd::Missing) => {
329 |             return Err(anyhow!(
330 |                 "--flakehub-api-server-netrc is required when determinate-nixd is unavailable"
331 |             ));
332 |         }
333 |     };
334 |
335 |     let flakehub_state = if let Some(auth_method) = flakehub_auth_method {
336 |         let flakehub_cache_server = &args.flakehub_cache_server;
337 |
338 |         let flakehub_api_server = &args.flakehub_api_server;
339 |
340 |         let flakehub_flake_name = &args.flakehub_flake_name;
341 |
342 |         match flakehub::init_cache(
343 |             environment,
344 |             flakehub_api_server,
345 |             flakehub_cache_server,
346 |             flakehub_flake_name,
347 |             store.clone(),
348 |             &auth_method,
349 |         )
350 |         .await
351 |         {
352 |             Ok(state) => {
353 |                 if let FlakeHubAuthSource::Netrc(ref path) = auth_method {
354 |                     nix_conf
355 |                         .write_all(
356 |                             format!(
357 |                                 "extra-substituters = {}?trusted=1\nnetrc-file = {}\n",
358 |                                 &flakehub_cache_server,
359 |                                 path.display()
360 |                             )
361 |                             .as_bytes(),
362 |                         )
363 |                         .with_context(|| "Writing to nix.conf")?;
364 |                 }
365 |
366 |                 tracing::info!("FlakeHub cache is enabled.");
367 |                 Some(state)
368 |             }
369 |             Err(err) => {
370 |                 tracing::error!(
371 |                     "FlakeHub: cache initialization failed: Unauthenticated: {}",
372 |                     err
373 |                 );
374 |                 github::print_unauthenticated_error();
375 |                 None
376 |             }
377 |         }
378 |     } else {
379 |         tracing::info!("FlakeHub cache is disabled.");
380 |         None
381 |     };
382 |
383 |     let gha_cache = if (args.github_cache_preference() == CacheTrinary::Enabled)
384 |         || (args.github_cache_preference() == CacheTrinary::NoPreference
385 |             && flakehub_state.is_none())
386 |     {
387 |         tracing::info!("Loading credentials from environment");
388 |
389 |         let credentials = Credentials::load_from_env()
390 |             .with_context(|| "Failed to load credentials from environment (see README.md)")?;
391 |
392 |         let gha_cache = gha::GhaCache::new(
393 |             credentials,
394 |             args.cache_version,
395 |             store.clone(),
396 |             metrics.clone(),
397 |             narinfo_negative_cache.clone(),
398 |         )
399 |         .with_context(|| "Failed to initialize GitHub Actions Cache API")?;
400 |
401 |         nix_conf
402 |             .write_all(format!("extra-substituters = http://{}?trusted=1&compression=zstd&parallel-compression=true&priority=1\n", args.listen).as_bytes())
403 |             .with_context(|| "Writing to nix.conf")?;
404 |
405 |         tracing::info!("Native GitHub Action cache is enabled.");
406 |         Some(gha_cache)
407 |     } else {
408 |         if environment.is_github_actions() {
409 |             tracing::info!("Native GitHub Action cache is disabled.");
410 |         }
411 |
412 |         None
413 |     };
414 |
415 |     let diagnostic_endpoint = match args.diagnostic_endpoint.as_str() {
416 |         "" => {
417 |             tracing::info!("Diagnostics disabled.");
418 |             None
419 |         }
420 |         url => Some(url),
421 |     };
422 |
423 |     let (shutdown_sender, shutdown_receiver) = oneshot::channel();
424 |
425 |     let original_paths = args.diff_store.then_some(Mutex::new(HashSet::new()));
426 |     let state = Arc::new(StateInner {
427 |         gha_cache,
428 |         upstream: args.upstream.clone(),
429 |         shutdown_sender: Mutex::new(Some(shutdown_sender)),
430 |         narinfo_negative_cache,
431 |         metrics,
432 |         store,
433 |         flakehub_state: RwLock::new(flakehub_state),
434 |         logfile: guard.logfile,
435 |         original_paths,
436 |     });
437 |
438 |     if dnixd_available == Dnixd::Available {
439 |         tracing::info!("Subscribing to Determinate Nixd build events.");
440 |         crate::pbh::subscribe_uds_post_build_hook(dnixd_uds_socket_path, state.clone()).await?;
441 |     } else {
442 |         tracing::info!("Patching nix.conf to use a post-build-hook.");
443 |         crate::pbh::setup_legacy_post_build_hook(&args.listen, &mut nix_conf).await?;
444 |     }
445 |
446 |     drop(nix_conf);
447 |
448 |     let app = Router::new()
449 |         .route("/", get(root))
450 |         .merge(api::get_router())
451 |         .merge(binary_cache::get_router());
452 |
453 |     #[cfg(debug_assertions)]
454 |     let app = app
455 |         .layer(tower_http::trace::TraceLayer::new_for_http())
456 |         .layer(axum::middleware::from_fn(dump_api_stats));
457 |
458 |     let app = app.layer(Extension(state.clone()));
459 |
460 |     tracing::info!("Listening on {}", args.listen);
461 |
462 |     // Notify of startup via HTTP
463 |     if let Some(startup_notification_url) = args.startup_notification_url {
464 |         tracing::debug!("Startup notification via HTTP POST to {startup_notification_url}");
465 |
466 |         let response = reqwest::Client::new()
467 |             .post(startup_notification_url)
468 |             .header(reqwest::header::CONTENT_TYPE, "application/json")
469 |             .body("{}")
470 |             .send()
471 |             .await;
472 |         match response {
473 |             Ok(response) => {
474 |                 if !response.status().is_success() {
475 |                     Err(anyhow!(
476 |                         "Startup notification returned an error: {}\n{}",
477 |                         response.status(),
478 |                         response
479 |                             .text()
480 |                             .await
481 |                             .unwrap_or_else(|_| "".to_owned())
482 |                     ))?;
483 |                 }
484 |             }
485 |             err @ Err(_) => {
486 |                 err.with_context(|| "Startup notification failed")?;
487 |             }
488 |         }
489 |     }
490 |
491 |     // Notify of startup by writing "1" to the specified file
492 |     if let Some(startup_notification_file_path) = args.startup_notification_file {
493 |         let file_contents: &[u8] = b"1";
494 |
495 |         tracing::debug!("Startup notification via file at {startup_notification_file_path:?}");
496 |
497 |         if let Some(parent_dir) = startup_notification_file_path.parent() {
498 |             tokio::fs::create_dir_all(parent_dir)
499 |                 .await
500 |                 .with_context(|| {
501 |                     format!(
502 |                         "failed to create parent directory for startup notification file path: {}",
503 |                         startup_notification_file_path.display()
504 |                     )
505 |                 })?;
506 |         }
507 |         let mut notification_file = File::create(&startup_notification_file_path)
508 |             .await
509 |             .with_context(|| {
510 |                 format!(
511 |                     "failed to create startup notification file to path: {}",
512 |                     startup_notification_file_path.display()
513 |                 )
514 |             })?;
515 |         notification_file
516 |             .write_all(file_contents)
517 |             .await
518 |             .with_context(|| {
519 |                 format!(
520 |                     "failed to write startup notification file to path: {}",
521 |                     startup_notification_file_path.display()
522 |                 )
523 |             })?;
524 |
525 |         tracing::debug!("Created startup notification file at {startup_notification_file_path:?}");
526 |     }
527 |
528 |     let listener = tokio::net::TcpListener::bind(&args.listen).await?;
529 |     let ret = axum::serve(listener, app.into_make_service())
530 |         .with_graceful_shutdown(async move {
531 |             shutdown_receiver.await.ok();
532 |             tracing::info!("Shutting down");
533 |         })
534 |         .await;
535 |
536 |     // Notify diagnostics endpoint
537 |     if let Some(diagnostic_endpoint) = diagnostic_endpoint {
538 |         state.metrics.send(diagnostic_endpoint).await;
539 |     }
540 |
541 |     ret?;
542 |
543 |     Ok(())
544 | }
545 |
546 | #[tokio::main]
547 | async fn main() -> Result<()> {
548 |     match std::env::var("OUT_PATHS") {
549 |         Ok(out_paths) => pbh::handle_legacy_post_build_hook(&out_paths).await,
550 |         Err(_) => main_cli().await,
551 |     }
552 | }
553 |
554 | pub(crate) fn debug_logfile() -> PathBuf {
555 |     std::env::temp_dir().join("magic-nix-cache-tracing.log")
556 | }
557 |
558 | pub struct LogGuard {
559 |     appender_guard: Option<tracing_appender::non_blocking::WorkerGuard>,
560 |     logfile: Option<PathBuf>,
561 | }
562 |
563 | fn init_logging() -> Result<LogGuard> {
564 |     let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| {
565 |         #[cfg(debug_assertions)]
566 |         return EnvFilter::new("info")
567 |             .add_directive(
568 |                 "magic_nix_cache=debug"
569 |                     .parse()
570 |                     .expect("failed to parse magic_nix_cache directive"),
571 |             )
572 |             .add_directive(
573 |                 "gha_cache=debug"
574 |                     .parse()
575 |                     .expect("failed to parse gha_cache directive"),
576 |             );
577 |
578 |         #[cfg(not(debug_assertions))]
579 |         return EnvFilter::new("info");
580 |     });
581 |
582 |     let stderr_layer = tracing_subscriber::fmt::layer()
583 |         .with_writer(std::io::stderr)
584 |         .pretty();
585 |
586 |     let (guard, file_layer) = match std::env::var("RUNNER_DEBUG") {
587 |         Ok(val) if val == "1" => {
588 |             let logfile = debug_logfile();
589 |             let file = std::fs::OpenOptions::new()
590 |                 .create(true)
591 |                 .write(true)
592 |                 .truncate(true)
593 |                 .open(&logfile)?;
594 |             let (nonblocking, guard) = tracing_appender::non_blocking(file);
595 |             let file_layer = tracing_subscriber::fmt::layer()
596 |                 .with_writer(nonblocking)
597 |                 .pretty();
598 |
599 |             (
600 |                 LogGuard {
601 |                     appender_guard: Some(guard),
602 |                     logfile: Some(logfile),
603 |                 },
604 |                 Some(file_layer),
605 |             )
606 |         }
607 |         _ => (
608 |             LogGuard {
609 |                 appender_guard: None,
610 |                 logfile: None,
611 |             },
612 |             None,
613 |         ),
614 |     };
615 |
616 |     tracing_subscriber::registry()
617 |         .with(filter)
618 |         .with(stderr_layer)
619 |         .with(file_layer)
620 |         .init();
621 |
622 |     Ok(guard)
623 | }
624 |
625 | #[cfg(debug_assertions)]
626 | async fn dump_api_stats(
627 |     Extension(state): Extension<State>,
628 |     request: axum::http::Request<axum::body::Body>,
629 |     next: axum::middleware::Next,
630 | ) -> axum::response::Response {
631 |     if let Some(gha_cache) = &state.gha_cache {
632 |         gha_cache.api.dump_stats();
633 |     }
634 |     next.run(request).await
635 | }
636 |
637 | async fn root() -> &'static str {
638 |     "cache the world 🚀"
639 | }
640 |
--------------------------------------------------------------------------------
/magic-nix-cache/src/pbh.rs:
--------------------------------------------------------------------------------
1 | use std::io::Write as _;
2 | use std::net::SocketAddr;
3 | use std::os::unix::fs::PermissionsExt as _;
4 | use std::path::PathBuf;
5 |
6 | use anyhow::anyhow;
7 | use anyhow::Context as _;
8 | use anyhow::Result;
9 | use clap::Parser;
10 | use futures::StreamExt as _;
11 | use http_body_util::BodyExt as _;
12 | use hyper_util::rt::TokioExecutor;
13 | use hyper_util::rt::TokioIo;
14 | use tempfile::NamedTempFile;
15 | use tokio::net::UnixStream;
16 | use tokio::process::Command;
17 |
18 | use crate::BuiltPathResponseEventV1;
19 | use crate::State;
20 |
21 | pub async fn subscribe_uds_post_build_hook(
22 |     dnixd_uds_socket_path: PathBuf,
23 |     state: State,
24 | ) -> Result<()> {
25 |     tokio::spawn(async move {
26 |         let dnixd_uds_socket_path = &dnixd_uds_socket_path;
27 |         loop {
28 |             let Ok(socket_conn) = UnixStream::connect(dnixd_uds_socket_path).await else {
29 |                 tracing::error!("built-paths: failed to connect to determinate-nixd's socket");
30 |                 tokio::time::sleep(std::time::Duration::from_secs(10)).await;
31 |                 continue;
32 |             };
33 |             let stream = TokioIo::new(socket_conn);
34 |             let executor: TokioExecutor = TokioExecutor::new();
35 |
36 |             let sender_conn = hyper::client::conn::http2::handshake(executor, stream).await;
37 |
38 |             let Ok((mut sender, conn)) = sender_conn else {
39 |                 tracing::error!("built-paths: failed to http2 handshake");
40 |                 continue;
41 |             };
42 |
43 |             // NOTE(colemickens): for now we just drop the joinhandle and let it keep running
44 |             let _join_handle = tokio::task::spawn(async move {
45 |                 if let Err(err) = conn.await {
46 |                     tracing::error!("Connection failed: {:?}", err);
47 |                 }
48 |             });
49 |
50 |             let request = http::Request::builder()
51 |                 .method(http::Method::GET)
52 |                 .uri("http://localhost/events")
53 |                 .body(axum::body::Body::empty());
54 |             let Ok(request) = request else {
55 |                 tracing::error!("built-paths: failed to create request to subscribe");
56 |                 continue;
57 |             };
58 |
59 |             let response = sender.send_request(request).await;
60 |             let response = match response {
61 |                 Ok(r) => r,
62 |                 Err(e) => {
63 |                     tracing::error!("built-paths: failed to send subscription request: {:?}", e);
64 |                     continue;
65 |                 }
66 |             };
67 |             let mut data = response.into_data_stream();
68 |
69 |             while let Some(event_str) = data.next().await {
70 |                 let event_str = match event_str {
71 |                     Ok(event) => event,
72 |                     Err(e) => {
73 |                         tracing::error!("built-paths: error while receiving: {}", e);
74 |                         break;
75 |                     }
76 |                 };
77 |
78 |                 let Some(event_str) = event_str.strip_prefix("data: ".as_bytes()) else {
79 |                     tracing::debug!("built-paths subscription: ignoring non-data frame");
80 |                     continue;
81 |                 };
82 |                 let Ok(event): core::result::Result<BuiltPathResponseEventV1, _> =
83 |                     serde_json::from_slice(event_str)
84 |                 else {
85 |                     tracing::error!(
86 |                         "failed to decode built-path response as BuiltPathResponseEventV1"
87 |                     );
88 |                     continue;
89 |                 };
90 |
91 |                 let maybe_store_paths = event
92 |                     .outputs
93 |                     .iter()
94 |                     .map(|path| {
95 |                         state
96 |                             .store
97 |                             .follow_store_path(path)
98 |                             .map_err(|_| anyhow!("failed to collect store paths"))
99 |                     })
100 |                     .collect::<Result<Vec<_>>>();
101 |
102 |                 let Ok(store_paths) = maybe_store_paths else {
103 |                     tracing::error!(
104 |                         "built-paths: encountered an error aggregating build store paths"
105 |                     );
106 |                     continue;
107 |                 };
108 |
109 |                 tracing::debug!("about to enqueue paths: {:?}", store_paths);
110 |                 if let Err(e) = crate::api::enqueue_paths(&state, store_paths).await {
111 |                     tracing::error!(
112 |                         "built-paths: failed to enqueue paths for drv ({}): {}",
113 |                         event.drv.display(),
114 |                         e
115 |                     );
116 |                     continue;
117 |                 }
118 |             }
119 |         }
120 |     });
121 |
122 |     Ok(())
123 | }
124 |
125 | pub async fn setup_legacy_post_build_hook(
126 |     listen: &SocketAddr,
127 |     nix_conf: &mut std::fs::File,
128 | ) -> Result<()> {
129 |     /* Write the post-build hook script. Note that the shell script
130 |      * ignores errors, to avoid the Nix build from failing. */
131 |     let post_build_hook_script = {
132 |         let mut file = NamedTempFile::with_prefix("magic-nix-cache-build-hook-")
133 |             .with_context(|| "Creating a temporary file for the post-build hook")?;
134 |         file.write_all(
135 |             format!(
136 |                 // NOTE(cole-h): We want to exit 0 even if the hook failed, otherwise it'll fail the
137 |                 // build itself
138 |                 "#! /bin/sh\nRUST_LOG=trace RUST_BACKTRACE=full {} --server {} || :\n",
139 |                 std::env::current_exe()
140 |                     .with_context(|| "Getting the path of magic-nix-cache")?
141 |                     .display(),
142 |                 listen
143 |             )
144 |             .as_bytes(),
145 |         )
146 |         .with_context(|| "Writing the post-build hook")?;
147 |         let path = file
148 |             .keep()
149 |             .with_context(|| "Keeping the post-build hook")?
150 |             .1;
151 |
152 |         std::fs::set_permissions(&path, std::fs::Permissions::from_mode(0o755))
153 |             .with_context(|| "Setting permissions on the post-build hook")?;
154 |
155 |         /* Copy the script to the Nix store so we know for sure that
156 |          * it's accessible to the Nix daemon, which might have a
157 |          * different /tmp from us. */
158 |         let res = Command::new("nix")
159 |             .args([
160 |                 "--extra-experimental-features",
161 |                 "nix-command",
162 |                 "store",
163 |                 "add-path",
164 |                 &path.display().to_string(),
165 |             ])
166 |             .output()
167 |             .await
168 |             .with_context(|| {
169 |                 format!(
170 |                     "Running nix to add the post-build-hook to the store from {}",
171 |                     path.display()
172 |                 )
173 |             })?;
174 |         if res.status.success() {
175 |             tokio::fs::remove_file(&path).await.with_context(|| {
176 |                 format!(
177 |                     "Cleaning up the temporary post-build-hook at {}",
178 |                     path.display()
179 |                 )
180 |             })?;
181 |             PathBuf::from(String::from_utf8_lossy(&res.stdout).trim())
182 |         } else {
183 |             path
184 |         }
185 |     };
186 |
187 |     /* Update nix.conf. */
188 |     nix_conf
189 |         .write_all(format!("post-build-hook = {}\n", post_build_hook_script.display()).as_bytes())
190 |         .with_context(|| "Writing to nix.conf")?;
191 |
192 |     Ok(())
193 | }
194 |
195 | pub async fn handle_legacy_post_build_hook(out_paths: &str) -> Result<()> {
196 |     #[derive(Parser, Debug)]
197 |     struct Args {
198 |         /// `magic-nix-cache` daemon to connect to.
199 |         #[arg(short = 'l', long, default_value = "127.0.0.1:3000")]
200 |         server: SocketAddr,
201 |     }
202 |
203 |     let args = Args::parse();
204 |
205 |     let store_paths: Vec<_> = out_paths
206 |         .split_whitespace()
207 |         .map(|s| s.trim().to_owned())
208 |         .collect();
209 |
210 |     let request = crate::api::EnqueuePathsRequest { store_paths };
211 |
212 |     let response = reqwest::Client::new()
213 |         .post(format!("http://{}/api/enqueue-paths", &args.server))
214 |         .header(reqwest::header::CONTENT_TYPE, "application/json")
215 |         .body(
216 |             serde_json::to_string(&request)
217 |                 .with_context(|| "Encoding the enqueue-paths request as JSON")?,
218 |         )
219 |         .send()
220 |         .await;
221 |
222 |     match response {
223 |         Ok(response) if !response.status().is_success() => Err(anyhow!(
224 |             "magic-nix-cache server failed to enqueue the push request: {}\n{}",
225 |             response.status(),
226 |             response
227 |                 .text()
228 |                 .await
229 |                 .unwrap_or_else(|_| "".to_owned()),
230 |         ))?,
231 |         Ok(response) => response
232 |             .json::<crate::api::EnqueuePathsResponse>()
233 |             .await
234 |             .with_context(|| "magic-nix-cache-server didn't return a valid response")?,
235 |         Err(err) => {
236 |             Err(err).with_context(|| "magic-nix-cache server failed to send the enqueue request")?
237 |         }
238 |     };
239 |
240 |     Ok(())
241 | }
242 |
--------------------------------------------------------------------------------
/magic-nix-cache/src/telemetry.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 | use std::time::SystemTime;
3 |
4 | use sha2::{Digest, Sha256};
5 |
6 | /// A telemetry report to measure the effectiveness of the Magic Nix Cache
7 | #[derive(Debug, Default, serde::Serialize)]
8 | pub struct TelemetryReport {
9 |     distinct_id: Option<String>,
10 |
11 |     version: String,
12 |     is_ci: bool,
13 |
14 |     #[serde(skip_serializing)]
15 |     start_time: Option<SystemTime>,
16 |     elapsed_seconds: Metric,
17 |
18 |     pub narinfos_served: Metric,
19 |     pub narinfos_sent_upstream: Metric,
20 |     pub narinfos_negative_cache_hits: Metric,
21 |     pub narinfos_negative_cache_misses: Metric,
22 |     pub narinfos_uploaded: Metric,
23 |
24 |     pub nars_served: Metric,
25 |     pub nars_sent_upstream: Metric,
26 |     pub nars_uploaded: Metric,
27 |
28 |     pub num_original_paths: Metric,
29 |     pub num_final_paths: Metric,
30 |     pub num_new_paths: Metric,
31 |
32 |     pub tripped_429: std::sync::atomic::AtomicBool,
33 | }
34 |
35 | #[derive(Debug, Default, serde::Serialize)]
36 | pub struct Metric(std::sync::atomic::AtomicUsize);
37 | impl Metric {
38 |     pub fn incr(&self) {
39 |         self.0.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
40 |     }
41 |
42 |     pub fn set(&self, val: usize) {
43 |         self.0.store(val, std::sync::atomic::Ordering::Relaxed);
44 |     }
45 | }
46 |
47 | impl TelemetryReport {
48 |     pub fn new() -> TelemetryReport {
49 |         TelemetryReport {
50 |             distinct_id: env::var("DETSYS_CORRELATION")
51 |                 .ok()
52 |                 .or_else(|| calculate_opaque_id().ok()),
53 |
54 |             version: env!("CARGO_PKG_VERSION").to_string(),
55 |             is_ci: is_ci::cached(),
56 |
57 |             start_time: Some(SystemTime::now()),
58 |
59 |             ..Default::default()
60 |         }
61 |     }
62 |
63 |     pub async fn send(&self, endpoint: &str) {
64 |         if let Some(start_time) = self.start_time {
65 |             self.elapsed_seconds.set(
66 |                 SystemTime::now()
67 |                     .duration_since(start_time)
68 |                     .map(|d| d.as_secs())
69 |                     .unwrap_or(0)
70 |                     .try_into()
71 |                     .unwrap_or(usize::MAX),
72 |             );
73 |         }
74 |
75 |         if let Ok(serialized) = serde_json::to_string_pretty(&self) {
76 |             let _ = reqwest::Client::new()
77 |                 .post(endpoint)
78 |                 .body(serialized)
79 |                 .header("Content-Type", "application/json")
80 |                 .timeout(std::time::Duration::from_millis(3000))
81 |                 .send()
82 |                 .await;
83 |         }
84 |     }
85 | }
86 |
87 | fn calculate_opaque_id() -> Result<String, env::VarError> {
88 |     let mut hasher = Sha256::new();
89 |     hasher.update(env::var("GITHUB_REPOSITORY")?);
90 |     hasher.update(env::var("GITHUB_REPOSITORY_ID")?);
91 |     hasher.update(env::var("GITHUB_REPOSITORY_OWNER")?);
92 |     hasher.update(env::var("GITHUB_REPOSITORY_OWNER_ID")?);
93 |
94 |     let result = hasher.finalize();
95 |     Ok(format!("{:x}", result))
96 | }
97 |
--------------------------------------------------------------------------------
/magic-nix-cache/src/util.rs:
--------------------------------------------------------------------------------
1 | //! Utilities.
2 |
3 | use std::collections::HashSet;
4 | use std::path::{Path, PathBuf};
5 |
6 | use attic::nix_store::NixStore;
7 |
8 | use crate::error::Result;
9 |
10 | /// Returns the list of store paths that are currently present.
11 | pub async fn get_store_paths(store: &NixStore) -> Result<HashSet<PathBuf>> {
12 |     // FIXME: use the Nix API.
13 |     let store_dir = store.store_dir();
14 |     let mut listing = tokio::fs::read_dir(store_dir).await.map_err(|e| {
15 |         crate::error::Error::Io(
16 |             e,
17 |             format!("Enumerating store paths in {}", store_dir.display()),
18 |         )
19 |     })?;
20 |     let mut paths = HashSet::new();
21 |     while let Some(entry) = listing.next_entry().await.map_err(|e| {
22 |         crate::error::Error::Io(
23 |             e,
24 |             format!("Reading existing store paths from {}", store_dir.display()),
25 |         )
26 |     })? {
27 |         let file_name = entry.file_name();
28 |         let file_name = Path::new(&file_name);
29 |
30 |         if let Some(extension) = file_name.extension() {
31 |             match extension.to_str() {
32 |                 None | Some("drv") | Some("chroot") => {
33 |                     tracing::debug!(
34 |                         "skipping file with weird or uninteresting extension {extension:?}"
35 |                     );
36 |                     continue;
37 |                 }
38 |                 _ => {}
39 |             }
40 |         }
41 |
42 |         if let Some(s) = file_name.to_str() {
43 |             // Special paths (so far only `.links`)
44 |             if s == ".links" {
45 |                 continue;
46 |             }
47 |         }
48 |
49 |         paths.insert(store_dir.join(file_name));
50 |     }
51 |     Ok(paths)
52 | }
53 |
--------------------------------------------------------------------------------
/shell.nix:
--------------------------------------------------------------------------------
1 | (import
2 |   (
3 |     let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
4 |     fetchTarball {
5 |       url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
6 |       sha256 = lock.nodes.flake-compat.locked.narHash;
7 |     }
8 |   )
9 |   { src = ./.; }
10 | ).shellNix
11 |
--------------------------------------------------------------------------------
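
Editor's note: the `--use-gha-cache` and `--use-flakehub` flags in `magic-nix-cache/src/main.rs` use clap's `Option<Option<T>>` pattern to express a tri-state preference. The following standalone snippet is a minimal illustrative sketch (not part of the repository) of those semantics, mirroring the `From<Option<Option<CacheTrinary>>>` conversion defined in `main.rs`: an omitted flag means "no preference", a bare flag means "enabled", and an explicit value such as `--use-flakehub=disabled` is taken as given.

```rust
// Illustrative sketch only; mirrors the conversion in magic-nix-cache/src/main.rs.
#[derive(Debug, Clone, Copy, PartialEq)]
enum CacheTrinary {
    NoPreference,
    Enabled,
    Disabled,
}

fn preference(flag: Option<Option<CacheTrinary>>) -> CacheTrinary {
    match flag {
        None => CacheTrinary::NoPreference,  // flag not passed at all
        Some(None) => CacheTrinary::Enabled, // bare flag, e.g. `--use-flakehub`
        Some(Some(v)) => v,                  // explicit value, e.g. `--use-flakehub=disabled`
    }
}

fn main() {
    assert_eq!(preference(None), CacheTrinary::NoPreference);
    assert_eq!(preference(Some(None)), CacheTrinary::Enabled);
    assert_eq!(
        preference(Some(Some(CacheTrinary::Disabled))),
        CacheTrinary::Disabled
    );
    println!("tri-state flag semantics verified");
}
```

This is why `Args::validate` can distinguish "the user never mentioned the GHA cache" from "the user explicitly enabled it" when rejecting `--use-gha-cache` on GitLab CI.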