├── .dockerignore ├── .envrc ├── .github └── workflows │ ├── go.yml │ └── release.yml ├── .gitignore ├── .goreleaser.yml ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── README.md ├── api └── unpack │ └── index.go ├── default.nix ├── deploy └── tf_aws_apprunner │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── flake.lock ├── flake.nix ├── go.mod ├── go.sum ├── main.go ├── overlay.nix ├── pkg ├── libstore │ ├── binary_cache.go │ ├── doc.go │ ├── file_binary_cache_store.go │ ├── gcs_binary_cache_store.go │ ├── http_binary_cache_store.go │ └── s3_binary_cache_store.go ├── nar │ ├── doc.go │ ├── dump.go │ ├── dump_nonwindows_test.go │ ├── dump_test.go │ ├── fixtures_test.go │ ├── header.go │ ├── header_mode.go │ ├── header_mode_windows.go │ ├── header_test.go │ ├── ls │ │ ├── doc.go │ │ ├── list.go │ │ └── list_test.go │ ├── reader.go │ ├── reader_test.go │ ├── types.go │ ├── util.go │ ├── util_test.go │ ├── writer.go │ └── writer_test.go ├── narinfo │ ├── check.go │ ├── narinfo_test.go │ ├── parser.go │ ├── signature.go │ ├── signature_test.go │ └── types.go ├── nixbase32 │ ├── doc.go │ ├── nixbase32.go │ └── nixbase32_test.go ├── nixhash │ ├── algo.go │ ├── algo_test.go │ ├── encoding.go │ ├── hash.go │ ├── hash_test.go │ ├── hash_with_encoding.go │ ├── parse.go │ └── util.go ├── nixpath │ ├── nixpath.go │ ├── nixpath_test.go │ └── references │ │ ├── refs.go │ │ └── refs_test.go └── wire │ ├── bytes_reader.go │ ├── bytes_writer.go │ ├── read.go │ ├── read_test.go │ ├── wire.go │ ├── write.go │ └── write_test.go ├── renovate.json ├── shell.nix ├── start-dev ├── tests ├── README.md └── integration_test.go └── views ├── index.html └── robots.txt /.dockerignore: -------------------------------------------------------------------------------- 1 | *.nix 2 | Dockerfile 3 | LICENSE 4 | README.md 5 | flake.* 6 | nar-serve 7 | result 8 | start-dev 9 | -------------------------------------------------------------------------------- /.envrc: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # This will be supported in the future 5 | export NIX_USER_CONF_FILES=$PWD/etc/nix.conf 6 | 7 | if ! nix flake info &>/dev/null; then 8 | echo "This environment needs Nix flakes edition" >&2 9 | exit 1 10 | fi 11 | 12 | watch_file devshell.toml 13 | watch_file flake.lock 14 | watch_file flake.nix 15 | mkdir -p "$(direnv_layout_dir)" 16 | eval "$(nix print-dev-env --profile "$(direnv_layout_dir)/dev-env")" 17 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | 11 | build: 12 | name: Build 13 | runs-on: ubuntu-latest 14 | steps: 15 | 16 | - name: Checkout 17 | uses: actions/checkout@v4 18 | 19 | - name: Set up Go 20 | uses: actions/setup-go@v5 21 | with: 22 | go-version-file: go.mod 23 | 24 | - name: Build 25 | run: go build -v . 26 | 27 | - name: Test 28 | run: go test -v . 
29 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: goreleaser 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | permissions: 9 | contents: write 10 | 11 | jobs: 12 | goreleaser: 13 | runs-on: ubuntu-latest 14 | steps: 15 | 16 | - name: Checkout 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | 21 | - name: Set up Go 22 | uses: actions/setup-go@v5 23 | with: 24 | go-version-file: go.mod 25 | 26 | - name: Run GoReleaser 27 | uses: goreleaser/goreleaser-action@v6 28 | with: 29 | distribution: goreleaser 30 | version: latest 31 | args: release --clean 32 | env: 33 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | nar-serve 2 | dist/ 3 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # Documentation at http://goreleaser.com 2 | before: 3 | hooks: 4 | - go mod tidy 5 | builds: 6 | - env: 7 | - CGO_ENABLED=0 8 | goos: 9 | - linux 10 | - darwin 11 | archives: 12 | - name_template: >- 13 | {{- .ProjectName }}_ 14 | {{- title .Os }}_ 15 | {{- if eq .Arch "amd64" }}x86_64 16 | {{- else if eq .Arch "386" }}i386 17 | {{- else }}{{ .Arch }}{{ end }} 18 | {{- if .Arm }}v{{ .Arm }}{{ end -}} 19 | checksum: 20 | name_template: 'checksums.txt' 21 | snapshot: 22 | name_template: "{{ .Tag }}-next" 23 | changelog: 24 | sort: asc 25 | filters: 26 | exclude: 27 | - '^docs:' 28 | - '^test:' 29 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | 2 | 0.7.0 / 2024-07-31 3 | 
================== 4 | 5 | * feat: allow mapping hashes to subdomains (#48) 6 | * feat: show to which cache the server is bound 7 | * feat: add HTTP_ADDR env var 8 | * feat: add support for zstd decoding (#43) 9 | * feat: expose the executable bit into a HTTP header (#27) 10 | * change: NAR_CACHE_URL -> NIX_CACHE_URL 11 | 12 | 0.5.0 / 2021-07-16 13 | ================== 14 | 15 | * use goreleaser to manage releases (#16) 16 | 17 | 0.4.0 / 2021-07-16 18 | ================== 19 | 20 | * fix build 21 | * use the go-nix library again 22 | * bump dependencies 23 | * deploy: add AWS apprunner example 24 | * add support for local directory as a backend (#14) 25 | 26 | 0.3.0 / 2020-10-24 27 | ================== 28 | 29 | * main: fix PORT to addr logic 30 | * fix nix build 31 | * Add integration tests for nar-serve (#13) 32 | * Make nar-serve and go-nix monorepo (#12) 33 | * ci: no need to pull dependencies 34 | 35 | 0.2.0 / 2020-08-18 36 | ================== 37 | 38 | * Change default port to 8383 and NIX_CACHE_URI to NIX_CACHE_URL 39 | * Update vendorSha256 value from base-64 to base-32 40 | 41 | 0.1.0 / 2020-08-11 42 | ================== 43 | 44 | * update go-nix hash and refactor index.go to satisfy the new go-nix (#9) 45 | * Create go.yml 46 | * overlay: fix naming 47 | * fix vendorSha256 48 | * add overlay.nix file 49 | * fix the build 50 | * use the BinaryCacheReader interface 51 | * update gopath after ownership change 52 | * Merge pull request #6 from numtide/docker-image 53 | * add /healthz endpoint 54 | * add Dockerfile 55 | * Revert "Revert "stream the directory listing"" 56 | * flakeify 57 | * cleanup 58 | * remove now.sh deployment 59 | * Revert "stream the directory listing" 60 | * stream the directory listing 61 | * README: move issues to GitHub issues 62 | * README: add note on .ls files 63 | * add directory listing 64 | * implement symlinks as HTTP redirects 65 | * README: one more known issue 66 | * introduce MountPath for the handlers 67 | * add 
robots.txt 68 | * README: fixes 69 | * work on the presentation for a bit 70 | * split up the api and public files 71 | * add ./start-dev script 72 | * add shell.nix 73 | * fix the deployment 74 | * fix file listing 75 | * make the cache configurable 76 | * now: respect the go modules pinning 77 | * Create LICENSE 78 | * fix deployment 79 | * init project 80 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.23 as builder 2 | 3 | WORKDIR /go/src/app 4 | COPY go.mod go.sum ./ 5 | 6 | RUN go mod download 7 | 8 | COPY . ./ 9 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -ldflags '-w -extldflags "-static"' -o /go-webserver ./*.go 10 | 11 | FROM alpine 12 | RUN apk add --no-cache ca-certificates 13 | 14 | COPY --from=builder /go-webserver /app 15 | 16 | ENTRYPOINT ["/app"] 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2019 NumTide Ltd and contributors 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # nar-serve - Serve NAR file content directly from cache 2 | 3 | Push your build artifacts to one place. 4 | 5 | All the files in https://cache.nixos.org are packed in NAR files which makes 6 | them not directly accessible. This service allows to download, decompress, 7 | unpack and serve any file in the cache on the fly. 8 | 9 | ## Use cases 10 | 11 | * Avoid publishing build artifacts to both the binary cache and another service. 12 | * Allows to share build results easily. 13 | * Inspect the content of a NAR file. 14 | 15 | ## Development 16 | 17 | Inside the provided nix shell run: 18 | 19 | ```shell 20 | ./start-dev 21 | ``` 22 | 23 | This will create a small local server with live reload that emulates now.sh. 24 | 25 | Currently, the default port is 8383. You can change it by setting the `PORT` 26 | environment variable, or `HTTP_ADDR` to also change the bind address. 
27 | 28 | ## Usage 29 | 30 | Store contents can be fetched via a simple HTTP GET request. 31 | 32 | Append any store path to the hostname to fetch and unpack it on 33 | the fly. That's it. 34 | 35 | E.g.: 36 | 37 | * https://serve.ntd.one/nix/store/barxv95b8arrlh97s6axj8k7ljn7aky1-go-1.12/share/go/doc/effective_go.html 38 | 39 | NAR archives also contain information about the executable bit for each contained file. 40 | nar-serve uses a custom HTTP header named `NAR-executable` to indicate whether the fetched file would be executable. 41 | 42 | ## Configuration 43 | 44 | You can use the following environment variables to configure nar-serve: 45 | 46 | | Name | Default value | Description | 47 | |:-- |:-- |:-- | 48 | | `PORT` | `8383` | Port number on which nar-service listens | 49 | | `HTTP_ADDR` | `:$PORT` | HTTP address to bind the server to. When set, takes precedence over $PORT. | 50 | | `NIX_CACHE_URL` | `https://cache.nixos.org` | The URL of the Nix store from which NARs are fetched | 51 | | `DOMAIN` | "" | When set, also serve `.$DOMAIN` paths. | 52 | 53 | ## Contributing 54 | 55 | Contributions are welcome! 56 | 57 | Before adding any new feature it might be best to first discuss them by 58 | creating a new issue in https://github.com/numtide/nar-serve/issues . 59 | 60 | All code is licensed under the Apache 2.0 license. 
61 | -------------------------------------------------------------------------------- /api/unpack/index.go: -------------------------------------------------------------------------------- 1 | package unpack 2 | 3 | import ( 4 | "compress/bzip2" 5 | "context" 6 | "fmt" 7 | "log" 8 | "io" 9 | "mime" 10 | "net/http" 11 | "path/filepath" 12 | "strings" 13 | 14 | "github.com/numtide/nar-serve/pkg/libstore" 15 | "github.com/numtide/nar-serve/pkg/nar" 16 | "github.com/numtide/nar-serve/pkg/narinfo" 17 | 18 | "github.com/go-chi/chi/v5" 19 | "github.com/klauspost/compress/zstd" 20 | "github.com/ulikunitz/xz" 21 | ) 22 | 23 | type Handler struct { 24 | cache libstore.BinaryCacheReader 25 | mountPath string 26 | } 27 | 28 | func NewHandler(cache libstore.BinaryCacheReader, mountPath string) *Handler { 29 | return &Handler{ 30 | cache: cache, 31 | mountPath: mountPath, 32 | } 33 | } 34 | 35 | // MountPath is where this handler is supposed to be mounted 36 | func (h *Handler) MountPath() string { 37 | return h.mountPath 38 | } 39 | 40 | // Handler is the entry-point for @now/go as well as the stub main.go net/http 41 | func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { 42 | narDir := chi.URLParam(req, "narDir") 43 | if narDir == "" { 44 | w.Header().Set("Content-Type", "text/plain") 45 | http.Error(w, "store path missing", 404) 46 | return 47 | } 48 | 49 | narHash := strings.Split(narDir, "-")[0] 50 | 51 | h.ServeNAR(narHash, w, req) 52 | } 53 | 54 | func (h *Handler) ServeNAR(narHash string, w http.ResponseWriter, req *http.Request) { 55 | ctx := req.Context() 56 | 57 | log.Println("narHash=", narHash) 58 | 59 | // Do some path cleanup 60 | // ignore trailing slashes 61 | newPath := strings.TrimRight(req.URL.Path, "/") 62 | // remove the mount path and nar hash from the path 63 | if strings.HasPrefix(newPath, h.mountPath) { 64 | components := strings.Split(newPath, "/") 65 | newPath = strings.Join(components[4:], "/") 66 | } 67 | newPath = "/" + 
strings.TrimLeft(newPath, "/") 68 | log.Println("newPath=", newPath) 69 | 70 | // Get the NAR info to find the NAR 71 | narinfo, err := getNarInfo(ctx, h.cache, narHash) 72 | if err != nil { 73 | http.Error(w, err.Error(), 500) 74 | return 75 | } 76 | 77 | // TODO: consider keeping a LRU cache 78 | narPATH := narinfo.URL 79 | log.Println("fetching the NAR:", narPATH) 80 | file, err := h.cache.GetFile(ctx, narPATH) 81 | if err != nil { 82 | http.Error(w, err.Error(), 500) 83 | return 84 | } 85 | defer file.Close() 86 | 87 | var r io.Reader 88 | r = file 89 | 90 | // decompress on the fly 91 | switch narinfo.Compression { 92 | case "xz": 93 | r, err = xz.NewReader(r) 94 | if err != nil { 95 | http.Error(w, err.Error(), 500) 96 | return 97 | } 98 | case "bzip2": 99 | r = bzip2.NewReader(r) 100 | case "zstd": 101 | r, err = zstd.NewReader(r) 102 | if err != nil { 103 | http.Error(w, err.Error(), 500) 104 | return 105 | } 106 | default: 107 | http.Error(w, fmt.Sprintf("compression %s not handled", narinfo.Compression), 500) 108 | return 109 | } 110 | 111 | // TODO: try to load .ls files to speed-up the file lookups 112 | 113 | narReader, err := nar.NewReader(r) 114 | if err != nil { 115 | http.Error(w, err.Error(), 500) 116 | return 117 | } 118 | 119 | for { 120 | hdr, err := narReader.Next() 121 | if err != nil { 122 | if err == io.EOF { 123 | http.Error(w, "file not found", 404) 124 | } else { 125 | http.Error(w, err.Error(), 500) 126 | } 127 | return 128 | } 129 | 130 | // we've got a match! 131 | if hdr.Path == newPath { 132 | switch hdr.Type { 133 | case nar.TypeDirectory: 134 | w.Header().Set("Content-Type", "text/html") 135 | fmt.Fprintf(w, "

%s is a directory:

    ", hdr.Path) 136 | flush(w) 137 | for { 138 | hdr2, err := narReader.Next() 139 | if err != nil { 140 | if err == io.EOF { 141 | break 142 | } else { 143 | http.Error(w, err.Error(), 500) 144 | } 145 | } 146 | 147 | if !strings.HasPrefix(hdr2.Path, hdr.Path) { 148 | break 149 | } 150 | 151 | var label string 152 | switch hdr2.Type { 153 | case nar.TypeDirectory: 154 | label = hdr2.Path + "/" 155 | case nar.TypeSymlink: 156 | label = hdr2.Path + " -> " + absSymlink(narinfo, hdr2) 157 | case nar.TypeRegular: 158 | label = hdr2.Path 159 | default: 160 | http.Error(w, fmt.Sprintf("BUG: unknown NAR header type: %s", hdr.Type), 500) 161 | } 162 | 163 | fmt.Fprintf(w, "
  1. %s
  2. ", filepath.Join(narinfo.StorePath, hdr2.Path), label) 164 | flush(w) 165 | } 166 | case nar.TypeSymlink: 167 | redirectPath := absSymlink(narinfo, hdr) 168 | 169 | // Make sure the symlink is absolute 170 | 171 | if !strings.HasPrefix(redirectPath, h.mountPath) { 172 | fmt.Fprintf(w, "found symlink out of store: %s\n", redirectPath) 173 | } else { 174 | http.Redirect(w, req, redirectPath, http.StatusMovedPermanently) 175 | } 176 | case nar.TypeRegular: 177 | // TODO: ETag header matching. Use the NAR file name as the ETag 178 | // TODO: expose the executable flag somehow? 179 | ctype := mime.TypeByExtension(filepath.Ext(hdr.Path)) 180 | if ctype == "" { 181 | ctype = "application/octet-stream" 182 | // TODO: use http.DetectContentType as a fallback 183 | } 184 | 185 | if hdr.Executable { 186 | w.Header().Set("NAR-Executable", "1") 187 | } 188 | 189 | w.Header().Set("Cache-Control", "immutable") 190 | w.Header().Set("Content-Type", ctype) 191 | w.Header().Set("Content-Length", fmt.Sprintf("%d", hdr.Size)) 192 | if req.Method != "HEAD" { 193 | _, _ = io.CopyN(w, narReader, hdr.Size) 194 | } 195 | default: 196 | http.Error(w, fmt.Sprintf("BUG: unknown NAR header type: %s", hdr.Type), 500) 197 | } 198 | return 199 | } 200 | 201 | // TODO: since the nar entries are sorted it's possible to abort early by 202 | // comparing the paths 203 | } 204 | } 205 | 206 | // TODO: consider keeping a LRU cache 207 | func getNarInfo(ctx context.Context, nixCache libstore.BinaryCacheReader, key string) (*narinfo.NarInfo, error) { 208 | path := fmt.Sprintf("%s.narinfo", key) 209 | fmt.Println("Fetching the narinfo:", path, "from:", nixCache.URL()) 210 | r, err := nixCache.GetFile(ctx, path) 211 | if err != nil { 212 | return nil, err 213 | } 214 | defer r.Close() 215 | ni, err := narinfo.Parse(r) 216 | if err != nil { 217 | return nil, err 218 | } 219 | return ni, err 220 | } 221 | 222 | func absSymlink(narinfo *narinfo.NarInfo, hdr *nar.Header) string { 223 | if 
filepath.IsAbs(hdr.LinkTarget) { 224 | return hdr.LinkTarget 225 | } 226 | 227 | return filepath.Join(narinfo.StorePath, filepath.Dir(hdr.Path), hdr.LinkTarget) 228 | } 229 | 230 | func flush(rw http.ResponseWriter) { 231 | f, ok := rw.(http.Flusher) 232 | if !ok { 233 | panic("ResponseWriter is not a Flusher") 234 | } 235 | f.Flush() 236 | } 237 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | { 2 | system ? builtins.currentSystem, 3 | nixpkgs ? import { inherit system; }, 4 | }: 5 | rec { 6 | nar-serve = nixpkgs.buildGoModule { 7 | pname = "nar-serve"; 8 | version = "latest"; 9 | src = nixpkgs.lib.cleanSource ./.; 10 | vendorHash = "sha256-td9NYHGYJYPlIj2tnf5I/GnJQOOgODc6TakHFwxyvLQ="; 11 | doCheck = false; 12 | }; 13 | 14 | default = nar-serve; 15 | 16 | devShell = import ./shell.nix { inherit nixpkgs; }; 17 | } 18 | -------------------------------------------------------------------------------- /deploy/tf_aws_apprunner/README.md: -------------------------------------------------------------------------------- 1 | # tf_aws_apprunner 2 | 3 | A small example on how to deploy it to AWS using the new App Runner. 
4 | -------------------------------------------------------------------------------- /deploy/tf_aws_apprunner/main.tf: -------------------------------------------------------------------------------- 1 | # Make a copy of the docker image in that repository 2 | resource "aws_ecr_repository" "nar_serve" { 3 | name = var.name 4 | image_tag_mutability = "MUTABLE" 5 | tags = var.tags 6 | } 7 | 8 | resource "aws_iam_role" "nar_serve_access_role" { 9 | name = "${var.name}-access-role" 10 | assume_role_policy = < 1 { 124 | return parts[0] 125 | } 126 | return "" 127 | } 128 | -------------------------------------------------------------------------------- /overlay.nix: -------------------------------------------------------------------------------- 1 | final: prev: { nar-serve = import ./. { nixpkgs = final; }; } 2 | -------------------------------------------------------------------------------- /pkg/libstore/binary_cache.go: -------------------------------------------------------------------------------- 1 | package libstore 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net/url" 8 | ) 9 | 10 | // DefaultCache points to our beloved https://cache.nixos.org 11 | func DefaultCache() HTTPBinaryCacheStore { 12 | u, _ := url.Parse("https://cache.nixos.org") 13 | return HTTPBinaryCacheStore{u} 14 | } 15 | 16 | // BinaryCacheReader represents a read-only binary cache store 17 | type BinaryCacheReader interface { 18 | FileExists(ctx context.Context, path string) (bool, error) 19 | GetFile(ctx context.Context, path string) (io.ReadCloser, error) 20 | URL() string 21 | } 22 | 23 | // NewBinaryCacheReader parses the storeURL and returns the proper store 24 | // reader for it. 
25 | func NewBinaryCacheReader(ctx context.Context, storeURL string) (BinaryCacheReader, error) { 26 | u, err := url.Parse(storeURL) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | switch u.Scheme { 32 | case "http", "https": 33 | return NewHTTPBinaryCacheStore(u), nil 34 | case "gs": 35 | return NewGCSBinaryCacheStore(ctx, u) 36 | case "s3": 37 | return NewS3BinaryCacheStore(u) 38 | case "file": 39 | return NewFileBinaryCacheStore(u), nil 40 | default: 41 | return nil, fmt.Errorf("scheme %s is not supported", u.Scheme) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /pkg/libstore/doc.go: -------------------------------------------------------------------------------- 1 | // Package libstore implements a subset of the nix libstore API 2 | package libstore 3 | -------------------------------------------------------------------------------- /pkg/libstore/file_binary_cache_store.go: -------------------------------------------------------------------------------- 1 | package libstore 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "net/url" 8 | "os" 9 | "path" 10 | "path/filepath" 11 | "strings" 12 | ) 13 | 14 | var _ BinaryCacheReader = FileBinaryCacheStore{} 15 | 16 | type FileBinaryCacheStore struct { 17 | path string 18 | } 19 | 20 | func NewFileBinaryCacheStore(u *url.URL) FileBinaryCacheStore { 21 | return FileBinaryCacheStore{u.Path} 22 | } 23 | 24 | func (c FileBinaryCacheStore) checkPath(p string) error { 25 | if strings.HasPrefix(filepath.Clean(p), ".") { 26 | return errors.New("relative paths are not allowed") 27 | } 28 | return nil 29 | } 30 | 31 | func (c FileBinaryCacheStore) FileExists(ctx context.Context, p string) (bool, error) { 32 | if err := c.checkPath(p); err != nil { 33 | return false, err 34 | } 35 | _, err := os.Open(path.Join(c.path, p)) 36 | return !os.IsNotExist(err), err 37 | } 38 | 39 | func (c FileBinaryCacheStore) GetFile(ctx context.Context, p string) 
(io.ReadCloser, error) { 40 | if err := c.checkPath(p); err != nil { 41 | return nil, err 42 | } 43 | return os.Open(path.Join(c.path, p)) 44 | } 45 | 46 | func (c FileBinaryCacheStore) URL() string { 47 | return "file://" + c.path 48 | } 49 | -------------------------------------------------------------------------------- /pkg/libstore/gcs_binary_cache_store.go: -------------------------------------------------------------------------------- 1 | package libstore 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "net/url" 7 | "path" 8 | 9 | "cloud.google.com/go/storage" 10 | ) 11 | 12 | var _ BinaryCacheReader = GCSBinaryCacheStore{} 13 | 14 | // GCSBinaryCacheStore ... 15 | type GCSBinaryCacheStore struct { 16 | url *url.URL 17 | bucket *storage.BucketHandle 18 | prefix string 19 | } 20 | 21 | // NewGCSBinaryCacheStore -- 22 | func NewGCSBinaryCacheStore(ctx context.Context, u *url.URL) (*GCSBinaryCacheStore, error) { 23 | client, err := storage.NewClient(ctx) 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | return &GCSBinaryCacheStore{ 29 | url: u, 30 | bucket: client.Bucket(u.Host), 31 | prefix: u.Path, 32 | }, nil 33 | } 34 | 35 | // getObject composes the path with the prefix to return an ObjectHandle. 36 | func (c GCSBinaryCacheStore) getObject(p string) *storage.ObjectHandle { 37 | objectPath := path.Join(c.prefix, p) 38 | if objectPath[0] == '/' { 39 | objectPath = objectPath[1:] 40 | } 41 | return c.bucket.Object(objectPath) 42 | } 43 | 44 | // FileExists returns true if the file is already in the store. 45 | // err is used for transient issues like networking errors. 
46 | func (c GCSBinaryCacheStore) FileExists(ctx context.Context, path string) (bool, error) { 47 | obj := c.getObject(path) 48 | _, err := obj.Attrs(ctx) 49 | if err == nil { 50 | return true, nil 51 | } 52 | if err == storage.ErrObjectNotExist { 53 | return false, nil 54 | } 55 | return false, err 56 | } 57 | 58 | // GetFile returns a file stream from the store if the file exists 59 | func (c GCSBinaryCacheStore) GetFile(ctx context.Context, path string) (io.ReadCloser, error) { 60 | obj := c.getObject(path) 61 | return obj.NewReader(ctx) 62 | } 63 | 64 | // URL returns the store URI 65 | func (c GCSBinaryCacheStore) URL() string { 66 | return c.url.String() 67 | } 68 | -------------------------------------------------------------------------------- /pkg/libstore/http_binary_cache_store.go: -------------------------------------------------------------------------------- 1 | package libstore 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "net/url" 9 | "path" 10 | ) 11 | 12 | var _ BinaryCacheReader = HTTPBinaryCacheStore{} 13 | 14 | // HTTPBinaryCacheStore ... 15 | type HTTPBinaryCacheStore struct { 16 | url *url.URL // assumes the URI doesn't end with '/' 17 | } 18 | 19 | // NewHTTPBinaryCacheStore --- 20 | func NewHTTPBinaryCacheStore(u *url.URL) HTTPBinaryCacheStore { 21 | return HTTPBinaryCacheStore{u} 22 | } 23 | 24 | // getURL composes the path with the prefix to return an URL. 25 | func (c HTTPBinaryCacheStore) getURL(p string) string { 26 | newPath := path.Join(c.url.Path, p) 27 | x, _ := c.url.Parse(newPath) 28 | return x.String() 29 | } 30 | 31 | // FileExists returns true if the file is already in the store. 32 | // err is used for transient issues like networking errors. 
33 | func (c HTTPBinaryCacheStore) FileExists(ctx context.Context, path string) (bool, error) { 34 | resp, err := http.Head(c.getURL(path)) 35 | if err != nil { 36 | return false, err 37 | } 38 | return (resp.StatusCode == 200), nil 39 | } 40 | 41 | // GetFile returns a file stream from the store if the file exists 42 | func (c HTTPBinaryCacheStore) GetFile(ctx context.Context, path string) (io.ReadCloser, error) { 43 | resp, err := http.Get(c.getURL(path)) 44 | if err != nil { 45 | return nil, err 46 | } 47 | if resp.StatusCode != 200 { 48 | return nil, fmt.Errorf("unexpected file status '%s'", resp.Status) 49 | } 50 | return resp.Body, nil 51 | } 52 | 53 | // URL returns the store URI 54 | func (c HTTPBinaryCacheStore) URL() string { 55 | return c.url.String() 56 | } 57 | -------------------------------------------------------------------------------- /pkg/libstore/s3_binary_cache_store.go: -------------------------------------------------------------------------------- 1 | package libstore 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/aws/aws-sdk-go/aws" 8 | "github.com/aws/aws-sdk-go/aws/awserr" 9 | "github.com/aws/aws-sdk-go/aws/credentials" 10 | "github.com/aws/aws-sdk-go/aws/session" 11 | "github.com/aws/aws-sdk-go/service/s3" 12 | 13 | "io" 14 | "net/url" 15 | ) 16 | 17 | type S3BinaryCacheStore struct { 18 | url *url.URL 19 | BucketName string 20 | Client *s3.S3 21 | } 22 | 23 | func NewS3BinaryCacheStore(u *url.URL) (*S3BinaryCacheStore, error) { 24 | scheme := u.Query().Get("scheme") 25 | profile := u.Query().Get("profile") 26 | region := u.Query().Get("region") 27 | endpoint := u.Query().Get("endpoint") 28 | bucketName := u.Host 29 | creds := credentials.NewChainCredentials( 30 | []credentials.Provider{ 31 | &credentials.EnvProvider{}, 32 | &credentials.SharedCredentialsProvider{}, 33 | }) 34 | 35 | var disableSSL bool 36 | switch scheme { 37 | case "http": 38 | disableSSL = true 39 | case "https", "": 40 | disableSSL = false 41 | 
default: 42 | return &S3BinaryCacheStore{}, fmt.Errorf("Unsupported scheme %s", scheme) 43 | } 44 | 45 | var sess = session.Must(session.NewSessionWithOptions(session.Options{ 46 | // Specify profile to load for the session's config 47 | Profile: profile, 48 | 49 | // Provide SDK Config options, such as Region. 50 | Config: aws.Config{ 51 | Region: aws.String(region), 52 | Endpoint: &endpoint, 53 | Credentials: creds, 54 | DisableSSL: aws.Bool(disableSSL), 55 | S3ForcePathStyle: aws.Bool(true), 56 | }, 57 | })) 58 | 59 | svc := s3.New(sess) 60 | return &S3BinaryCacheStore{ 61 | url: u, 62 | BucketName: bucketName, 63 | Client: svc, 64 | }, nil 65 | } 66 | 67 | func (c *S3BinaryCacheStore) FileExists(ctx context.Context, path string) (bool, error) { 68 | _, err := c.GetFile(ctx, path) 69 | aerr, ok := err.(awserr.Error) 70 | if ok { 71 | switch aerr.Code() { 72 | case s3.ErrCodeNoSuchKey: 73 | return false, aerr 74 | default: 75 | return true, aerr 76 | } 77 | } else { 78 | return true, nil 79 | } 80 | } 81 | 82 | func (c *S3BinaryCacheStore) GetFile(ctx context.Context, path string) (io.ReadCloser, error) { 83 | input := &s3.GetObjectInput{ 84 | Bucket: aws.String(c.BucketName), 85 | Key: aws.String(path), 86 | } 87 | 88 | obj, err := c.Client.GetObjectWithContext(ctx, input) 89 | if err != nil { 90 | return nil, err 91 | } 92 | 93 | return obj.Body, nil // for now we return Object data with type blob 94 | } 95 | 96 | // URL returns the store URI 97 | func (c S3BinaryCacheStore) URL() string { 98 | return c.url.String() 99 | } 100 | -------------------------------------------------------------------------------- /pkg/nar/doc.go: -------------------------------------------------------------------------------- 1 | // Package nar implements access to .nar files. 2 | // 3 | // Nix Archive (nar) is a file format for storing a directory or a single file 4 | // in a binary reproducible format. 
This is the format that is being used to 5 | // pack and distribute Nix build results. It doesn't store any timestamps or 6 | // similar fields available in conventional filesystems. .nar files can be read 7 | // and written in a streaming manner. 8 | package nar 9 | -------------------------------------------------------------------------------- /pkg/nar/dump.go: -------------------------------------------------------------------------------- 1 | package nar 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "path/filepath" 8 | "syscall" 9 | ) 10 | 11 | // SourceFilterFunc is the interface for creating source filters. 12 | // If the function returns true, the file is copied to the Nix store, otherwise it is omitted, 13 | // this mimics the behaviour of the Nix function builtins.filterSource. 14 | type SourceFilterFunc func(path string, nodeType NodeType) bool 15 | 16 | // DumpPath will serialize a path on the local file system to NAR format, 17 | // and write it to the passed writer. 18 | func DumpPath(w io.Writer, path string) error { 19 | return DumpPathFilter(w, path, nil) 20 | } 21 | 22 | // DumpPathFilter will serialize a path on the local file system to NAR format, 23 | // and write it to the passed writer, filtering out any files where the filter 24 | // function returns false. 25 | func DumpPathFilter(w io.Writer, path string, filter SourceFilterFunc) error { 26 | // initialize the nar writer 27 | nw, err := NewWriter(w) 28 | if err != nil { 29 | return err 30 | } 31 | 32 | // make sure the NAR writer is always closed, so the underlying goroutine is stopped 33 | defer nw.Close() 34 | 35 | err = dumpPath(nw, path, "/", filter) 36 | if err != nil { 37 | return err 38 | } 39 | 40 | return nw.Close() 41 | } 42 | 43 | // dumpPath recursively calls itself for every node in the path. 44 | func dumpPath(nw *Writer, path string, subpath string, filter SourceFilterFunc) error { 45 | // assemble the full path. 
46 | p := filepath.Join(path, subpath) 47 | 48 | // peek at the path 49 | fi, err := os.Lstat(p) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | var nodeType NodeType 55 | if fi.Mode()&os.ModeSymlink == os.ModeSymlink { 56 | nodeType = TypeSymlink 57 | } else if fi.IsDir() { 58 | nodeType = TypeDirectory 59 | } else if fi.Mode().IsRegular() { 60 | nodeType = TypeRegular 61 | } else { 62 | return fmt.Errorf("unknown type for %v", p) 63 | } 64 | 65 | if filter != nil && !filter(p, nodeType) { 66 | return nil 67 | } 68 | 69 | switch nodeType { 70 | case TypeSymlink: 71 | linkTarget, err := os.Readlink(p) 72 | if err != nil { 73 | return err 74 | } 75 | 76 | // write the symlink node 77 | err = nw.WriteHeader(&Header{ 78 | Path: subpath, 79 | Type: TypeSymlink, 80 | LinkTarget: linkTarget, 81 | }) 82 | if err != nil { 83 | return err 84 | } 85 | 86 | return nil 87 | 88 | case TypeDirectory: 89 | // write directory node 90 | err := nw.WriteHeader(&Header{ 91 | Path: subpath, 92 | Type: TypeDirectory, 93 | }) 94 | if err != nil { 95 | return err 96 | } 97 | 98 | // look at the children 99 | files, err := os.ReadDir(filepath.Join(path, subpath)) 100 | if err != nil { 101 | return err 102 | } 103 | 104 | // loop over all elements 105 | for _, file := range files { 106 | err := dumpPath(nw, path, filepath.Join(subpath, file.Name()), filter) 107 | if err != nil { 108 | return err 109 | } 110 | } 111 | 112 | return nil 113 | 114 | case TypeRegular: 115 | // write regular node 116 | err := nw.WriteHeader(&Header{ 117 | Path: subpath, 118 | Type: TypeRegular, 119 | Size: fi.Size(), 120 | // If it's executable by the user, it'll become executable. 121 | // This matches nix's dump() function behaviour. 
122 | Executable: fi.Mode()&syscall.S_IXUSR != 0, 123 | }) 124 | if err != nil { 125 | return err 126 | } 127 | 128 | // open the file 129 | f, err := os.Open(p) 130 | if err != nil { 131 | return err 132 | } 133 | defer f.Close() 134 | 135 | // read in contents 136 | n, err := io.Copy(nw, f) 137 | if err != nil { 138 | return err 139 | } 140 | 141 | // check if read bytes matches fi.Size() 142 | if n != fi.Size() { 143 | return fmt.Errorf("read %v, expected %v bytes while reading %v", n, fi.Size(), p) 144 | } 145 | 146 | return nil 147 | } 148 | 149 | return fmt.Errorf("unknown type for file %v", p) 150 | } 151 | -------------------------------------------------------------------------------- /pkg/nar/dump_nonwindows_test.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package nar_test 5 | 6 | import ( 7 | "bytes" 8 | "path/filepath" 9 | "syscall" 10 | "testing" 11 | 12 | "github.com/numtide/nar-serve/pkg/nar" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | // TestDumpPathUnknown makes sure calling DumpPath on a path with a fifo 17 | // doesn't panic, but returns an error. 
18 | func TestDumpPathUnknown(t *testing.T) { 19 | tmpDir := t.TempDir() 20 | p := filepath.Join(tmpDir, "a") 21 | 22 | err := syscall.Mkfifo(p, 0o644) 23 | if err != nil { 24 | panic(err) 25 | } 26 | 27 | var buf bytes.Buffer 28 | 29 | err = nar.DumpPath(&buf, p) 30 | assert.Error(t, err) 31 | assert.Containsf(t, err.Error(), "unknown type", "error should complain about unknown type") 32 | } 33 | -------------------------------------------------------------------------------- /pkg/nar/dump_test.go: -------------------------------------------------------------------------------- 1 | package nar_test 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "os" 7 | "path/filepath" 8 | "runtime" 9 | "syscall" 10 | "testing" 11 | 12 | "github.com/numtide/nar-serve/pkg/nar" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func TestDumpPathEmptyDir(t *testing.T) { 17 | var buf bytes.Buffer 18 | 19 | err := nar.DumpPath(&buf, t.TempDir()) 20 | if assert.NoError(t, err) { 21 | assert.Equal(t, genEmptyDirectoryNar(), buf.Bytes()) 22 | } 23 | } 24 | 25 | func TestDumpPathOneByteRegular(t *testing.T) { 26 | t.Run("non-executable", func(t *testing.T) { 27 | tmpDir := t.TempDir() 28 | p := filepath.Join(tmpDir, "a") 29 | 30 | err := os.WriteFile(p, []byte{0x1}, os.ModePerm&syscall.S_IRUSR) 31 | if err != nil { 32 | panic(err) 33 | } 34 | 35 | var buf bytes.Buffer 36 | 37 | err = nar.DumpPath(&buf, p) 38 | if assert.NoError(t, err) { 39 | assert.Equal(t, genOneByteRegularNar(), buf.Bytes()) 40 | } 41 | }) 42 | 43 | t.Run("executable", func(t *testing.T) { 44 | // This writes to the filesystem and looks at the attributes. 45 | // As you can't represent the executable bit on windows, it would fail. 
46 | if runtime.GOOS == "windows" { 47 | return 48 | } 49 | tmpDir := t.TempDir() 50 | p := filepath.Join(tmpDir, "a") 51 | 52 | err := os.WriteFile(p, []byte{0x1}, os.ModePerm&(syscall.S_IRUSR|syscall.S_IXUSR)) 53 | if err != nil { 54 | panic(err) 55 | } 56 | 57 | var buf bytes.Buffer 58 | 59 | // call dump path on it again 60 | err = nar.DumpPath(&buf, p) 61 | if assert.NoError(t, err) { 62 | // We don't have a fixture with executable bit set, 63 | // so pipe the nar into a reader and check the returned first header. 64 | nr, err := nar.NewReader(&buf) 65 | if err != nil { 66 | panic(err) 67 | } 68 | 69 | hdr, err := nr.Next() 70 | if err != nil { 71 | panic(err) 72 | } 73 | assert.True(t, hdr.Executable, "regular should be executable") 74 | } 75 | }) 76 | } 77 | 78 | func TestDumpPathSymlink(t *testing.T) { 79 | tmpDir := t.TempDir() 80 | p := filepath.Join(tmpDir, "a") 81 | 82 | err := os.Symlink("/nix/store/somewhereelse", p) 83 | if err != nil { 84 | panic(err) 85 | } 86 | 87 | var buf bytes.Buffer 88 | 89 | err = nar.DumpPath(&buf, p) 90 | if assert.NoError(t, err) { 91 | assert.Equal(t, genSymlinkNar(), buf.Bytes()) 92 | } 93 | } 94 | 95 | func TestDumpPathRecursion(t *testing.T) { 96 | tmpDir := t.TempDir() 97 | p := filepath.Join(tmpDir, "a") 98 | 99 | err := os.WriteFile(p, []byte{0x1}, os.ModePerm&syscall.S_IRUSR) 100 | if err != nil { 101 | panic(err) 102 | } 103 | 104 | var buf bytes.Buffer 105 | 106 | err = nar.DumpPath(&buf, tmpDir) 107 | if assert.NoError(t, err) { 108 | // We don't have a fixture for the created path 109 | // so pipe the nar into a reader and check the headers returned. 
110 | nr, err := nar.NewReader(&buf) 111 | if err != nil { 112 | panic(err) 113 | } 114 | 115 | // read in first node 116 | hdr, err := nr.Next() 117 | assert.NoError(t, err) 118 | assert.Equal(t, &nar.Header{ 119 | Path: "/", 120 | Type: nar.TypeDirectory, 121 | }, hdr) 122 | 123 | // read in second node 124 | hdr, err = nr.Next() 125 | assert.NoError(t, err) 126 | assert.Equal(t, &nar.Header{ 127 | Path: "/a", 128 | Type: nar.TypeRegular, 129 | Size: 1, 130 | }, hdr) 131 | 132 | // read in contents 133 | contents, err := io.ReadAll(nr) 134 | assert.NoError(t, err) 135 | assert.Equal(t, []byte{0x1}, contents) 136 | 137 | // we should be done 138 | _, err = nr.Next() 139 | assert.Equal(t, io.EOF, err) 140 | } 141 | } 142 | 143 | func TestDumpPathFilter(t *testing.T) { 144 | t.Run("unfiltered", func(t *testing.T) { 145 | tmpDir := t.TempDir() 146 | p := filepath.Join(tmpDir, "a") 147 | 148 | err := os.WriteFile(p, []byte{0x1}, os.ModePerm&syscall.S_IRUSR) 149 | if err != nil { 150 | panic(err) 151 | } 152 | 153 | var buf bytes.Buffer 154 | 155 | err = nar.DumpPathFilter(&buf, p, func(name string, nodeType nar.NodeType) bool { 156 | assert.Equal(t, name, p) 157 | assert.Equal(t, nodeType, nar.TypeRegular) 158 | 159 | return true 160 | }) 161 | if assert.NoError(t, err) { 162 | assert.Equal(t, genOneByteRegularNar(), buf.Bytes()) 163 | } 164 | }) 165 | 166 | t.Run("filtered", func(t *testing.T) { 167 | tmpDir := t.TempDir() 168 | p := filepath.Join(tmpDir, "a") 169 | 170 | err := os.WriteFile(p, []byte{0x1}, os.ModePerm&syscall.S_IRUSR) 171 | if err != nil { 172 | panic(err) 173 | } 174 | 175 | var buf bytes.Buffer 176 | 177 | err = nar.DumpPathFilter(&buf, tmpDir, func(name string, nodeType nar.NodeType) bool { 178 | return name != p 179 | }) 180 | if assert.NoError(t, err) { 181 | assert.NotEqual(t, genOneByteRegularNar(), buf.Bytes()) 182 | } 183 | }) 184 | } 185 | 186 | func BenchmarkDumpPath(b *testing.B) { 187 | b.Run("testdata", func(b *testing.B) { 188 | for i 
:= 0; i < b.N; i++ { 189 | err := nar.DumpPath(io.Discard, "../../test/testdata") 190 | if err != nil { 191 | panic(err) 192 | } 193 | } 194 | }) 195 | } 196 | -------------------------------------------------------------------------------- /pkg/nar/fixtures_test.go: -------------------------------------------------------------------------------- 1 | package nar_test 2 | 3 | import ( 4 | "bytes" 5 | 6 | "github.com/numtide/nar-serve/pkg/wire" 7 | ) 8 | 9 | // genEmptyNar returns just the magic header, without any actual nodes 10 | // this is no valid NAR file, as it needs to contain at least a root. 11 | func genEmptyNar() []byte { 12 | var expectedBuf bytes.Buffer 13 | 14 | err := wire.WriteString(&expectedBuf, "nix-archive-1") 15 | if err != nil { 16 | panic(err) 17 | } 18 | 19 | return expectedBuf.Bytes() 20 | } 21 | 22 | // genEmptyDirectoryNar returns the bytes of a NAR file only containing an empty directory. 23 | func genEmptyDirectoryNar() []byte { 24 | var expectedBuf bytes.Buffer 25 | 26 | err := wire.WriteString(&expectedBuf, "nix-archive-1") 27 | if err != nil { 28 | panic(err) 29 | } 30 | 31 | err = wire.WriteString(&expectedBuf, "(") 32 | if err != nil { 33 | panic(err) 34 | } 35 | 36 | err = wire.WriteString(&expectedBuf, "type") 37 | if err != nil { 38 | panic(err) 39 | } 40 | 41 | err = wire.WriteString(&expectedBuf, "directory") 42 | if err != nil { 43 | panic(err) 44 | } 45 | 46 | err = wire.WriteString(&expectedBuf, ")") 47 | if err != nil { 48 | panic(err) 49 | } 50 | 51 | return expectedBuf.Bytes() 52 | } 53 | 54 | // genOneByteRegularNar returns the bytes of a NAR only containing a single file at the root. 
55 | func genOneByteRegularNar() []byte { 56 | var expectedBuf bytes.Buffer 57 | 58 | err := wire.WriteString(&expectedBuf, "nix-archive-1") 59 | if err != nil { 60 | panic(err) 61 | } 62 | 63 | err = wire.WriteString(&expectedBuf, "(") 64 | if err != nil { 65 | panic(err) 66 | } 67 | 68 | err = wire.WriteString(&expectedBuf, "type") 69 | if err != nil { 70 | panic(err) 71 | } 72 | 73 | err = wire.WriteString(&expectedBuf, "regular") 74 | if err != nil { 75 | panic(err) 76 | } 77 | 78 | err = wire.WriteString(&expectedBuf, "contents") 79 | if err != nil { 80 | panic(err) 81 | } 82 | 83 | err = wire.WriteBytes(&expectedBuf, []byte{0x1}) 84 | if err != nil { 85 | panic(err) 86 | } 87 | 88 | err = wire.WriteString(&expectedBuf, ")") 89 | if err != nil { 90 | panic(err) 91 | } 92 | 93 | return expectedBuf.Bytes() 94 | } 95 | 96 | // genSymlinkNar returns the bytes of a NAR only containing a single symlink at the root. 97 | func genSymlinkNar() []byte { 98 | var expectedBuf bytes.Buffer 99 | 100 | err := wire.WriteString(&expectedBuf, "nix-archive-1") 101 | if err != nil { 102 | panic(err) 103 | } 104 | 105 | err = wire.WriteString(&expectedBuf, "(") 106 | if err != nil { 107 | panic(err) 108 | } 109 | 110 | err = wire.WriteString(&expectedBuf, "type") 111 | if err != nil { 112 | panic(err) 113 | } 114 | 115 | err = wire.WriteString(&expectedBuf, "symlink") 116 | if err != nil { 117 | panic(err) 118 | } 119 | 120 | err = wire.WriteString(&expectedBuf, "target") 121 | if err != nil { 122 | panic(err) 123 | } 124 | 125 | err = wire.WriteString(&expectedBuf, "/nix/store/somewhereelse") 126 | if err != nil { 127 | panic(err) 128 | } 129 | 130 | err = wire.WriteString(&expectedBuf, ")") 131 | if err != nil { 132 | panic(err) 133 | } 134 | 135 | return expectedBuf.Bytes() 136 | } 137 | 138 | // genInvalidOrderNAR returns the bytes of a NAR file that contains a folder 139 | // with a and b directories inside, but in the wrong order (b comes first). 
140 | func genInvalidOrderNAR() []byte { 141 | var expectedBuf bytes.Buffer 142 | 143 | err := wire.WriteString(&expectedBuf, "nix-archive-1") 144 | if err != nil { 145 | panic(err) 146 | } 147 | 148 | err = wire.WriteString(&expectedBuf, "(") 149 | if err != nil { 150 | panic(err) 151 | } 152 | 153 | err = wire.WriteString(&expectedBuf, "type") 154 | if err != nil { 155 | panic(err) 156 | } 157 | 158 | err = wire.WriteString(&expectedBuf, "directory") 159 | if err != nil { 160 | panic(err) 161 | } 162 | 163 | // first entry begin 164 | err = wire.WriteString(&expectedBuf, "entry") 165 | if err != nil { 166 | panic(err) 167 | } 168 | 169 | err = wire.WriteString(&expectedBuf, "(") 170 | if err != nil { 171 | panic(err) 172 | } 173 | 174 | err = wire.WriteString(&expectedBuf, "name") 175 | if err != nil { 176 | panic(err) 177 | } 178 | 179 | err = wire.WriteString(&expectedBuf, "b") 180 | if err != nil { 181 | panic(err) 182 | } 183 | 184 | err = wire.WriteString(&expectedBuf, "node") 185 | if err != nil { 186 | panic(err) 187 | } 188 | 189 | // begin 190 | err = wire.WriteString(&expectedBuf, "(") 191 | if err != nil { 192 | panic(err) 193 | } 194 | 195 | err = wire.WriteString(&expectedBuf, "type") 196 | if err != nil { 197 | panic(err) 198 | } 199 | 200 | err = wire.WriteString(&expectedBuf, "directory") 201 | if err != nil { 202 | panic(err) 203 | } 204 | 205 | err = wire.WriteString(&expectedBuf, ")") 206 | if err != nil { 207 | panic(err) 208 | } 209 | // end 210 | 211 | err = wire.WriteString(&expectedBuf, ")") 212 | if err != nil { 213 | panic(err) 214 | } 215 | // first entry end 216 | 217 | // second entry begin 218 | err = wire.WriteString(&expectedBuf, "entry") 219 | if err != nil { 220 | panic(err) 221 | } 222 | 223 | err = wire.WriteString(&expectedBuf, "(") 224 | if err != nil { 225 | panic(err) 226 | } 227 | 228 | err = wire.WriteString(&expectedBuf, "name") 229 | if err != nil { 230 | panic(err) 231 | } 232 | 233 | err = 
wire.WriteString(&expectedBuf, "a") 234 | if err != nil { 235 | panic(err) 236 | } 237 | 238 | err = wire.WriteString(&expectedBuf, "node") 239 | if err != nil { 240 | panic(err) 241 | } 242 | 243 | // begin 244 | err = wire.WriteString(&expectedBuf, "(") 245 | if err != nil { 246 | panic(err) 247 | } 248 | 249 | err = wire.WriteString(&expectedBuf, "type") 250 | if err != nil { 251 | panic(err) 252 | } 253 | 254 | err = wire.WriteString(&expectedBuf, "directory") 255 | if err != nil { 256 | panic(err) 257 | } 258 | 259 | err = wire.WriteString(&expectedBuf, ")") 260 | if err != nil { 261 | panic(err) 262 | } 263 | // end 264 | 265 | err = wire.WriteString(&expectedBuf, ")") 266 | if err != nil { 267 | panic(err) 268 | } 269 | // second entry end 270 | 271 | err = wire.WriteString(&expectedBuf, ")") 272 | if err != nil { 273 | panic(err) 274 | } 275 | 276 | return expectedBuf.Bytes() 277 | } 278 | -------------------------------------------------------------------------------- /pkg/nar/header.go: -------------------------------------------------------------------------------- 1 | package nar 2 | 3 | import ( 4 | "fmt" 5 | "io/fs" 6 | "path/filepath" 7 | "strings" 8 | "time" 9 | ) 10 | 11 | // Header represents a single header in a NAR archive. Some fields may not 12 | // be populated depending on the Type. 13 | type Header struct { 14 | Path string // Path of the file entry, relative inside the NAR 15 | Type NodeType // Typeflag is the type of header entry. 16 | LinkTarget string // Target of symlink (valid for TypeSymlink) 17 | Size int64 // Logical file size in bytes 18 | Executable bool // Set to true for files that are executable 19 | } 20 | 21 | // Validate does some consistency checking of the header structure, such as 22 | // checking for valid paths and inconsistent fields, and returns an error if it 23 | // fails validation. 
24 | func (h *Header) Validate() error { 25 | // Path needs to start with a /, and must not contain null bytes 26 | // as we might get passed windows paths, ToSlash them first. 27 | if p := filepath.ToSlash(h.Path); len(h.Path) < 1 || p[0:1] != "/" { 28 | return fmt.Errorf("path must start with a /") 29 | } 30 | 31 | if strings.ContainsAny(h.Path, "\u0000") { 32 | return fmt.Errorf("path may not contain null bytes") 33 | } 34 | 35 | // Regular files and directories may not have LinkTarget set. 36 | if h.Type == TypeRegular || h.Type == TypeDirectory { 37 | if h.LinkTarget != "" { 38 | return fmt.Errorf("type is %v, but LinkTarget is not empty", h.Type.String()) 39 | } 40 | } 41 | 42 | // Directories and Symlinks may not have Size and Executable set. 43 | if h.Type == TypeDirectory || h.Type == TypeSymlink { 44 | if h.Size != 0 { 45 | return fmt.Errorf("type is %v, but Size is not 0", h.Type.String()) 46 | } 47 | 48 | if h.Executable { 49 | return fmt.Errorf("type is %v, but Executable is true", h.Type.String()) 50 | } 51 | } 52 | 53 | // Symlinks need to specify a target. 54 | if h.Type == TypeSymlink { 55 | if h.LinkTarget == "" { 56 | return fmt.Errorf("type is symlink, but LinkTarget is empty") 57 | } 58 | } 59 | 60 | return nil 61 | } 62 | 63 | // FileInfo returns an fs.FileInfo for the Header. 64 | func (h *Header) FileInfo() fs.FileInfo { 65 | return headerFileInfo{h} 66 | } 67 | 68 | type headerFileInfo struct { 69 | h *Header 70 | } 71 | 72 | func (fi headerFileInfo) Size() int64 { return fi.h.Size } 73 | func (fi headerFileInfo) IsDir() bool { return fi.h.Type == TypeDirectory } 74 | func (fi headerFileInfo) ModTime() time.Time { return time.Unix(0, 0) } 75 | func (fi headerFileInfo) Sys() interface{} { return fi.h } 76 | 77 | // Name of the file. 78 | // Will be an empty string, if this describes the root of a NAR. 
79 | func (fi headerFileInfo) Name() string { return fi.h.Path } 80 | -------------------------------------------------------------------------------- /pkg/nar/header_mode.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package nar 5 | 6 | import ( 7 | "io/fs" 8 | "syscall" 9 | ) 10 | 11 | func (fi headerFileInfo) Mode() fs.FileMode { 12 | // everything in the nix store is readable by user, group and other. 13 | var mode fs.FileMode 14 | 15 | switch fi.h.Type { 16 | case TypeRegular: 17 | mode = syscall.S_IRUSR | syscall.S_IRGRP | syscall.S_IROTH 18 | if fi.h.Executable { 19 | mode |= (syscall.S_IXUSR | syscall.S_IXGRP | syscall.S_IXOTH) 20 | } 21 | case TypeDirectory: 22 | mode = syscall.S_IRUSR | syscall.S_IRGRP | syscall.S_IROTH 23 | mode |= (syscall.S_IXUSR | syscall.S_IXGRP | syscall.S_IXOTH) 24 | case TypeSymlink: 25 | mode = fs.ModePerm | fs.ModeSymlink 26 | } 27 | 28 | return mode 29 | } 30 | -------------------------------------------------------------------------------- /pkg/nar/header_mode_windows.go: -------------------------------------------------------------------------------- 1 | package nar 2 | 3 | import ( 4 | "io/fs" 5 | ) 6 | 7 | func (fi headerFileInfo) Mode() fs.FileMode { 8 | // On Windows, create a very basic variant of Mode(). 9 | // we use fs.FileMode and clear the 0200 bit. 
10 | // Per https://golang.org/pkg/os/#Chmod: 11 | // “On Windows, only the 0200 bit (owner writable) of mode is used; it 12 | // controls whether the file's read-only attribute is set or cleared.” 13 | var mode fs.FileMode 14 | 15 | switch fi.h.Type { 16 | case TypeRegular: 17 | mode = fs.ModePerm 18 | case TypeDirectory: 19 | mode = fs.ModeDir 20 | case TypeSymlink: 21 | mode = fs.ModeSymlink 22 | } 23 | 24 | return mode & ^fs.FileMode(0200) 25 | } 26 | -------------------------------------------------------------------------------- /pkg/nar/header_test.go: -------------------------------------------------------------------------------- 1 | package nar_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/numtide/nar-serve/pkg/nar" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestHeaderValidate(t *testing.T) { 11 | headerRegular := &nar.Header{ 12 | Path: "/foo/bar", 13 | Type: nar.TypeRegular, 14 | LinkTarget: "", 15 | Size: 0, 16 | Executable: false, 17 | } 18 | 19 | t.Run("valid", func(t *testing.T) { 20 | vHeader := *headerRegular 21 | assert.NoError(t, vHeader.Validate()) 22 | }) 23 | 24 | t.Run("invalid path", func(t *testing.T) { 25 | invHeader := *headerRegular 26 | invHeader.Path = "foo/bar" 27 | assert.Error(t, invHeader.Validate()) 28 | 29 | invHeader.Path = "/foo/bar\000/" 30 | assert.Error(t, invHeader.Validate()) 31 | }) 32 | 33 | t.Run("LinkTarget set on regulars or directories", func(t *testing.T) { 34 | invHeader := *headerRegular 35 | invHeader.LinkTarget = "foo" 36 | 37 | assert.Error(t, invHeader.Validate()) 38 | 39 | invHeader.Type = nar.TypeDirectory 40 | assert.Error(t, invHeader.Validate()) 41 | }) 42 | 43 | t.Run("Size set on directories or symlinks", func(t *testing.T) { 44 | invHeader := *headerRegular 45 | invHeader.Type = nar.TypeDirectory 46 | invHeader.Size = 1 47 | assert.Error(t, invHeader.Validate()) 48 | 49 | invHeader = *headerRegular 50 | invHeader.Type = nar.TypeSymlink 51 | invHeader.Size = 1 52 | 
assert.Error(t, invHeader.Validate()) 53 | }) 54 | 55 | t.Run("Executable set on directories or symlinks", func(t *testing.T) { 56 | invHeader := *headerRegular 57 | invHeader.Type = nar.TypeDirectory 58 | invHeader.Executable = true 59 | assert.Error(t, invHeader.Validate()) 60 | 61 | invHeader = *headerRegular 62 | invHeader.Type = nar.TypeSymlink 63 | invHeader.Executable = true 64 | assert.Error(t, invHeader.Validate()) 65 | }) 66 | 67 | t.Run("No LinkTarget set on symlinks", func(t *testing.T) { 68 | invHeader := *headerRegular 69 | invHeader.Type = nar.TypeSymlink 70 | assert.Error(t, invHeader.Validate()) 71 | }) 72 | } 73 | -------------------------------------------------------------------------------- /pkg/nar/ls/doc.go: -------------------------------------------------------------------------------- 1 | // Package ls implements a parser for the .ls file format, which provides an 2 | // index into .nar files. 3 | 4 | // It is provided on cache.nixos.org, and more generally, written when 5 | // write-nar-listing=1 is passed while copying build results into a binary 6 | // cache. 7 | package ls 8 | -------------------------------------------------------------------------------- /pkg/nar/ls/list.go: -------------------------------------------------------------------------------- 1 | package ls 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | 8 | "github.com/numtide/nar-serve/pkg/nar" 9 | ) 10 | 11 | // Root represents the .ls file root entry. 12 | type Root struct { 13 | Version int `json:"version"` 14 | Root Node 15 | } 16 | 17 | // Node represents one of the entries in a .ls file. 18 | type Node struct { 19 | Type nar.NodeType `json:"type"` 20 | Entries map[string]*Node `json:"entries"` 21 | Size int64 `json:"size"` 22 | LinkTarget string `json:"target"` 23 | Executable bool `json:"executable"` 24 | NAROffset int64 `json:"narOffset"` 25 | } 26 | 27 | // validateNode runs some consistency checks on a node and all its child 28 | // entries. 
It returns an error on failure. 29 | func validateNode(node *Node) error { 30 | // ensure the name of each entry is valid 31 | for k, v := range node.Entries { 32 | if !nar.IsValidNodeName(k) { 33 | return fmt.Errorf("invalid entry name: %v", k) 34 | } 35 | 36 | // Regular files and directories may not have LinkTarget set. 37 | if node.Type == nar.TypeRegular || node.Type == nar.TypeDirectory { 38 | if node.LinkTarget != "" { 39 | return fmt.Errorf("type is %v, but LinkTarget is not empty", node.Type.String()) 40 | } 41 | } 42 | 43 | // Directories and Symlinks may not have Size and Executable set. 44 | if node.Type == nar.TypeDirectory || node.Type == nar.TypeSymlink { 45 | if node.Size != 0 { 46 | return fmt.Errorf("type is %v, but Size is not 0", node.Type.String()) 47 | } 48 | 49 | if node.Executable { 50 | return fmt.Errorf("type is %v, but Executable is true", node.Type.String()) 51 | } 52 | } 53 | 54 | // Symlinks need to specify a target. 55 | if node.Type == nar.TypeSymlink { 56 | if node.LinkTarget == "" { 57 | return fmt.Errorf("type is symlink, but LinkTarget is empty") 58 | } 59 | } 60 | 61 | // verify children 62 | err := validateNode(v) 63 | if err != nil { 64 | return err 65 | } 66 | } 67 | 68 | return nil 69 | } 70 | 71 | // ParseLS parses the NAR .ls file format. 72 | // It returns a tree-like structure for all the entries. 
73 | func ParseLS(r io.Reader) (*Root, error) { 74 | root := Root{} 75 | 76 | dec := json.NewDecoder(r) 77 | dec.DisallowUnknownFields() 78 | 79 | err := dec.Decode(&root) 80 | if err != nil { 81 | return nil, err 82 | } 83 | 84 | if root.Version != 1 { 85 | return nil, fmt.Errorf("invalid version %d", root.Version) 86 | } 87 | 88 | // ensure the nodes are valid 89 | err = validateNode(&root.Root) 90 | if err != nil { 91 | return nil, err 92 | } 93 | 94 | return &root, err 95 | } 96 | -------------------------------------------------------------------------------- /pkg/nar/ls/list_test.go: -------------------------------------------------------------------------------- 1 | package ls_test 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | "github.com/numtide/nar-serve/pkg/nar" 8 | "github.com/numtide/nar-serve/pkg/nar/ls" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | const fixture = ` 13 | { 14 | "version": 1, 15 | "root": { 16 | "type": "directory", 17 | "entries": { 18 | "bin": { 19 | "type": "directory", 20 | "entries": { 21 | "curl": { 22 | "type": "regular", 23 | "size": 182520, 24 | "executable": true, 25 | "narOffset": 400 26 | } 27 | } 28 | } 29 | } 30 | } 31 | } 32 | ` 33 | 34 | func TestLS(t *testing.T) { 35 | r := strings.NewReader(fixture) 36 | root, err := ls.ParseLS(r) 37 | assert.NoError(t, err) 38 | 39 | expectedRoot := &ls.Root{ 40 | Version: 1, 41 | Root: ls.Node{ 42 | Type: nar.TypeDirectory, 43 | Entries: map[string]*ls.Node{ 44 | "bin": { 45 | Type: nar.TypeDirectory, 46 | Entries: map[string]*ls.Node{ 47 | "curl": { 48 | Type: nar.TypeRegular, 49 | Size: 182520, 50 | Executable: true, 51 | NAROffset: 400, 52 | }, 53 | }, 54 | }, 55 | }, 56 | }, 57 | } 58 | assert.Equal(t, expectedRoot, root) 59 | } 60 | -------------------------------------------------------------------------------- /pkg/nar/reader.go: -------------------------------------------------------------------------------- 1 | package nar 2 | 3 | import ( 4 | "bytes" 5 | 
"fmt" 6 | "io" 7 | "math" 8 | "path" 9 | 10 | "github.com/numtide/nar-serve/pkg/wire" 11 | ) 12 | 13 | const ( 14 | // for small tokens, 15 | // we use this to limit how large an invalid token we'll read. 16 | tokenLenMax = 32 17 | // maximum length for a single path element 18 | // NAME_MAX is 255 on Linux. 19 | nameLenMax = 255 20 | // maximum length for a relative path 21 | // PATH_MAX is 4096 on Linux, but that includes a null byte. 22 | pathLenMax = 4096 - 1 23 | ) 24 | 25 | // Reader implements io.ReadCloser. 26 | var _ io.ReadCloser = &Reader{} 27 | 28 | // Reader providers sequential access to the contents of a NAR archive. 29 | // Reader.Next advances to the next file in the archive (including the first), 30 | // and then Reader can be treated as an io.Reader to access the file's data. 31 | type Reader struct { 32 | r io.Reader 33 | contentReader io.ReadCloser 34 | 35 | // channels to communicate with the parser goroutine 36 | 37 | // channel used by the parser to communicate back headers and erorrs 38 | headers chan *Header 39 | errors chan error 40 | 41 | // whenever we once got back an error from the parser, we blow a fuse, 42 | // store the error here, and Next() won't resume the parser anymore. 43 | err error 44 | 45 | // NarReader uses this to resume the parser 46 | next chan bool 47 | 48 | // keep a record of the previously received hdr.Path. 49 | // Only read and updated in the Next() method, receiving from the channel 50 | // populated by the goroutine, not the goroutine itself. 51 | // We do this to bail out if we receive a header from the channel that's 52 | // lexicographically smaller than the previous one. 53 | // Elements in NAR files need to be ordered for reproducibility. 54 | previousHdrPath string 55 | } 56 | 57 | // NewReader creates a new Reader reading from r. 58 | // It'll try to detect the magic header and will fail if it can't be read. 
// NewReader creates a new Reader reading from r.
// It'll try to detect the magic header and will fail if it can't be read.
func NewReader(r io.Reader) (*Reader, error) {
	err := expectString(r, narVersionMagic1)
	if err != nil {
		return nil, fmt.Errorf("invalid nar version magic: %w", err)
	}

	narReader := &Reader{
		r: r,
		// create a dummy reader for contentReader that'll return EOF
		// immediately, so reading from Reader before Next is called
		// won't oops.
		contentReader: io.NopCloser(io.LimitReader(bytes.NewReader([]byte{}), 0)),

		headers: make(chan *Header),
		errors:  make(chan error),
		err:     nil,
		next:    make(chan bool),
	}

	// Kick off the parser goroutine. It stays blocked on narReader.next
	// until the consumer calls Next() (or Close(), which closes the
	// channel), so no parsing happens before it is asked for.
	go func() {
		// wait for the first Next() call
		next := <-narReader.next
		// immediate Close(), without ever calling Next()
		if !next {
			return
		}

		// parse the root node; any error (or io.EOF on clean completion)
		// is delivered through the errors channel.
		err := narReader.parseNode("/")
		if err != nil {
			narReader.errors <- err
		} else {
			narReader.errors <- io.EOF
		}

		close(narReader.headers)
		close(narReader.errors)
	}()

	return narReader, nil
}
// parseNode parses a single NAR node rooted at path p, emitting one Header
// per node on nr.headers and pausing on nr.next between nodes. It recurses
// for directory entries. Runs on the parser goroutine only.
func (nr *Reader) parseNode(p string) error {
	// accept a opening (
	err := expectString(nr.r, "(")
	if err != nil {
		return err
	}

	// accept a type
	err = expectString(nr.r, "type")
	if err != nil {
		return err
	}

	var currentToken string

	// switch on the type label
	currentToken, err = wire.ReadString(nr.r, tokenLenMax)
	if err != nil {
		return err
	}

	switch currentToken {
	case "regular":
		// we optionally see executable, marking the file as executable,
		// and then contents, with the contents afterwards
		currentToken, err = wire.ReadString(nr.r, uint64(len("executable")))
		if err != nil {
			return err
		}

		executable := false
		if currentToken == "executable" {
			executable = true

			// There seem to be 8 null bytes after the executable field,
			// which can be seen as an empty string field.
			_, err := wire.ReadBytesFull(nr.r, 0)
			if err != nil {
				return fmt.Errorf("error reading placeholder: %w", err)
			}

			currentToken, err = wire.ReadString(nr.r, tokenLenMax)
			if err != nil {
				return err
			}
		}

		if currentToken != "contents" {
			return fmt.Errorf("invalid token: %v, expected 'contents'", currentToken)
		}

		// peek at the bytes field; contentReader lets the consumer stream
		// the file body without buffering it here.
		contentLength, contentReader, err := wire.ReadBytes(nr.r)
		if err != nil {
			return err
		}

		// Header.Size is an int64, so reject lengths that don't fit.
		if contentLength > math.MaxInt64 {
			return fmt.Errorf("content length of %v is larger than MaxInt64", contentLength)
		}

		nr.contentReader = contentReader

		nr.headers <- &Header{
			Path:       p,
			Type:       TypeRegular,
			LinkTarget: "",
			Size:       int64(contentLength),
			Executable: executable,
		}

		// wait for the Next() call
		next := <-nr.next
		if !next {
			return nil
		}

		// seek to the end of the bytes field - the consumer might not have
		// read all of it
		err = nr.contentReader.Close()
		if err != nil {
			return err
		}

		// consume the next token
		currentToken, err = wire.ReadString(nr.r, tokenLenMax)
		if err != nil {
			return err
		}

	case "symlink":
		// accept the `target` keyword
		err := expectString(nr.r, "target")
		if err != nil {
			return err
		}

		// read in the target
		target, err := wire.ReadString(nr.r, pathLenMax)
		if err != nil {
			return err
		}

		// set nr.contentReader to a empty reader, we can't read from symlinks!
		nr.contentReader = io.NopCloser(io.LimitReader(bytes.NewReader([]byte{}), 0))

		// yield back the header
		nr.headers <- &Header{
			Path:       p,
			Type:       TypeSymlink,
			LinkTarget: target,
			Size:       0,
			Executable: false,
		}

		// wait for the Next() call
		next := <-nr.next
		if !next {
			return nil
		}

		// consume the next token
		currentToken, err = wire.ReadString(nr.r, tokenLenMax)
		if err != nil {
			return err
		}

	case "directory":
		// set nr.contentReader to a empty reader, we can't read from directories!
		nr.contentReader = io.NopCloser(io.LimitReader(bytes.NewReader([]byte{}), 0))
		nr.headers <- &Header{
			Path:       p,
			Type:       TypeDirectory,
			LinkTarget: "",
			Size:       0,
			Executable: false,
		}

		// wait for the Next() call
		next := <-nr.next
		if !next {
			return nil
		}

		// there can be none, one or multiple `entry ( name foo node )`

		for {
			// read the next token
			currentToken, err = wire.ReadString(nr.r, tokenLenMax)
			if err != nil {
				return err
			}

			if currentToken == "entry" { //nolint:nestif
				// ( name foo node )
				err = expectString(nr.r, "(")
				if err != nil {
					return err
				}

				err = expectString(nr.r, "name")
				if err != nil {
					return err
				}

				currentToken, err = wire.ReadString(nr.r, nameLenMax)
				if err != nil {
					return err
				}

				// ensure the name is valid (no slashes or null bytes)
				if !IsValidNodeName(currentToken) {
					return fmt.Errorf("name `%v` is invalid", currentToken)
				}

				newPath := path.Join(p, currentToken)

				err = expectString(nr.r, "node")
				if err != nil {
					return err
				}

				// recurse into the child node
				err = nr.parseNode(newPath)
				if err != nil {
					return err
				}

				// closing ) of the entry
				err = expectString(nr.r, ")")
				if err != nil {
					return err
				}
			}

			if currentToken == ")" {
				break
			}
		}
	}

	// every node ends with a closing )
	if currentToken != ")" {
		return fmt.Errorf("unexpected token: %v, expected `)`", currentToken)
	}

	return nil
}
")") 288 | if err != nil { 289 | return err 290 | } 291 | } 292 | 293 | if currentToken == ")" { 294 | break 295 | } 296 | } 297 | } 298 | 299 | if currentToken != ")" { 300 | return fmt.Errorf("unexpected token: %v, expected `)`", currentToken) 301 | } 302 | 303 | return nil 304 | } 305 | 306 | // Next advances to the next entry in the NAR archive. The Header.Size 307 | // determines how many bytes can be read for the next file. Any remaining data 308 | // in the current file is automatically discarded. 309 | // 310 | // io.EOF is returned at the end of input. 311 | // Errors are returned in case invalid data was read. 312 | // This includes non-canonically sorted NAR files. 313 | func (nr *Reader) Next() (*Header, error) { 314 | // if there's an error already stored, keep returning it 315 | if nr.err != nil { 316 | return nil, nr.err 317 | } 318 | 319 | // else, resume the parser 320 | nr.next <- true 321 | 322 | // return either an error or headers 323 | select { 324 | case hdr := <-nr.headers: 325 | if !PathIsLexicographicallyOrdered(nr.previousHdrPath, hdr.Path) { 326 | err := fmt.Errorf("received header in the wrong order, %v <= %v", hdr.Path, nr.previousHdrPath) 327 | 328 | // blow fuse 329 | nr.err = err 330 | 331 | return nil, err 332 | } 333 | 334 | nr.previousHdrPath = hdr.Path 335 | 336 | return hdr, nil 337 | 338 | case err := <-nr.errors: 339 | if err != nil { 340 | // blow fuse 341 | nr.err = err 342 | } 343 | 344 | return nil, err 345 | } 346 | } 347 | 348 | // Read reads from the current file in the NAR archive. It returns (0, io.EOF) 349 | // when it reaches the end of that file, until Next is called to advance to 350 | // the next file. 351 | // 352 | // Calling Read on special types like TypeSymlink or TypeDir returns (0, 353 | // io.EOF). 354 | func (nr *Reader) Read(b []byte) (int, error) { 355 | return nr.contentReader.Read(b) 356 | } 357 | 358 | // Close does all internal cleanup. 
It doesn't close the underlying reader (which can be any io.Reader). 359 | func (nr *Reader) Close() error { 360 | if nr.err != io.EOF { 361 | // Signal the parser there won't be any next. 362 | close(nr.next) 363 | } 364 | 365 | return nil 366 | } 367 | 368 | // expectString reads a string field from a reader, expecting a certain result, 369 | // and errors out if the reader ends unexpected, or didn't read the expected. 370 | func expectString(r io.Reader, expected string) error { 371 | s, err := wire.ReadString(r, uint64(len(expected))) 372 | if err != nil { 373 | if err == io.EOF { 374 | err = io.ErrUnexpectedEOF 375 | } 376 | 377 | return err 378 | } 379 | 380 | if s != expected { 381 | return fmt.Errorf("expected '%v' got '%v'", expected, s) 382 | } 383 | 384 | return nil 385 | } 386 | -------------------------------------------------------------------------------- /pkg/nar/reader_test.go: -------------------------------------------------------------------------------- 1 | package nar_test 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "os" 7 | "testing" 8 | 9 | "github.com/numtide/nar-serve/pkg/nar" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestReaderEmpty(t *testing.T) { 14 | nr, err := nar.NewReader(bytes.NewBuffer(genEmptyNar())) 15 | assert.NoError(t, err) 16 | 17 | hdr, err := nr.Next() 18 | // first Next() should return an non-nil error that's != io.EOF, 19 | // as an empty NAR is invalid. 
// TestReaderEmpty ensures an empty NAR (magic only) is rejected with a
// non-EOF error on the first Next().
func TestReaderEmpty(t *testing.T) {
	nr, err := nar.NewReader(bytes.NewBuffer(genEmptyNar()))
	assert.NoError(t, err)

	hdr, err := nr.Next()
	// first Next() should return an non-nil error that's != io.EOF,
	// as an empty NAR is invalid.
	assert.Error(t, err, "first Next() on an empty NAR should return an error")
	assert.NotEqual(t, io.EOF, err, "first Next() on an empty NAR shouldn't return io.EOF")
	assert.Nil(t, hdr, "returned header should be nil")

	assert.NotPanics(t, func() {
		nr.Close()
	}, "closing the reader shouldn't panic")
}

// TestReaderEmptyDirectory reads a NAR containing a single empty directory:
// one header for "/", then io.EOF.
func TestReaderEmptyDirectory(t *testing.T) {
	nr, err := nar.NewReader(bytes.NewBuffer(genEmptyDirectoryNar()))
	assert.NoError(t, err)

	// get first header
	hdr, err := nr.Next()
	assert.NoError(t, err)
	assert.Equal(t, &nar.Header{
		Path: "/",
		Type: nar.TypeDirectory,
	}, hdr)

	hdr, err = nr.Next()
	assert.Equal(t, io.EOF, err, "Next() should return io.EOF as error")
	assert.Nil(t, hdr, "returned header should be nil")

	assert.NotPanics(t, func() {
		nr.Close()
	}, "closing the reader shouldn't panic")
}

// TestReaderOneByteRegular reads a NAR whose root is a one-byte regular
// file, checking both the header and the streamed contents.
func TestReaderOneByteRegular(t *testing.T) {
	nr, err := nar.NewReader(bytes.NewBuffer(genOneByteRegularNar()))
	assert.NoError(t, err)

	// get first header
	hdr, err := nr.Next()
	assert.NoError(t, err)
	assert.Equal(t, &nar.Header{
		Path:       "/",
		Type:       nar.TypeRegular,
		Size:       1,
		Executable: false,
	}, hdr)

	// read contents
	contents, err := io.ReadAll(nr)
	assert.NoError(t, err)
	assert.Equal(t, []byte{0x1}, contents)

	hdr, err = nr.Next()
	assert.Equal(t, io.EOF, err, "Next() should return io.EOF as error")
	assert.Nil(t, hdr, "returned header should be nil")

	assert.NotPanics(t, func() {
		nr.Close()
	}, "closing the reader shouldn't panic")
}
// TestReaderSymlink reads a NAR whose root is a symlink; reading contents
// from a symlink must yield no bytes.
func TestReaderSymlink(t *testing.T) {
	nr, err := nar.NewReader(bytes.NewBuffer(genSymlinkNar()))
	assert.NoError(t, err)

	// get first header
	hdr, err := nr.Next()
	assert.NoError(t, err)
	assert.Equal(t, &nar.Header{
		Path:       "/",
		Type:       nar.TypeSymlink,
		LinkTarget: "/nix/store/somewhereelse",
		Size:       0,
		Executable: false,
	}, hdr)

	// read contents should only return an empty byte slice
	contents, err := io.ReadAll(nr)
	assert.NoError(t, err)
	assert.Equal(t, []byte{}, contents)

	hdr, err = nr.Next()
	assert.Equal(t, io.EOF, err, "Next() should return io.EOF as error")
	assert.Nil(t, hdr, "returned header should be nil")

	assert.NotPanics(t, func() {
		nr.Close()
	}, "closing the reader shouldn't panic")
}

// TODO: various early close cases

// TestReaderInvalidOrder feeds a NAR whose directory entries are not in
// lexicographic order ("/b" before "/a") and expects Next() to fail with a
// non-EOF error on the out-of-order entry.
func TestReaderInvalidOrder(t *testing.T) {
	nr, err := nar.NewReader(bytes.NewBuffer(genInvalidOrderNAR()))
	assert.NoError(t, err)

	// get first header (/)
	hdr, err := nr.Next()
	assert.NoError(t, err)
	assert.Equal(t, &nar.Header{
		Path: "/",
		Type: nar.TypeDirectory,
	}, hdr)

	// get first element inside / (/b)
	hdr, err = nr.Next()
	assert.NoError(t, err)
	assert.Equal(t, &nar.Header{
		Path: "/b",
		Type: nar.TypeDirectory,
	}, hdr)

	// get second element inside / (/a) should fail
	_, err = nr.Next()
	assert.Error(t, err)
	assert.NotErrorIs(t, err, io.EOF, "should not be io.EOF")
}
// TestReaderSmoketest walks a real net-tools NAR fixture end to end,
// comparing every emitted header against a golden list, spot-checking one
// file's contents, and verifying that symlinks/directories read as empty.
func TestReaderSmoketest(t *testing.T) {
	f, err := os.Open("../../test/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar")
	if !assert.NoError(t, err) {
		return
	}

	nr, err := nar.NewReader(f)
	assert.NoError(t, err, "instantiating the NAR Reader shouldn't error")

	// check premature reading doesn't do any harm
	n, err := nr.Read(make([]byte, 1000))
	assert.Equal(t, 0, n)
	assert.Equal(t, io.EOF, err)

	// golden list of every header in the fixture, in canonical NAR order
	headers := []nar.Header{
		{Type: nar.TypeDirectory, Path: "/"},
		{Type: nar.TypeDirectory, Path: "/bin"},
		{
			Type:       nar.TypeRegular,
			Path:       "/bin/arp",
			Executable: true,
			Size:       55288,
		},
		{
			Type:       nar.TypeSymlink,
			Path:       "/bin/dnsdomainname",
			LinkTarget: "hostname",
		},
		{
			Type:       nar.TypeSymlink,
			Path:       "/bin/domainname",
			LinkTarget: "hostname",
		},
		{
			Type:       nar.TypeRegular,
			Path:       "/bin/hostname",
			Executable: true,
			Size:       17704,
		},
		{
			Type:       nar.TypeRegular,
			Path:       "/bin/ifconfig",
			Executable: true,
			Size:       72576,
		},
		{
			Type:       nar.TypeRegular,
			Path:       "/bin/nameif",
			Executable: true,
			Size:       18776,
		},
		{
			Type:       nar.TypeRegular,
			Path:       "/bin/netstat",
			Executable: true,
			Size:       131784,
		},
		{
			Type:       nar.TypeSymlink,
			Path:       "/bin/nisdomainname",
			LinkTarget: "hostname",
		},
		{
			Type:       nar.TypeRegular,
			Path:       "/bin/plipconfig",
			Executable: true,
			Size:       13160,
		},
		{
			Type:       nar.TypeRegular,
			Path:       "/bin/rarp",
			Executable: true,
			Size:       30384,
		},
		{
			Type:       nar.TypeRegular,
			Path:       "/bin/route",
			Executable: true,
			Size:       61928,
		},
		{
			Type:       nar.TypeRegular,
			Path:       "/bin/slattach",
			Executable: true,
			Size:       35672,
		},
		{
			Type:       nar.TypeSymlink,
			Path:       "/bin/ypdomainname",
			LinkTarget: "hostname",
		},
		{
			Type:       nar.TypeSymlink,
			Path:       "/sbin",
			LinkTarget: "bin",
		},
		{
			Type: nar.TypeDirectory,
			Path: "/share",
		},
		{
			Type: nar.TypeDirectory,
			Path: "/share/man",
		},
		{
			Type: nar.TypeDirectory,
			Path: "/share/man/man1",
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man1/dnsdomainname.1.gz",
			Size: 40,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man1/domainname.1.gz",
			Size: 40,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man1/hostname.1.gz",
			Size: 1660,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man1/nisdomainname.1.gz",
			Size: 40,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man1/ypdomainname.1.gz",
			Size: 40,
		},
		{
			Type: nar.TypeDirectory,
			Path: "/share/man/man5",
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man5/ethers.5.gz",
			Size: 563,
		},
		{
			Type: nar.TypeDirectory,
			Path: "/share/man/man8",
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man8/arp.8.gz",
			Size: 2464,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man8/ifconfig.8.gz",
			Size: 3382,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man8/nameif.8.gz",
			Size: 523,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man8/netstat.8.gz",
			Size: 4284,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man8/plipconfig.8.gz",
			Size: 889,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man8/rarp.8.gz",
			Size: 1198,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man8/route.8.gz",
			Size: 3525,
		},
		{
			Type: nar.TypeRegular,
			Path: "/share/man/man8/slattach.8.gz",
			Size: 1441,
		},
	}

	for i, expectH := range headers {
		hdr, e := nr.Next()
		if !assert.NoError(t, e, i) {
			return
		}

		// read one of the files
		if hdr.Path == "/bin/arp" {
			// NOTE(review): f and err shadow the outer variables here, and
			// the defer fires at function exit, not loop exit — harmless
			// since exactly one path matches, but worth confirming.
			f, err := os.Open("../../test/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar_bin_arp")
			assert.NoError(t, err)

			defer f.Close()

			expectedContents, err := io.ReadAll(f)
			assert.NoError(t, err)

			actualContents, err := io.ReadAll(nr)
			if assert.NoError(t, err) {
				assert.Equal(t, expectedContents, actualContents)
			}
		}

		// ensure reading from symlinks or directories doesn't return any actual contents
		// we pick examples that previously returned a regular file, so there might
		// previously have been a reader pointing to something.
		if hdr.Path == "/bin/dnsdomainname" || hdr.Path == "/share/man/man5" {
			actualContents, err := io.ReadAll(nr)
			if assert.NoError(t, err) {
				assert.Equal(t, []byte{}, actualContents)
			}
		}

		assert.Equal(t, expectH, *hdr)
	}

	hdr, err := nr.Next()
	// expect to return io.EOF at the end, and no more headers
	assert.Nil(t, hdr)
	assert.Equal(t, io.EOF, err)

	assert.NoError(t, nr.Close(), nil, "closing the reader shouldn't error")
	assert.NotPanics(t, func() {
		_ = nr.Close()
	}, "closing the reader multiple times shouldn't panic")
}
// narVersionMagic1 is the magic string every NAR stream starts with.
const narVersionMagic1 = "nix-archive-1"

// NodeType is an enum of all the node types possible in a NAR archive.
type NodeType string

const (
	// TypeRegular represents a regular file.
	TypeRegular = NodeType("regular")
	// TypeDirectory represents a directory entry.
	TypeDirectory = NodeType("directory")
	// TypeSymlink represents a file symlink.
	TypeSymlink = NodeType("symlink")
)

// String returns the wire-format spelling of the node type.
func (t NodeType) String() string {
	return string(t)
}
// IsValidNodeName checks the name of a node:
// it may not contain null bytes or slashes.
func IsValidNodeName(nodeName string) bool {
	// a single ContainsAny covers both forbidden characters
	return !strings.ContainsAny(nodeName, "/\u0000")
}

// PathIsLexicographicallyOrdered checks if two paths are lexicographically
// ordered component by component ('/' sorts before any other byte, matching
// NAR's per-component ordering).
func PathIsLexicographicallyOrdered(path1 string, path2 string) bool {
	// plain string ordering already implies component ordering
	if path1 <= path2 {
		return true
	}

	// limit is the length of the shorter path.
	limit := len(path1)
	if len(path2) < limit {
		limit = len(path2)
	}

	for i := 0; i < limit; i++ {
		c1, c2 := path1[i], path2[i]

		switch {
		case c1 == c2:
			continue
		case c1 == '/':
			// a component boundary in path1 sorts before any differing
			// byte in path2 (c2 != '/' here, since c1 != c2)
			return true
		default:
			return c1 < c2
		}
	}

	// Cover cases where one path is a prefix of the other
	// (e.g. path1=/arp-foo path2=/arp).
	return len(path2) >= len(path1)
}
*testing.T) { 47 | result := nar.PathIsLexicographicallyOrdered(testCase.path1, testCase.path2) 48 | assert.Equal(t, result, testCase.expected) 49 | }) 50 | } 51 | } 52 | 53 | func BenchmarkLexicographicallyOrdered(b *testing.B) { 54 | for i, testCase := range cases { 55 | b.Run(fmt.Sprint(i), func(b *testing.B) { 56 | for i := 0; i < b.N; i++ { 57 | nar.PathIsLexicographicallyOrdered(testCase.path1, testCase.path2) 58 | } 59 | }) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /pkg/nar/writer.go: -------------------------------------------------------------------------------- 1 | package nar 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "path/filepath" 8 | 9 | "github.com/numtide/nar-serve/pkg/wire" 10 | ) 11 | 12 | // Writer provides sequential writing of a NAR (Nix Archive) file. 13 | // Writer.WriteHeader begins a new file with the provided Header, 14 | // and then Writer can be treated as an io.Writer to supply that 15 | // file's data. 16 | type Writer struct { 17 | w io.Writer 18 | contentWriter io.WriteCloser 19 | 20 | // channels used by the goroutine to communicate back to WriteHeader and Close. 21 | doneWritingHeader chan struct{} // goroutine is done writing that header, WriteHeader() can return. 22 | errors chan error // there were errors while writing 23 | 24 | // whether we closed 25 | closed bool 26 | 27 | // this is used to send new headers to write to the emitter 28 | headers chan *Header 29 | } 30 | 31 | // NewWriter creates a new Writer writing to w. 
// NewWriter creates a new Writer writing to w.
// It writes the NAR magic immediately and starts the emitter goroutine,
// which consumes headers sent by WriteHeader until Close.
func NewWriter(w io.Writer) (*Writer, error) {
	// write magic
	err := wire.WriteString(w, narVersionMagic1)
	if err != nil {
		return nil, err
	}

	narWriter := &Writer{
		w: w,

		doneWritingHeader: make(chan struct{}),
		errors:            make(chan error),

		closed: false,

		headers: make(chan *Header),
	}

	// kick off the emitter goroutine
	go func() {
		// wait for the first WriteHeader() call
		header, ok := <-narWriter.headers
		// immediate Close(), without ever calling WriteHeader():
		// as an empty nar is invalid, we return an error
		if !ok {
			narWriter.errors <- fmt.Errorf("unexpected Close()")
			close(narWriter.errors)

			return
		}

		// ensure the first item received always has a "/" as path.
		if header.Path != "/" {
			narWriter.errors <- fmt.Errorf("first header always needs to have a / as path")
			close(narWriter.errors)

			return
		}

		// emit the root node; it recursively consumes all further headers
		excessHdr, err := narWriter.emitNode(header)
		if err != nil {
			narWriter.errors <- err
		}

		// a leftover header after the root finished means the stream was
		// malformed (nothing above "/" can absorb it)
		if excessHdr != nil {
			narWriter.errors <- fmt.Errorf("additional header detected: %+v", excessHdr)
		}

		close(narWriter.errors)
	}()

	return narWriter, nil
}
// emitNode writes one NAR node. It'll internally consume one or more headers.
// In case it received a header that's not inside its own jurisdiction,
// it'll return it, assuming an upper level will handle it.
// Runs on the emitter goroutine; it pauses on nw.headers between nodes.
func (nw *Writer) emitNode(currentHeader *Header) (*Header, error) {
	// write a opening (
	err := wire.WriteString(nw.w, "(")
	if err != nil {
		return nil, err
	}

	// write type
	err = wire.WriteString(nw.w, "type")
	if err != nil {
		return nil, err
	}

	// store the current type in a var, we access it more often later.
	currentType := currentHeader.Type

	err = wire.WriteString(nw.w, currentType.String())
	if err != nil {
		return nil, err
	}

	if currentType == TypeRegular { //nolint:nestif
		// if the executable bit is set…
		if currentHeader.Executable {
			// write the executable token.
			err = wire.WriteString(nw.w, "executable")
			if err != nil {
				return nil, err
			}

			// write the empty placeholder field that follows "executable"
			err = wire.WriteBytes(nw.w, []byte{})
			if err != nil {
				return nil, err
			}
		}

		// write the contents keyword
		err = wire.WriteString(nw.w, "contents")
		if err != nil {
			return nil, err
		}

		// the caller's Write() calls stream through this size-limited writer
		nw.contentWriter, err = wire.NewBytesWriter(nw.w, uint64(currentHeader.Size))
		if err != nil {
			return nil, err
		}
	}

	// The directory case doesn't write anything special after ( type directory .
	// We need to inspect the next header before figuring out whether to list entries or not.
	if currentType == TypeSymlink || currentType == TypeDirectory { // nolint:nestif
		if currentType == TypeSymlink {
			// write the target keyword
			err = wire.WriteString(nw.w, "target")
			if err != nil {
				return nil, err
			}

			// write the target location. Make sure to convert slashes.
			err = wire.WriteString(nw.w, filepath.ToSlash(currentHeader.LinkTarget))
			if err != nil {
				return nil, err
			}
		}

		// setup a dummy content writer that's not connected to the main
		// writer, and will fail if you write anything to it.
		var b bytes.Buffer

		nw.contentWriter, err = wire.NewBytesWriter(&b, 0)
		if err != nil {
			return nil, err
		}
	}

	// unblock WriteHeader() so it can return to its caller
	nw.doneWritingHeader <- struct{}{}

	// wait till we receive a new header (ok==false means Close() was called)
	nextHeader, ok := <-nw.headers

	// Close the content writer to finish the packet and write possible padding.
	// This is a no-op for symlinks and directories, as the contentWriter is limited to 0 bytes,
	// and not connected to the main writer.
	// The writer itself will already ensure we wrote the right amount of bytes.
	err = nw.contentWriter.Close()
	if err != nil {
		return nil, err
	}

	// if this was the last header, write the closing ) and return
	// (err is nil here, so this returns (nil, nil))
	if !ok {
		err = wire.WriteString(nw.w, ")")
		if err != nil {
			return nil, err
		}

		return nil, err
	}

	// This is a loop, as nextHeader can either be what we received above,
	// or in the case of a directory, something returned when recursing up.
	for {
		// if this was the last header, write the closing ) and return
		if nextHeader == nil {
			err = wire.WriteString(nw.w, ")")
			if err != nil {
				return nil, err
			}

			return nil, err
		}

		// compare Path of the received header.
		// It needs to be lexicographically greater than the previous one.
		if !PathIsLexicographicallyOrdered(currentHeader.Path, nextHeader.Path) {
			return nil, fmt.Errorf(
				"received %v, which isn't lexicographically greater than the previous one %v",
				nextHeader.Path,
				currentHeader.Path,
			)
		}

		// calculate the relative path between the previous and now-read header,
		// which will become the new node name.
		nodeName, err := filepath.Rel(currentHeader.Path, nextHeader.Path)
		if err != nil {
			return nil, err
		}

		// make sure we're using slashes
		nodeName = filepath.ToSlash(nodeName)

		// if the received header is something further up, or a sibling, we're done here.
		if len(nodeName) > 2 && (nodeName[0:2] == "..") {
			// write the closing )
			err = wire.WriteString(nw.w, ")")
			if err != nil {
				return nil, err
			}

			// bounce further work up to above
			return nextHeader, nil
		}

		// in other cases, it describes something below.
		// This only works if we previously were in a directory.
		if currentHeader.Type != TypeDirectory {
			return nil, fmt.Errorf("received descending path %v, but we're a %v", nextHeader.Path, currentHeader.Type.String())
		}

		// ensure the name is valid. At this point, there should be no more slashes,
		// as we already recursed up.
		if !IsValidNodeName(nodeName) {
			return nil, fmt.Errorf("name `%v` is invalid, as it contains a slash", nodeName)
		}

		// write the entry keyword
		err = wire.WriteString(nw.w, "entry")
		if err != nil {
			return nil, err
		}

		// write a opening (
		err = wire.WriteString(nw.w, "(")
		if err != nil {
			return nil, err
		}

		// write a opening name
		err = wire.WriteString(nw.w, "name")
		if err != nil {
			return nil, err
		}

		// write the node name
		err = wire.WriteString(nw.w, nodeName)
		if err != nil {
			return nil, err
		}

		// write the node keyword
		err = wire.WriteString(nw.w, "node")
		if err != nil {
			return nil, err
		}

		// Emit the node inside. It'll consume another node, which is what we'll
		// handle in the next loop iteration.
		nextHeader, err = nw.emitNode(nextHeader)
		if err != nil {
			return nil, err
		}

		// write the closing ) (from entry)
		err = wire.WriteString(nw.w, ")")
		if err != nil {
			return nil, err
		}
	}
}
// WriteHeader writes hdr and prepares to accept the file's contents. The
// Header.Size determines how many bytes can be written for the next file. If
// the current file is not fully written, then this returns an error. This
// implicitly flushes any padding necessary before writing the header.
func (nw *Writer) WriteHeader(hdr *Header) error {
	if err := hdr.Validate(); err != nil {
		return fmt.Errorf("unable to write header: %w", err)
	}

	// Hand the header to the emitter goroutine, then block until it either
	// reports an error or signals (via doneWritingHeader) that the header
	// was fully written to the underlying writer.
	// NOTE(review): if the emitter goroutine has already terminated with an
	// error and no longer receives on nw.headers, this send may block
	// forever — confirm the emitter keeps draining or that callers stop
	// after the first error.
	nw.headers <- hdr
	select {
	case err := <-nw.errors:
		return err
	case <-nw.doneWritingHeader:
	}

	return nil
}

// Write writes to the current file in the NAR.
// Write returns ErrWriteTooLong if more than Header.Size bytes
// are written after WriteHeader.
//
// Calling Write after a header for a non-regular node (TypeSymlink,
// TypeDirectory) returns (0, ErrWriteTooLong) regardless of what the
// Header.Size claims, as the content writer for those nodes is capped
// at 0 bytes and not connected to the output.
func (nw *Writer) Write(b []byte) (int, error) {
	// contentWriter is (re)installed by the emitter goroutine for every
	// header; it enforces the Header.Size limit and emits NAR padding when
	// closed.
	return nw.contentWriter.Write(b)
}
// Close closes the NAR file.
// If the current file (from a prior call to WriteHeader) is not fully
// written, then this returns an error.
// Calling Close a second time returns an error instead of panicking.
func (nw *Writer) Close() error {
	if nw.closed {
		return fmt.Errorf("already closed")
	}

	// Closing the headers channel signals the emitter goroutine that no
	// further nodes will follow, so it can emit the final closing token.
	close(nw.headers)

	nw.closed = true

	// Wait for the emitter to finish; it reports a final error (or nil on
	// success) on nw.errors, which it closes when done. A nil receive here
	// means the NAR was terminated successfully.
	return <-nw.errors
}
func TestWriterOneByteRegular(t *testing.T) {
	var buf bytes.Buffer
	nw, err := nar.NewWriter(&buf)
	assert.NoError(t, err)

	// A single regular, non-executable file at the NAR root, one byte long.
	hdr := nar.Header{
		Path:       "/",
		Type:       nar.TypeRegular,
		Size:       1,
		Executable: false,
	}

	err = nw.WriteHeader(&hdr)
	assert.NoError(t, err)

	// Write exactly Header.Size bytes of content.
	num, err := nw.Write([]byte{1})
	assert.Equal(t, num, 1)
	assert.NoError(t, err)

	err = nw.Close()
	assert.NoError(t, err)

	// The output must match the golden fixture byte-for-byte.
	assert.Equal(t, genOneByteRegularNar(), buf.Bytes())
}

// TestWriterSymlink writes a NAR only containing a symlink.
func TestWriterSymlink(t *testing.T) {
	var buf bytes.Buffer
	nw, err := nar.NewWriter(&buf)
	assert.NoError(t, err)

	// Symlinks carry their target in LinkTarget and have no content,
	// so no Write() call is needed before Close().
	hdr := nar.Header{
		Path:       "/",
		Type:       nar.TypeSymlink,
		LinkTarget: "/nix/store/somewhereelse",
		Size:       0,
		Executable: false,
	}

	err = nw.WriteHeader(&hdr)
	assert.NoError(t, err)

	err = nw.Close()
	assert.NoError(t, err)

	assert.Equal(t, genSymlinkNar(), buf.Bytes())
}
102 | func TestWriterSmoketest(t *testing.T) { 103 | f, err := os.Open("../../test/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar") 104 | if !assert.NoError(t, err) { 105 | return 106 | } 107 | 108 | // read in the NAR contents once 109 | narContents, err := io.ReadAll(f) 110 | assert.NoError(t, err) 111 | 112 | // pass them into a NAR reader 113 | nr, err := nar.NewReader(bytes.NewReader(narContents)) 114 | assert.NoError(t, err) 115 | 116 | headers := []*nar.Header{} 117 | contents := [][]byte{} 118 | 119 | for { 120 | hdr, err := nr.Next() 121 | if err != nil { 122 | if err == io.EOF { 123 | break 124 | } 125 | 126 | panic("unexpected error while reading in file") 127 | } 128 | 129 | headers = append(headers, hdr) 130 | 131 | fileContents, err := io.ReadAll(nr) 132 | assert.NoError(t, err) 133 | 134 | contents = append(contents, fileContents) 135 | } 136 | 137 | assert.True(t, len(headers) == len(contents), "headers and contents should have the same size") 138 | 139 | // drive the nar writer 140 | var buf bytes.Buffer 141 | nw, err := nar.NewWriter(&buf) 142 | assert.NoError(t, err) 143 | 144 | // Loop over all headers 145 | for i, hdr := range headers { 146 | // Write header 147 | err := nw.WriteHeader(hdr) 148 | assert.NoError(t, err) 149 | 150 | // Write contents. 
In the case of directories and symlinks, it should be fine to write empty bytes 151 | n, err := io.Copy(nw, bytes.NewReader(contents[i])) 152 | assert.NoError(t, err) 153 | assert.Equal(t, int64(len(contents[i])), n) 154 | } 155 | 156 | err = nw.Close() 157 | assert.NoError(t, err) 158 | // check the NAR writer produced the same contents than what we read in 159 | assert.Equal(t, narContents, buf.Bytes()) 160 | } 161 | 162 | func TestWriterErrorsTransitions(t *testing.T) { 163 | t.Run("missing directory in between", func(t *testing.T) { 164 | var buf bytes.Buffer 165 | nw, err := nar.NewWriter(&buf) 166 | assert.NoError(t, err) 167 | 168 | // write a directory node 169 | err = nw.WriteHeader(&nar.Header{ 170 | Path: "/", 171 | Type: nar.TypeDirectory, 172 | }) 173 | assert.NoError(t, err) 174 | 175 | // write a symlink "a/foo", but missing the directory node "a" in between should error 176 | err = nw.WriteHeader(&nar.Header{ 177 | Path: "/a/foo", 178 | Type: nar.TypeSymlink, 179 | LinkTarget: "doesntmatter", 180 | }) 181 | assert.Error(t, err) 182 | }) 183 | 184 | t.Run("missing directory at the beginning, writing another directory", func(t *testing.T) { 185 | var buf bytes.Buffer 186 | nw, err := nar.NewWriter(&buf) 187 | assert.NoError(t, err) 188 | 189 | // write a directory node for "/a" without writing the one for "/" 190 | err = nw.WriteHeader(&nar.Header{ 191 | Path: "/a", 192 | Type: nar.TypeDirectory, 193 | }) 194 | assert.Error(t, err) 195 | }) 196 | 197 | t.Run("missing directory at the beginning, writing a symlink", func(t *testing.T) { 198 | var buf bytes.Buffer 199 | nw, err := nar.NewWriter(&buf) 200 | assert.NoError(t, err) 201 | 202 | // write a symlink for "a" without writing the directory one for "" 203 | err = nw.WriteHeader(&nar.Header{ 204 | Path: "/a", 205 | Type: nar.TypeSymlink, 206 | LinkTarget: "foo", 207 | }) 208 | assert.Error(t, err) 209 | }) 210 | 211 | t.Run("transition via a symlink, not directory", func(t *testing.T) { 212 | var 
buf bytes.Buffer 213 | nw, err := nar.NewWriter(&buf) 214 | assert.NoError(t, err) 215 | 216 | // write a directory node 217 | err = nw.WriteHeader(&nar.Header{ 218 | Path: "/", 219 | Type: nar.TypeDirectory, 220 | }) 221 | assert.NoError(t, err) 222 | 223 | // write a symlink node for "/a" 224 | err = nw.WriteHeader(&nar.Header{ 225 | Path: "/a", 226 | Type: nar.TypeSymlink, 227 | LinkTarget: "doesntmatter", 228 | }) 229 | assert.NoError(t, err) 230 | 231 | // write a symlink "/a/b", which should fail, as a was a symlink, not directory 232 | err = nw.WriteHeader(&nar.Header{ 233 | Path: "/a/b", 234 | Type: nar.TypeSymlink, 235 | LinkTarget: "doesntmatter", 236 | }) 237 | assert.Error(t, err) 238 | }) 239 | 240 | t.Run("not lexicographically sorted", func(t *testing.T) { 241 | var buf bytes.Buffer 242 | nw, err := nar.NewWriter(&buf) 243 | assert.NoError(t, err) 244 | 245 | // write a directory node 246 | err = nw.WriteHeader(&nar.Header{ 247 | Path: "/", 248 | Type: nar.TypeDirectory, 249 | }) 250 | assert.NoError(t, err) 251 | 252 | // write a symlink for "/b" 253 | err = nw.WriteHeader(&nar.Header{ 254 | Path: "/b", 255 | Type: nar.TypeSymlink, 256 | LinkTarget: "foo", 257 | }) 258 | assert.NoError(t, err) 259 | 260 | // write a symlink for "/a" 261 | err = nw.WriteHeader(&nar.Header{ 262 | Path: "/a", 263 | Type: nar.TypeSymlink, 264 | LinkTarget: "foo", 265 | }) 266 | assert.Error(t, err) 267 | }) 268 | 269 | t.Run("not lexicographically sorted, but the same", func(t *testing.T) { 270 | var buf bytes.Buffer 271 | nw, err := nar.NewWriter(&buf) 272 | assert.NoError(t, err) 273 | 274 | // write a directory node 275 | err = nw.WriteHeader(&nar.Header{ 276 | Path: "/", 277 | Type: nar.TypeDirectory, 278 | }) 279 | assert.NoError(t, err) 280 | 281 | // write a symlink for "/a" 282 | err = nw.WriteHeader(&nar.Header{ 283 | Path: "/a", 284 | Type: nar.TypeSymlink, 285 | LinkTarget: "foo", 286 | }) 287 | assert.NoError(t, err) 288 | 289 | // write a symlink for "/a" 
290 | err = nw.WriteHeader(&nar.Header{ 291 | Path: "/a", 292 | Type: nar.TypeSymlink, 293 | LinkTarget: "foo", 294 | }) 295 | assert.Error(t, err) 296 | }) 297 | 298 | t.Run("lexicographically sorted with nested directory and common prefix", func(t *testing.T) { 299 | var buf bytes.Buffer 300 | nw, err := nar.NewWriter(&buf) 301 | assert.NoError(t, err) 302 | 303 | // write a directory node 304 | err = nw.WriteHeader(&nar.Header{ 305 | Path: "/", 306 | Type: nar.TypeDirectory, 307 | }) 308 | assert.NoError(t, err) 309 | 310 | // write a directory node with name "/foo" 311 | err = nw.WriteHeader(&nar.Header{ 312 | Path: "/foo", 313 | Type: nar.TypeDirectory, 314 | }) 315 | assert.NoError(t, err) 316 | 317 | // write a symlink for "/foo/b" 318 | err = nw.WriteHeader(&nar.Header{ 319 | Path: "/foo/b", 320 | Type: nar.TypeSymlink, 321 | LinkTarget: "foo", 322 | }) 323 | assert.NoError(t, err) 324 | 325 | // write a symlink for "/foo-a" 326 | err = nw.WriteHeader(&nar.Header{ 327 | Path: "/foo-a", 328 | Type: nar.TypeSymlink, 329 | LinkTarget: "foo", 330 | }) 331 | assert.NoError(t, err) 332 | }) 333 | } 334 | -------------------------------------------------------------------------------- /pkg/narinfo/check.go: -------------------------------------------------------------------------------- 1 | package narinfo 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/google/go-cmp/cmp" 7 | "github.com/numtide/nar-serve/pkg/nixpath" 8 | ) 9 | 10 | // Check does some sanity checking on a NarInfo struct, such as: 11 | // 12 | // - ensuring the paths in StorePath, References and Deriver are syntactically valid 13 | // (references and deriver first need to be made absolute) 14 | // - when no compression is present, ensuring File{Hash,Size} and 15 | // Nar{Hash,Size} are equal 16 | func (n *NarInfo) Check() error { 17 | _, err := nixpath.FromString(n.StorePath) 18 | if err != nil { 19 | return fmt.Errorf("invalid NixPath at StorePath: %v", n.StorePath) 20 | } 21 | 22 | for i, r := 
range n.References { 23 | referenceAbsolute := nixpath.Absolute(r) 24 | _, err = nixpath.FromString(referenceAbsolute) 25 | 26 | if err != nil { 27 | return fmt.Errorf("invalid NixPath at Reference[%d]: %v", i, r) 28 | } 29 | } 30 | 31 | deriverAbsolute := nixpath.Absolute(n.Deriver) 32 | 33 | _, err = nixpath.FromString(deriverAbsolute) 34 | if err != nil { 35 | return fmt.Errorf("invalid NixPath at Deriver: %v", n.Deriver) 36 | } 37 | 38 | if n.Compression == "none" { 39 | if n.FileSize != n.NarSize { 40 | return fmt.Errorf("compression is none, FileSize/NarSize differs: %d, %d", n.FileSize, n.NarSize) 41 | } 42 | 43 | if !cmp.Equal(n.FileHash, n.NarHash) { 44 | return fmt.Errorf("compression is none, FileHash/NarHash differs: %v, %v", n.FileHash, n.NarHash) 45 | } 46 | } 47 | 48 | return nil 49 | } 50 | -------------------------------------------------------------------------------- /pkg/narinfo/narinfo_test.go: -------------------------------------------------------------------------------- 1 | package narinfo_test 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "os" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/numtide/nar-serve/pkg/narinfo" 11 | "github.com/numtide/nar-serve/pkg/nixhash" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | // nolint:gochecknoglobals 16 | var ( 17 | strNarinfoSample = ` 18 | StorePath: /nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432 19 | URL: nar/1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar.xz 20 | Compression: xz 21 | FileHash: sha256:1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d 22 | FileSize: 114980 23 | NarHash: sha256:0lxjvvpr59c2mdram7ympy5ay741f180kv3349hvfc3f8nrmbqf6 24 | NarSize: 464152 25 | References: 7gx4kiv5m0i7d7qkixq2cwzbr10lvxwc-glibc-2.27 26 | Deriver: 10dx1q4ivjb115y3h90mipaaz533nr0d-net-tools-1.60_p20170221182432.drv 27 | Sig: cache.nixos.org-1:sn5s/RrqEI+YG6/PjwdbPjcAC7rcta7sJU4mFOawGvJBLsWkyLtBrT2EuFt/LJjWkTZ+ZWOI9NTtjo/woMdvAg== 28 | Sig: 
hydra.other.net-1:JXQ3Z/PXf0EZSFkFioa4FbyYpbbTbHlFBtZf4VqU0tuMTWzhMD7p9Q7acJjLn3jofOtilAAwRILKIfVuyrbjAA== 29 | ` 30 | strNarinfoSampleWithoutFileFields = ` 31 | StorePath: /nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432 32 | URL: nar/1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar.xz 33 | Compression: xz 34 | NarHash: sha256:0lxjvvpr59c2mdram7ympy5ay741f180kv3349hvfc3f8nrmbqf6 35 | NarSize: 464152 36 | References: 7gx4kiv5m0i7d7qkixq2cwzbr10lvxwc-glibc-2.27 37 | Deriver: 10dx1q4ivjb115y3h90mipaaz533nr0d-net-tools-1.60_p20170221182432.drv 38 | Sig: cache.nixos.org-1:sn5s/RrqEI+YG6/PjwdbPjcAC7rcta7sJU4mFOawGvJBLsWkyLtBrT2EuFt/LJjWkTZ+ZWOI9NTtjo/woMdvAg== 39 | Sig: hydra.other.net-1:JXQ3Z/PXf0EZSFkFioa4FbyYpbbTbHlFBtZf4VqU0tuMTWzhMD7p9Q7acJjLn3jofOtilAAwRILKIfVuyrbjAA== 40 | ` 41 | 42 | strNarinfoSampleCachix = ` 43 | StorePath: /nix/store/8nb4qdm1n2mpfcfr3hdaxw54fjdn4hqz-treefmt-docs-4b33ba3 44 | URL: nar/b136fa7b36b966d63a93f983ee03070e44bffe9ba9005cda59835e2a0f0f64b9.nar.zst 45 | Compression: zstd 46 | FileHash: sha256:b136fa7b36b966d63a93f983ee03070e44bffe9ba9005cda59835e2a0f0f64b9 47 | FileSize: 873969 48 | NarHash: sha256:1d3pp407iawzv79w453x5ff5fs0cscwzxm7572q85nijc56faxr8 49 | NarSize: 1794360 50 | References: 51 | Deriver: 7nzyn0l9402ya02g6sac073c3733k0p7-treefmt-docs-4b33ba3.drv 52 | Sig: numtide.cachix.org-1:YYcsiDnC0WR2utXGy1G6PqjDPH7TsvMrpaK4QJV1MHLks4N5XPA+Na0yzfOqBxqn9BB8NsTAqSu2B08SiIQmDA== 53 | ` 54 | 55 | _NarHash = nixhash.MustNewHashWithEncoding(nixhash.SHA256, []uint8{ 56 | 0xc6, 0xe1, 0x55, 0xb3, 0x45, 0x6e, 0x30, 0xb7, 0x61, 0x22, 0x63, 0xec, 0x09, 0x50, 0x70, 0x81, 57 | 0x1c, 0xaf, 0x8a, 0xbf, 0xd5, 0x9f, 0xaa, 0x72, 0xab, 0x82, 0xa5, 0x92, 0xef, 0xde, 0xb2, 0x53, 58 | }, nixhash.NixBase32, true) 59 | 60 | _Signatures = []*narinfo.Signature{ 61 | { 62 | KeyName: "cache.nixos.org-1", 63 | Digest: []byte{ 64 | 0xb2, 0x7e, 0x6c, 0xfd, 0x1a, 0xea, 0x10, 0x8f, 0x98, 0x1b, 0xaf, 0xcf, 0x8f, 0x07, 0x5b, 0x3e, 65 | 
0x37, 0x00, 0x0b, 0xba, 0xdc, 0xb5, 0xae, 0xec, 0x25, 0x4e, 0x26, 0x14, 0xe6, 0xb0, 0x1a, 0xf2, 66 | 0x41, 0x2e, 0xc5, 0xa4, 0xc8, 0xbb, 0x41, 0xad, 0x3d, 0x84, 0xb8, 0x5b, 0x7f, 0x2c, 0x98, 0xd6, 67 | 0x91, 0x36, 0x7e, 0x65, 0x63, 0x88, 0xf4, 0xd4, 0xed, 0x8e, 0x8f, 0xf0, 0xa0, 0xc7, 0x6f, 0x02, 68 | }, 69 | }, 70 | { 71 | KeyName: "hydra.other.net-1", 72 | Digest: []byte{ 73 | 0x25, 0x74, 0x37, 0x67, 0xf3, 0xd7, 0x7f, 0x41, 0x19, 0x48, 0x59, 0x05, 0x8a, 0x86, 0xb8, 0x15, 74 | 0xbc, 0x98, 0xa5, 0xb6, 0xd3, 0x6c, 0x79, 0x45, 0x06, 0xd6, 0x5f, 0xe1, 0x5a, 0x94, 0xd2, 0xdb, 75 | 0x8c, 0x4d, 0x6c, 0xe1, 0x30, 0x3e, 0xe9, 0xf5, 0x0e, 0xda, 0x70, 0x98, 0xcb, 0x9f, 0x78, 0xe8, 76 | 0x7c, 0xeb, 0x62, 0x94, 0x00, 0x30, 0x44, 0x82, 0xca, 0x21, 0xf5, 0x6e, 0xca, 0xb6, 0xe3, 0x00, 77 | }, 78 | }, 79 | } 80 | 81 | narinfoSample = &narinfo.NarInfo{ 82 | StorePath: "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432", 83 | URL: "nar/1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar.xz", 84 | Compression: "xz", 85 | FileHash: nixhash.MustNewHashWithEncoding(nixhash.SHA256, []byte{ 86 | 0xed, 0x34, 0xdc, 0x8f, 0x36, 0x04, 0x7d, 0x68, 0x6d, 0xc2, 0x96, 0xb7, 0xb2, 0xe3, 0xf4, 0x27, 87 | 0x84, 0x88, 0xbe, 0x5b, 0x6a, 0x94, 0xa6, 0xf7, 0xa3, 0xdc, 0x92, 0x9f, 0xe0, 0xe5, 0x24, 0x81, 88 | }, nixhash.NixBase32, true), 89 | FileSize: 114980, 90 | NarHash: _NarHash, 91 | NarSize: 464152, 92 | References: []string{"7gx4kiv5m0i7d7qkixq2cwzbr10lvxwc-glibc-2.27"}, 93 | Deriver: "10dx1q4ivjb115y3h90mipaaz533nr0d-net-tools-1.60_p20170221182432.drv", 94 | Signatures: _Signatures, 95 | } 96 | 97 | narinfoSampleWithoutFileFields = &narinfo.NarInfo{ 98 | StorePath: "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432", 99 | URL: "nar/1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar.xz", 100 | Compression: "xz", 101 | NarHash: _NarHash, 102 | NarSize: 464152, 103 | References: []string{"7gx4kiv5m0i7d7qkixq2cwzbr10lvxwc-glibc-2.27"}, 104 
| Deriver: "10dx1q4ivjb115y3h90mipaaz533nr0d-net-tools-1.60_p20170221182432.drv", 105 | Signatures: _Signatures, 106 | } 107 | 108 | narinfoSampleCachix = &narinfo.NarInfo{ 109 | StorePath: "/nix/store/8nb4qdm1n2mpfcfr3hdaxw54fjdn4hqz-treefmt-docs-4b33ba3", 110 | URL: "nar/b136fa7b36b966d63a93f983ee03070e44bffe9ba9005cda59835e2a0f0f64b9.nar.zst", 111 | Compression: "zstd", 112 | NarHash: nixhash.MustNewHashWithEncoding(nixhash.SHA256, []byte{ 113 | 0x28, 0x77, 0xe5, 0x4c, 0x61, 0x32, 0xda, 0x82, 0xb0, 0x38, 0xe5, 0xd4, 0xfe, 0x39, 0xd3, 0x0c, 114 | 0x68, 0x57, 0x9c, 0x2b, 0x7d, 0x14, 0xc2, 0xd3, 0xd9, 0x9f, 0xab, 0x78, 0x00, 0xb9, 0x77, 0xb4, 115 | }, nixhash.NixBase32, true), 116 | NarSize: 1794360, 117 | FileHash: nixhash.MustNewHashWithEncoding(nixhash.SHA256, []byte{ 118 | 0xb1, 0x36, 0xfa, 0x7b, 0x36, 0xb9, 0x66, 0xd6, 0x3a, 0x93, 0xf9, 0x83, 0xee, 0x03, 0x07, 0x0e, 119 | 0x44, 0xbf, 0xfe, 0x9b, 0xa9, 0x00, 0x5c, 0xda, 0x59, 0x83, 0x5e, 0x2a, 0x0f, 0x0f, 0x64, 0xb9, 120 | }, nixhash.Base16, true), 121 | FileSize: 873969, 122 | Deriver: "7nzyn0l9402ya02g6sac073c3733k0p7-treefmt-docs-4b33ba3.drv", 123 | Signatures: []*narinfo.Signature{ 124 | { 125 | KeyName: "numtide.cachix.org-1", 126 | Digest: []byte{ 127 | 0x61, 0x87, 0x2c, 0x88, 0x39, 0xc2, 0xd1, 0x64, 0x76, 0xba, 0xd5, 0xc6, 0xcb, 0x51, 0xba, 0x3e, 128 | 0xa8, 0xc3, 0x3c, 0x7e, 0xd3, 0xb2, 0xf3, 0x2b, 0xa5, 0xa2, 0xb8, 0x40, 0x95, 0x75, 0x30, 0x72, 129 | 0xe4, 0xb3, 0x83, 0x79, 0x5c, 0xf0, 0x3e, 0x35, 0xad, 0x32, 0xcd, 0xf3, 0xaa, 0x07, 0x1a, 0xa7, 130 | 0xf4, 0x10, 0x7c, 0x36, 0xc4, 0xc0, 0xa9, 0x2b, 0xb6, 0x07, 0x4f, 0x12, 0x88, 0x84, 0x26, 0x0c, 131 | }, 132 | }, 133 | }, 134 | } 135 | ) 136 | 137 | func TestNarInfo(t *testing.T) { 138 | ni, err := narinfo.Parse(strings.NewReader(strNarinfoSample)) 139 | assert.NoError(t, err) 140 | 141 | // Test the parsing happy path 142 | assert.Equal(t, narinfoSample, ni) 143 | assert.NoError(t, ni.Check()) 144 | 145 | // Test to string 146 | assert.Equal(t, 
strNarinfoSample, "\n"+ni.String()) 147 | } 148 | 149 | func TestNarInfoWithoutFileFields(t *testing.T) { 150 | ni, err := narinfo.Parse(strings.NewReader(strNarinfoSampleWithoutFileFields)) 151 | assert.NoError(t, err) 152 | 153 | // Test the parsing happy path 154 | assert.Equal(t, narinfoSampleWithoutFileFields, ni) 155 | assert.NoError(t, ni.Check()) 156 | 157 | // Test to string 158 | assert.Equal(t, strNarinfoSampleWithoutFileFields, "\n"+ni.String()) 159 | } 160 | 161 | func TestNarInfoCachix(t *testing.T) { 162 | ni, err := narinfo.Parse(strings.NewReader(strNarinfoSampleCachix)) 163 | assert.NoError(t, err) 164 | 165 | // Test the parsing happy path 166 | assert.Equal(t, narinfoSampleCachix, ni) 167 | assert.NoError(t, ni.Check()) 168 | 169 | // Test to string 170 | assert.Equal(t, strNarinfoSampleCachix, "\n"+ni.String()) 171 | } 172 | 173 | func TestBigNarinfo(t *testing.T) { 174 | f, err := os.Open("../../../test/testdata/big.narinfo") 175 | if err != nil { 176 | panic(err) 177 | } 178 | defer f.Close() 179 | 180 | _, err = narinfo.Parse(f) 181 | assert.NoError(t, err, "Parsing big .narinfo files shouldn't fail") 182 | } 183 | 184 | func BenchmarkNarInfo(b *testing.B) { 185 | b.Run("Regular", func(b *testing.B) { 186 | for i := 0; i < b.N; i++ { 187 | _, err := narinfo.Parse(strings.NewReader(strNarinfoSample)) 188 | assert.NoError(b, err) 189 | } 190 | }) 191 | 192 | { 193 | f, err := os.Open("../../../test/testdata/big.narinfo") 194 | if err != nil { 195 | panic(err) 196 | } 197 | defer f.Close() 198 | 199 | var buf bytes.Buffer 200 | _, err = io.ReadAll(&buf) 201 | if err != nil { 202 | panic(err) 203 | } 204 | 205 | big := buf.Bytes() 206 | 207 | b.Run("Big", func(b *testing.B) { 208 | for i := 0; i < b.N; i++ { 209 | _, err := narinfo.Parse(bytes.NewReader(big)) 210 | assert.NoError(b, err) 211 | } 212 | }) 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /pkg/narinfo/parser.go: 
-------------------------------------------------------------------------------- 1 | package narinfo 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/numtide/nar-serve/pkg/nixhash" 11 | ) 12 | 13 | // Parse reads a .narinfo file content 14 | // and returns a NarInfo struct with the parsed data. 15 | func Parse(r io.Reader) (*NarInfo, error) { 16 | narInfo := &NarInfo{} 17 | scanner := bufio.NewScanner(r) 18 | 19 | // Increase the buffer size. 20 | // Some .narinfo files have a lot of entries in References, 21 | // and bufio.Scanner will error bufio.ErrTooLong otherwise. 22 | const maxCapacity = 1048576 23 | buf := make([]byte, maxCapacity) 24 | scanner.Buffer(buf, maxCapacity) 25 | 26 | for scanner.Scan() { 27 | var err error 28 | 29 | line := scanner.Text() 30 | // skip empty lines (like, an empty line at EOF) 31 | if line == "" { 32 | continue 33 | } 34 | 35 | k, v, err := splitOnce(line, ": ") 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | switch k { 41 | case "StorePath": 42 | narInfo.StorePath = v 43 | case "URL": 44 | narInfo.URL = v 45 | case "Compression": 46 | narInfo.Compression = v 47 | case "FileHash": 48 | narInfo.FileHash, err = nixhash.ParseAny(v, nil) 49 | if err != nil { 50 | return nil, err 51 | } 52 | case "FileSize": 53 | narInfo.FileSize, err = strconv.ParseUint(v, 10, 0) 54 | if err != nil { 55 | return nil, err 56 | } 57 | case "NarHash": 58 | narInfo.NarHash, err = nixhash.ParseAny(v, nil) 59 | if err != nil { 60 | return nil, err 61 | } 62 | case "NarSize": 63 | narInfo.NarSize, err = strconv.ParseUint(v, 10, 0) 64 | if err != nil { 65 | return nil, err 66 | } 67 | case "References": 68 | if v == "" { 69 | continue 70 | } 71 | 72 | narInfo.References = append(narInfo.References, strings.Split(v, " ")...) 
73 | case "Deriver": 74 | narInfo.Deriver = v 75 | case "System": 76 | narInfo.System = v 77 | case "Sig": 78 | signature, e := ParseSignatureLine(v) 79 | if e != nil { 80 | return nil, fmt.Errorf("unable to parse signature line %v: %v", v, err) 81 | } 82 | 83 | narInfo.Signatures = append(narInfo.Signatures, signature) 84 | case "CA": 85 | narInfo.CA = v 86 | default: 87 | return nil, fmt.Errorf("unknown key %v", k) 88 | } 89 | 90 | if err != nil { 91 | return nil, fmt.Errorf("unable to parse line %v", line) 92 | } 93 | } 94 | 95 | if err := scanner.Err(); err != nil { 96 | return nil, err 97 | } 98 | 99 | // An empty/non-existrent compression field is considered to mean bzip2 100 | if narInfo.Compression == "" { 101 | narInfo.Compression = "bzip2" 102 | } 103 | 104 | return narInfo, nil 105 | } 106 | 107 | // splitOnce - Split a string and make sure it's only splittable once. 108 | func splitOnce(s string, sep string) (string, string, error) { 109 | idx := strings.Index(s, sep) 110 | if idx == -1 { 111 | return "", "", fmt.Errorf("unable to find separator '%s' in %v", sep, s) 112 | } 113 | 114 | if strings.Contains(s[:idx], sep) { 115 | return "", "", fmt.Errorf("found separator '%s' twice or more in %v", sep, s) 116 | } 117 | 118 | return s[0:idx], s[idx+len(sep):], nil 119 | } 120 | -------------------------------------------------------------------------------- /pkg/narinfo/signature.go: -------------------------------------------------------------------------------- 1 | package narinfo 2 | 3 | import ( 4 | "crypto/ed25519" 5 | "encoding/base64" 6 | "fmt" 7 | ) 8 | 9 | // Signature is used to sign a NarInfo (parts of it, to be precise). 10 | type Signature struct { 11 | KeyName string // An identifier for the key that's used for the signature 12 | 13 | Digest []byte // The digest itself, in bytes 14 | } 15 | 16 | // ParseSignatureLine parses a signature line and returns a Signature struct, or error. 
17 | func ParseSignatureLine(signatureLine string) (*Signature, error) { 18 | field0, field1, err := splitOnce(signatureLine, ":") 19 | if err != nil { 20 | return nil, err 21 | } 22 | 23 | var sig [ed25519.SignatureSize]byte 24 | 25 | n, err := base64.StdEncoding.Decode(sig[:], []byte(field1)) 26 | if err != nil { 27 | return nil, fmt.Errorf("unable to decode base64: %v", field1) 28 | } 29 | 30 | if n != len(sig) { 31 | return nil, fmt.Errorf("invalid signature size: %d", n) 32 | } 33 | 34 | return &Signature{ 35 | KeyName: field0, 36 | Digest: sig[:], 37 | }, nil 38 | } 39 | 40 | // MustParseSignatureLine parses a signature line and returns a Signature struct, or panics on error. 41 | func MustParseSignatureLine(signatureLine string) *Signature { 42 | s, err := ParseSignatureLine(signatureLine) 43 | if err != nil { 44 | panic(err) 45 | } 46 | 47 | return s 48 | } 49 | 50 | // String returns the string representation of a signature, which is `KeyName:base`. 51 | func (s *Signature) String() string { 52 | return s.KeyName + ":" + base64.StdEncoding.EncodeToString(s.Digest) 53 | } 54 | -------------------------------------------------------------------------------- /pkg/narinfo/signature_test.go: -------------------------------------------------------------------------------- 1 | package narinfo_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/numtide/nar-serve/pkg/narinfo" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | const ( 11 | dummySigLine = "cache.nixos.org-1" + 12 | ":" + "rH4wxlNRbTbViQon40C15og5zlcFEphwoF26IQGHi2QCwVYyaLj6LOag+MeWcZ65SWzy6PnOlXjriLNcxE0hAQ==" 13 | expectedKeyName = "cache.nixos.org-1" 14 | ) 15 | 16 | // nolint:gochecknoglobals 17 | var ( 18 | expectedDigest = []byte{ 19 | 0xac, 0x7e, 0x30, 0xc6, 0x53, 0x51, 0x6d, 0x36, 0xd5, 0x89, 0x0a, 0x27, 0xe3, 0x40, 0xb5, 0xe6, 20 | 0x88, 0x39, 0xce, 0x57, 0x05, 0x12, 0x98, 0x70, 0xa0, 0x5d, 0xba, 0x21, 0x01, 0x87, 0x8b, 0x64, 21 | 0x02, 0xc1, 0x56, 0x32, 0x68, 0xb8, 0xfa, 0x2c, 0xe6, 
// TestParseSignatureLine checks that a well-formed signature line is split
// into its key name and raw ed25519 digest bytes.
func TestParseSignatureLine(t *testing.T) {
	signature, err := narinfo.ParseSignatureLine(dummySigLine)
	if assert.NoError(t, err) {
		assert.Equal(t, expectedKeyName, signature.KeyName)
		assert.Equal(t, expectedDigest, signature.Digest)
	}
}

// TestMustParseSignatureLine checks the panicking variant: success mirrors
// ParseSignatureLine, and a line without a ':' separator panics.
func TestMustParseSignatureLine(t *testing.T) {
	signature := narinfo.MustParseSignatureLine(dummySigLine)
	assert.Equal(t, expectedKeyName, signature.KeyName)
	assert.Equal(t, expectedDigest, signature.Digest)

	// expectedKeyName alone has no ':' separator, so parsing must panic.
	assert.Panics(t, func() {
		_ = narinfo.MustParseSignatureLine(expectedKeyName)
	})
}

// BenchmarkParseSignatureLine measures signature-line parsing throughput.
func BenchmarkParseSignatureLine(b *testing.B) {
	for i := 0; i < b.N; i++ {
		narinfo.MustParseSignatureLine(dummySigLine)
	}
}
24 | // Identical to FileSize if no compression is used. 25 | NarSize uint64 26 | 27 | // References to other store paths, contained in the .nar file 28 | References []string 29 | 30 | // Path of the .drv for this store path 31 | Deriver string 32 | 33 | // This doesn't seem to be used at all? 34 | System string 35 | 36 | // Signatures, if any. 37 | Signatures []*Signature 38 | 39 | // TODO: Figure out the meaning of this 40 | CA string 41 | } 42 | 43 | func (n *NarInfo) String() string { 44 | var buf bytes.Buffer 45 | 46 | fmt.Fprintf(&buf, "StorePath: %v\n", n.StorePath) 47 | fmt.Fprintf(&buf, "URL: %v\n", n.URL) 48 | fmt.Fprintf(&buf, "Compression: %v\n", n.Compression) 49 | 50 | if n.FileHash != nil && n.FileSize != 0 { 51 | fmt.Fprintf(&buf, "FileHash: %s\n", n.FileHash.String()) 52 | fmt.Fprintf(&buf, "FileSize: %d\n", n.FileSize) 53 | } 54 | 55 | fmt.Fprintf(&buf, "NarHash: %s\n", n.NarHash.String()) 56 | 57 | fmt.Fprintf(&buf, "NarSize: %d\n", n.NarSize) 58 | 59 | buf.WriteString("References:") 60 | 61 | if len(n.References) == 0 { 62 | buf.WriteByte(' ') 63 | } else { 64 | for _, r := range n.References { 65 | buf.WriteByte(' ') 66 | buf.WriteString(r) 67 | } 68 | } 69 | 70 | buf.WriteByte('\n') 71 | 72 | if n.Deriver != "" { 73 | fmt.Fprintf(&buf, "Deriver: %v\n", n.Deriver) 74 | } 75 | 76 | if n.System != "" { 77 | fmt.Fprintf(&buf, "System: %v\n", n.System) 78 | } 79 | 80 | for _, s := range n.Signatures { 81 | fmt.Fprintf(&buf, "Sig: %v\n", s) 82 | } 83 | 84 | if n.CA != "" { 85 | fmt.Fprintf(&buf, "CA: %v\n", n.CA) 86 | } 87 | 88 | return buf.String() 89 | } 90 | 91 | // ContentType returns the mime content type of the object. 
92 | func (n NarInfo) ContentType() string { 93 | return "text/x-nix-narinfo" 94 | } 95 | -------------------------------------------------------------------------------- /pkg/nixbase32/doc.go: -------------------------------------------------------------------------------- 1 | // Package nixbase32 implements the slightly odd "base32" encoding that's used 2 | // in Nix. 3 | 4 | // Nix uses a custom alphabet. Contrary to other implementations (RFC4648), 5 | // encoding to "nix base32" also reads in characters in reverse order (and 6 | // doesn't use any padding), which makes adopting encoding/base32 hard. 7 | // This package provides some of the functions defined in 8 | // encoding/base32.Encoding. 9 | 10 | package nixbase32 11 | -------------------------------------------------------------------------------- /pkg/nixbase32/nixbase32.go: -------------------------------------------------------------------------------- 1 | package nixbase32 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | // Alphabet contains the list of valid characters for the Nix base32 alphabet. 9 | const Alphabet = "0123456789abcdfghijklmnpqrsvwxyz" 10 | 11 | func decodeString(s string, dst []byte) error { 12 | var dstLen int 13 | if dst != nil { 14 | dstLen = len(dst) 15 | } else { 16 | dstLen = DecodedLen(len(s)) 17 | } 18 | 19 | for n := 0; n < len(s); n++ { 20 | c := s[len(s)-n-1] 21 | 22 | digit := strings.IndexByte(Alphabet, c) 23 | if digit == -1 { 24 | return fmt.Errorf("character %v not in alphabet", c) 25 | } 26 | 27 | b := uint(n * 5) 28 | i := b / 8 29 | j := b % 8 30 | 31 | // OR the main pattern 32 | if dst != nil { 33 | dst[i] |= byte(digit) << j 34 | } 35 | 36 | // calculate the "carry pattern" 37 | carry := byte(digit) >> (8 - j) 38 | 39 | // if we're at the end of dst… 40 | if i == uint(dstLen-1) { 41 | // but have a nonzero carry, the encoding is invalid. 
42 | if carry != 0 { 43 | return fmt.Errorf("invalid encoding") 44 | } 45 | } else if dst != nil { 46 | dst[i+1] |= carry 47 | } 48 | } 49 | 50 | return nil 51 | } 52 | 53 | // ValidateBytes validates if a byte slice is valid nixbase32. 54 | func ValidateBytes(b []byte) error { 55 | return ValidateString(string(b)) 56 | } 57 | 58 | // ValidateString validates if a string is valid nixbase32. 59 | func ValidateString(s string) error { 60 | return decodeString(s, nil) 61 | } 62 | 63 | // EncodedLen returns the length in bytes of the base32 encoding of an input 64 | // buffer of length n. 65 | func EncodedLen(n int) int { 66 | if n == 0 { 67 | return 0 68 | } 69 | 70 | return (n*8-1)/5 + 1 71 | } 72 | 73 | // DecodedLen returns the length in bytes of the decoded data 74 | // corresponding to n bytes of base32-encoded data. 75 | // If we have bits that don't fit into here, they are padding and must 76 | // be 0. 77 | func DecodedLen(n int) int { 78 | return (n * 5) / 8 79 | } 80 | 81 | // EncodeToString returns the nixbase32 encoding of src. 82 | func EncodeToString(src []byte) string { 83 | l := EncodedLen(len(src)) 84 | 85 | var dst strings.Builder 86 | 87 | dst.Grow(l) 88 | 89 | for n := l - 1; n >= 0; n-- { 90 | b := uint(n * 5) 91 | i := b / 8 92 | j := b % 8 93 | 94 | c := src[i] >> j 95 | 96 | if i+1 < uint(len(src)) { 97 | c |= src[i+1] << (8 - j) 98 | } 99 | 100 | dst.WriteByte(Alphabet[c&0x1f]) 101 | } 102 | 103 | return dst.String() 104 | } 105 | 106 | // DecodeString returns the bytes represented by the nixbase32 string s or 107 | // returns an error. 108 | func DecodeString(s string) ([]byte, error) { 109 | dst := make([]byte, DecodedLen(len(s))) 110 | 111 | return dst, decodeString(s, dst) 112 | } 113 | 114 | // MustDecodeString returns the bytes represented by the nixbase32 string s or 115 | // panics on error. 
116 | func MustDecodeString(s string) []byte { 117 | b, err := DecodeString(s) 118 | if err != nil { 119 | panic(err) 120 | } 121 | 122 | return b 123 | } 124 | -------------------------------------------------------------------------------- /pkg/nixbase32/nixbase32_test.go: -------------------------------------------------------------------------------- 1 | package nixbase32_test 2 | 3 | import ( 4 | "math/rand" 5 | "strconv" 6 | "testing" 7 | 8 | "github.com/numtide/nar-serve/pkg/nixbase32" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | // nolint:gochecknoglobals 13 | var tt = []struct { 14 | dec []byte 15 | enc string 16 | }{ 17 | {[]byte{}, ""}, 18 | {[]byte{0x1f}, "0z"}, 19 | { 20 | []byte{ 21 | 0xd8, 0x6b, 0x33, 0x92, 0xc1, 0x20, 0x2e, 0x8f, 22 | 0xf5, 0xa4, 0x23, 0xb3, 0x02, 0xe6, 0x28, 0x4d, 23 | 0xb7, 0xf8, 0xf4, 0x35, 0xea, 0x9f, 0x39, 0xb5, 24 | 0xb1, 0xb2, 0x0f, 0xd3, 0xac, 0x36, 0xdf, 0xcb, 25 | }, 26 | "1jyz6snd63xjn6skk7za6psgidsd53k05cr3lksqybi0q6936syq", 27 | }, 28 | } 29 | 30 | func TestEncode(t *testing.T) { 31 | for i := range tt { 32 | assert.Equal(t, tt[i].enc, nixbase32.EncodeToString(tt[i].dec)) 33 | } 34 | } 35 | 36 | func TestDecode(t *testing.T) { 37 | for i := range tt { 38 | b, err := nixbase32.DecodeString(tt[i].enc) 39 | 40 | if assert.NoError(t, err) { 41 | assert.Equal(t, tt[i].dec, b) 42 | } 43 | } 44 | } 45 | 46 | func TestValidate(t *testing.T) { 47 | for i := range tt { 48 | err := nixbase32.ValidateString(tt[i].enc) 49 | 50 | assert.NoError(t, err) 51 | } 52 | } 53 | 54 | func TestMustDecodeString(t *testing.T) { 55 | for i := range tt { 56 | b := nixbase32.MustDecodeString(tt[i].enc) 57 | assert.Equal(t, tt[i].dec, b) 58 | } 59 | } 60 | 61 | func TestDecodeInvalid(t *testing.T) { 62 | invalidEncodings := []string{ 63 | // this is invalid encoding, because it encodes 10 1-bytes, so the carry 64 | // would be 2 1-bytes 65 | "zz", 66 | // this is an even more specific example - it'd decode as 00000000 11 67 | "c0", 68 
| } 69 | 70 | for _, c := range invalidEncodings { 71 | _, err := nixbase32.DecodeString(c) 72 | assert.Error(t, err) 73 | 74 | err = nixbase32.ValidateString(c) 75 | assert.Error(t, err) 76 | 77 | assert.Panics(t, func() { 78 | _ = nixbase32.MustDecodeString(c) 79 | }) 80 | } 81 | } 82 | 83 | func BenchmarkEncode(b *testing.B) { 84 | sizes := []int{32, 64, 128} 85 | 86 | for _, s := range sizes { 87 | bytes := make([]byte, s) 88 | rand.Read(bytes) // nolint:gosec 89 | 90 | b.Run(strconv.Itoa(s), func(b *testing.B) { 91 | for i := 0; i < b.N; i++ { 92 | nixbase32.EncodeToString(bytes) 93 | } 94 | }) 95 | } 96 | } 97 | 98 | func BenchmarkDecode(b *testing.B) { 99 | sizes := []int{32, 64, 128} 100 | 101 | for _, s := range sizes { 102 | bytes := make([]byte, s) 103 | rand.Read(bytes) // nolint:gosec 104 | input := nixbase32.EncodeToString(bytes) 105 | 106 | b.Run(strconv.Itoa(s), func(b *testing.B) { 107 | for i := 0; i < b.N; i++ { 108 | _, err := nixbase32.DecodeString(input) 109 | if err != nil { 110 | b.Fatal("error: %w", err) 111 | } 112 | } 113 | }) 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /pkg/nixhash/algo.go: -------------------------------------------------------------------------------- 1 | package nixhash 2 | 3 | import ( 4 | "crypto" 5 | "fmt" 6 | ) 7 | 8 | // Algorithm represent the hashing algorithm used to digest the data. 
type Algorithm uint8

const (
	// The zero value is deliberately left invalid so that an
	// uninitialized Algorithm is caught by String()/Func() instead of
	// silently behaving like a real algorithm.
	_ Algorithm = iota

	// All the algorithms that Nix understands.
	MD5
	SHA1
	SHA256
	SHA512
)

// ParseAlgorithm parses the lowercase name of a hash algorithm as used in
// Nix hash strings (e.g. "sha256") into an Algorithm.
// It returns an error for unknown names.
func ParseAlgorithm(s string) (Algorithm, error) {
	switch s {
	case "md5":
		return MD5, nil
	case "sha1":
		return SHA1, nil
	case "sha256":
		return SHA256, nil
	case "sha512":
		return SHA512, nil
	default:
		return 0, fmt.Errorf("unknown algorithm: %s", s)
	}
}

// String returns the canonical lowercase name of the algorithm.
// It panics on an invalid Algorithm value, which can only be produced by
// manually constructing one (e.g. Algorithm(0)).
func (a Algorithm) String() string {
	switch a {
	case MD5:
		return "md5"
	case SHA1:
		return "sha1"
	case SHA256:
		return "sha256"
	case SHA512:
		return "sha512"
	default:
		panic(fmt.Sprintf("bug: unknown algorithm %d", a))
	}
}

// Func returns the cryptographic hash function for the Algorithm (implementing crypto.Hash)
// It panics when encountering an invalid Algorithm, as these can only occur by
// manually filling the struct.
54 | func (a Algorithm) Func() crypto.Hash { 55 | switch a { 56 | case MD5: 57 | return crypto.MD5 58 | case SHA1: 59 | return crypto.SHA1 60 | case SHA256: 61 | return crypto.SHA256 62 | case SHA512: 63 | return crypto.SHA512 64 | default: 65 | panic(fmt.Sprintf("Invalid hash type: %v", a)) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /pkg/nixhash/algo_test.go: -------------------------------------------------------------------------------- 1 | package nixhash_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/numtide/nar-serve/pkg/nixhash" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestAlgo(t *testing.T) { 11 | cases := []struct { 12 | Title string 13 | Str string 14 | Algo nixhash.Algorithm 15 | }{ 16 | { 17 | "valid md5", 18 | "md5", 19 | nixhash.MD5, 20 | }, 21 | { 22 | "valid sha1", 23 | "sha1", 24 | nixhash.SHA1, 25 | }, 26 | { 27 | "valid sha256", 28 | "sha256", 29 | nixhash.SHA256, 30 | }, 31 | { 32 | "valid sha512", 33 | "sha512", 34 | nixhash.SHA512, 35 | }, 36 | } 37 | 38 | t.Run("ParseAlgorithm", func(t *testing.T) { 39 | for _, c := range cases { 40 | t.Run(c.Title, func(t *testing.T) { 41 | algo, err := nixhash.ParseAlgorithm(c.Str) 42 | assert.NoError(t, err) 43 | assert.Equal(t, c.Algo, algo) 44 | assert.Equal(t, c.Str, algo.String()) 45 | }) 46 | } 47 | }) 48 | 49 | t.Run("ParseInvalidAlgo", func(t *testing.T) { 50 | _, err := nixhash.ParseAlgorithm("woot") 51 | assert.Error(t, err) 52 | }) 53 | 54 | t.Run("PrintInalidAlgo", func(t *testing.T) { 55 | assert.Panics(t, func() { 56 | _ = nixhash.Algorithm(0).String() 57 | }) 58 | }) 59 | 60 | } 61 | -------------------------------------------------------------------------------- /pkg/nixhash/encoding.go: -------------------------------------------------------------------------------- 1 | package nixhash 2 | 3 | import ( 4 | "encoding/base64" 5 | ) 6 | 7 | // Encoding is the string representation of the hashed data. 
8 | type Encoding uint8 9 | 10 | const ( 11 | _ = iota // ignore zero value 12 | 13 | // All the encodings that Nix understands 14 | Base16 = Encoding(iota) // Lowercase hexadecimal encoding. 15 | Base64 = Encoding(iota) // [IETF RFC 4648, section 4](https://datatracker.ietf.org/doc/html/rfc4648#section-4). 16 | NixBase32 = Encoding(iota) // Nix-specific base-32 encoding. 17 | SRI = Encoding(iota) // W3C recommendation [Subresource Intergrity](https://www.w3.org/TR/SRI/) 18 | ) 19 | 20 | // b64 is the specific base64 encoding that we are using. 21 | var b64 = base64.StdEncoding 22 | -------------------------------------------------------------------------------- /pkg/nixhash/hash.go: -------------------------------------------------------------------------------- 1 | // Package nixhash provides methods to serialize and deserialize some of the 2 | // hashes used in nix code and .narinfo files. 3 | // 4 | // Nix uses different representation of hashes depending on the context 5 | // and history of the project. This package provides the utilities to handle them. 6 | package nixhash 7 | 8 | import ( 9 | "encoding/hex" 10 | "fmt" 11 | 12 | "github.com/numtide/nar-serve/pkg/nixbase32" 13 | ) 14 | 15 | type Hash struct { 16 | algo Algorithm 17 | digest []byte 18 | } 19 | 20 | func NewHash(algo Algorithm, digest []byte) (*Hash, error) { 21 | if algo.Func().Size() != len(digest) { 22 | return nil, fmt.Errorf("algo length doesn't match digest size") 23 | } 24 | 25 | return &Hash{algo, digest}, nil 26 | } 27 | 28 | func MustNewHash(algo Algorithm, digest []byte) *Hash { 29 | h, err := NewHash(algo, digest) 30 | if err != nil { 31 | panic(err) 32 | } 33 | return h 34 | } 35 | 36 | func (h Hash) Algo() Algorithm { 37 | return h.algo 38 | } 39 | 40 | func (h Hash) Digest() []byte { 41 | return h.digest 42 | } 43 | 44 | // Format converts the hash to a string of the given encoding. 
45 | func (h Hash) Format(e Encoding, includeAlgo bool) string { 46 | var s string 47 | if e == SRI || includeAlgo { 48 | s += h.algo.String() 49 | if e == SRI { 50 | s += "-" 51 | } else { 52 | s += ":" 53 | } 54 | } 55 | switch e { 56 | case Base16: 57 | s += hex.EncodeToString(h.digest) 58 | case NixBase32: 59 | s += nixbase32.EncodeToString(h.digest) 60 | case Base64, SRI: 61 | s += b64.EncodeToString(h.digest) 62 | default: 63 | panic(fmt.Sprintf("bug: unknown encoding: %v", e)) 64 | } 65 | return s 66 | } 67 | -------------------------------------------------------------------------------- /pkg/nixhash/hash_test.go: -------------------------------------------------------------------------------- 1 | package nixhash_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/numtide/nar-serve/pkg/nixhash" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestDigest(t *testing.T) { 11 | cases := []struct { 12 | Title string 13 | EncodedHash string 14 | Algo nixhash.Algorithm 15 | Encoding nixhash.Encoding 16 | IncludePrefix bool 17 | Digest []byte 18 | }{ 19 | { 20 | "valid sha256", 21 | "sha256:1rjs6c23nyf8zkmf7yxglz2q2m7v5kp51nc2m0lk4h998d0qiixs", 22 | nixhash.SHA256, 23 | nixhash.NixBase32, 24 | true, 25 | []byte{ 26 | 0xba, 0xc7, 0x88, 0x41, 0x43, 0x29, 0x41, 0x32, 27 | 0x29, 0xa8, 0x82, 0xd9, 0x50, 0xee, 0x2c, 0xfb, 28 | 0x54, 0x81, 0xc5, 0xa7, 0xaf, 0xfb, 0xe3, 0xea, 29 | 0xfc, 0xc8, 0x79, 0x3b, 0x04, 0x33, 0x5a, 0xe6, 30 | }, 31 | }, 32 | { 33 | "valid sha512", 34 | "sha512:37iwwa5iw4m6pkd6qs2c5lw13q7y16hw2rv4i1cx6jax6yibhn6fgajbwc8p4j1fc6iicpy5r1vi7hpfq3n6z1ikhm5kcyz2b1frk80", 35 | nixhash.SHA512, 36 | nixhash.NixBase32, 37 | true, 38 | []byte{ 39 | 0x00, 0xcd, 0xec, 0xc2, 0x12, 0xdf, 0xb3, 0x59, 40 | 0x2a, 0x9c, 0x31, 0x7c, 0x63, 0x07, 0x76, 0x17, 41 | 0x9e, 0xb8, 0x43, 0x2e, 0xfe, 0xb2, 0x18, 0x0d, 42 | 0x73, 0x41, 0x92, 0x8b, 0x18, 0x5f, 0x52, 0x3d, 43 | 0x67, 0x2c, 0x5c, 0xd1, 0x9b, 0xae, 0xa4, 0xe9, 44 | 0x2c, 0x44, 0xb2, 0xb3, 0xe0, 0xd0, 
0x04, 0x7f, 45 | 0xf0, 0x08, 0x9c, 0x16, 0x26, 0x34, 0x36, 0x6d, 46 | 0x5e, 0x53, 0x09, 0x8f, 0x45, 0x71, 0x1e, 0xcf, 47 | }, 48 | }, 49 | { 50 | "invalid base32", 51 | "sha256:1rjs6c2tnyf8zkmf7yxglz2q2m7v5kp51nc2m0lk4h998d0qiixs", 52 | nixhash.SHA256, 53 | nixhash.NixBase32, 54 | true, 55 | nil, // means no result 56 | }, 57 | { 58 | "invalid digest length", 59 | "", // means this should panic 60 | nixhash.SHA256, 61 | nixhash.NixBase32, 62 | true, 63 | []byte{ 64 | 0xba, 0xc7, 0x88, 0x41, 0x43, 0x29, 0x41, 0x32, 65 | 0x29, 0xa8, 0x82, 0xd9, 0x50, 0xee, 0x2c, 0xfb, 66 | 0x54, 0x81, 0xc5, 0xa7, 0xaf, 0xfb, 0xe3, 0xea, 67 | 0xfc, 0xc8, 0x79, 0x3b, 0x04, 0x33, 0x5a, 68 | }, 69 | }, 70 | { 71 | "invalid encoded digest length", 72 | "sha256:37iwwa5iw4m6pkd6qs2c5lw13q7y16hw2rv4i1cx6jax6yibhn6fgajbwc8p4j1fc6iicpy5r1vi7hpfq3n6z1ikhm5kcyz2b1frk80", 73 | nixhash.SHA256, 74 | nixhash.Base64, 75 | true, 76 | nil, 77 | }, 78 | } 79 | 80 | t.Run("ParseAny", func(t *testing.T) { 81 | for _, c := range cases { 82 | t.Run(c.Title, func(t *testing.T) { 83 | if c.EncodedHash == "" { 84 | return // there is no valid string representation to parse 85 | } 86 | 87 | hash, err := nixhash.ParseAny(c.EncodedHash, &c.Algo) 88 | 89 | if c.Digest != nil { 90 | if assert.NoError(t, err, "shouldn't error") { 91 | h, err := nixhash.NewHashWithEncoding(c.Algo, c.Digest, c.Encoding, c.IncludePrefix) 92 | assert.NoError(t, err) 93 | assert.Equal(t, h, hash) 94 | } 95 | } else { 96 | assert.Error(t, err, "should error") 97 | } 98 | }) 99 | } 100 | }) 101 | 102 | t.Run("Format", func(t *testing.T) { 103 | for _, c := range cases { 104 | t.Run(c.Title, func(t *testing.T) { 105 | if c.Digest == nil { 106 | return // there is no valid parsed representation to stringify 107 | } 108 | 109 | hash, err := nixhash.NewHashWithEncoding(c.Algo, c.Digest, c.Encoding, c.IncludePrefix) 110 | 111 | if c.EncodedHash == "" { 112 | assert.Error(t, err) 113 | } else { 114 | assert.NoError(t, err) 115 | assert.Equal(t, 
c.EncodedHash, hash.String()) 116 | } 117 | }) 118 | } 119 | }) 120 | } 121 | -------------------------------------------------------------------------------- /pkg/nixhash/hash_with_encoding.go: -------------------------------------------------------------------------------- 1 | package nixhash 2 | 3 | // HashWithEncoding stores the original encoding so the user can get error messages with the same encoding. 4 | type HashWithEncoding struct { 5 | Hash 6 | encoding Encoding 7 | includeAlgo bool 8 | } 9 | 10 | func NewHashWithEncoding(algo Algorithm, digest []byte, encoding Encoding, includeAlgo bool) (*HashWithEncoding, error) { 11 | h, err := NewHash(algo, digest) 12 | if err != nil { 13 | return nil, err 14 | } 15 | return &HashWithEncoding{ 16 | Hash: *h, 17 | encoding: encoding, 18 | includeAlgo: includeAlgo, 19 | }, nil 20 | } 21 | 22 | func MustNewHashWithEncoding(algo Algorithm, digest []byte, encoding Encoding, includeAlgo bool) *HashWithEncoding { 23 | h := MustNewHash(algo, digest) 24 | return &HashWithEncoding{ 25 | Hash: *h, 26 | encoding: encoding, 27 | includeAlgo: includeAlgo, 28 | } 29 | } 30 | 31 | // String return the previous representation of a given hash. 32 | func (h HashWithEncoding) String() string { 33 | return h.Format(h.encoding, h.includeAlgo) 34 | } 35 | -------------------------------------------------------------------------------- /pkg/nixhash/parse.go: -------------------------------------------------------------------------------- 1 | package nixhash 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/numtide/nar-serve/pkg/nixbase32" 9 | ) 10 | 11 | // Parse the hash from a string representation in the format 12 | // "[:]" or "-" (a 13 | // Subresource Integrity hash expression). If the 'optAlgo' argument 14 | // is not present, then the hash algorithm must be specified in the 15 | // string. 
16 | func ParseAny(s string, optAlgo *Algorithm) (*HashWithEncoding, error) { 17 | var ( 18 | isSRI = false 19 | err error 20 | ) 21 | h := &HashWithEncoding{} 22 | 23 | // Look for prefix 24 | i := strings.IndexByte(s, ':') 25 | if i <= 0 { 26 | i = strings.IndexByte(s, '-') 27 | if i > 0 { 28 | isSRI = true 29 | } 30 | } 31 | 32 | // If has prefix, get the algo 33 | if i > 0 { 34 | h.includeAlgo = true 35 | h.algo, err = ParseAlgorithm(s[:i]) 36 | if err != nil { 37 | return nil, err 38 | } 39 | if optAlgo != nil && h.algo != *optAlgo { 40 | return nil, fmt.Errorf("algo doesn't match expected algo: %v, %v", h.algo, optAlgo) 41 | } 42 | // keep the remainder for the encoding 43 | s = s[i+1:] 44 | } else if optAlgo != nil { 45 | h.algo = *optAlgo 46 | } else { 47 | return nil, fmt.Errorf("unable to find separator in %v", s) 48 | } 49 | 50 | // Decode the string. Because we know the algo, and each encoding has a different size, we 51 | // can find out which of the encoding was used to represent the hash. 52 | digestLenBytes := h.algo.Func().Size() 53 | switch len(s) { 54 | case hex.EncodedLen(digestLenBytes): 55 | h.encoding = Base16 56 | h.digest, err = hex.DecodeString(s) 57 | case nixbase32.EncodedLen(digestLenBytes): 58 | h.encoding = NixBase32 59 | h.digest, err = nixbase32.DecodeString(s) 60 | case b64.EncodedLen(digestLenBytes): 61 | h.encoding = Base64 62 | h.digest, err = b64.DecodeString(s) 63 | default: 64 | return h, fmt.Errorf("unknown encoding for %v", s) 65 | } 66 | if err != nil { 67 | return h, err 68 | } 69 | 70 | // Post-processing for SRI 71 | if isSRI { 72 | if h.encoding == Base64 { 73 | h.encoding = SRI 74 | } else { 75 | return h, fmt.Errorf("invalid encoding for SRI: %v", h.encoding) 76 | } 77 | } 78 | 79 | return h, nil 80 | } 81 | 82 | // ParseNixBase32 returns a new Hash struct, by parsing a hashtype:nixbase32 string, or an error. 
83 | func ParseNixBase32(s string) (*Hash, error) { 84 | h, err := ParseAny(s, nil) 85 | if err != nil { 86 | return nil, err 87 | } 88 | if h.encoding != NixBase32 { 89 | return nil, fmt.Errorf("expected NixBase32 encoding but got %v", h.encoding) 90 | } 91 | return &h.Hash, nil 92 | } 93 | 94 | // MustParseNixBase32 returns a new Hash struct, by parsing a hashtype:nixbase32 string, or panics on error. 95 | func MustParseNixBase32(s string) *Hash { 96 | h, err := ParseNixBase32(s) 97 | if err != nil { 98 | panic(err) 99 | } 100 | 101 | return h 102 | } 103 | -------------------------------------------------------------------------------- /pkg/nixhash/util.go: -------------------------------------------------------------------------------- 1 | package nixhash 2 | 3 | // CompressHash takes an arbitrary long sequence of bytes (usually a hash digest), 4 | // and returns a sequence of bytes of length newSize. 5 | // It's calculated by rotating through the bytes in the output buffer (zero-initialized), 6 | // and XOR'ing with each byte in the passed input 7 | // It consumes 1 byte at a time, and XOR's it with the current value in the output buffer. 8 | func CompressHash(input []byte, outputSize int) []byte { 9 | buf := make([]byte, outputSize) 10 | for i := 0; i < len(input); i++ { 11 | buf[i%outputSize] ^= input[i] 12 | } 13 | 14 | return buf 15 | } 16 | -------------------------------------------------------------------------------- /pkg/nixpath/nixpath.go: -------------------------------------------------------------------------------- 1 | // Package nixpath parses and renders Nix store paths. 
2 | package nixpath 3 | 4 | import ( 5 | "fmt" 6 | "path" 7 | "regexp" 8 | 9 | "github.com/numtide/nar-serve/pkg/nixbase32" 10 | ) 11 | 12 | const ( 13 | StoreDir = "/nix/store" 14 | PathHashSize = 20 15 | ) 16 | 17 | // nolint:gochecknoglobals 18 | var ( 19 | NameRe = regexp.MustCompile(`[a-zA-Z0-9+\-_?=][.a-zA-Z0-9+\-_?=]*`) 20 | PathRe = regexp.MustCompile(fmt.Sprintf( 21 | `^%v/([%v]{%d})-(%v)$`, 22 | regexp.QuoteMeta(StoreDir), 23 | nixbase32.Alphabet, 24 | nixbase32.EncodedLen(PathHashSize), 25 | NameRe, 26 | )) 27 | 28 | // Length of the hash portion of the store path in base32. 29 | encodedPathHashSize = nixbase32.EncodedLen(PathHashSize) 30 | 31 | // Offset in path string to name. 32 | nameOffset = len(StoreDir) + 1 + encodedPathHashSize + 1 33 | // Offset in path string to hash. 34 | hashOffset = len(StoreDir) + 1 35 | ) 36 | 37 | // NixPath represents a bare nix store path, without any paths underneath `/nix/store/…-…`. 38 | type NixPath struct { 39 | Name string 40 | Digest []byte 41 | } 42 | 43 | func (n *NixPath) String() string { 44 | return Absolute(nixbase32.EncodeToString(n.Digest) + "-" + n.Name) 45 | } 46 | 47 | func (n *NixPath) Validate() error { 48 | return Validate(n.String()) 49 | } 50 | 51 | // FromString parses a path string into a nix path, 52 | // verifying it's syntactically valid 53 | // It returns an error if it fails to parse. 54 | func FromString(s string) (*NixPath, error) { 55 | if err := Validate(s); err != nil { 56 | return nil, err 57 | } 58 | 59 | digest, err := nixbase32.DecodeString(s[hashOffset : hashOffset+encodedPathHashSize]) 60 | if err != nil { 61 | return nil, fmt.Errorf("unable to decode hash: %v", err) 62 | } 63 | 64 | return &NixPath{ 65 | Name: s[nameOffset:], 66 | Digest: digest, 67 | }, nil 68 | } 69 | 70 | // Absolute prefixes a nixpath name with StoreDir and a '/', and cleans the path. 
71 | // It does not prevent from leaving StoreDir, so check if it still starts with StoreDir 72 | // if you accept untrusted input. 73 | // This should be used when assembling store paths in hashing contexts. 74 | // Even if this code is running on windows, we want to use forward 75 | // slashes to construct them. 76 | func Absolute(name string) string { 77 | return path.Join(StoreDir, name) 78 | } 79 | 80 | // Validate validates a path string, verifying it's syntactically valid. 81 | func Validate(s string) error { 82 | if len(s) < nameOffset+1 { 83 | return fmt.Errorf("unable to parse path: invalid path length %d for path %v", len(s), s) 84 | } 85 | 86 | if s[:len(StoreDir)] != StoreDir { 87 | return fmt.Errorf("unable to parse path: mismatching store path prefix for path %v", s) 88 | } 89 | 90 | if err := nixbase32.ValidateString(s[hashOffset : hashOffset+encodedPathHashSize]); err != nil { 91 | return fmt.Errorf("unable to parse path: error validating path nixbase32 %v: %v", err, s) 92 | } 93 | 94 | for _, c := range s[nameOffset:] { 95 | if (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && (c < '0' || c > '9') { 96 | switch c { 97 | case '-': 98 | continue 99 | case '_': 100 | continue 101 | case '.': 102 | continue 103 | case '+': 104 | continue 105 | case '?': 106 | continue 107 | case '=': 108 | continue 109 | } 110 | 111 | return fmt.Errorf("unable to parse path: invalid character in path: %v", s) 112 | } 113 | } 114 | 115 | return nil 116 | } 117 | -------------------------------------------------------------------------------- /pkg/nixpath/nixpath_test.go: -------------------------------------------------------------------------------- 1 | package nixpath_test 2 | 3 | import ( 4 | "path" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/numtide/nar-serve/pkg/nixpath" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestNixPath(t *testing.T) { 13 | t.Run("happy path", func(t *testing.T) { 14 | exampleNixPathStr := 
"/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432" 15 | nixpath, err := nixpath.FromString(exampleNixPathStr) 16 | 17 | if assert.NoError(t, err) { 18 | assert.Equal(t, "net-tools-1.60_p20170221182432", nixpath.Name) 19 | assert.Equal(t, []byte{ 20 | 0x8a, 0x12, 0x32, 0x15, 0x22, 0xfd, 0x91, 0xef, 0xbd, 0x60, 0xeb, 0xb2, 0x48, 0x1a, 0xf8, 0x85, 21 | 0x80, 0xf6, 0x16, 0x00, 22 | }, nixpath.Digest) 23 | } 24 | 25 | // Test to string 26 | assert.Equal(t, exampleNixPathStr, nixpath.String()) 27 | }) 28 | 29 | t.Run("invalid hash length", func(t *testing.T) { 30 | s := "/nix/store/00bgd045z0d4icpbc2yy-net-tools-1.60_p20170221182432" 31 | 32 | _, err := nixpath.FromString(s) 33 | assert.Error(t, err) 34 | 35 | err = nixpath.Validate(s) 36 | assert.Error(t, err) 37 | }) 38 | 39 | t.Run("invalid encoding in hash", func(t *testing.T) { 40 | s := "/nix/store/00bgd045z0d4icpbc2yyz4gx48aku4la-net-tools-1.60_p20170221182432" 41 | 42 | _, err := nixpath.FromString(s) 43 | assert.Error(t, err) 44 | 45 | err = nixpath.Validate(s) 46 | assert.Error(t, err) 47 | }) 48 | 49 | t.Run("more than just the bare nix store path", func(t *testing.T) { 50 | s := "/nix/store/00bgd045z0d4icpbc2yyz4gx48aku4la-net-tools-1.60_p20170221182432/bin/arp" 51 | 52 | _, err := nixpath.FromString(s) 53 | assert.Error(t, err) 54 | 55 | err = nixpath.Validate(s) 56 | assert.Error(t, err) 57 | }) 58 | } 59 | 60 | func TestNixPathAbsolute(t *testing.T) { 61 | t.Run("simple (foo)", func(t *testing.T) { 62 | s := nixpath.Absolute("foo") 63 | assert.Equal(t, nixpath.StoreDir+"/"+"foo", s) 64 | }) 65 | t.Run("subdir (foo/bar)", func(t *testing.T) { 66 | s := nixpath.Absolute("foo/bar") 67 | assert.Equal(t, nixpath.StoreDir+"/"+"foo/bar", s) 68 | }) 69 | t.Run("with ../ getting cleaned (foo/bar/.. 
-> foo)", func(t *testing.T) { 70 | s := nixpath.Absolute("foo/bar/..") 71 | assert.Equal(t, nixpath.StoreDir+"/"+"foo", s) 72 | }) 73 | // test you can use this to exit nixpath.StoreDir 74 | // Note path.Join does a path.Clean already, this is only 75 | // written for additional clarity. 76 | t.Run("leave storeDir", func(t *testing.T) { 77 | s := nixpath.Absolute("..") 78 | assert.Equal(t, path.Clean(path.Join(nixpath.StoreDir, "..")), s) 79 | assert.False(t, strings.HasPrefix(s, nixpath.StoreDir), 80 | "path shouldn't have the full storedir as prefix anymore (/nix)") 81 | }) 82 | } 83 | 84 | func BenchmarkNixPath(b *testing.B) { 85 | path := "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432" 86 | 87 | b.Run("FromString", func(b *testing.B) { 88 | for i := 0; i < b.N; i++ { 89 | _, err := nixpath.FromString(path) 90 | if err != nil { 91 | b.Fatal(err) 92 | } 93 | } 94 | }) 95 | 96 | b.Run("Validate", func(b *testing.B) { 97 | for i := 0; i < b.N; i++ { 98 | err := nixpath.Validate(path) 99 | if err != nil { 100 | b.Fatal(err) 101 | } 102 | } 103 | }) 104 | 105 | { 106 | p, err := nixpath.FromString(path) 107 | if err != nil { 108 | b.Fatal(err) 109 | } 110 | 111 | b.Run("ValidateStruct", func(b *testing.B) { 112 | for i := 0; i < b.N; i++ { 113 | err := p.Validate() 114 | if err != nil { 115 | b.Fatal(err) 116 | } 117 | } 118 | }) 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /pkg/nixpath/references/refs.go: -------------------------------------------------------------------------------- 1 | package references 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | 8 | "github.com/numtide/nar-serve/pkg/nixbase32" 9 | "github.com/numtide/nar-serve/pkg/nixpath" 10 | ) 11 | 12 | const ( 13 | storePrefixLength = len(nixpath.StoreDir) + 1 14 | refLength = len(nixbase32.Alphabet) // Store path hash prefix length 15 | ) 16 | 17 | // nolint:gochecknoglobals 18 | // This creates an array to 
check if a given byte is in the Nix base32 alphabet. 19 | var isNixBase32 = func() (arr [256]bool) { 20 | for _, c := range nixbase32.Alphabet { 21 | arr[c] = true 22 | } 23 | 24 | return 25 | }() 26 | 27 | // ReferenceScanner scans a stream of data for references to store paths to extract run time dependencies. 28 | type ReferenceScanner struct { 29 | // Map of store path hashes to full store paths. 30 | hashes map[string]string 31 | 32 | // Set of hits. 33 | hits map[string]struct{} 34 | 35 | // Buffer for current partial hit. 36 | buf [refLength]byte 37 | 38 | // How far into buf is currently written. 39 | n int 40 | } 41 | 42 | func NewReferenceScanner(storePathCandidates []string) (*ReferenceScanner, error) { 43 | var buf [refLength]byte 44 | 45 | hashes := make(map[string]string) 46 | 47 | for _, storePath := range storePathCandidates { 48 | if !strings.HasPrefix(storePath, nixpath.StoreDir) { 49 | return nil, fmt.Errorf("missing store path prefix: %s", storePath) 50 | } 51 | 52 | // Check length is a valid store path length including dashes 53 | if len(storePath) < len(nixpath.StoreDir)+refLength+3 { 54 | return nil, fmt.Errorf("invalid store path length: %d for store path '%s'", len(storePath), storePath) 55 | } 56 | 57 | hashes[storePath[storePrefixLength:storePrefixLength+refLength]] = storePath 58 | } 59 | 60 | return &ReferenceScanner{ 61 | hits: make(map[string]struct{}), 62 | hashes: hashes, 63 | buf: buf, 64 | n: 0, 65 | }, nil 66 | } 67 | 68 | func (r *ReferenceScanner) References() []string { 69 | paths := make([]string, len(r.hits)) 70 | 71 | i := 0 72 | 73 | for hash := range r.hits { 74 | paths[i] = r.hashes[hash] 75 | i++ 76 | } 77 | 78 | sort.Strings(paths) 79 | 80 | return paths 81 | } 82 | 83 | func (r *ReferenceScanner) Write(s []byte) (int, error) { 84 | for _, c := range s { 85 | if !isNixBase32[c] { 86 | r.n = 0 87 | 88 | continue 89 | } 90 | 91 | r.buf[r.n] = c 92 | r.n++ 93 | 94 | if r.n == refLength { 95 | hash := string(r.buf[:]) 96 
| if _, ok := r.hashes[hash]; ok { 97 | r.hits[hash] = struct{}{} 98 | } 99 | 100 | r.n = 0 101 | } 102 | } 103 | 104 | return len(s), nil 105 | } 106 | -------------------------------------------------------------------------------- /pkg/nixpath/references/refs_test.go: -------------------------------------------------------------------------------- 1 | package references_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/numtide/nar-serve/pkg/nixpath/references" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | //nolint:gochecknoglobals 11 | var cases = []struct { 12 | Title string 13 | Chunks []string 14 | Expected []string 15 | }{ 16 | { 17 | Title: "Basic", 18 | Chunks: []string{ 19 | "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12/bin/hello", 20 | }, 21 | Expected: []string{ 22 | "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12", 23 | }, 24 | }, 25 | { 26 | Title: "PartialWrites", 27 | Chunks: []string{ 28 | "/nix/store/knn6wc1a89c47yb70", 29 | "qwv56rmxylia6wx-hello-2.12/bin/hello", 30 | }, 31 | Expected: []string{ 32 | "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12", 33 | }, 34 | }, 35 | { 36 | Title: "IgnoredPaths", 37 | Chunks: []string{ 38 | "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12/bin/hello", 39 | "/nix/store/c4pcgriqgiwz8vxrjxg7p38q3y7w3ni3-go-1.18.2/bin/go", 40 | }, 41 | Expected: []string{ 42 | "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12", 43 | }, 44 | }, 45 | } 46 | 47 | func TestReferences(t *testing.T) { 48 | t.Run("ScanReferences", func(t *testing.T) { 49 | for _, c := range cases { 50 | t.Run(c.Title, func(t *testing.T) { 51 | refScanner, err := references.NewReferenceScanner(c.Expected) 52 | if err != nil { 53 | panic(err) 54 | } 55 | 56 | for _, line := range c.Chunks { 57 | _, err = refScanner.Write([]byte(line)) 58 | if err != nil { 59 | panic(err) 60 | } 61 | } 62 | 63 | assert.Equal(t, c.Expected, refScanner.References()) 64 | }) 65 | } 66 | }) 67 | } 68 | 69 | func 
BenchmarkReferences(b *testing.B) { 70 | for _, c := range cases { 71 | c := c 72 | 73 | refScanner, err := references.NewReferenceScanner(c.Expected) 74 | if err != nil { 75 | panic(err) 76 | } 77 | 78 | chunks := make([][]byte, len(c.Chunks)) 79 | for i, c := range c.Chunks { 80 | chunks[i] = []byte(c) 81 | } 82 | 83 | b.Run(c.Title, func(b *testing.B) { 84 | for i := 0; i < b.N; i++ { 85 | for _, chunk := range chunks { 86 | _, err = refScanner.Write(chunk) 87 | if err != nil { 88 | panic(err) 89 | } 90 | } 91 | } 92 | 93 | assert.Equal(b, c.Expected, refScanner.References()) 94 | }) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /pkg/wire/bytes_reader.go: -------------------------------------------------------------------------------- 1 | package wire 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // BytesReader implements io.ReadCloser. 8 | var _ io.ReadCloser = &BytesReader{} 9 | 10 | // BytesReader implements reading from bytes fields. 11 | // It'll return a limited reader to the actual contents. 12 | // Closing the reader will seek to the end of the packet (including padding). 13 | // It's fine to not close, in case you don't want to seek to the end. 14 | type BytesReader struct { 15 | contentLength uint64 // the total length of the field 16 | lr io.Reader // a reader limited to the actual contents of the field 17 | r io.Reader // the underlying real reader, used when seeking over the padding. 18 | } 19 | 20 | // NewBytesReader constructs a Reader of a bytes packet. 21 | // Closing the reader will skip over any padding. 
22 | func NewBytesReader(r io.Reader, contentLength uint64) *BytesReader { 23 | return &BytesReader{ 24 | contentLength: contentLength, 25 | lr: io.LimitReader(r, int64(contentLength)), 26 | r: r, 27 | } 28 | } 29 | 30 | // Read will read into b until all bytes from the field have been read 31 | // Keep in mind there might be some padding at the end still, 32 | // which can be seek'ed over by closing the reader. 33 | func (br *BytesReader) Read(b []byte) (int, error) { 34 | n, err := br.lr.Read(b) 35 | 36 | return n, err 37 | } 38 | 39 | // Close will skip to the end and consume any remaining padding. 40 | // It'll return an error if the padding contains something else than null 41 | // bytes. 42 | // It's fine to not close, in case you don't want to seek to the end. 43 | func (br *BytesReader) Close() error { 44 | // seek to the end of the limited reader 45 | for { 46 | buf := make([]byte, 1024) 47 | 48 | _, err := br.lr.Read(buf) 49 | if err != nil { 50 | if err == io.EOF { 51 | break 52 | } 53 | 54 | return err 55 | } 56 | } 57 | // skip over padding 58 | return readPadding(br.r, br.contentLength) 59 | } 60 | -------------------------------------------------------------------------------- /pkg/wire/bytes_writer.go: -------------------------------------------------------------------------------- 1 | package wire 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | ) 7 | 8 | var _ io.WriteCloser = &BytesWriter{} 9 | 10 | // BytesWriter implements writing bytes fields. 11 | // It'll return a io.WriteCloser that can be written to. 12 | // On Write(), it'll verify we don't write more than was initially specified. 13 | // On Close(), it'll verify exactly the previously specified number of bytes were written, 14 | // then write any necessary padding. 
15 | type BytesWriter struct { 16 | w io.Writer 17 | bytesWritten uint64 // the number of bytes written so far 18 | totalLength uint64 // the expected length of the contents, without padding 19 | paddingWritten bool 20 | } 21 | 22 | func NewBytesWriter(w io.Writer, contentLength uint64) (*BytesWriter, error) { 23 | // write the size field 24 | n := contentLength 25 | if err := WriteUint64(w, n); err != nil { 26 | return nil, err 27 | } 28 | 29 | bytesWriter := &BytesWriter{ 30 | w: w, 31 | bytesWritten: 0, 32 | totalLength: contentLength, 33 | paddingWritten: false, 34 | } 35 | 36 | return bytesWriter, nil 37 | } 38 | 39 | func (bw *BytesWriter) Write(p []byte) (n int, err error) { 40 | l := len(p) 41 | 42 | if bw.bytesWritten+uint64(l) > bw.totalLength { 43 | return 0, fmt.Errorf("maximum number of bytes exceeded") 44 | } 45 | 46 | bytesWritten, err := bw.w.Write(p) 47 | bw.bytesWritten += uint64(bytesWritten) 48 | 49 | return bytesWritten, err 50 | } 51 | 52 | // Close ensures the previously specified number of bytes were written, then writes padding. 53 | func (bw *BytesWriter) Close() error { 54 | // if we already closed once, don't close again 55 | if bw.paddingWritten { 56 | return nil 57 | } 58 | 59 | if bw.bytesWritten != bw.totalLength { 60 | return fmt.Errorf("wrote %v bytes in total, but expected %v", bw.bytesWritten, bw.totalLength) 61 | } 62 | 63 | // write padding 64 | err := writePadding(bw.w, bw.totalLength) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | bw.paddingWritten = true 70 | 71 | return nil 72 | } 73 | -------------------------------------------------------------------------------- /pkg/wire/read.go: -------------------------------------------------------------------------------- 1 | package wire 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | ) 7 | 8 | // ReadUint64 consumes exactly 8 bytes and returns a uint64. 
9 | func ReadUint64(r io.Reader) (n uint64, err error) { 10 | buf := bufPool.Get().(*[8]byte) 11 | defer bufPool.Put(buf) 12 | 13 | if _, err := io.ReadFull(r, buf[:]); err != nil { 14 | return 0, err 15 | } 16 | 17 | return byteOrder.Uint64(buf[:]), nil 18 | } 19 | 20 | // ReadBool consumes a boolean in nix wire format. 21 | func ReadBool(r io.Reader) (v bool, err error) { 22 | n, err := ReadUint64(r) 23 | if err != nil { 24 | return false, err 25 | } 26 | 27 | if n != 0 && n != 1 { 28 | return false, fmt.Errorf("invalid value for boolean: %v", n) 29 | } 30 | 31 | return n == 1, nil 32 | } 33 | 34 | // readPadding consumes the remaining padding, if any, and errors out if it's not null bytes. 35 | // In nix archive format, byte packets are padded to 8 byte blocks each. 36 | func readPadding(r io.Reader, contentLength uint64) error { 37 | // n marks the position inside the last block 38 | n := contentLength % 8 39 | if n == 0 { 40 | return nil 41 | } 42 | 43 | buf := bufPool.Get().(*[8]byte) 44 | defer bufPool.Put(buf) 45 | 46 | // we read the padding contents into the tail of the buf slice 47 | if _, err := io.ReadFull(r, buf[n:]); err != nil { 48 | return err 49 | } 50 | // … and check if it's only null bytes 51 | for _, b := range buf[n:] { 52 | if b != 0 { 53 | return fmt.Errorf("invalid padding, should be null bytes, found %v", buf[n:]) 54 | } 55 | } 56 | 57 | return nil 58 | } 59 | 60 | // ReadBytes parses the size field, and returns a ReadCloser to its contents. 61 | // That reader is limited to the actual contents of the bytes field. 62 | // Closing the reader will skip to the end of the last byte packet, including the padding. 
63 | func ReadBytes(r io.Reader) (uint64, io.ReadCloser, error) { 64 | // read content length 65 | contentLength, err := ReadUint64(r) 66 | if err != nil { 67 | return 0, nil, err 68 | } 69 | 70 | return contentLength, NewBytesReader(r, contentLength), nil 71 | } 72 | 73 | // ReadBytesFull reads a byte packet, and will return its content, or an error. 74 | // A maximum number of bytes can be specified in max. 75 | // In the case of a packet exceeding the maximum number of bytes, 76 | // the reader won't seek to the end of the packet. 77 | func ReadBytesFull(r io.Reader, max uint64) ([]byte, error) { 78 | contentLength, rd, err := ReadBytes(r) 79 | if err != nil { 80 | return []byte{}, err 81 | } 82 | 83 | if contentLength > max { 84 | return nil, fmt.Errorf("content length of %v bytes exceeds maximum of %v bytes", contentLength, max) 85 | } 86 | 87 | defer rd.Close() 88 | 89 | // consume content 90 | buf := make([]byte, contentLength) 91 | if _, err := io.ReadFull(rd, buf); err != nil { 92 | return nil, err 93 | } 94 | 95 | return buf, nil 96 | } 97 | 98 | // ReadString reads a bytes packet and converts it to string. 
99 | func ReadString(r io.Reader, max uint64) (string, error) { 100 | buf, err := ReadBytesFull(r, max) 101 | 102 | return string(buf), err 103 | } 104 | -------------------------------------------------------------------------------- /pkg/wire/read_test.go: -------------------------------------------------------------------------------- 1 | package wire_test 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "testing" 7 | 8 | "github.com/numtide/nar-serve/pkg/wire" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | // nolint:gochecknoglobals 13 | var ( 14 | wireBytesFalse = []byte{0, 0, 0, 0, 0, 0, 0, 0} 15 | wireBytesTrue = []byte{1, 0, 0, 0, 0, 0, 0, 0} 16 | wireBytesInvalidBool = []byte{2, 0, 0, 0, 0, 0, 0, 0} 17 | 18 | contents8Bytes = []byte{ 19 | 42, 23, 42, 23, 42, 23, 42, 23, // the actual data 20 | } 21 | wire8Bytes = []byte{ 22 | 8, 0, 0, 0, 0, 0, 0, 0, // length field - 8 bytes 23 | 42, 23, 42, 23, 42, 23, 42, 23, // the actual data 24 | } 25 | 26 | contents10Bytes = []byte{ 27 | 42, 23, 42, 23, 42, 23, 42, 23, // the actual data 28 | 42, 23, 29 | } 30 | wire10Bytes = []byte{ 31 | 10, 0, 0, 0, 0, 0, 0, 0, // length field - 8 bytes 32 | 42, 23, 42, 23, 42, 23, 42, 23, // the actual data 33 | 42, 23, 0, 0, 0, 0, 0, 0, // more actual data (2 bytes), then padding 34 | } 35 | 36 | wireStringFoo = []byte{ 37 | 3, 0, 0, 0, 0, 0, 0, 0, // length field - 3 bytes 38 | 0x46, 0x6F, 0x6F, 0, 0, 0, 0, 0, // contents, Foo, then 5 bytes padding 39 | } 40 | ) 41 | 42 | // hesitantReader implements an io.Reader. 43 | type hesitantReader struct { 44 | data [][]byte 45 | } 46 | 47 | // Read returns the topmost []byte in data, or io.EOF if empty. 
48 | func (r *hesitantReader) Read(p []byte) (n int, err error) { 49 | if len(r.data) == 0 { 50 | return 0, io.EOF 51 | } 52 | 53 | copy(p, r.data[0]) 54 | lenRead := len(r.data[0]) 55 | 56 | // pop first element in r.data 57 | r.data = r.data[1:] 58 | 59 | return lenRead, nil 60 | } 61 | 62 | // TestReadUint64 tests a reading a single uint64 field. 63 | func TestReadUint64(t *testing.T) { 64 | bs := []byte{13, 0, 0, 0, 0, 0, 0, 0} 65 | r := bytes.NewReader(bs) 66 | 67 | num, err := wire.ReadUint64(r) 68 | 69 | assert.NoError(t, err) 70 | assert.Equal(t, num, uint64(13)) 71 | } 72 | 73 | // TestReadLongLongPartial tests reading a single uint64 field, but through a 74 | // reader not returning everything at once. 75 | func TestReadUint64Slow(t *testing.T) { 76 | r := &hesitantReader{data: [][]byte{ 77 | {13}, 78 | {}, 79 | {0, 0, 0, 0, 0, 0, 0}, 80 | }} 81 | 82 | num, err := wire.ReadUint64(r) 83 | assert.NoError(t, err) 84 | assert.Equal(t, num, uint64(13)) 85 | } 86 | 87 | // TestReadBool tests reading boolean values works. 
88 | func TestReadBool(t *testing.T) { 89 | rdBytesFalse := bytes.NewReader(wireBytesFalse) 90 | rdBytesTrue := bytes.NewReader(wireBytesTrue) 91 | rdBytesInvalidBool := bytes.NewReader(wireBytesInvalidBool) 92 | 93 | v, err := wire.ReadBool(rdBytesFalse) 94 | if assert.NoError(t, err) { 95 | assert.Equal(t, v, false) 96 | } 97 | 98 | v, err = wire.ReadBool(rdBytesTrue) 99 | if assert.NoError(t, err) { 100 | assert.Equal(t, v, true) 101 | } 102 | 103 | _, err = wire.ReadBool(rdBytesInvalidBool) 104 | assert.Error(t, err) 105 | } 106 | 107 | func TestReadBytes(t *testing.T) { 108 | buf, err := wire.ReadBytesFull(bytes.NewReader(wire8Bytes), 1024) 109 | if assert.NoError(t, err) { 110 | assert.Equal(t, 8, len(buf)) 111 | assert.Equal(t, buf, contents8Bytes) 112 | } 113 | 114 | buf, err = wire.ReadBytesFull(bytes.NewReader(wire10Bytes), 1024) 115 | if assert.NoError(t, err) { 116 | assert.Equal(t, 10, len(buf)) 117 | assert.Equal(t, buf, contents10Bytes) 118 | } 119 | 120 | // concatenate the 10 bytes, then 8 bytes dummy data together, 121 | // and see if we can get out both bytes. This will test we properly skip over the padding. 122 | payloadCombined := []byte{} 123 | payloadCombined = append(payloadCombined, wire10Bytes...) 124 | payloadCombined = append(payloadCombined, wire8Bytes...) 
125 | 126 | rd := bytes.NewReader(payloadCombined) 127 | 128 | buf, err = wire.ReadBytesFull(rd, 1024) 129 | if assert.NoError(t, err) { 130 | assert.Equal(t, 10, len(buf)) 131 | assert.Equal(t, buf, contents10Bytes) 132 | } 133 | 134 | buf, err = wire.ReadBytesFull(rd, 1024) 135 | if assert.NoError(t, err) { 136 | assert.Equal(t, 8, len(buf)) 137 | assert.Equal(t, buf, contents8Bytes) 138 | } 139 | } 140 | 141 | func TestReadString(t *testing.T) { 142 | s, err := wire.ReadString(bytes.NewReader(wireStringFoo), 1024) 143 | if assert.NoError(t, err) { 144 | assert.Equal(t, s, "Foo") 145 | } 146 | 147 | // exceeding max should error 148 | rd := bytes.NewReader(wireStringFoo) 149 | _, err = wire.ReadString(rd, 2) 150 | assert.Error(t, err) 151 | 152 | // the reader should not have seeked to the end of the packet 153 | buf, err := io.ReadAll(rd) 154 | if assert.NoError(t, err, "reading the rest shouldn't error") { 155 | assert.Equal(t, wireStringFoo[8:], buf, "the reader should not have seeked to the end of the packet") 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /pkg/wire/wire.go: -------------------------------------------------------------------------------- 1 | // Package wire provides methods to parse and produce fields used in the 2 | // low-level Nix wire protocol, operating on io.Reader and io.Writer 3 | // When reading fields with arbitrary lengths, a maximum number of bytes needs 4 | // to be specified. 
5 | package wire 6 | 7 | import ( 8 | "encoding/binary" 9 | ) 10 | 11 | // nolint:gochecknoglobals 12 | var byteOrder = binary.LittleEndian 13 | -------------------------------------------------------------------------------- /pkg/wire/write.go: -------------------------------------------------------------------------------- 1 | package wire 2 | 3 | import ( 4 | "io" 5 | "sync" 6 | ) 7 | 8 | // nolint:gochecknoglobals 9 | var ( 10 | padding [8]byte 11 | 12 | bufPool = sync.Pool{ 13 | New: func() interface{} { 14 | return new([8]byte) 15 | }, 16 | } 17 | ) 18 | 19 | // WriteUint64 writes an uint64 in Nix wire format. 20 | func WriteUint64(w io.Writer, n uint64) error { 21 | buf := bufPool.Get().(*[8]byte) 22 | defer bufPool.Put(buf) 23 | 24 | byteOrder.PutUint64(buf[:], n) 25 | _, err := w.Write(buf[:]) 26 | 27 | return err 28 | } 29 | 30 | // WriteBool writes a boolean in Nix wire format. 31 | func WriteBool(w io.Writer, v bool) error { 32 | if v { 33 | return WriteUint64(w, 1) 34 | } 35 | 36 | return WriteUint64(w, 0) 37 | } 38 | 39 | // WriteBytes writes a bytes packet. See ReadBytes for its structure. 40 | func WriteBytes(w io.Writer, buf []byte) error { 41 | n := uint64(len(buf)) 42 | if err := WriteUint64(w, n); err != nil { 43 | return err 44 | } 45 | 46 | if _, err := w.Write(buf); err != nil { 47 | return err 48 | } 49 | 50 | return writePadding(w, n) 51 | } 52 | 53 | // WriteString writes a bytes packet. 54 | func WriteString(w io.Writer, s string) error { 55 | n := uint64(len(s)) 56 | if err := WriteUint64(w, n); err != nil { 57 | return err 58 | } 59 | 60 | if _, err := io.WriteString(w, s); err != nil { 61 | return err 62 | } 63 | 64 | return writePadding(w, n) 65 | } 66 | 67 | // writePadding writes the appropriate amount of padding. 
68 | func writePadding(w io.Writer, contentLength uint64) error { 69 | if m := contentLength % 8; m != 0 { 70 | _, err := w.Write(padding[m:]) 71 | 72 | return err 73 | } 74 | 75 | return nil 76 | } 77 | -------------------------------------------------------------------------------- /pkg/wire/write_test.go: -------------------------------------------------------------------------------- 1 | package wire_test 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/numtide/nar-serve/pkg/wire" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestWriteUint64(t *testing.T) { 12 | var buf bytes.Buffer 13 | 14 | err := wire.WriteUint64(&buf, 1) 15 | assert.NoError(t, err) 16 | assert.Equal(t, wireBytesTrue, buf.Bytes()) 17 | } 18 | 19 | func TestWriteBool(t *testing.T) { 20 | var buf bytes.Buffer 21 | 22 | err := wire.WriteBool(&buf, true) 23 | assert.NoError(t, err) 24 | assert.Equal(t, wireBytesTrue, buf.Bytes()) 25 | 26 | buf.Reset() 27 | err = wire.WriteBool(&buf, false) 28 | assert.NoError(t, err) 29 | assert.Equal(t, wireBytesFalse, buf.Bytes()) 30 | } 31 | 32 | func TestWriteBytes(t *testing.T) { 33 | var buf bytes.Buffer 34 | 35 | err := wire.WriteBytes(&buf, contents8Bytes) 36 | assert.NoError(t, err) 37 | assert.Equal(t, wire8Bytes, buf.Bytes()) 38 | 39 | buf.Reset() 40 | 41 | err = wire.WriteBytes(&buf, contents10Bytes) 42 | assert.NoError(t, err) 43 | assert.Equal(t, wire10Bytes, buf.Bytes()) 44 | } 45 | 46 | func TestWriteString(t *testing.T) { 47 | var buf bytes.Buffer 48 | 49 | err := wire.WriteString(&buf, "Foo") 50 | assert.NoError(t, err) 51 | assert.Equal(t, wireStringFoo, buf.Bytes()) 52 | } 53 | 54 | func TestBytesWriter8Bytes(t *testing.T) { 55 | var buf bytes.Buffer 56 | 57 | bw, err := wire.NewBytesWriter(&buf, uint64(len(contents8Bytes))) 58 | assert.NoError(t, err) 59 | 60 | n, err := bw.Write(contents8Bytes[:4]) 61 | assert.NoError(t, err) 62 | assert.Equal(t, 4, n) 63 | n, err = bw.Write(contents8Bytes[4:]) 64 | 
assert.NoError(t, err) 65 | assert.Equal(t, 4, n) 66 | 67 | err = bw.Close() 68 | assert.NoError(t, err) 69 | 70 | assert.Equal(t, wire8Bytes, buf.Bytes()) 71 | } 72 | 73 | func TestBytesWriter10Bytes(t *testing.T) { 74 | var buf bytes.Buffer 75 | 76 | bw, err := wire.NewBytesWriter(&buf, uint64(len(contents10Bytes))) 77 | assert.NoError(t, err) 78 | 79 | n, err := bw.Write(contents10Bytes[:4]) 80 | assert.NoError(t, err) 81 | assert.Equal(t, 4, n) 82 | n, err = bw.Write(contents10Bytes[4:]) 83 | assert.NoError(t, err) 84 | assert.Equal(t, 6, n) 85 | 86 | err = bw.Close() 87 | assert.NoError(t, err) 88 | 89 | // closing again shouldn't panic 90 | assert.NotPanics(t, func() { 91 | bw.Close() 92 | }) 93 | 94 | assert.Equal(t, wire10Bytes, buf.Bytes()) 95 | } 96 | 97 | func TestBytesWriterError(t *testing.T) { 98 | var buf bytes.Buffer 99 | 100 | // initialize a bytes writer with a len of 9 101 | bw, err := wire.NewBytesWriter(&buf, 9) 102 | assert.NoError(t, err) 103 | 104 | // try to write 10 bytes into it 105 | _, err = bw.Write(contents10Bytes) 106 | assert.Error(t, err) 107 | 108 | buf.Reset() 109 | 110 | // initialize a bytes writer with a len of 11 111 | bw, err = wire.NewBytesWriter(&buf, 11) 112 | assert.NoError(t, err) 113 | 114 | // write 10 bytes into it 115 | n, err := bw.Write(contents10Bytes) 116 | assert.NoError(t, err) 117 | assert.Equal(t, 10, n) 118 | 119 | err = bw.Close() 120 | assert.Error(t, err, "closing should fail, as one byte is still missing") 121 | } 122 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | { 2 
| system ? builtins.currentSystem, 3 | nixpkgs ? import { inherit system; }, 4 | }: 5 | nixpkgs.mkShell { 6 | buildInputs = with nixpkgs; [ 7 | go 8 | go-outline 9 | gopkgs 10 | gopls 11 | goreleaser 12 | golangci-lint 13 | minio 14 | minio-client 15 | reflex 16 | awscli 17 | google-cloud-sdk 18 | ]; 19 | 20 | shellHook = '' 21 | export GO111MODULE=on 22 | unset GOPATH GOROOT 23 | ''; 24 | } 25 | -------------------------------------------------------------------------------- /start-dev: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Run this script to start the developer loop 3 | set -euo pipefail 4 | 5 | cd "$(dirname "$0")" 6 | 7 | # On each file change, kill, rebuild and restart the server 8 | reflex -r '\.go$' -s -- sh -c 'go build -o nar-serve && exec ./nar-serve' 9 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | ## With the minio package 4 | 5 | ```shell 6 | mkdir nar 7 | minio server ./nar 8 | ``` 9 | 10 | ## With the pkgs.minio-client package 11 | 12 | ```shell 13 | mc config host add mycloud http://127.0.0.1:9000 accesskey secretkey 14 | mc mb mycloud/nar 15 | AWS_ACCESS_KEY_ID=accesskey AWS_SECRET_ACCESS_KEY=secretkey nix copy --to "s3://nar?region=eu-west-1&endpoint=127.0.0.1:9000&scheme=http" /nix/store/irfa91bs2wfqyh2j9kl8m3rcg7h72w4m-curl-7.71.1-bin 16 | ``` 17 | 18 | ## Run the test 19 | 20 | ```shell 21 | go run main.go 22 | ``` 23 | -------------------------------------------------------------------------------- /tests/integration_test.go: -------------------------------------------------------------------------------- 1 | package integration_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io/ioutil" 7 | "os" 8 | "os/exec" 9 | "path/filepath" 10 | "testing" 11 | 12 | "github.com/numtide/nar-serve/pkg/libstore" 13 | 
"github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func cmd(env []string, name string, args ...string) *exec.Cmd { 17 | cmd := exec.Command(name, args...) 18 | cmd.Stdout = os.Stdout 19 | cmd.Stderr = os.Stderr 20 | cmd.Env = env 21 | 22 | return cmd 23 | } 24 | 25 | func TestHappyPath(t *testing.T) { 26 | assert := assert.New(t) 27 | accessKeyID := "Q3AM3UQ867SPQQA43P2F" 28 | secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" 29 | 30 | tempDir, err := ioutil.TempDir("", "nar-serve") 31 | homeDir := tempDir + "/home" 32 | configDir := tempDir + "/config" 33 | dataDir := tempDir + "/data" 34 | 35 | env := append(os.Environ(), 36 | "AWS_ACCESS_KEY_ID="+accessKeyID, 37 | "AWS_SECRET_ACCESS_KEY="+secretAccessKey, 38 | "MINIO_ACCESS_KEY="+accessKeyID, 39 | "MINIO_SECRET_KEY="+secretAccessKey, 40 | "MINIO_REGION_NAME=us-east-1", 41 | "HOME="+homeDir, 42 | ) 43 | 44 | if err != nil { 45 | t.Fatal("tmpdir error:", err) 46 | } 47 | defer os.RemoveAll(tempDir) 48 | 49 | // Start the server 50 | minios := cmd(env, "minio", "server", dataDir, "--config-dir", configDir) 51 | err = minios.Start() 52 | if err != nil { 53 | t.Fatal("minio error:", err) 54 | } 55 | defer func() { 56 | minios.Process.Kill() 57 | minios.Wait() 58 | }() 59 | 60 | minioc := cmd(env, "mc", "config", "host", "add", "narcloud", "http://127.0.0.1:9000", accessKeyID, secretAccessKey, "--config-dir", configDir, "--api", "s3v4") 61 | err = minioc.Run() 62 | if err != nil { 63 | t.Fatal("mc error:", err) 64 | } 65 | 66 | minio_bucket := cmd(env, "mc", "mb", "narcloud/nsbucket") 67 | err = minio_bucket.Run() 68 | if err != nil { 69 | t.Fatal("mc error:", err) 70 | } 71 | 72 | nix_copy := cmd(env, "nix", "copy", "--to", "s3://nsbucket?region=us-east-1&endpoint=127.0.0.1:9000&scheme=http", "/nix/store/irfa91bs2wfqyh2j9kl8m3rcg7h72w4m-curl-7.71.1-bin") 73 | err = nix_copy.Run() 74 | if err != nil { 75 | t.Fatal("nix-copy error:", err) 76 | } 77 | 78 | ctx := context.Background() 79 | 80 | tmpfile := 
filepath.Join(dataDir, "nsbucket/irfa91bs2wfqyh2j9kl8m3rcg7h72w4m.narinfo") 81 | _, err = os.Stat(tmpfile) 82 | 83 | if err != nil { 84 | if os.IsNotExist(err) { 85 | t.Fatal("File not exists") 86 | } else { 87 | t.Fatal("ERROR:", err) 88 | } 89 | } 90 | content, err := ioutil.ReadFile(tmpfile) 91 | 92 | if err != nil { 93 | t.Fatal(err) 94 | } 95 | 96 | // S3 binary cache storage 97 | r, err := libstore.NewBinaryCacheReader(ctx, "s3://nsbucket?region=us-east-1&endpoint=http://127.0.0.1:9000&scheme=http") 98 | if err != nil { 99 | t.Fatal("new binary cache error:", err) 100 | } 101 | 102 | os.Setenv("AWS_ACCESS_KEY_ID", accessKeyID) 103 | os.Setenv("AWS_SECRET_ACCESS_KEY", secretAccessKey) 104 | obj, err := r.GetFile(ctx, "irfa91bs2wfqyh2j9kl8m3rcg7h72w4m.narinfo") 105 | if err != nil { 106 | t.Fatal("get file error:", err) 107 | } 108 | 109 | obj_content, err_read := ioutil.ReadAll(obj) 110 | if err_read != nil { 111 | t.Fatal(err_read) 112 | } 113 | 114 | same_content := bytes.Equal(content, obj_content) 115 | assert.True(same_content, "The content is not the same") 116 | 117 | is_exist, err := r.FileExists(ctx, "irfa91bs2wfqyh2j9kl8m3rcg7h72w4m.narinfo") 118 | if err != nil { 119 | t.Fatal("file exist error:", err) 120 | } 121 | assert.True(is_exist, "File is not existed") 122 | // Stop the server 123 | minios.Process.Kill() 124 | } 125 | -------------------------------------------------------------------------------- /views/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | nar-serve 6 | 8 | 9 | 10 |
    11 |

    nar-serve

    12 | 13 |

    All the files in {{ .NixCacheURL }} are packed in NAR files, which makes them not directly accessible. This service downloads, decompresses, unpacks and serves any file in the cache on the fly.

    14 | 15 |

    Use cases

    16 | 17 |
      18 |
    • Avoid publishing build artifacts to both the binary cache and 19 | another service.
    • 20 |
    • Lets you share build results easily.
    • 21 |
    • Inspect the content of a NAR file.
    • 22 |
    23 | 24 |

    Usage

    25 |
      26 |
    1. Pick a full store path in your filesystem.
    2. 27 |
    3. Paste it in the form below.
    4. 28 |
    5. Click submit. TADA!
    6. 29 |
    30 | 31 |
    32 |
    33 |
    34 | 35 |
    36 |
    37 | 38 | 39 | 40 |

    Examples

    41 | 45 | 46 | 47 |
    48 |

    49 | Like this project? Star it on GitHub. 50 | 51 | 59 | -------------------------------------------------------------------------------- /views/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Disallow: /nix/store 3 | --------------------------------------------------------------------------------