├── .gitignore
├── LICENSE
├── NOTICE
├── README.md
├── assemble.go
├── assemble_test.go
├── blob_manager.go
├── bump.rb
├── cache.go
├── cicero
│   └── actions
│       └── spongix
│           └── ci.nix
├── devshell.toml
├── docker.go
├── docker_test.go
├── fake.go
├── flake.lock
├── flake.nix
├── gc.go
├── go.mod
├── go.sum
├── helpers.go
├── img
│   └── spongix.svg
├── log_record.go
├── main.go
├── manifest_manager.go
├── module.nix
├── narinfo.go
├── narinfo_test.go
├── package.nix
├── router.go
├── router_test.go
├── scripts
│   ├── dl.sh
│   ├── foo.nix
│   ├── hook.sh
│   ├── infos.sh
│   └── run.sh
├── test.nix
├── testdata
│   ├── 0f54iihf02azn24vm6gky7xxpadq5693qrjzkaavbnd68shvgbd7.nar.xz
│   ├── 0m8sd5qbmvfhyamwfv3af1ff18ykywf3zx5qwawhhp3jv1h777xz.nar
│   ├── 0m8sd5qbmvfhyamwfv3af1ff18ykywf3zx5qwawhhp3jv1h777xz.nar.xz
│   └── 8ckxc8biqqfdwyhr0w70jgrcb4h7a4y5.narinfo
├── treefmt.toml
└── upload_manager.go
/.gitignore:
--------------------------------------------------------------------------------
1 | /cache
2 | /.fake-credentials
3 | /.sequence
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | Copyright 2021 Input Output (Hong Kong) Ltd.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![Spongix](img/spongix.svg)
2 |
3 | # Spongix
4 |
5 | A proxy that acts as a binary cache for Nix.
6 |
7 | * Signs narinfos in flight with its own private key
8 | * Authenticates with S3 to forward NARs for long-term storage
9 | * Keeps a local cache on disk for faster responses
10 | * Provides a minimal Docker registry
11 |
12 | ## Usage
13 |
14 | Start `spongix`:
15 |
16 | nix key generate-secret --key-name foo > skey
17 | nix build
18 | ./result/bin/spongix \
19 | --substituters "https://cache.nixos.org" "https://hydra.iohk.io" \
20 | --secret-key-files ./skey \
21 | --trusted-public-keys "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" "hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ=" \
22 | --listen :7745 \
23 | --dir /tmp/spongix
24 |
25 | To add store paths to the cache, you can use `nix copy`:
26 |
27 | nix copy --to 'http://127.0.0.1:7745?compression=none' github:nixos/nix
28 |
29 | To use this as your binary cache, specify it as a substituter:
30 |
31 | nix build github:nixos/nix \
32 | --option substituters http://127.0.0.1:7745 \
33 | --option trusted-public-keys "$(< pkey)"
34 |
35 | Signatures are checked against the `trusted-public-keys` of your
36 | configuration. (The `pkey` file above can be generated with `nix key convert-secret-to-public < skey > pkey`.)
37 |
38 | ### Upload after every build
39 |
40 | Set a `post-build-hook` in your nix configuration (`post-build-hook = /path/to/hook.sh` in `nix.conf`) pointing at a script like this:
41 |
42 | #!/bin/sh
43 | set -euf
44 | export IFS=' '
45 | if [ -n "$OUT_PATHS" ]; then
46 | echo "Uploading to cache: $OUT_PATHS"
47 | exec nix copy --to 'http://127.0.0.1:7745?compression=none' $OUT_PATHS
48 | fi
49 |
50 | ## TODO
51 |
52 | - [ ] Write better integration tests (with cicero)
53 | - [ ] Healthchecks
54 | - [ ] A way to horizontally scale (probably by just locking via consul, s3, raft, postgres, rqlite, dqlite, ...)
55 | - [ ] Proper CLI usage
56 | - [ ] Benchmark of desync index vs db lookup performance
57 | - [x] Additional signing for a set of allowed public keys
58 | - [x] Disk cache size limits and LRU eviction
59 | - [x] Forward lookups across multiple upstream caches
60 | - [x] Identify and solve concurrency issues
61 | - [x] Prometheus metrics
62 | - [x] Store narinfo in a database
63 | - [x] Upload to S3 as well as the local store
64 | - [x] Verify existing signatures
65 |
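66 | ## Docker registry
67 |
68 | Spongix also answers the Docker registry v2 API on the same listen
69 | address. A rough sketch of pushing an image (the Docker daemon treats
70 | 127.0.0.0/8 registries as insecure by default, so plain HTTP works):
71 |
72 | docker tag myimage 127.0.0.1:7745/spongix/myimage:latest
73 | docker push 127.0.0.1:7745/spongix/myimage:latest
74 |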
--------------------------------------------------------------------------------
/assemble.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "io"
6 |
7 | "github.com/folbricht/desync"
8 | "github.com/pkg/errors"
9 | )
10 |
11 | type assembler struct {
12 | store desync.Store
13 | index desync.Index
14 | idx int
15 | data *bytes.Buffer
16 | readBytes int64
17 | wroteBytes int64
18 | }
19 |
20 | func newAssembler(store desync.Store, index desync.Index) *assembler {
21 | return &assembler{store: store, index: index, data: &bytes.Buffer{}}
22 | }
23 |
24 | func (a *assembler) Close() error { return nil }
25 |
26 | func (a *assembler) Read(p []byte) (int, error) {
27 | if a.data.Len() > 0 {
28 | writeBytes, _ := a.data.Read(p)
29 | a.wroteBytes += int64(writeBytes)
30 | return writeBytes, nil
31 | }
32 |
33 | if a.idx >= len(a.index.Chunks) {
34 | if a.wroteBytes != a.index.Length() {
35 | return 0, errors.New("written bytes don't match index length")
36 | }
37 | if a.wroteBytes != a.readBytes {
38 | return 0, errors.New("read and written bytes are different")
39 | }
40 | return 0, io.EOF
41 | }
42 |
43 | if chunk, err := a.store.GetChunk(a.index.Chunks[a.idx].ID); err != nil {
44 | return 0, err
45 | } else if data, err := chunk.Data(); err != nil {
46 | return 0, err
47 | } else {
48 | readBytes, _ := a.data.Write(data)
49 | a.readBytes += int64(readBytes)
50 | writeBytes, _ := a.data.Read(p)
51 | a.wroteBytes += int64(writeBytes)
52 | a.idx++
53 | return writeBytes, nil
54 | }
55 | }
56 |
57 | var _ io.Reader = &assembler{}
58 |
59 | // very simple implementation, mostly used for assembling narinfo which is
60 | // usually tiny to avoid overhead of creating files.
61 | func assemble(store desync.Store, index desync.Index) io.ReadCloser {
62 | return newAssembler(store, index)
63 | }
64 |
65 | func assembleNarinfo(store desync.Store, index desync.Index) (*Narinfo, error) {
66 | buf := assemble(store, index)
67 |
68 | info := &Narinfo{}
69 | err := info.Unmarshal(buf)
70 | if err != nil {
71 | return info, errors.WithMessage(err, "while unmarshaling narinfo")
72 | }
73 |
74 | return info, nil
75 | }
76 |
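77 | // Example usage (sketch): assuming `store` and `idx` reference the chunk
78 | // store and index entry a narinfo was chunked into, it can be read back
79 | // with:
80 | //
81 | //	info, err := assembleNarinfo(store, idx)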
--------------------------------------------------------------------------------
/assemble_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "io"
7 | "os"
8 | "path/filepath"
9 | "testing"
10 |
11 | "github.com/folbricht/desync"
12 | "github.com/smartystreets/assertions"
13 | )
14 |
15 | func TestAssemble(t *testing.T) {
16 | a := assertions.New(t)
17 |
18 | var index desync.IndexWriteStore
19 |
20 | indexDir := filepath.Join(t.TempDir(), "index")
21 | if err := os.MkdirAll(filepath.Join(indexDir, "nar"), 0700); err != nil {
22 | t.Fatal(err)
23 | } else if index, err = desync.NewLocalIndexStore(indexDir); err != nil {
24 | t.Fatal(err)
25 | }
26 |
27 | var store desync.WriteStore
28 | storeDir := filepath.Join(t.TempDir(), "store")
29 | if err := os.MkdirAll(storeDir, 0700); err != nil {
30 | t.Fatal(err)
31 | } else if store, err = desync.NewLocalStore(storeDir, defaultStoreOptions); err != nil {
32 | t.Fatal(err)
33 | }
34 |
35 | key := "hello"
36 | value := bytes.Repeat([]byte("hello world"), 200)
37 | input := bytes.NewBuffer(value)
38 |
39 | if chunker, err := desync.NewChunker(input, 48, 192, 768); err != nil {
40 | t.Fatal(err)
41 | } else if idx, err := desync.ChunkStream(context.Background(), chunker, store, defaultThreads); err != nil {
42 | t.Fatal(err)
43 | } else if err := index.StoreIndex(key, idx); err != nil {
44 | t.Fatal(err)
45 | } else {
46 | asm := newAssembler(store, idx)
47 |
48 | buf := &bytes.Buffer{}
49 | n, err := io.Copy(buf, asm)
50 | a.So(err, assertions.ShouldBeNil)
51 | a.So(n, assertions.ShouldEqual, 2200)
52 | a.So(buf.Bytes(), assertions.ShouldResemble, value)
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/blob_manager.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "context"
6 |
7 | "github.com/folbricht/desync"
8 | "github.com/pkg/errors"
9 | )
10 |
11 | type blobManager struct {
12 | c chan blobMsg
13 | store desync.WriteStore
14 | index desync.IndexWriteStore
15 | }
16 |
17 | func newBlobManager(store desync.WriteStore, index desync.IndexWriteStore) blobManager {
18 | c := make(chan blobMsg, 10)
19 | manager := blobManager{c: c, store: store, index: index}
20 | go manager.loop()
21 | return manager
22 | }
23 |
24 | func (m blobManager) get(name, digest string) ([]byte, error) {
25 | c := make(chan blobResponse)
26 | m.c <- blobMsg{t: blobMsgGet, name: name, digest: digest, c: c}
27 | msg := <-c
28 | return msg.blob, msg.err
29 | }
30 |
31 | func (m blobManager) head(name, digest string) error {
32 | c := make(chan blobResponse)
33 | m.c <- blobMsg{t: blobMsgHead, name: name, digest: digest, c: c}
34 | msg := <-c
35 | return msg.err
36 | }
37 |
38 | func (m blobManager) set(name, digest string, blob []byte) error {
39 | c := make(chan blobResponse)
40 | m.c <- blobMsg{t: blobMsgSet, name: name, digest: digest, blob: blob, c: c}
41 | msg := <-c
42 | return msg.err
43 | }
44 |
45 | // used to communicate with the blob registry
46 | type blobMsg struct {
47 | t blobMsgType
48 | name string
49 | digest string
50 | blob []byte
51 | c chan blobResponse
52 | }
53 |
54 | func (m blobMsg) Key() string {
55 | return m.name + "_" + m.digest
56 | }
57 |
58 | type blobResponse struct {
59 | blob []byte
60 | err error
61 | }
62 |
63 | type blobMsgType int
64 |
65 | const (
66 | blobMsgSet blobMsgType = iota
67 | blobMsgGet blobMsgType = iota
68 | blobMsgHead blobMsgType = iota
69 | )
70 |
71 | func (m blobManager) loop() {
72 | blobSet := func(msg blobMsg) error {
73 | if chunker, err := desync.NewChunker(bytes.NewBuffer(msg.blob), chunkSizeMin(), chunkSizeAvg, chunkSizeMax()); err != nil {
74 | return errors.WithMessage(err, "making chunker")
75 | } else if idx, err := desync.ChunkStream(context.Background(), chunker, m.store, defaultThreads); err != nil {
76 | return errors.WithMessage(err, "chunking blob")
77 | } else if err := m.index.StoreIndex(msg.Key(), idx); err != nil {
78 | return errors.WithMessage(err, "storing index")
79 | }
80 |
81 | return nil
82 | }
83 |
84 | blobGet := func(msg blobMsg) ([]byte, error) {
85 | if idx, err := m.index.GetIndex(msg.Key()); err != nil {
86 | return nil, errors.WithMessage(err, "getting index")
87 | } else {
88 | buf := &bytes.Buffer{}
89 |
90 | for _, indexChunk := range idx.Chunks {
91 | if chunk, err := m.store.GetChunk(indexChunk.ID); err != nil {
92 | return nil, errors.WithMessage(err, "getting chunk for index")
93 | } else if data, err := chunk.Data(); err != nil {
94 | return nil, errors.WithMessage(err, "getting chunk data")
95 | } else if _, err := buf.Write(data); err != nil {
96 | return nil, errors.WithMessage(err, "writing chunk data")
97 | }
98 | }
99 |
100 | return buf.Bytes(), nil
101 | }
102 | }
103 |
104 | blobHead := func(msg blobMsg) error {
105 | if _, err := m.index.GetIndex(msg.Key()); err != nil {
106 | return errors.WithMessage(err, "getting index")
107 | } else {
108 | return nil
109 | }
110 | }
111 |
112 | for msg := range m.c {
113 | switch msg.t {
114 | case blobMsgSet:
115 | // pretty.Println("blob set", msg)
116 | if err := blobSet(msg); err != nil {
117 | // pretty.Println("blob set", err)
118 | msg.c <- blobResponse{err: err}
119 | } else {
120 | msg.c <- blobResponse{}
121 | }
122 | case blobMsgGet:
123 | // pretty.Println("blob get", msg)
124 | if blob, err := blobGet(msg); err != nil {
125 | // pretty.Println("blob get", err)
126 | msg.c <- blobResponse{err: err}
127 | } else {
128 | msg.c <- blobResponse{blob: blob}
129 | }
130 | case blobMsgHead:
131 | // pretty.Println("blob head", msg)
132 | if err := blobHead(msg); err != nil {
133 | // pretty.Println("blob head", err)
134 | msg.c <- blobResponse{err: err}
135 | } else {
136 | msg.c <- blobResponse{}
137 | }
138 | default:
139 | panic(msg)
140 | }
141 | }
142 | }
143 |
--------------------------------------------------------------------------------
/bump.rb:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 |
3 | require 'json'
4 | require 'date'
5 | require 'open3'
6 |
7 | pkg = 'spongix'
8 | file = 'package.nix'
9 |
10 | system 'go', 'mod', 'tidy'
11 |
12 | puts 'Checking version...'
13 |
14 | today = Date.today
15 |
16 | version = `nix eval --raw '.##{pkg}.version'`.strip
17 | md = version.match(/(?<y>\d+)\.(?<m>\d+)\.(?<d>\d+)\.(?:\d+)/)
18 | version_date = Date.new(md[:y].to_i, md[:m].to_i, md[:d].to_i)
19 | old_version = version
20 |
21 | new_version =
22 | if today == version_date
23 | old_version.succ
24 | else
25 | today.strftime('%Y.%m.%d.001')
26 | end
27 |
28 | if new_version != old_version
29 | puts "Updating version #{old_version} => #{new_version}"
30 | updated = File.read(file).gsub(old_version, new_version)
31 | File.write(file, updated)
32 | else
33 | puts 'Skipping version update'
34 | end
35 |
36 | puts 'Checking vendorSha256...'
37 |
38 | old_sha = `nix eval --raw '.##{pkg}.vendorSha256'`.strip
39 | new_sha = nil
40 |
41 | Open3.popen3('nix', 'build', ".##{pkg}.invalidHash") do |_si, _so, se|
42 | se.each_line do |line|
43 | puts line
44 | new_sha = $~[:sha] if line =~ /^\s+got:\s+(?<sha>sha256-\S+)$/
45 | end
46 | end
47 |
48 | pp old_sha, new_sha
49 |
50 | if old_sha == new_sha
51 | puts 'Skipping vendorSha256 update'
52 | else
53 | puts "Updating vendorSha256 #{old_sha} => #{new_sha}"
54 | updated = File.read(file).gsub(old_sha, new_sha)
55 | File.write(file, updated)
56 | end
57 |
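58 | # Usage sketch: run from the repo root; rewrites `version` (date-based)
59 | # and `vendorSha256` in package.nix in place:
60 | #
61 | #   ./bump.rb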
--------------------------------------------------------------------------------
/cache.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "crypto/ed25519"
6 | "fmt"
7 | "io"
8 | "net/http"
9 | "net/url"
10 | "path/filepath"
11 | "strconv"
12 | "strings"
13 | "sync"
14 | "time"
15 |
16 | "github.com/folbricht/desync"
17 | "github.com/jamespfennell/xz"
18 | "github.com/pascaldekloe/metrics"
19 | "github.com/pkg/errors"
20 | "go.uber.org/zap"
21 | )
22 |
23 | const (
24 | headerCache = "X-Cache"
25 | headerCacheHit = "HIT"
26 | headerCacheRemote = "REMOTE"
27 | headerCacheMiss = "MISS"
28 | headerCacheUpstream = "X-Cache-Upstream"
29 | headerContentType = "Content-Type"
30 | )
31 |
32 | func urlToMime(u string) string {
33 | switch filepath.Ext(u) {
34 | case ".nar", ".xz":
35 | return mimeNar
36 | case ".narinfo":
37 | return mimeNarinfo
38 | default:
39 | return mimeText
40 | }
41 | }
42 |
43 | func getIndex(index desync.IndexStore, url *url.URL) (i desync.Index, err error) {
44 | if name, err := urlToIndexName(url); err != nil {
45 | return i, err
46 | } else {
47 | return index.GetIndex(name)
48 | }
49 | }
50 |
51 | func storeIndex(index desync.IndexWriteStore, url *url.URL, idx desync.Index) error {
52 | if name, err := urlToIndexName(url); err != nil {
53 | return err
54 | } else {
55 | return index.StoreIndex(name, idx)
56 | }
57 | }
58 |
59 | func urlToIndexName(url *url.URL) (string, error) {
60 | name := url.EscapedPath()
61 | if strings.HasPrefix(name, "/cache/") {
62 | name = strings.Replace(name, "/cache/", "/", 1)
63 | }
64 | if strings.HasSuffix(name, ".nar.xz") {
65 | name = strings.Replace(name, ".nar.xz", ".nar", 1)
66 | }
67 | if name, err := filepath.Rel("/", name); err != nil {
68 | return name, err
69 | } else {
70 | return name, nil
71 | }
72 | }
73 |
74 | type cacheHandler struct {
75 | log *zap.Logger
76 | handler http.Handler
77 | store desync.WriteStore
78 | index desync.IndexWriteStore
79 | trustedKeys map[string]ed25519.PublicKey
80 | secretKeys map[string]ed25519.PrivateKey
81 | }
82 |
83 | func withCacheHandler(
84 | log *zap.Logger,
85 | store desync.WriteStore,
86 | index desync.IndexWriteStore,
87 | trustedKeys map[string]ed25519.PublicKey,
88 | secretKeys map[string]ed25519.PrivateKey,
89 | ) func(http.Handler) http.Handler {
90 | if store == nil || index == nil {
91 | return func(h http.Handler) http.Handler {
92 | return h
93 | }
94 | }
95 |
96 | return func(h http.Handler) http.Handler {
97 | return &cacheHandler{handler: h,
98 | log: log,
99 | store: store,
100 | index: index,
101 | trustedKeys: trustedKeys,
102 | secretKeys: secretKeys,
103 | }
104 | }
105 | }
106 |
107 | func (c cacheHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
108 | switch r.Method {
109 | case "HEAD":
110 | c.Head(w, r)
111 | case "GET":
112 | c.Get(w, r)
113 | case "PUT":
114 | c.Put(w, r)
115 | default:
116 | c.handler.ServeHTTP(w, r)
117 | }
118 | }
119 |
120 | func (c cacheHandler) Head(w http.ResponseWriter, r *http.Request) {
121 | idx, err := getIndex(c.index, r.URL)
122 | if err != nil {
123 | c.handler.ServeHTTP(w, r)
124 | return
125 | }
126 |
127 | w.Header().Set("Content-Length", strconv.FormatInt(idx.Length(), 10))
128 | w.Header().Set(headerCache, headerCacheHit)
129 | w.Header().Set(headerContentType, urlToMime(r.URL.String()))
130 | w.WriteHeader(200)
131 | }
132 |
133 | func (c cacheHandler) Get(w http.ResponseWriter, r *http.Request) {
134 | idx, err := getIndex(c.index, r.URL)
135 | if err != nil {
136 | c.handler.ServeHTTP(w, r)
137 | return
138 | }
139 |
140 | wr := io.Writer(w)
141 | if filepath.Ext(r.URL.String()) == ".xz" {
142 | xzWr := xz.NewWriterLevel(w, xz.BestSpeed)
143 | defer xzWr.Close()
144 | wr = xzWr
145 | } else {
146 | w.Header().Set("Content-Length", strconv.FormatInt(idx.Length(), 10))
147 | }
148 |
149 | w.Header().Set(headerCache, headerCacheHit)
150 | w.Header().Set(headerContentType, urlToMime(r.URL.String()))
151 | for _, indexChunk := range idx.Chunks {
152 | if chunk, err := c.store.GetChunk(indexChunk.ID); err != nil {
153 | c.log.Error("while getting chunk", zap.Error(err))
154 | break
155 | } else if data, err := chunk.Data(); err != nil {
156 | c.log.Error("while reading chunk data", zap.Error(err))
157 | break
158 | } else if _, err := wr.Write(data); err != nil {
159 | c.log.Error("while writing chunk data", zap.Error(err))
160 | break
161 | }
162 | }
163 | }
164 |
165 | func answer(w http.ResponseWriter, status int, mime, msg string) {
166 | w.Header().Set(headerContentType, mime)
167 | w.WriteHeader(status)
168 | _, _ = w.Write([]byte(msg))
169 | }
170 |
171 | func (c cacheHandler) Put(w http.ResponseWriter, r *http.Request) {
172 | urlExt := filepath.Ext(r.URL.String())
173 | switch urlExt {
174 | case ".narinfo":
175 | info := &Narinfo{}
176 | if err := info.Unmarshal(r.Body); err != nil {
177 | c.log.Error("unmarshaling narinfo", zap.Error(err))
178 | answer(w, http.StatusBadRequest, mimeText, err.Error())
179 | } else if infoRd, err := info.PrepareForStorage(c.trustedKeys, c.secretKeys); err != nil {
180 | c.log.Error("failed serializing narinfo", zap.Error(err))
181 | answer(w, http.StatusInternalServerError, mimeText, "failed serializing narinfo")
182 | } else {
183 | c.putCommon(w, r, infoRd)
184 | }
185 | case ".nar":
186 | c.putCommon(w, r, r.Body)
187 | case ".xz":
188 | xzRd := xz.NewReader(r.Body)
189 | defer xzRd.Close()
190 | c.putCommon(w, r, xzRd)
191 | default:
192 | answer(w, http.StatusBadRequest, mimeText, "compression is not supported\n")
193 | }
194 | }
195 |
196 | func (c cacheHandler) putCommon(w http.ResponseWriter, r *http.Request, rd io.Reader) {
197 | if chunker, err := desync.NewChunker(rd, chunkSizeMin(), chunkSizeAvg, chunkSizeMax()); err != nil {
198 | c.log.Error("making chunker", zap.Error(err))
199 | answer(w, http.StatusInternalServerError, mimeText, "making chunker")
200 | } else if idx, err := desync.ChunkStream(context.Background(), chunker, c.store, defaultThreads); err != nil {
201 | c.log.Error("chunking body", zap.Error(err))
202 | answer(w, http.StatusInternalServerError, mimeText, "chunking body")
203 | } else if err := storeIndex(c.index, r.URL, idx); err != nil {
204 | c.log.Error("storing index", zap.Error(err))
205 | answer(w, http.StatusInternalServerError, mimeText, "storing index")
206 | } else {
207 | answer(w, http.StatusOK, mimeText, "ok\n")
208 | }
209 | }
210 |
211 | type remoteHandler struct {
212 | log *zap.Logger
213 | handler http.Handler
214 | substituters []*url.URL
215 | exts []string
216 | cacheChan chan string
217 | }
218 |
219 | func withRemoteHandler(log *zap.Logger, substituters, exts []string, cacheChan chan string) func(http.Handler) http.Handler {
220 | parsedSubstituters := []*url.URL{}
221 | for _, raw := range substituters {
222 | u, err := url.Parse(raw)
223 | if err != nil {
224 | panic(err)
225 | }
226 | parsedSubstituters = append(parsedSubstituters, u)
227 | }
228 |
229 | return func(h http.Handler) http.Handler {
230 | return &remoteHandler{
231 | log: log,
232 | handler: h,
233 | exts: exts,
234 | substituters: parsedSubstituters,
235 | cacheChan: cacheChan,
236 | }
237 | }
238 | }
239 |
240 | func (h *remoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
241 | exts := h.exts
242 | urlExt := filepath.Ext(r.URL.String())
243 | timeout := 30 * time.Minute
244 | switch urlExt {
245 | case ".nar":
246 | case ".xz":
247 | exts = []string{""}
248 | case ".narinfo":
249 | timeout = 10 * time.Second
250 | exts = []string{""}
251 | case "":
252 | h.handler.ServeHTTP(w, r)
253 | return
254 | }
255 |
256 | ctx, cancel := context.WithTimeout(context.Background(), timeout)
257 | defer cancel()
258 |
259 | routines := len(h.substituters) * len(exts)
260 | resChan := make(chan *http.Response, routines)
261 | wg := &sync.WaitGroup{}
262 |
263 | for _, substituter := range h.substituters {
264 | for _, ext := range exts {
265 | u, err := substituter.Parse(r.URL.String() + ext)
266 | if err != nil {
267 | h.log.Error("parsing url", zap.String("url", r.URL.String()+ext), zap.Error(err))
268 | continue
269 | }
270 |
271 | request, err := http.NewRequestWithContext(ctx, r.Method, u.String(), nil)
272 | if err != nil {
273 | h.log.Error("creating request", zap.String("url", u.String()), zap.Error(err))
274 | continue
275 | }
276 |
277 | wg.Add(1)
278 | go func(request *http.Request) {
279 | defer wg.Done()
280 | res, err := http.DefaultClient.Do(request)
281 | if err != nil {
282 | if !errors.Is(err, context.Canceled) {
283 | h.log.Error("fetching upstream", zap.String("url", request.URL.String()), zap.Error(err))
284 | }
285 | } else if res.StatusCode/100 == 2 {
286 | select {
287 | case resChan <- res:
288 | case <-ctx.Done():
289 | }
290 | }
291 | }(request)
292 | }
293 | }
294 |
295 | allDone := make(chan bool)
296 | go func() {
297 | wg.Wait()
298 | select {
299 | case allDone <- true:
300 | case <-ctx.Done():
301 | }
302 | }()
303 |
304 | select {
305 | case <-allDone:
306 | // got no good responses
307 | case <-ctx.Done():
308 | // ran out of time
309 | case response := <-resChan:
310 | h.cacheChan <- response.Request.URL.String()
311 | // w.Header().Set("Content-Length", strconv.FormatInt(idx.Length(), 10))
312 | w.Header().Set(headerCache, headerCacheRemote)
313 | w.Header().Set(headerContentType, urlToMime(response.Request.URL.String()))
314 | w.Header().Set(headerCacheUpstream, response.Request.URL.String())
315 |
316 | body := response.Body
317 | if strings.HasSuffix(r.URL.String(), ".nar") && strings.HasSuffix(response.Request.URL.String(), ".xz") {
318 | body = xz.NewReader(response.Body)
319 | }
320 |
321 | _, _ = io.Copy(w, body)
322 | return
323 | }
324 |
325 | h.handler.ServeHTTP(w, r)
326 | }
327 |
328 | func (proxy *Proxy) cacheUrl(urlStr string) error {
329 | u, err := url.Parse(urlStr)
330 | if err != nil {
331 | return errors.WithMessage(err, "parsing URL")
332 | }
333 |
334 | response, err := http.Get(urlStr)
335 | if err != nil {
336 | return errors.WithMessage(err, "getting URL")
337 | }
338 |
339 | defer response.Body.Close()
340 |
341 | if response.StatusCode/100 != 2 {
342 | return errors.Errorf("received status %d", response.StatusCode)
343 | }
344 |
345 | if strings.HasSuffix(urlStr, ".nar") || strings.HasSuffix(urlStr, ".narinfo") {
346 | if chunker, err := desync.NewChunker(response.Body, chunkSizeMin(), chunkSizeAvg, chunkSizeMax()); err != nil {
347 | return errors.WithMessage(err, "making chunker")
348 | } else if idx, err := desync.ChunkStream(context.Background(), chunker, proxy.localStore, defaultThreads); err != nil {
349 | return errors.WithMessage(err, "chunking body")
350 | } else if err := storeIndex(proxy.localIndex, u, idx); err != nil {
351 | return errors.WithMessage(err, "storing index")
352 | }
353 | } else if strings.HasSuffix(urlStr, ".nar.xz") {
354 | xzRd := xz.NewReader(response.Body)
355 | if chunker, err := desync.NewChunker(xzRd, chunkSizeMin(), chunkSizeAvg, chunkSizeMax()); err != nil {
356 | return errors.WithMessage(err, "making chunker")
357 | } else if idx, err := desync.ChunkStream(context.Background(), chunker, proxy.localStore, defaultThreads); err != nil {
358 | return errors.WithMessage(err, "chunking body")
359 | } else if err := storeIndex(proxy.localIndex, u, idx); err != nil {
360 | return errors.WithMessage(err, "storing index")
361 | }
362 | } else {
363 | return fmt.Errorf("unexpected extension in url: %s", urlStr)
364 | }
365 |
366 | return nil
367 | }
368 |
369 | var (
370 | metricRemoteCachedFail = metrics.MustCounter("spongix_remote_cache_fail", "Number of upstream cache entries failed to copy")
371 | metricRemoteCachedOk = metrics.MustCounter("spongix_remote_cache_ok", "Number of upstream cache entries copied")
372 | )
373 |
374 | func (proxy *Proxy) startCache() {
375 | for urlStr := range proxy.cacheChan {
376 | proxy.log.Info("Caching", zap.String("url", urlStr))
377 | if err := proxy.cacheUrl(urlStr); err != nil {
378 | metricRemoteCachedFail.Add(1)
379 | proxy.log.Error("Caching failed", zap.String("url", urlStr), zap.Error(err))
380 | } else {
381 | metricRemoteCachedOk.Add(1)
382 | proxy.log.Info("Cached", zap.String("url", urlStr))
383 | }
384 | }
385 | }
386 |
--------------------------------------------------------------------------------
/cicero/actions/spongix/ci.nix:
--------------------------------------------------------------------------------
1 | {
2 | name,
3 | std,
4 | lib,
5 | actionLib,
6 | ...
7 | } @ args: let
8 | startOf = of: of.value."${name}".start;
9 | in {
10 | inputs.start = ''
11 | "${name}": start: {
12 | clone_url: string
13 | sha: string
14 | statuses_url?: string
15 | }
16 | '';
17 |
18 | output = {start}: let
19 | facts = start.value."${name}".start;
20 | in {
21 | success."${name}" = {
22 | ok = true;
23 | inherit (facts) clone_url sha;
24 | };
25 | };
26 |
27 | job = {start}: let
28 | facts = start.value."${name}".start;
29 | in
30 | std.chain args [
31 | actionLib.simpleJob
32 | (std.git.clone facts)
33 |
34 | {
35 | resources = {
36 | memory = 1000 * 2;
37 | cpu = 7000;
38 | };
39 | config.console = "pipe";
40 | config.packages = std.data-merge.append [
41 | "github:input-output-hk/spongix#devShell.x86_64-linux"
42 | ];
43 | }
44 |
45 | (lib.optionalAttrs (facts ? statuses_url) (std.github.reportStatus facts.statuses_url))
46 |
47 | (std.base {})
48 | std.nix.develop
49 | (std.script "bash" ''
50 | set -ex
51 | lint
52 | '')
53 | ];
54 | }
55 |
--------------------------------------------------------------------------------
/devshell.toml:
--------------------------------------------------------------------------------
1 | [[commands]]
2 | package = "golangci-lint"
3 |
4 | [[commands]]
5 | package = "treefmt"
6 |
7 | [[commands]]
8 | package = "alejandra"
9 |
10 | [[commands]]
11 | name = "lint"
12 | command = "golangci-lint run && treefmt --fail-on-change"
13 | help = "Run code linters"
14 |
15 | [[commands]]
16 | name = "coverage"
17 | command = "go test -v -coverprofile cover.out ./..."
18 | help = "Run code coverage"
19 |
20 | [devshell]
21 | name = "spongix"
22 | packages = [
23 | "diffutils",
24 | "go_1_18",
25 | "gotools",
26 | "gopls",
27 | "gocode",
28 | "gcc",
29 | "fd",
30 | "gnumake",
31 | "minio",
32 | "minio-client",
33 | ]
34 |
--------------------------------------------------------------------------------
/docker.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "io"
8 | "net/http"
9 | "strings"
10 | "time"
11 |
12 | "github.com/folbricht/desync"
13 | "github.com/gorilla/mux"
14 | "github.com/hashicorp/go-uuid"
15 | "go.uber.org/zap"
16 | )
17 |
18 | const (
19 | mimeJson = "application/json; charset=utf-8"
20 | )
21 |
22 | type dockerUpload struct {
23 | uuid string
24 | content *bytes.Buffer
25 | lastModified time.Time
26 | }
27 |
28 | type DockerManifest struct {
29 | SchemaVersion int64 `json:"schemaVersion"`
30 | Config DockerManifestConfig `json:"config"`
31 | Layers []DockerManifestConfig `json:"layers"`
32 | }
33 |
34 | type DockerManifestConfig struct {
35 | MediaType string `json:"mediaType"`
36 | Digest string `json:"digest"`
37 | Size int64 `json:"size"`
38 | }
39 |
40 | type DockerManifestResponse struct {
41 | Name string `json:"name"`
42 | Tag string `json:"tag"`
43 | Architecture string `json:"architecture"`
44 | FSLayers []DockerManifestResponseFSLayer `json:"fsLayers"`
45 | History []DockerManifestResponseHistory `json:"history"`
46 | SchemaVersion int `json:"schemaVersion"`
47 | Signatures []string `json:"signatures"`
48 | }
49 |
50 | type DockerManifestResponseFSLayer struct {
51 | BlobSum string `json:"blobSum"`
52 | }
53 |
54 | type DockerManifestResponseHistory struct {
55 | V1Compatibility string `json:"v1Compatibility"`
56 | }
57 |
58 | type dockerHandler struct {
59 | log *zap.Logger
60 | blobs blobManager
61 | manifests manifestManager
62 | uploads uploadManager
63 | }
64 |
65 | func newDockerHandler(
66 | logger *zap.Logger,
67 | store desync.WriteStore,
68 | index desync.IndexWriteStore,
69 | manifestDir string,
70 | r *mux.Router,
71 | ) dockerHandler {
72 | handler := dockerHandler{
73 | log: logger,
74 | blobs: newBlobManager(store, index),
75 | manifests: newManifestManager(manifestDir),
76 | uploads: newUploadManager(store, index),
77 | }
78 |
79 | r.HandleFunc("/v2/", handler.ping)
80 |
81 | prefix := "/v2/{name:(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/?){2}}/"
82 | r.Methods("GET", "HEAD").Path(prefix + "manifests/{reference}").HandlerFunc(handler.manifestGet)
83 | r.Methods("PUT").Path(prefix + "manifests/{reference}").HandlerFunc(handler.manifestPut)
84 | r.Methods("GET").Path(prefix + "blobs/{digest:sha256:[a-z0-9]{64}}").HandlerFunc(handler.blobGet)
85 | r.Methods("HEAD").Path(prefix + "blobs/{digest:sha256:[a-z0-9]{64}}").HandlerFunc(handler.blobHead)
86 | r.Methods("POST").Path(prefix + "blobs/uploads/").HandlerFunc(handler.blobUploadPost)
87 |
88 | // seems like a bug in mux, we cannot simply use `registry` as our subrouter here
89 | uploadPrefix := prefix + "blobs/uploads/{uuid:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}}"
90 | r.PathPrefix(uploadPrefix).Methods("GET").HandlerFunc(handler.blobUploadGet)
91 | r.PathPrefix(uploadPrefix).Methods("PUT").HandlerFunc(handler.blobUploadPut)
92 | r.PathPrefix(uploadPrefix).Methods("PATCH").HandlerFunc(handler.blobUploadPatch)
93 |
94 | return handler
95 | }
96 |
97 | func (d dockerHandler) ping(w http.ResponseWriter, r *http.Request) {
98 | w.Header().Set(headerContentType, mimeJson)
99 | w.WriteHeader(http.StatusOK)
100 | _, _ = w.Write([]byte(`{}`))
101 | }
102 |
103 | func (d dockerHandler) blobUploadPost(w http.ResponseWriter, r *http.Request) {
104 | u, err := uuid.GenerateUUID()
105 | if err != nil {
106 | d.log.Error("Failed to generate UUID", zap.Error(err))
107 | w.WriteHeader(http.StatusInternalServerError)
108 | return
109 | }
110 |
111 | d.uploads.new(u)
112 |
113 | h := w.Header()
114 | h.Set("Content-Length", "0")
115 | h.Set("Location", r.URL.Host+r.URL.Path+u)
116 | h.Set("Range", "0-0")
117 | h.Set("Docker-Upload-UUID", u)
118 | w.WriteHeader(http.StatusAccepted)
119 | }
120 |
121 | func (d dockerHandler) blobUploadGet(w http.ResponseWriter, r *http.Request) {
122 | vars := mux.Vars(r)
123 |
124 | upload := d.uploads.get(vars["uuid"])
125 | if upload == nil {
126 | w.WriteHeader(http.StatusNotFound)
127 | return
128 | }
129 |
130 | h := w.Header()
131 | h.Set("Content-Length", "0")
132 | h.Set("Range", fmt.Sprintf("%d-%d", 0, upload.content.Len()))
133 | h.Set("Docker-Upload-UUID", vars["uuid"])
134 | w.WriteHeader(http.StatusNoContent)
135 | }
136 |
137 | func (d dockerHandler) blobHead(w http.ResponseWriter, r *http.Request) {
138 | vars := mux.Vars(r)
139 |
140 | if err := d.blobs.head(vars["name"], vars["digest"]); err != nil {
141 | d.log.Error("getting blob", zap.Error(err))
142 | w.WriteHeader(http.StatusNotFound)
143 | } else {
144 | w.WriteHeader(http.StatusOK)
145 | }
146 | }
147 |
148 | func (d dockerHandler) blobGet(w http.ResponseWriter, r *http.Request) {
149 | vars := mux.Vars(r)
150 |
151 | blob, err := d.blobs.get(vars["name"], vars["digest"])
152 | if err != nil {
153 | d.log.Error("getting blob", zap.Error(err))
154 | w.WriteHeader(http.StatusNotFound)
155 | return
156 | }
157 |
158 | if blob == nil {
159 | w.WriteHeader(http.StatusNotFound)
160 | return
161 | }
162 |
163 | w.WriteHeader(http.StatusOK)
164 | _, _ = w.Write(blob)
165 | }
166 |
167 | func (d dockerHandler) manifestPut(w http.ResponseWriter, r *http.Request) {
168 | vars := mux.Vars(r)
169 |
170 | manifest := &DockerManifest{}
171 | if err := json.NewDecoder(r.Body).Decode(manifest); err != nil {
172 | fmt.Println(err)
173 | w.Header().Set(headerContentType, mimeJson)
174 | w.WriteHeader(http.StatusBadRequest)
175 | _, _ = w.Write([]byte(`{"errors": [{"code": "MANIFEST_INVALID"}]}`))
176 | return
177 | }
178 |
179 | if manifest.Config.Digest == "" {
180 | w.WriteHeader(http.StatusBadRequest)
181 | _, _ = w.Write([]byte(`{"errors": [{"code": "MANIFEST_INVALID"}]}`))
182 | return
183 | }
184 |
185 | if err := d.manifests.set(vars["name"], vars["reference"], manifest); err != nil {
186 | fmt.Println(err)
187 | w.WriteHeader(http.StatusBadRequest)
188 | _, _ = w.Write([]byte(`{"errors": [{"code": "MANIFEST_INVALID"}]}`))
189 | return
190 | }
191 |
192 | w.WriteHeader(http.StatusOK)
193 | }
194 |
195 | func (d dockerHandler) blobUploadPut(w http.ResponseWriter, r *http.Request) {
196 | vars := mux.Vars(r)
197 | // TODO: verify digest
198 | digest := r.URL.Query().Get("digest")
199 | // parts := strings.SplitN(digest, ":", 2)
200 |
201 | h := w.Header()
202 | if upload := d.uploads.get(vars["uuid"]); upload != nil {
203 | _, _ = io.Copy(upload.content, r.Body)
204 |
205 | if err := d.blobs.set(vars["name"], digest, upload.content.Bytes()); err != nil {
206 | d.log.Error("Failed to store blob", zap.Error(err))
207 | answer(w, http.StatusInternalServerError, mimeJson, `{"errors": [{"code": "BLOB_UPLOAD_UNKNOWN"}]}`)
208 | return
209 | }
210 | d.uploads.del(vars["uuid"])
211 |
212 | h.Set("Content-Length", "0")
213 | h.Set("Range", fmt.Sprintf("0-%d", upload.content.Len()))
214 | h.Set("Docker-Upload-UUID", vars["uuid"])
215 | w.WriteHeader(http.StatusCreated)
216 | } else {
217 | h.Set(headerContentType, mimeJson)
218 | w.WriteHeader(http.StatusNotFound)
219 | _, _ = w.Write([]byte(`{"errors": [{"code": "BLOB_UPLOAD_UNKNOWN"}]}`))
220 | }
221 | }
222 |
223 | func (d dockerHandler) blobUploadPatch(w http.ResponseWriter, r *http.Request) {
224 | vars := mux.Vars(r)
225 |
226 | h := w.Header()
227 |
228 | if upload := d.uploads.get(vars["uuid"]); upload != nil {
229 | _, _ = io.Copy(upload.content, r.Body)
230 |
231 | h.Set("Content-Length", "0")
232 | h.Set("Location", r.URL.Host+r.URL.Path)
233 | h.Set("Range", fmt.Sprintf("0-%d", upload.content.Len()))
234 | h.Set("Docker-Upload-UUID", vars["uuid"])
235 | w.WriteHeader(http.StatusNoContent)
236 | } else {
237 | h.Set(headerContentType, mimeJson)
238 | w.WriteHeader(http.StatusNotFound)
239 | _, _ = w.Write([]byte(`{"errors": [{"code": "BLOB_UPLOAD_UNKNOWN"}]}`))
240 | }
241 | }
242 |
243 | func (d dockerHandler) manifestGet(w http.ResponseWriter, r *http.Request) {
244 | vars := mux.Vars(r)
245 |
246 | manifest, err := d.manifests.get(vars["name"], vars["reference"])
247 | if err != nil {
248 | fmt.Println(err)
249 | d.log.Error("getting manifest", zap.Error(err))
250 | w.WriteHeader(http.StatusInternalServerError)
251 | return
252 | }
253 |
254 | if manifest == nil {
255 | fmt.Println("404")
256 | d.log.Warn("manifest not found")
257 | w.WriteHeader(http.StatusNotFound)
258 | return
259 | }
260 |
261 | h := w.Header()
262 | h.Set(headerContentType, manifest.Config.MediaType)
263 | h.Set("Docker-Content-Digest", manifest.Config.Digest)
264 | h.Set("Docker-Distribution-Api-Version", "registry/2.0")
265 | h.Set("Etag", `"`+manifest.Config.Digest+`"`)
266 |
267 | if r.Method == "HEAD" {
268 | w.WriteHeader(http.StatusOK)
269 | return
270 | }
271 |
272 | blob, err := d.blobs.get(vars["name"], manifest.Config.Digest)
273 | if err != nil {
274 | fmt.Println(err)
275 | w.WriteHeader(http.StatusInternalServerError)
276 | return
277 | }
278 |
279 | if blob == nil {
280 | w.WriteHeader(http.StatusNotFound)
281 | return
282 | }
283 |
284 | cfg := map[string]interface{}{}
285 | if err := json.Unmarshal(blob, &cfg); err != nil {
286 | d.log.Error("unmarshal manifest", zap.Error(err))
287 | w.WriteHeader(http.StatusBadRequest)
288 | _, _ = w.Write([]byte(`{"errors": [{"code": "MANIFEST_INVALID"}]}`))
289 | return
290 | }
291 |
292 | fsLayers := []DockerManifestResponseFSLayer{}
293 | for _, layer := range manifest.Layers {
294 | fsLayers = append(fsLayers, DockerManifestResponseFSLayer{BlobSum: layer.Digest})
295 | }
296 |
297 | history := []DockerManifestResponseHistory{}
298 | for i := range manifest.Layers {
299 | rootfs, ok := cfg["rootfs"].(map[string]interface{})
300 | if !ok {
301 | w.Header().Set(headerContentType, mimeJson)
302 | d.log.Error("manifest invalid: image config has no rootfs")
303 | w.WriteHeader(http.StatusInternalServerError)
304 | _, _ = w.Write([]byte(`{"errors": [{"code": "MANIFEST_INVALID"}]}`))
305 | return
306 | }
307 |
308 | diffIds, ok := rootfs["diff_ids"].([]interface{})
309 | if !ok {
310 | w.Header().Set(headerContentType, mimeJson)
311 | d.log.Error("manifest invalid: rootfs has no diff_ids")
312 | w.WriteHeader(http.StatusInternalServerError)
313 | _, _ = w.Write([]byte(`{"errors": [{"code": "MANIFEST_INVALID"}]}`))
314 | return
315 | }
316 |
317 | rid := diffIds[i].(string)
318 | ridp := strings.SplitN(rid, ":", 2)
319 | entry := map[string]interface{}{
320 | "created": "1970-01-01T00:00:01+00:00",
321 | "id": ridp[1],
322 | }
323 |
324 | if len(manifest.Layers) > 1 && i != len(manifest.Layers)-1 {
325 | prid := diffIds[i+1].(string)
326 | pridp := strings.SplitN(prid, ":", 2)
327 | entry["parent"] = pridp[1]
328 | }
329 |
330 | if i == 0 {
331 | entry["architecture"] = "amd64"
332 | entry["config"] = cfg["config"]
333 | }
334 |
335 | if c, err := json.Marshal(entry); err != nil {
336 | w.Header().Set(headerContentType, mimeJson)
337 | w.WriteHeader(http.StatusBadRequest)
338 | _, _ = w.Write([]byte(`{"errors": [{"code": "MANIFEST_INVALID"}]}`))
339 | return
340 | } else {
341 | history = append(history, DockerManifestResponseHistory{
342 | V1Compatibility: string(c),
343 | })
344 | }
345 | }
346 |
347 | res := DockerManifestResponse{
348 | Name: vars["name"],
349 | Tag: vars["reference"],
350 | Architecture: "amd64",
351 | FSLayers: fsLayers,
352 | History: history,
353 | SchemaVersion: 1,
354 | Signatures: []string{},
355 | }
356 |
357 | w.WriteHeader(http.StatusOK)
358 | if err := json.NewEncoder(w).Encode(res); err != nil {
359 | d.log.Error("Failed to encode JSON", zap.Error(err))
360 | }
361 | }
362 |
--------------------------------------------------------------------------------
/docker_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "net/http"
6 | "net/url"
7 | "os"
8 | "path/filepath"
9 | "testing"
10 |
11 | "github.com/folbricht/desync"
12 | "github.com/gorilla/mux"
13 | "github.com/steinfletcher/apitest"
14 | "go.uber.org/zap"
15 | )
16 |
17 | func testDocker(t *testing.T) dockerHandler {
18 | var store desync.LocalStore
19 | var index desync.LocalIndexStore
20 |
21 | indexDir := filepath.Join(t.TempDir(), "index")
22 | if err := os.MkdirAll(filepath.Join(indexDir, "spongix/blobs"), 0700); err != nil {
23 | t.Fatal(err)
24 | } else if index, err = desync.NewLocalIndexStore(indexDir); err != nil {
25 | t.Fatal(err)
26 | }
27 |
28 | storeDir := filepath.Join(t.TempDir(), "store")
29 | if err := os.MkdirAll(storeDir, 0700); err != nil {
30 | t.Fatal(err)
31 | } else if store, err = desync.NewLocalStore(storeDir, defaultStoreOptions); err != nil {
32 | t.Fatal(err)
33 | }
34 |
35 | ociDir := filepath.Join(t.TempDir(), "oci")
36 | if err := os.MkdirAll(ociDir, 0700); err != nil {
37 | t.Fatal(err)
38 | }
39 |
40 | log, err := zap.NewDevelopment()
41 | if err != nil {
42 | t.Fatal(err)
43 | }
44 |
45 | return newDockerHandler(log, store, index, ociDir, mux.NewRouter())
46 | }
47 |
48 | func TestDocker(t *testing.T) {
49 | proxy := testProxy(t)
50 |
51 | apitest.New().
52 | Handler(proxy.router()).
53 | Get("/v2/").
54 | Expect(t).
55 | Header(headerContentType, mimeJson).
56 | Body(`{}`).
57 | Status(http.StatusOK).
58 | End()
59 | }
60 |
61 | func TestDockerBlob(t *testing.T) {
62 | proxy := testProxy(t)
63 | router := proxy.router()
64 |
65 | uploadResult := apitest.New().
66 | Handler(router).
67 | Post("/v2/spongix/blobs/uploads/").
68 | Body(`{}`).
69 | Expect(t).
70 | Status(http.StatusAccepted).
71 | HeaderPresent("Location").
72 | Header("Content-Length", "0").
73 | Header("Range", "0-0").
74 | HeaderPresent("Docker-Upload-UUID").
75 | End()
76 |
77 | location, err := url.Parse(uploadResult.Response.Header.Get("Location"))
78 | if err != nil {
79 | t.Fatal(err)
80 | }
81 |
82 | digest := "sha256:bd60d81d7c94dec8378b4e6fb652462a9156618bfd34c6673ad9d81566d2d6cc"
83 |
84 | apitest.New().
85 | Handler(router).
86 | Put(location.RequestURI()).
87 | Query("digest", digest).
88 | Body(`{}`).
89 | Expect(t).
90 | Status(http.StatusCreated).
91 | Header("Content-Length", "0").
92 | Header("Range", "0-2").
93 | HeaderPresent("Docker-Upload-UUID").
94 | End()
95 |
96 | apitest.New().
97 | Handler(router).
98 | Method("HEAD").
99 | URL("/v2/spongix/blobs/" + digest).
100 | Expect(t).
101 | Body(``).
102 | Status(http.StatusOK).
103 | Headers(map[string]string{}).
104 | End()
105 | }
106 |
107 | func TestDockerManifest(t *testing.T) {
108 | proxy := testProxy(t)
109 | router := proxy.router()
110 |
111 | body, err := json.Marshal(&DockerManifest{})
112 | if err != nil {
113 | t.Fatal(err)
114 | }
115 |
116 | apitest.New().
117 | Handler(router).
118 | Put("/v2/spongix/manifests/hello").
119 | Body(string(body)).
120 | Expect(t).
121 | Body(``).
122 | Status(http.StatusOK).
123 | End()
124 |
125 | apitest.New().
126 | Handler(router).
127 | Get("/v2/spongix/manifests/hello").
128 | Expect(t).
129 | Status(http.StatusOK).
130 | Body(``).
131 | Headers(map[string]string{}).
132 | End()
133 | }
134 |
--------------------------------------------------------------------------------
/fake.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "io"
6 | "os"
7 |
8 | "github.com/folbricht/desync"
9 | "github.com/pkg/errors"
10 | )
11 |
12 | // fakeStore
13 | type fakeStore struct {
14 | chunks map[desync.ChunkID][]byte
15 | }
16 |
17 | func (s fakeStore) Close() error { return nil }
18 | func (s fakeStore) String() string { return "store" }
19 |
20 | func newFakeStore() *fakeStore {
21 | return &fakeStore{
22 | chunks: map[desync.ChunkID][]byte{},
23 | }
24 | }
25 |
26 | func (s fakeStore) GetChunk(id desync.ChunkID) (*desync.Chunk, error) {
27 | found, ok := s.chunks[id]
28 | if !ok {
29 | return nil, desync.ChunkMissing{ID: id}
30 | }
31 | return desync.NewChunk(found), nil
32 | }
33 |
34 | func (s fakeStore) HasChunk(id desync.ChunkID) (bool, error) {
35 | _, ok := s.chunks[id]
36 | return ok, nil
37 | }
38 |
39 | func (s *fakeStore) StoreChunk(chunk *desync.Chunk) error {
40 | data, err := chunk.Data()
41 | if err != nil {
42 | return err
43 | }
44 | s.chunks[chunk.ID()] = data
45 | return nil
46 | }
47 |
48 | // fakeIndex
49 | type fakeIndex struct {
50 | indices map[string][]byte
51 | }
52 |
53 | func newFakeIndex() *fakeIndex {
54 | return &fakeIndex{indices: map[string][]byte{}}
55 | }
56 |
57 | func (s fakeIndex) Close() error { return nil }
58 | func (s fakeIndex) String() string { return "index" }
59 |
60 | func (s fakeIndex) StoreIndex(id string, index desync.Index) error {
61 | buf := &bytes.Buffer{}
62 | if _, err := index.WriteTo(buf); err != nil {
63 | return err
64 | }
65 | s.indices[id] = buf.Bytes()
66 | return nil
67 | }
68 |
69 | func (s fakeIndex) GetIndex(id string) (i desync.Index, e error) {
70 | f, err := s.GetIndexReader(id)
71 | if err != nil {
72 | return i, err
73 | }
74 | defer f.Close()
75 | idx, err := desync.IndexFromReader(f)
76 | if os.IsNotExist(err) {
77 | err = errors.Errorf("Index file does not exist: %v", err)
78 | }
79 | return idx, err
80 | }
81 |
82 | func (s fakeIndex) GetIndexReader(id string) (io.ReadCloser, error) {
83 | idx, ok := s.indices[id]
84 | if ok {
85 | return io.NopCloser(bytes.NewBuffer(idx)), nil
86 | }
87 | return nil, os.ErrNotExist
88 | }
89 |
--------------------------------------------------------------------------------
/flake.lock:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": {
3 | "HTTP": {
4 | "flake": false,
5 | "locked": {
6 | "lastModified": 1451647621,
7 | "narHash": "sha256-oHIyw3x0iKBexEo49YeUDV1k74ZtyYKGR2gNJXXRxts=",
8 | "owner": "phadej",
9 | "repo": "HTTP",
10 | "rev": "9bc0996d412fef1787449d841277ef663ad9a915",
11 | "type": "github"
12 | },
13 | "original": {
14 | "owner": "phadej",
15 | "repo": "HTTP",
16 | "type": "github"
17 | }
18 | },
19 | "alejandra": {
20 | "inputs": {
21 | "flakeCompat": "flakeCompat",
22 | "nixpkgs": "nixpkgs"
23 | },
24 | "locked": {
25 | "lastModified": 1646937818,
26 | "narHash": "sha256-vkFKYnSmhPPXtc3AH7iRtqRRqxhj0o5WySqPT+klDWU=",
27 | "owner": "kamadorueda",
28 | "repo": "alejandra",
29 | "rev": "e00f984b95e696a0878cd231e937f852eb79532c",
30 | "type": "github"
31 | },
32 | "original": {
33 | "owner": "kamadorueda",
34 | "repo": "alejandra",
35 | "type": "github"
36 | }
37 | },
38 | "alejandra_2": {
39 | "inputs": {
40 | "flakeCompat": "flakeCompat_2",
41 | "nixpkgs": "nixpkgs_2"
42 | },
43 | "locked": {
44 | "lastModified": 1646360966,
45 | "narHash": "sha256-fJ/WHSU45bMJRDqz9yA3B2lwXtW5DKooU+Pzn13GyZI=",
46 | "owner": "kamadorueda",
47 | "repo": "alejandra",
48 | "rev": "511c3f6a88b6964e1496fb6f441f4ae5e58bd3ea",
49 | "type": "github"
50 | },
51 | "original": {
52 | "owner": "kamadorueda",
53 | "repo": "alejandra",
54 | "type": "github"
55 | }
56 | },
57 | "cabal-32": {
58 | "flake": false,
59 | "locked": {
60 | "lastModified": 1603716527,
61 | "narHash": "sha256-X0TFfdD4KZpwl0Zr6x+PLxUt/VyKQfX7ylXHdmZIL+w=",
62 | "owner": "haskell",
63 | "repo": "cabal",
64 | "rev": "48bf10787e27364730dd37a42b603cee8d6af7ee",
65 | "type": "github"
66 | },
67 | "original": {
68 | "owner": "haskell",
69 | "ref": "3.2",
70 | "repo": "cabal",
71 | "type": "github"
72 | }
73 | },
74 | "cabal-34": {
75 | "flake": false,
76 | "locked": {
77 | "lastModified": 1640353650,
78 | "narHash": "sha256-N1t6M3/wqj90AEdRkeC8i923gQYUpzSr8b40qVOZ1Rk=",
79 | "owner": "haskell",
80 | "repo": "cabal",
81 | "rev": "942639c18c0cd8ec53e0a6f8d120091af35312cd",
82 | "type": "github"
83 | },
84 | "original": {
85 | "owner": "haskell",
86 | "ref": "3.4",
87 | "repo": "cabal",
88 | "type": "github"
89 | }
90 | },
91 | "cabal-36": {
92 | "flake": false,
93 | "locked": {
94 | "lastModified": 1641652457,
95 | "narHash": "sha256-BlFPKP4C4HRUJeAbdembX1Rms1LD380q9s0qVDeoAak=",
96 | "owner": "haskell",
97 | "repo": "cabal",
98 | "rev": "f27667f8ec360c475027dcaee0138c937477b070",
99 | "type": "github"
100 | },
101 | "original": {
102 | "owner": "haskell",
103 | "ref": "3.6",
104 | "repo": "cabal",
105 | "type": "github"
106 | }
107 | },
108 | "cardano-shell": {
109 | "flake": false,
110 | "locked": {
111 | "lastModified": 1608537748,
112 | "narHash": "sha256-PulY1GfiMgKVnBci3ex4ptk2UNYMXqGjJOxcPy2KYT4=",
113 | "owner": "input-output-hk",
114 | "repo": "cardano-shell",
115 | "rev": "9392c75087cb9a3d453998f4230930dea3a95725",
116 | "type": "github"
117 | },
118 | "original": {
119 | "owner": "input-output-hk",
120 | "repo": "cardano-shell",
121 | "type": "github"
122 | }
123 | },
124 | "cicero": {
125 | "inputs": {
126 | "alejandra": "alejandra_2",
127 | "data-merge": "data-merge",
128 | "devshell": "devshell",
129 | "driver": "driver",
130 | "follower": "follower",
131 | "haskell-nix": "haskell-nix",
132 | "inclusive": "inclusive_3",
133 | "nix": "nix_2",
134 | "nix-cache-proxy": "nix-cache-proxy",
135 | "nixpkgs": "nixpkgs_6",
136 | "poetry2nix": "poetry2nix",
137 | "utils": "utils_4"
138 | },
139 | "locked": {
140 | "lastModified": 1648123211,
141 | "narHash": "sha256-J7MG4XImsPU5cKwk+emxwx0EOYR4s2d+1JwSqUvImmg=",
142 | "owner": "input-output-hk",
143 | "repo": "cicero",
144 | "rev": "6a79a14cdb348362b30c8cd9e8a2f6471294103c",
145 | "type": "github"
146 | },
147 | "original": {
148 | "owner": "input-output-hk",
149 | "repo": "cicero",
150 | "type": "github"
151 | }
152 | },
153 | "data-merge": {
154 | "inputs": {
155 | "nixlib": "nixlib"
156 | },
157 | "locked": {
158 | "lastModified": 1635967744,
159 | "narHash": "sha256-01065dNad3BIepNzrpYuYInxq/ynqtGMSsIiNqjND7E=",
160 | "owner": "divnix",
161 | "repo": "data-merge",
162 | "rev": "68bd71f980f75cf73bc5071982eddfe6bc089768",
163 | "type": "github"
164 | },
165 | "original": {
166 | "owner": "divnix",
167 | "repo": "data-merge",
168 | "type": "github"
169 | }
170 | },
171 | "devshell": {
172 | "inputs": {
173 | "flake-utils": "flake-utils",
174 | "nixpkgs": "nixpkgs_3"
175 | },
176 | "locked": {
177 | "lastModified": 1644227066,
178 | "narHash": "sha256-FHcFZtpZEWnUh62xlyY3jfXAXHzJNEDLDzLsJxn+ve0=",
179 | "owner": "numtide",
180 | "repo": "devshell",
181 | "rev": "7033f64dd9ef8d9d8644c5030c73913351d2b660",
182 | "type": "github"
183 | },
184 | "original": {
185 | "owner": "numtide",
186 | "repo": "devshell",
187 | "type": "github"
188 | }
189 | },
190 | "devshell_2": {
191 | "locked": {
192 | "lastModified": 1632436039,
193 | "narHash": "sha256-OtITeVWcKXn1SpVEnImpTGH91FycCskGBPqmlxiykv4=",
194 | "owner": "numtide",
195 | "repo": "devshell",
196 | "rev": "7a7a7aa0adebe5488e5abaec688fd9ae0f8ea9c6",
197 | "type": "github"
198 | },
199 | "original": {
200 | "owner": "numtide",
201 | "repo": "devshell",
202 | "type": "github"
203 | }
204 | },
205 | "devshell_3": {
206 | "locked": {
207 | "lastModified": 1636119665,
208 | "narHash": "sha256-e11Z9PyH9hdgTm4Vyl8S5iTwrv0um6+srzb1Ba+YUHA=",
209 | "owner": "numtide",
210 | "repo": "devshell",
211 | "rev": "ab14b1a3cb253f58e02f5f849d621292fbf81fad",
212 | "type": "github"
213 | },
214 | "original": {
215 | "owner": "numtide",
216 | "repo": "devshell",
217 | "type": "github"
218 | }
219 | },
220 | "devshell_4": {
221 | "locked": {
222 | "lastModified": 1636119665,
223 | "narHash": "sha256-e11Z9PyH9hdgTm4Vyl8S5iTwrv0um6+srzb1Ba+YUHA=",
224 | "owner": "numtide",
225 | "repo": "devshell",
226 | "rev": "ab14b1a3cb253f58e02f5f849d621292fbf81fad",
227 | "type": "github"
228 | },
229 | "original": {
230 | "owner": "numtide",
231 | "repo": "devshell",
232 | "type": "github"
233 | }
234 | },
235 | "devshell_5": {
236 | "locked": {
237 | "lastModified": 1636119665,
238 | "narHash": "sha256-e11Z9PyH9hdgTm4Vyl8S5iTwrv0um6+srzb1Ba+YUHA=",
239 | "owner": "numtide",
240 | "repo": "devshell",
241 | "rev": "ab14b1a3cb253f58e02f5f849d621292fbf81fad",
242 | "type": "github"
243 | },
244 | "original": {
245 | "owner": "numtide",
246 | "repo": "devshell",
247 | "type": "github"
248 | }
249 | },
250 | "driver": {
251 | "inputs": {
252 | "devshell": "devshell_2",
253 | "inclusive": "inclusive",
254 | "nix": "nix",
255 | "nixpkgs": [
256 | "cicero",
257 | "nixpkgs"
258 | ],
259 | "utils": "utils"
260 | },
261 | "locked": {
262 | "lastModified": 1644418487,
263 | "narHash": "sha256-nzFmmBYjNjWVy25bHLLmZECfwJm3nxcAr/mYVYxWggA=",
264 | "owner": "input-output-hk",
265 | "repo": "nomad-driver-nix",
266 | "rev": "7f7adb6814b4bf926597e4b810b803140176122c",
267 | "type": "github"
268 | },
269 | "original": {
270 | "owner": "input-output-hk",
271 | "repo": "nomad-driver-nix",
272 | "type": "github"
273 | }
274 | },
275 | "flake-utils": {
276 | "locked": {
277 | "lastModified": 1642700792,
278 | "narHash": "sha256-XqHrk7hFb+zBvRg6Ghl+AZDq03ov6OshJLiSWOoX5es=",
279 | "owner": "numtide",
280 | "repo": "flake-utils",
281 | "rev": "846b2ae0fc4cc943637d3d1def4454213e203cba",
282 | "type": "github"
283 | },
284 | "original": {
285 | "owner": "numtide",
286 | "repo": "flake-utils",
287 | "type": "github"
288 | }
289 | },
290 | "flake-utils_2": {
291 | "locked": {
292 | "lastModified": 1644229661,
293 | "narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=",
294 | "owner": "numtide",
295 | "repo": "flake-utils",
296 | "rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797",
297 | "type": "github"
298 | },
299 | "original": {
300 | "owner": "numtide",
301 | "repo": "flake-utils",
302 | "type": "github"
303 | }
304 | },
305 | "flake-utils_3": {
306 | "locked": {
307 | "lastModified": 1610051610,
308 | "narHash": "sha256-U9rPz/usA1/Aohhk7Cmc2gBrEEKRzcW4nwPWMPwja4Y=",
309 | "owner": "numtide",
310 | "repo": "flake-utils",
311 | "rev": "3982c9903e93927c2164caa727cd3f6a0e6d14cc",
312 | "type": "github"
313 | },
314 | "original": {
315 | "owner": "numtide",
316 | "repo": "flake-utils",
317 | "type": "github"
318 | }
319 | },
320 | "flake-utils_4": {
321 | "locked": {
322 | "lastModified": 1638122382,
323 | "narHash": "sha256-sQzZzAbvKEqN9s0bzWuYmRaA03v40gaJ4+iL1LXjaeI=",
324 | "owner": "numtide",
325 | "repo": "flake-utils",
326 | "rev": "74f7e4319258e287b0f9cb95426c9853b282730b",
327 | "type": "github"
328 | },
329 | "original": {
330 | "owner": "numtide",
331 | "repo": "flake-utils",
332 | "type": "github"
333 | }
334 | },
335 | "flakeCompat": {
336 | "flake": false,
337 | "locked": {
338 | "lastModified": 1641205782,
339 | "narHash": "sha256-4jY7RCWUoZ9cKD8co0/4tFARpWB+57+r1bLLvXNJliY=",
340 | "owner": "edolstra",
341 | "repo": "flake-compat",
342 | "rev": "b7547d3eed6f32d06102ead8991ec52ab0a4f1a7",
343 | "type": "github"
344 | },
345 | "original": {
346 | "owner": "edolstra",
347 | "repo": "flake-compat",
348 | "type": "github"
349 | }
350 | },
351 | "flakeCompat_2": {
352 | "flake": false,
353 | "locked": {
354 | "lastModified": 1641205782,
355 | "narHash": "sha256-4jY7RCWUoZ9cKD8co0/4tFARpWB+57+r1bLLvXNJliY=",
356 | "owner": "edolstra",
357 | "repo": "flake-compat",
358 | "rev": "b7547d3eed6f32d06102ead8991ec52ab0a4f1a7",
359 | "type": "github"
360 | },
361 | "original": {
362 | "owner": "edolstra",
363 | "repo": "flake-compat",
364 | "type": "github"
365 | }
366 | },
367 | "follower": {
368 | "inputs": {
369 | "devshell": "devshell_3",
370 | "inclusive": "inclusive_2",
371 | "nixpkgs": [
372 | "cicero",
373 | "nixpkgs"
374 | ],
375 | "utils": "utils_2"
376 | },
377 | "locked": {
378 | "lastModified": 1642008295,
379 | "narHash": "sha256-yx3lLN/hlvEeKItHJ5jH0KSm84IruTWMo78IItVPji4=",
380 | "owner": "input-output-hk",
381 | "repo": "nomad-follower",
382 | "rev": "b1b0b00e940026f72d16bdf13e36ad20f1826e8a",
383 | "type": "github"
384 | },
385 | "original": {
386 | "owner": "input-output-hk",
387 | "repo": "nomad-follower",
388 | "type": "github"
389 | }
390 | },
391 | "ghc-8.6.5-iohk": {
392 | "flake": false,
393 | "locked": {
394 | "lastModified": 1600920045,
395 | "narHash": "sha256-DO6kxJz248djebZLpSzTGD6s8WRpNI9BTwUeOf5RwY8=",
396 | "owner": "input-output-hk",
397 | "repo": "ghc",
398 | "rev": "95713a6ecce4551240da7c96b6176f980af75cae",
399 | "type": "github"
400 | },
401 | "original": {
402 | "owner": "input-output-hk",
403 | "ref": "release/8.6.5-iohk",
404 | "repo": "ghc",
405 | "type": "github"
406 | }
407 | },
408 | "hackage": {
409 | "flake": false,
410 | "locked": {
411 | "lastModified": 1646097829,
412 | "narHash": "sha256-PcHDDV8NuUxZhPV/p++IkZC+SDZ1Db7m7K+9HN4/0S4=",
413 | "owner": "input-output-hk",
414 | "repo": "hackage.nix",
415 | "rev": "283f096976b48e54183905e7bdde7f213c6ee5cd",
416 | "type": "github"
417 | },
418 | "original": {
419 | "owner": "input-output-hk",
420 | "repo": "hackage.nix",
421 | "type": "github"
422 | }
423 | },
424 | "haskell-nix": {
425 | "inputs": {
426 | "HTTP": "HTTP",
427 | "cabal-32": "cabal-32",
428 | "cabal-34": "cabal-34",
429 | "cabal-36": "cabal-36",
430 | "cardano-shell": "cardano-shell",
431 | "flake-utils": "flake-utils_2",
432 | "ghc-8.6.5-iohk": "ghc-8.6.5-iohk",
433 | "hackage": "hackage",
434 | "hpc-coveralls": "hpc-coveralls",
435 | "nix-tools": "nix-tools",
436 | "nixpkgs": [
437 | "cicero",
438 | "haskell-nix",
439 | "nixpkgs-unstable"
440 | ],
441 | "nixpkgs-2003": "nixpkgs-2003",
442 | "nixpkgs-2105": "nixpkgs-2105",
443 | "nixpkgs-2111": "nixpkgs-2111",
444 | "nixpkgs-unstable": [
445 | "cicero",
446 | "nixpkgs"
447 | ],
448 | "old-ghc-nix": "old-ghc-nix",
449 | "stackage": "stackage"
450 | },
451 | "locked": {
452 | "lastModified": 1646097976,
453 | "narHash": "sha256-EiyrBqayw67dw8pr1XCVU9tIZ+/jzXCQycW1S9a+KFA=",
454 | "owner": "input-output-hk",
455 | "repo": "haskell.nix",
456 | "rev": "f0308ed1df3ce9f10f9da1a7c0c8591921d0b4e5",
457 | "type": "github"
458 | },
459 | "original": {
460 | "owner": "input-output-hk",
461 | "repo": "haskell.nix",
462 | "type": "github"
463 | }
464 | },
465 | "hpc-coveralls": {
466 | "flake": false,
467 | "locked": {
468 | "lastModified": 1607498076,
469 | "narHash": "sha256-8uqsEtivphgZWYeUo5RDUhp6bO9j2vaaProQxHBltQk=",
470 | "owner": "sevanspowell",
471 | "repo": "hpc-coveralls",
472 | "rev": "14df0f7d229f4cd2e79f8eabb1a740097fdfa430",
473 | "type": "github"
474 | },
475 | "original": {
476 | "owner": "sevanspowell",
477 | "repo": "hpc-coveralls",
478 | "type": "github"
479 | }
480 | },
481 | "inclusive": {
482 | "inputs": {
483 | "stdlib": "stdlib"
484 | },
485 | "locked": {
486 | "lastModified": 1628098927,
487 | "narHash": "sha256-Ft4sdf7VPL8MQtu18AAPiN2s5pUsbv+3RxqzJSa/yzg=",
488 | "owner": "input-output-hk",
489 | "repo": "nix-inclusive",
490 | "rev": "13123eb7a8c3359738a4756b8d645729e8655b27",
491 | "type": "github"
492 | },
493 | "original": {
494 | "owner": "input-output-hk",
495 | "repo": "nix-inclusive",
496 | "type": "github"
497 | }
498 | },
499 | "inclusive_2": {
500 | "inputs": {
501 | "stdlib": "stdlib_2"
502 | },
503 | "locked": {
504 | "lastModified": 1628098927,
505 | "narHash": "sha256-Ft4sdf7VPL8MQtu18AAPiN2s5pUsbv+3RxqzJSa/yzg=",
506 | "owner": "input-output-hk",
507 | "repo": "nix-inclusive",
508 | "rev": "13123eb7a8c3359738a4756b8d645729e8655b27",
509 | "type": "github"
510 | },
511 | "original": {
512 | "owner": "input-output-hk",
513 | "repo": "nix-inclusive",
514 | "type": "github"
515 | }
516 | },
517 | "inclusive_3": {
518 | "inputs": {
519 | "stdlib": "stdlib_3"
520 | },
521 | "locked": {
522 | "lastModified": 1628098927,
523 | "narHash": "sha256-Ft4sdf7VPL8MQtu18AAPiN2s5pUsbv+3RxqzJSa/yzg=",
524 | "owner": "input-output-hk",
525 | "repo": "nix-inclusive",
526 | "rev": "13123eb7a8c3359738a4756b8d645729e8655b27",
527 | "type": "github"
528 | },
529 | "original": {
530 | "owner": "input-output-hk",
531 | "repo": "nix-inclusive",
532 | "type": "github"
533 | }
534 | },
535 | "inclusive_4": {
536 | "inputs": {
537 | "stdlib": "stdlib_4"
538 | },
539 | "locked": {
540 | "lastModified": 1628098927,
541 | "narHash": "sha256-Ft4sdf7VPL8MQtu18AAPiN2s5pUsbv+3RxqzJSa/yzg=",
542 | "owner": "input-output-hk",
543 | "repo": "nix-inclusive",
544 | "rev": "13123eb7a8c3359738a4756b8d645729e8655b27",
545 | "type": "github"
546 | },
547 | "original": {
548 | "owner": "input-output-hk",
549 | "repo": "nix-inclusive",
550 | "type": "github"
551 | }
552 | },
553 | "lowdown-src": {
554 | "flake": false,
555 | "locked": {
556 | "lastModified": 1633514407,
557 | "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
558 | "owner": "kristapsdz",
559 | "repo": "lowdown",
560 | "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
561 | "type": "github"
562 | },
563 | "original": {
564 | "owner": "kristapsdz",
565 | "repo": "lowdown",
566 | "type": "github"
567 | }
568 | },
569 | "lowdown-src_2": {
570 | "flake": false,
571 | "locked": {
572 | "lastModified": 1633514407,
573 | "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
574 | "owner": "kristapsdz",
575 | "repo": "lowdown",
576 | "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
577 | "type": "github"
578 | },
579 | "original": {
580 | "owner": "kristapsdz",
581 | "repo": "lowdown",
582 | "type": "github"
583 | }
584 | },
585 | "n2c": {
586 | "inputs": {
587 | "flake-utils": "flake-utils_4",
588 | "nixpkgs": "nixpkgs_7"
589 | },
590 | "locked": {
591 | "lastModified": 1648247166,
592 | "narHash": "sha256-zGbrJ5ddo+MvCHHuY2kGWi6N2676DkBjz3Qbu66LhHg=",
593 | "owner": "nlewo",
594 | "repo": "nix2container",
595 | "rev": "065e5b108650ee4c2dc80fe1f4952eff50922e6c",
596 | "type": "github"
597 | },
598 | "original": {
599 | "owner": "nlewo",
600 | "repo": "nix2container",
601 | "type": "github"
602 | }
603 | },
604 | "nix": {
605 | "inputs": {
606 | "lowdown-src": "lowdown-src",
607 | "nixpkgs": "nixpkgs_4",
608 | "nixpkgs-regression": "nixpkgs-regression"
609 | },
610 | "locked": {
611 | "lastModified": 1644413094,
612 | "narHash": "sha256-KLGaeSqvhuUFz6DxrB9r3w+lfp9bXIiCT9K1cqg7Ze8=",
613 | "owner": "nixos",
614 | "repo": "nix",
615 | "rev": "52f52319ad21bdbd7a33bb85eccc83756648f110",
616 | "type": "github"
617 | },
618 | "original": {
619 | "owner": "nixos",
620 | "repo": "nix",
621 | "rev": "52f52319ad21bdbd7a33bb85eccc83756648f110",
622 | "type": "github"
623 | }
624 | },
625 | "nix-cache-proxy": {
626 | "inputs": {
627 | "devshell": "devshell_4",
628 | "inclusive": [
629 | "cicero",
630 | "inclusive"
631 | ],
632 | "nixpkgs": [
633 | "cicero",
634 | "nixpkgs"
635 | ],
636 | "utils": "utils_3"
637 | },
638 | "locked": {
639 | "lastModified": 1644317729,
640 | "narHash": "sha256-R9R1XHv69VvZ/c7lXYs18PHcnEBXS+hDfhjdkZ96lgw=",
641 | "owner": "input-output-hk",
642 | "repo": "nix-cache-proxy",
643 | "rev": "378617d6b9865be96f7dfa16e0ce3f329da844ec",
644 | "type": "github"
645 | },
646 | "original": {
647 | "owner": "input-output-hk",
648 | "repo": "nix-cache-proxy",
649 | "type": "github"
650 | }
651 | },
652 | "nix-tools": {
653 | "flake": false,
654 | "locked": {
655 | "lastModified": 1644395812,
656 | "narHash": "sha256-BVFk/BEsTLq5MMZvdy3ZYHKfaS3dHrsKh4+tb5t5b58=",
657 | "owner": "input-output-hk",
658 | "repo": "nix-tools",
659 | "rev": "d847c63b99bbec78bf83be2a61dc9f09b8a9ccc1",
660 | "type": "github"
661 | },
662 | "original": {
663 | "owner": "input-output-hk",
664 | "repo": "nix-tools",
665 | "type": "github"
666 | }
667 | },
668 | "nix_2": {
669 | "inputs": {
670 | "lowdown-src": "lowdown-src_2",
671 | "nixpkgs": "nixpkgs_5",
672 | "nixpkgs-regression": "nixpkgs-regression_2"
673 | },
674 | "locked": {
675 | "lastModified": 1645437800,
676 | "narHash": "sha256-MAMIKi3sIQ0b3jzYyOb5VY29GRgv7JXl1VXoUM9xUZw=",
677 | "owner": "NixOS",
678 | "repo": "nix",
679 | "rev": "f22b9e72f51f97f8f2d334748d3e97123940a146",
680 | "type": "github"
681 | },
682 | "original": {
683 | "owner": "NixOS",
684 | "repo": "nix",
685 | "rev": "f22b9e72f51f97f8f2d334748d3e97123940a146",
686 | "type": "github"
687 | }
688 | },
689 | "nixlib": {
690 | "locked": {
691 | "lastModified": 1644107864,
692 | "narHash": "sha256-Wrbt6Gs+hjXD3HUICPBJHKnHEUqiyx8rzHCgvqC1Bok=",
693 | "owner": "nix-community",
694 | "repo": "nixpkgs.lib",
695 | "rev": "58eabcf65e7dba189eb0013f86831c159e3b2be6",
696 | "type": "github"
697 | },
698 | "original": {
699 | "owner": "nix-community",
700 | "repo": "nixpkgs.lib",
701 | "type": "github"
702 | }
703 | },
704 | "nixpkgs": {
705 | "locked": {
706 | "lastModified": 1646506091,
707 | "narHash": "sha256-sWNAJE2m+HOh1jtXlHcnhxsj6/sXrHgbqVNcVRlveK4=",
708 | "owner": "nixos",
709 | "repo": "nixpkgs",
710 | "rev": "3e644bd62489b516292c816f70bf0052c693b3c7",
711 | "type": "github"
712 | },
713 | "original": {
714 | "owner": "nixos",
715 | "ref": "nixpkgs-unstable",
716 | "repo": "nixpkgs",
717 | "type": "github"
718 | }
719 | },
720 | "nixpkgs-2003": {
721 | "locked": {
722 | "lastModified": 1620055814,
723 | "narHash": "sha256-8LEHoYSJiL901bTMVatq+rf8y7QtWuZhwwpKE2fyaRY=",
724 | "owner": "NixOS",
725 | "repo": "nixpkgs",
726 | "rev": "1db42b7fe3878f3f5f7a4f2dc210772fd080e205",
727 | "type": "github"
728 | },
729 | "original": {
730 | "owner": "NixOS",
731 | "ref": "nixpkgs-20.03-darwin",
732 | "repo": "nixpkgs",
733 | "type": "github"
734 | }
735 | },
736 | "nixpkgs-2105": {
737 | "locked": {
738 | "lastModified": 1642244250,
739 | "narHash": "sha256-vWpUEqQdVP4srj+/YLJRTN9vjpTs4je0cdWKXPbDItc=",
740 | "owner": "NixOS",
741 | "repo": "nixpkgs",
742 | "rev": "0fd9ee1aa36ce865ad273f4f07fdc093adeb5c00",
743 | "type": "github"
744 | },
745 | "original": {
746 | "owner": "NixOS",
747 | "ref": "nixpkgs-21.05-darwin",
748 | "repo": "nixpkgs",
749 | "type": "github"
750 | }
751 | },
752 | "nixpkgs-2111": {
753 | "locked": {
754 | "lastModified": 1644510859,
755 | "narHash": "sha256-xjpVvL5ecbyi0vxtVl/Fh9bwGlMbw3S06zE5nUzFB8A=",
756 | "owner": "NixOS",
757 | "repo": "nixpkgs",
758 | "rev": "0d1d5d7e3679fec9d07f2eb804d9f9fdb98378d3",
759 | "type": "github"
760 | },
761 | "original": {
762 | "owner": "NixOS",
763 | "ref": "nixpkgs-21.11-darwin",
764 | "repo": "nixpkgs",
765 | "type": "github"
766 | }
767 | },
768 | "nixpkgs-regression": {
769 | "locked": {
770 | "lastModified": 1643052045,
771 | "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
772 | "owner": "NixOS",
773 | "repo": "nixpkgs",
774 | "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
775 | "type": "github"
776 | },
777 | "original": {
778 | "id": "nixpkgs",
779 | "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
780 | "type": "indirect"
781 | }
782 | },
783 | "nixpkgs-regression_2": {
784 | "locked": {
785 | "lastModified": 1643052045,
786 | "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
787 | "owner": "NixOS",
788 | "repo": "nixpkgs",
789 | "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
790 | "type": "github"
791 | },
792 | "original": {
793 | "id": "nixpkgs",
794 | "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
795 | "type": "indirect"
796 | }
797 | },
798 | "nixpkgs_2": {
799 | "locked": {
800 | "lastModified": 1646331602,
801 | "narHash": "sha256-cRuytTfel52z947yKfJcZU7zbQBgM16qqTf+oJkVwtg=",
802 | "owner": "nixos",
803 | "repo": "nixpkgs",
804 | "rev": "ad267cc9cf3d5a6ae63940df31eb31382d6356e6",
805 | "type": "github"
806 | },
807 | "original": {
808 | "owner": "nixos",
809 | "ref": "nixpkgs-unstable",
810 | "repo": "nixpkgs",
811 | "type": "github"
812 | }
813 | },
814 | "nixpkgs_3": {
815 | "locked": {
816 | "lastModified": 1643381941,
817 | "narHash": "sha256-pHTwvnN4tTsEKkWlXQ8JMY423epos8wUOhthpwJjtpc=",
818 | "owner": "NixOS",
819 | "repo": "nixpkgs",
820 | "rev": "5efc8ca954272c4376ac929f4c5ffefcc20551d5",
821 | "type": "github"
822 | },
823 | "original": {
824 | "owner": "NixOS",
825 | "ref": "nixpkgs-unstable",
826 | "repo": "nixpkgs",
827 | "type": "github"
828 | }
829 | },
830 | "nixpkgs_4": {
831 | "locked": {
832 | "lastModified": 1632864508,
833 | "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=",
834 | "owner": "NixOS",
835 | "repo": "nixpkgs",
836 | "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234",
837 | "type": "github"
838 | },
839 | "original": {
840 | "id": "nixpkgs",
841 | "ref": "nixos-21.05-small",
842 | "type": "indirect"
843 | }
844 | },
845 | "nixpkgs_5": {
846 | "locked": {
847 | "lastModified": 1632864508,
848 | "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=",
849 | "owner": "NixOS",
850 | "repo": "nixpkgs",
851 | "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234",
852 | "type": "github"
853 | },
854 | "original": {
855 | "id": "nixpkgs",
856 | "ref": "nixos-21.05-small",
857 | "type": "indirect"
858 | }
859 | },
860 | "nixpkgs_6": {
861 | "locked": {
862 | "lastModified": 1644486793,
863 | "narHash": "sha256-EeijR4guVHgVv+JpOX3cQO+1XdrkJfGmiJ9XVsVU530=",
864 | "owner": "NixOS",
865 | "repo": "nixpkgs",
866 | "rev": "1882c6b7368fd284ad01b0a5b5601ef136321292",
867 | "type": "github"
868 | },
869 | "original": {
870 | "owner": "NixOS",
871 | "ref": "nixpkgs-unstable",
872 | "repo": "nixpkgs",
873 | "type": "github"
874 | }
875 | },
876 | "nixpkgs_7": {
877 | "locked": {
878 | "lastModified": 1642451377,
879 | "narHash": "sha256-hvAuYDUN8XIrcQKE6wDw4LjTCcwrTp2B1i1i/5vfDMQ=",
880 | "owner": "NixOS",
881 | "repo": "nixpkgs",
882 | "rev": "e5b47c5c21336e3fdd887d24c7e34363fa09c6d7",
883 | "type": "github"
884 | },
885 | "original": {
886 | "owner": "NixOS",
887 | "repo": "nixpkgs",
888 | "type": "github"
889 | }
890 | },
891 | "nixpkgs_8": {
892 | "locked": {
893 | "lastModified": 1648219316,
894 | "narHash": "sha256-Ctij+dOi0ZZIfX5eMhgwugfvB+WZSrvVNAyAuANOsnQ=",
895 | "owner": "nixos",
896 | "repo": "nixpkgs",
897 | "rev": "30d3d79b7d3607d56546dd2a6b49e156ba0ec634",
898 | "type": "github"
899 | },
900 | "original": {
901 | "owner": "nixos",
902 | "ref": "nixpkgs-unstable",
903 | "repo": "nixpkgs",
904 | "type": "github"
905 | }
906 | },
907 | "old-ghc-nix": {
908 | "flake": false,
909 | "locked": {
910 | "lastModified": 1631092763,
911 | "narHash": "sha256-sIKgO+z7tj4lw3u6oBZxqIhDrzSkvpHtv0Kki+lh9Fg=",
912 | "owner": "angerman",
913 | "repo": "old-ghc-nix",
914 | "rev": "af48a7a7353e418119b6dfe3cd1463a657f342b8",
915 | "type": "github"
916 | },
917 | "original": {
918 | "owner": "angerman",
919 | "ref": "master",
920 | "repo": "old-ghc-nix",
921 | "type": "github"
922 | }
923 | },
924 | "poetry2nix": {
925 | "inputs": {
926 | "flake-utils": "flake-utils_3",
927 | "nixpkgs": [
928 | "cicero",
929 | "nixpkgs"
930 | ]
931 | },
932 | "locked": {
933 | "lastModified": 1641849362,
934 | "narHash": "sha256-1K3NOM0ZoFRVxU3HJ2G8CMZEtyRn0RpuUjsws7jKsds=",
935 | "owner": "nix-community",
936 | "repo": "poetry2nix",
937 | "rev": "6b063a31bc8fea6c1d9fdc47e9078772b0ba283b",
938 | "type": "github"
939 | },
940 | "original": {
941 | "owner": "nix-community",
942 | "ref": "fetched-projectdir-test",
943 | "repo": "poetry2nix",
944 | "type": "github"
945 | }
946 | },
947 | "root": {
948 | "inputs": {
949 | "alejandra": "alejandra",
950 | "cicero": "cicero",
951 | "devshell": "devshell_5",
952 | "inclusive": "inclusive_4",
953 | "n2c": "n2c",
954 | "nixpkgs": "nixpkgs_8",
955 | "utils": "utils_5"
956 | }
957 | },
958 | "stackage": {
959 | "flake": false,
960 | "locked": {
961 | "lastModified": 1646010978,
962 | "narHash": "sha256-NpioQiTXyYm+Gm111kcDEE/ghflmnTNwPhWff54GYA4=",
963 | "owner": "input-output-hk",
964 | "repo": "stackage.nix",
965 | "rev": "9cce3e0d420f6c38cdbbe1c5e5bbc07fd2adfc3a",
966 | "type": "github"
967 | },
968 | "original": {
969 | "owner": "input-output-hk",
970 | "repo": "stackage.nix",
971 | "type": "github"
972 | }
973 | },
974 | "stdlib": {
975 | "locked": {
976 | "lastModified": 1590026685,
977 | "narHash": "sha256-E5INrVvYX/P/UpcoUFDAsuHem+lsqT+/teBs9O7oc9Q=",
978 | "owner": "manveru",
979 | "repo": "nix-lib",
980 | "rev": "99088cf7febcdb21afd375a335dcafa959bef3ed",
981 | "type": "github"
982 | },
983 | "original": {
984 | "owner": "manveru",
985 | "repo": "nix-lib",
986 | "type": "github"
987 | }
988 | },
989 | "stdlib_2": {
990 | "locked": {
991 | "lastModified": 1590026685,
992 | "narHash": "sha256-E5INrVvYX/P/UpcoUFDAsuHem+lsqT+/teBs9O7oc9Q=",
993 | "owner": "manveru",
994 | "repo": "nix-lib",
995 | "rev": "99088cf7febcdb21afd375a335dcafa959bef3ed",
996 | "type": "github"
997 | },
998 | "original": {
999 | "owner": "manveru",
1000 | "repo": "nix-lib",
1001 | "type": "github"
1002 | }
1003 | },
1004 | "stdlib_3": {
1005 | "locked": {
1006 | "lastModified": 1590026685,
1007 | "narHash": "sha256-E5INrVvYX/P/UpcoUFDAsuHem+lsqT+/teBs9O7oc9Q=",
1008 | "owner": "manveru",
1009 | "repo": "nix-lib",
1010 | "rev": "99088cf7febcdb21afd375a335dcafa959bef3ed",
1011 | "type": "github"
1012 | },
1013 | "original": {
1014 | "owner": "manveru",
1015 | "repo": "nix-lib",
1016 | "type": "github"
1017 | }
1018 | },
1019 | "stdlib_4": {
1020 | "locked": {
1021 | "lastModified": 1590026685,
1022 | "narHash": "sha256-E5INrVvYX/P/UpcoUFDAsuHem+lsqT+/teBs9O7oc9Q=",
1023 | "owner": "manveru",
1024 | "repo": "nix-lib",
1025 | "rev": "99088cf7febcdb21afd375a335dcafa959bef3ed",
1026 | "type": "github"
1027 | },
1028 | "original": {
1029 | "owner": "manveru",
1030 | "repo": "nix-lib",
1031 | "type": "github"
1032 | }
1033 | },
1034 | "utils": {
1035 | "locked": {
1036 | "lastModified": 1633020561,
1037 | "narHash": "sha256-4uAiRqL9nP3d/NQ8VBqjQ5iZypHaM+X/FyWpXVXkwTA=",
1038 | "owner": "kreisys",
1039 | "repo": "flake-utils",
1040 | "rev": "2923532a276a5595ee64376ec1b3db6ed8503c52",
1041 | "type": "github"
1042 | },
1043 | "original": {
1044 | "owner": "kreisys",
1045 | "repo": "flake-utils",
1046 | "type": "github"
1047 | }
1048 | },
1049 | "utils_2": {
1050 | "locked": {
1051 | "lastModified": 1633020561,
1052 | "narHash": "sha256-4uAiRqL9nP3d/NQ8VBqjQ5iZypHaM+X/FyWpXVXkwTA=",
1053 | "owner": "kreisys",
1054 | "repo": "flake-utils",
1055 | "rev": "2923532a276a5595ee64376ec1b3db6ed8503c52",
1056 | "type": "github"
1057 | },
1058 | "original": {
1059 | "owner": "kreisys",
1060 | "repo": "flake-utils",
1061 | "type": "github"
1062 | }
1063 | },
1064 | "utils_3": {
1065 | "locked": {
1066 | "lastModified": 1633020561,
1067 | "narHash": "sha256-4uAiRqL9nP3d/NQ8VBqjQ5iZypHaM+X/FyWpXVXkwTA=",
1068 | "owner": "kreisys",
1069 | "repo": "flake-utils",
1070 | "rev": "2923532a276a5595ee64376ec1b3db6ed8503c52",
1071 | "type": "github"
1072 | },
1073 | "original": {
1074 | "owner": "kreisys",
1075 | "repo": "flake-utils",
1076 | "type": "github"
1077 | }
1078 | },
1079 | "utils_4": {
1080 | "locked": {
1081 | "lastModified": 1644229661,
1082 | "narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=",
1083 | "owner": "numtide",
1084 | "repo": "flake-utils",
1085 | "rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797",
1086 | "type": "github"
1087 | },
1088 | "original": {
1089 | "owner": "numtide",
1090 | "repo": "flake-utils",
1091 | "type": "github"
1092 | }
1093 | },
1094 | "utils_5": {
1095 | "locked": {
1096 | "lastModified": 1633020561,
1097 | "narHash": "sha256-4uAiRqL9nP3d/NQ8VBqjQ5iZypHaM+X/FyWpXVXkwTA=",
1098 | "owner": "kreisys",
1099 | "repo": "flake-utils",
1100 | "rev": "2923532a276a5595ee64376ec1b3db6ed8503c52",
1101 | "type": "github"
1102 | },
1103 | "original": {
1104 | "owner": "kreisys",
1105 | "repo": "flake-utils",
1106 | "type": "github"
1107 | }
1108 | }
1109 | },
1110 | "root": "root",
1111 | "version": 7
1112 | }
1113 |
--------------------------------------------------------------------------------
/flake.nix:
--------------------------------------------------------------------------------
1 | {
2 | description = "Flake for spongix";
3 |
4 | inputs = {
5 | devshell.url = "github:numtide/devshell";
6 | inclusive.url = "github:input-output-hk/nix-inclusive";
7 | nixpkgs.url = "github:nixos/nixpkgs/nixpkgs-unstable";
8 | utils.url = "github:kreisys/flake-utils";
9 | cicero.url = "github:input-output-hk/cicero";
10 | n2c.url = "github:nlewo/nix2container";
11 | alejandra.url = "github:kamadorueda/alejandra";
12 | };
13 |
14 | outputs = {
15 | self,
16 | nixpkgs,
17 | utils,
18 | devshell,
19 | cicero,
20 | ...
21 | } @ inputs:
22 | utils.lib.simpleFlake {
23 | systems = ["x86_64-linux"];
24 | inherit nixpkgs;
25 |
26 | preOverlays = [devshell.overlay];
27 |
28 | overlay = final: prev: {
29 | go = prev.go_1_18;
30 | golangci-lint = prev.golangci-lint.override {buildGoModule = prev.buildGo118Module;};
31 | gotools = prev.gotools.override {buildGoModule = prev.buildGo118Module;};
32 | gocode = prev.gocode.override {buildGoPackage = prev.buildGo118Package;};
33 |
34 | alejandra = inputs.alejandra.defaultPackage.x86_64-linux;
35 | spongix = prev.callPackage ./package.nix {
36 | inherit (inputs.inclusive.lib) inclusive;
37 | rev = self.rev or "dirty";
38 | };
39 | };
40 |
41 | packages = {
42 | spongix,
43 | hello,
44 | cowsay,
45 | ponysay,
46 | lib,
47 | coreutils,
48 | bashInteractive,
49 | }: {
50 | inherit spongix;
51 | defaultPackage = spongix;
52 |
53 | oci-tiny = inputs.n2c.packages.x86_64-linux.nix2container.buildImage {
54 | name = "localhost:7777/spongix";
55 | tag = "v1";
56 | config = {
57 | Cmd = ["${ponysay}/bin/ponysay" "hi"];
58 | Env = [
59 | "PATH=${lib.makeBinPath [coreutils bashInteractive]}"
60 | ];
61 | };
62 | maxLayers = 128;
63 | };
64 |
65 | oci = inputs.n2c.packages.x86_64-linux.nix2container.buildImage {
66 | name = "localhost:7745/spongix";
67 | tag = spongix.version;
68 | config = {
69 | entrypoint = ["${spongix}/bin/spongix"];
70 | environment = ["CACHE_DIR=/cache"];
71 | };
72 | maxLayers = 250;
73 | };
74 | };
75 |
76 | hydraJobs = {
77 | spongix,
78 | callPackage,
79 | }: {
80 | inherit spongix;
81 | test = callPackage ./test.nix {inherit inputs;};
82 | };
83 |
84 | nixosModules.spongix = import ./module.nix;
85 |
86 | devShell = {devshell}: devshell.fromTOML ./devshell.toml;
87 |
88 | extraOutputs.ciceroActions =
89 | cicero.lib.callActionsWithExtraArgs rec {
90 | inherit (cicero.lib) std;
91 | inherit (nixpkgs) lib;
92 | actionLib = import "${cicero}/action-lib.nix" {inherit std lib;};
93 | }
94 | ./cicero/actions;
95 | };
96 | }
97 |
--------------------------------------------------------------------------------
/gc.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "io"
6 | "io/fs"
7 | "math"
8 | "os"
9 | "path/filepath"
10 | "runtime"
11 | "sort"
12 | "strings"
13 | "sync"
14 | "time"
15 |
16 | "github.com/folbricht/desync"
17 | "github.com/numtide/go-nix/nar"
18 | "github.com/pascaldekloe/metrics"
19 | "github.com/pkg/errors"
20 | "go.uber.org/zap"
21 | )
22 |
23 | var (
24 | metricChunkCount = metrics.MustInteger("spongix_chunk_count_local", "Number of chunks")
25 | metricChunkGcCount = metrics.MustCounter("spongix_chunk_gc_count_local", "Number of chunks deleted by GC")
26 | metricChunkGcSize = metrics.MustCounter("spongix_chunk_gc_bytes_local", "Size of chunks deleted by GC")
27 | metricChunkSize = metrics.MustInteger("spongix_chunk_size_local", "Size of the chunks in bytes")
28 | metricChunkWalk = metrics.MustCounter("spongix_chunk_walk_local", "Total time spent walking the cache in ms")
29 | metricChunkDirs = metrics.MustInteger("spongix_chunk_dir_count", "Number of directories the chunks are stored in")
30 |
31 | metricIndexCount = metrics.MustInteger("spongix_index_count_local", "Number of indices")
32 | metricIndexGcCount = metrics.MustCounter("spongix_index_gc_count_local", "Number of indices deleted by GC")
33 | metricIndexWalk = metrics.MustCounter("spongix_index_walk_local", "Total time spent walking the index in ms")
34 |
35 | 	metricInflated = metrics.MustInteger("spongix_inflated_size_local", "Size of the cache contents in bytes if they were inflated")
36 | 	metricMaxSize = metrics.MustInteger("spongix_max_size_local", "Limit for the local cache in bytes")
37 | 	metricGcTime = metrics.MustCounter("spongix_gc_time_local", "Total time spent in GC in ms")
38 | 	metricVerifyTime = metrics.MustCounter("spongix_verify_time_local", "Total time spent in verification in ms")
39 | )
40 |
41 | var yes = struct{}{}
42 |
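    | // measure runs f and adds its wall-clock duration in milliseconds to metric.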
43 | func measure(metric *metrics.Counter, f func()) {
44 | start := time.Now()
45 | f()
46 | metric.Add(uint64(time.Since(start).Milliseconds()))
47 | }
48 |
49 | func (proxy *Proxy) gc() {
50 | proxy.log.Debug("Initializing GC", zap.Duration("interval", proxy.GcInterval))
51 | cacheStat := map[string]*chunkStat{}
52 | measure(metricGcTime, func() { proxy.gcOnce(cacheStat) })
53 |
54 | ticker := time.NewTicker(proxy.GcInterval)
55 | for {
56 | <-ticker.C
57 | measure(metricGcTime, func() { proxy.gcOnce(cacheStat) })
58 | }
59 | }
60 |
61 | func (proxy *Proxy) verify() {
62 | proxy.log.Debug("Initializing Verifier", zap.Duration("interval", proxy.VerifyInterval))
63 | measure(metricVerifyTime, func() { proxy.verifyOnce() })
64 |
65 | ticker := time.NewTicker(proxy.VerifyInterval)
66 | for {
67 | <-ticker.C
68 | measure(metricVerifyTime, func() { proxy.verifyOnce() })
69 | }
70 | }
71 |
72 | func (proxy *Proxy) verifyOnce() {
73 | proxy.log.Info("store verify started")
74 | store := proxy.localStore.(desync.LocalStore)
75 | err := store.Verify(context.Background(), runtime.GOMAXPROCS(0), true, os.Stderr)
76 |
77 | if err != nil {
78 | proxy.log.Error("store verify failed", zap.Error(err))
79 | } else {
80 | proxy.log.Info("store verify completed")
81 | }
82 | }
83 |
84 | type chunkStat struct {
85 | id desync.ChunkID
86 | size int64
87 | mtime time.Time
88 | }
89 |
90 | type chunkLRU struct {
91 | live []*chunkStat
92 | liveSize uint64
93 | liveSizeMax uint64
94 | dead map[desync.ChunkID]struct{}
95 | deadSize uint64
96 | }
97 |
98 | func NewLRU(liveSizeMax uint64) *chunkLRU {
99 | return &chunkLRU{
100 | live: []*chunkStat{},
101 | liveSizeMax: liveSizeMax,
102 | dead: map[desync.ChunkID]struct{}{},
103 | }
104 | }
105 |
106 | func (l *chunkLRU) AddDead(stat *chunkStat) {
107 | l.dead[stat.id] = yes
108 | l.deadSize += uint64(stat.size)
109 | }
110 |
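    | // Add inserts the chunk stat into the live list, which is kept sorted by
    | // mtime (newest first), then evicts the oldest entries into the dead set
    | // until the total live size fits within liveSizeMax.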
111 | func (l *chunkLRU) Add(stat *chunkStat) {
112 | isOlder := func(i int) bool { return l.live[i].mtime.Before(stat.mtime) }
113 | i := sort.Search(len(l.live), isOlder)
114 | l.insertAt(i, stat)
115 | l.liveSize += uint64(stat.size)
116 | for l.liveSize > l.liveSizeMax {
117 | die := l.live[len(l.live)-1]
118 | l.dead[die.id] = yes
119 | l.live = l.live[:len(l.live)-1]
120 | l.deadSize += uint64(die.size)
121 | l.liveSize -= uint64(die.size)
122 | }
123 | }
124 |
125 | func (l *chunkLRU) insertAt(i int, v *chunkStat) {
126 | if i == len(l.live) {
127 | l.live = append(l.live, v)
128 | } else {
129 | l.live = append(l.live[:i+1], l.live[i:]...)
130 | l.live[i] = v
131 | }
132 | }
133 |
134 | func (l *chunkLRU) IsDead(id desync.ChunkID) bool {
135 | _, found := l.dead[id]
136 | return found
137 | }
138 |
139 | func (l *chunkLRU) Dead() map[desync.ChunkID]struct{} {
140 | return l.dead
141 | }
142 |
143 | // We assume every directory requires 4KB (one block). desync stores chunks
144 | // in directories with a 4-hex-digit prefix, so we reserve at least that
145 | // much space: 0xffff * 4096 bytes, roughly 256MiB.
146 | const maxCacheDirPortion = 0xffff * 4096
147 |
148 | type integrityCheck struct {
149 | path string
150 | index desync.Index
151 | }
152 |
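    | // checkNarContents assembles the NAR from its chunks and walks every
    | // entry, returning an error for an empty or unreadable archive.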
153 | func checkNarContents(store desync.Store, idx desync.Index) error {
154 | buf := newAssembler(store, idx)
155 | narRd := nar.NewReader(buf)
156 | none := true
157 | for {
158 | if _, err := narRd.Next(); err == nil {
159 | none = false
160 | } else if err == io.EOF {
161 | break
162 | } else {
163 | return err
164 | }
165 | }
166 |
167 | if none {
168 | return errors.New("no contents in NAR")
169 | }
170 |
171 | return nil
172 | }
173 |
174 | /*
175 | Local GC strategies:
176 | Check every index file:
177 | If chunks are missing, delete it.
178 | If it is not referenced by the database anymore, delete it.
179 | Check every narinfo in the database:
180 | If index is missing, delete it.
181 | If last access is too old, delete it.
182 | */
183 | func (proxy *Proxy) gcOnce(cacheStat map[string]*chunkStat) {
184 | maxCacheSize := (uint64(math.Pow(2, 30)) * proxy.CacheSize) - maxCacheDirPortion
185 | store := proxy.localStore.(desync.LocalStore)
186 | indices := proxy.localIndex.(desync.LocalIndexStore)
187 | lru := NewLRU(maxCacheSize)
188 | walkStoreStart := time.Now()
189 | chunkDirs := int64(0)
190 |
191 | metricMaxSize.Set(int64(maxCacheSize))
192 |
193 | // filepath.Walk is faster for our use case because we need the stat result anyway.
194 | walkStoreErr := filepath.Walk(store.Base, func(path string, info fs.FileInfo, err error) error {
195 | if err != nil {
196 | if os.IsNotExist(err) {
197 | return nil
198 | } else {
199 | return err
200 | }
201 | }
202 |
203 | if info.IsDir() {
204 | chunkDirs++
205 | return nil
206 | }
207 |
208 | name := info.Name()
209 | if strings.HasPrefix(name, ".tmp") {
210 | return nil
211 | }
212 |
213 | ext := filepath.Ext(name)
214 | if ext != desync.CompressedChunkExt {
215 | return nil
216 | }
217 |
218 | idstr := name[0 : len(name)-len(ext)]
219 |
220 | id, err := desync.ChunkIDFromString(idstr)
221 | if err != nil {
222 | return err
223 | }
224 |
225 | stat := &chunkStat{id: id, size: info.Size(), mtime: info.ModTime()}
226 |
227 | if _, err := store.GetChunk(id); err != nil {
228 | proxy.log.Error("getting chunk", zap.Error(err), zap.String("chunk", id.String()))
229 | lru.AddDead(stat)
230 | } else {
231 | lru.Add(stat)
232 | }
233 |
234 | return nil
235 | })
236 |
237 | metricChunkWalk.Add(uint64(time.Since(walkStoreStart).Milliseconds()))
238 | metricChunkDirs.Set(chunkDirs)
239 |
240 | if walkStoreErr != nil {
241 | proxy.log.Error("While walking store", zap.Error(walkStoreErr))
242 | return
243 | }
244 |
245 | metricChunkCount.Set(int64(len(lru.live)))
246 | metricChunkGcCount.Add(uint64(len(lru.dead)))
247 | metricChunkGcSize.Add(lru.deadSize)
248 | metricChunkSize.Set(int64(lru.liveSize))
249 |
250 | deadIndices := &sync.Map{}
251 | walkIndicesStart := time.Now()
252 | indicesCount := int64(0)
253 | inflatedSize := int64(0)
    | // Files modified after this cutoff are not treated as old; they may
    | // still be in the process of being written.
254 | ignoreBeforeTime := time.Now().Add(-10 * time.Minute)
255 |
256 | integrity := make(chan integrityCheck)
257 | wg := &sync.WaitGroup{}
258 |
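    | // Start three integrity workers; each exits after one second without
    | // work, which winds the pool down once the index walk below stops
    | // feeding the channel.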
259 | for i := 0; i < 3; i++ {
260 | wg.Add(1)
261 |
262 | go func(n int) {
263 | defer wg.Done()
264 |
265 | for {
266 | select {
267 | case <-time.After(1 * time.Second):
268 | return
269 | case check := <-integrity:
270 | switch filepath.Ext(check.path) {
271 | case ".nar":
272 | if err := checkNarContents(store, check.index); err != nil {
273 | proxy.log.Error("checking NAR contents", zap.Error(err), zap.String("path", check.path))
274 | deadIndices.Store(check.path, yes)
275 | continue
276 | }
277 | case ".narinfo":
278 | if _, err := assembleNarinfo(store, check.index); err != nil {
279 | proxy.log.Error("checking narinfo", zap.Error(err), zap.String("path", check.path))
280 | deadIndices.Store(check.path, yes)
281 | }
282 | }
283 | }
284 | }
285 | }(i)
286 | }
287 |
288 | walkIndicesErr := filepath.Walk(indices.Path, func(path string, info fs.FileInfo, err error) error {
289 | if err != nil || info.IsDir() {
290 | return err
291 | }
292 |
293 | isOld := info.ModTime().Before(ignoreBeforeTime)
294 |
295 | ext := filepath.Ext(path)
296 | isNar := ext == ".nar"
297 | isNarinfo := ext == ".narinfo"
298 |
299 | if !(isNar || isNarinfo || isOld) {
300 | return nil
301 | }
302 |
303 | name := path[len(indices.Path):]
304 |
305 | index, err := indices.GetIndex(name)
306 | if err != nil {
307 | return errors.WithMessagef(err, "while getting index %s", name)
308 | }
309 |
310 | integrity <- integrityCheck{path: path, index: index}
311 |
312 | inflatedSize += index.Length()
313 | indicesCount++
314 |
315 | if len(index.Chunks) == 0 {
316 | proxy.log.Debug("index chunks are empty", zap.String("path", path))
317 | deadIndices.Store(path, yes)
318 | } else {
319 | for _, indexChunk := range index.Chunks {
320 | if lru.IsDead(indexChunk.ID) {
321 | proxy.log.Debug("some chunks are dead", zap.String("path", path))
322 | deadIndices.Store(path, yes)
323 | break
324 | }
325 | }
326 | }
327 |
328 | return nil
329 | })
330 |
331 | wg.Wait()
332 | close(integrity)
333 |
334 | metricIndexCount.Set(indicesCount)
335 | metricIndexWalk.Add(uint64(time.Since(walkIndicesStart).Milliseconds()))
336 | metricInflated.Set(inflatedSize)
337 |
338 | if walkIndicesErr != nil {
339 | proxy.log.Error("While walking index", zap.Error(walkIndicesErr))
340 | return
341 | }
342 | deadIndexCount := uint64(0)
343 | // time.Sleep(10 * time.Minute)
344 | deadIndices.Range(func(key, value interface{}) bool {
345 | path := key.(string)
346 | proxy.log.Debug("removing dead index", zap.String("path", path))
347 | _ = os.Remove(path)
348 | deadIndexCount++
349 | return true
350 | })
351 |
352 | metricIndexGcCount.Add(deadIndexCount)
353 |
354 | // we don't use store.Prune because it does another filepath.Walk and
355 | // offers no added benefit for us.
356 |
357 | for id := range lru.Dead() {
358 | if err := store.RemoveChunk(id); err != nil {
359 | proxy.log.Error("Removing chunk", zap.Error(err), zap.String("id", id.String()))
360 | }
361 | }
362 |
363 | proxy.log.Debug(
364 | "GC stats",
365 | zap.Uint64("live_bytes", lru.liveSize),
366 | zap.Uint64("live_max_bytes", lru.liveSizeMax),
367 | zap.Int("live_chunk_count", len(lru.live)),
368 | zap.Uint64("dead_bytes", lru.deadSize),
369 | zap.Int("dead_chunk_count", len(lru.dead)),
370 | zap.Uint64("dead_index_count", deadIndexCount),
371 | zap.Duration("walk_indices_time", time.Since(walkIndicesStart)),
372 | )
373 | }
374 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/input-output-hk/spongix
2 |
3 | go 1.18
4 |
5 | require (
6 | github.com/alexflint/go-arg v1.4.2
7 | github.com/folbricht/desync v0.9.2
8 | github.com/gorilla/handlers v1.5.1
9 | github.com/gorilla/mux v1.8.0
10 | github.com/hashicorp/go-uuid v1.0.1
11 | github.com/jamespfennell/xz v0.1.3-0.20210418231708-010343b46672
12 | github.com/kr/pretty v0.3.0
13 | github.com/minio/minio-go/v6 v6.0.57
14 | github.com/numtide/go-nix v0.0.0-20211215191921-37a8ad2f9e4f
15 | github.com/pascaldekloe/metrics v1.3.0
16 | github.com/pkg/errors v0.9.1
17 | github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d
18 | github.com/steinfletcher/apitest v1.5.11
19 | go.uber.org/zap v1.10.0
20 | )
21 |
22 | require (
23 | cloud.google.com/go v0.72.0 // indirect
24 | cloud.google.com/go/storage v1.12.0 // indirect
25 | github.com/DataDog/zstd v1.4.5 // indirect
26 | github.com/alexflint/go-scalar v1.0.0 // indirect
27 | github.com/boljen/go-bitmap v0.0.0-20151001105940-23cd2fb0ce7d // indirect
28 | github.com/davecgh/go-spew v1.1.1 // indirect
29 | github.com/dchest/siphash v1.2.2 // indirect
30 | github.com/felixge/httpsnoop v1.0.1 // indirect
31 | github.com/folbricht/tempfile v0.0.1 // indirect
32 | github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
33 | github.com/golang/protobuf v1.4.3 // indirect
34 | github.com/google/go-cmp v0.5.7 // indirect
35 | github.com/googleapis/gax-go/v2 v2.0.5 // indirect
36 | github.com/hanwen/go-fuse/v2 v2.0.3 // indirect
37 | github.com/json-iterator/go v1.1.9 // indirect
38 | github.com/jstemmer/go-junit-report v0.9.1 // indirect
39 | github.com/klauspost/compress v1.11.4 // indirect
40 | github.com/klauspost/cpuid v1.2.3 // indirect
41 | github.com/kr/fs v0.1.0 // indirect
42 | github.com/kr/text v0.2.0 // indirect
43 | github.com/minio/md5-simd v1.1.0 // indirect
44 | github.com/minio/sha256-simd v0.1.1 // indirect
45 | github.com/mitchellh/go-homedir v1.1.0 // indirect
46 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
47 | github.com/modern-go/reflect2 v1.0.1 // indirect
48 | github.com/pkg/sftp v1.12.0 // indirect
49 | github.com/pkg/xattr v0.4.3 // indirect
50 | github.com/pmezard/go-difflib v1.0.0 // indirect
51 | github.com/rogpeppe/go-internal v1.6.1 // indirect
52 | github.com/sirupsen/logrus v1.7.0 // indirect
53 | github.com/stretchr/testify v1.7.0 // indirect
54 | go.opencensus.io v0.22.5 // indirect
55 | go.uber.org/atomic v1.4.0 // indirect
56 | go.uber.org/multierr v1.1.0 // indirect
57 | golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
58 | golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
59 | golang.org/x/mod v0.3.0 // indirect
60 | golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 // indirect
61 | golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 // indirect
62 | golang.org/x/sync v0.0.0-20201207232520-09787c993a3a // indirect
63 | golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 // indirect
64 | golang.org/x/text v0.3.4 // indirect
65 | golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a // indirect
66 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
67 | google.golang.org/api v0.36.0 // indirect
68 | google.golang.org/appengine v1.6.7 // indirect
69 | google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e // indirect
70 | google.golang.org/grpc v1.33.2 // indirect
71 | google.golang.org/protobuf v1.25.0 // indirect
72 | gopkg.in/ini.v1 v1.62.0 // indirect
73 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
74 | )
75 |
--------------------------------------------------------------------------------
/helpers.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "crypto/ed25519"
5 | "encoding/base64"
6 | "os"
7 | "strings"
8 |
9 | "github.com/kr/pretty"
10 | "github.com/pkg/errors"
11 | )
12 |
13 | func pp(v ...interface{}) {
14 | pretty.Println(v...)
15 | }
16 |
17 | func loadNixPublicKeys(rawKeys []string) (map[string]ed25519.PublicKey, error) {
18 | keys := map[string]ed25519.PublicKey{}
19 | for _, rawKey := range rawKeys {
20 | name, value, err := parseNixPair(rawKey)
21 | if err != nil {
22 | return nil, errors.WithMessage(err, "While loading public keys")
23 | }
24 | keys[name] = ed25519.PublicKey(value)
25 | }
26 |
27 | return keys, nil
28 | }
29 |
30 | func loadNixPrivateKeys(paths []string) (map[string]ed25519.PrivateKey, error) {
31 | pairs, err := readNixPairs(paths)
32 | if err != nil {
33 | return nil, errors.WithMessage(err, "While loading private keys")
34 | }
35 |
36 | keys := map[string]ed25519.PrivateKey{}
37 | for name, key := range pairs {
38 | keys[name] = ed25519.PrivateKey(key)
39 | }
40 |
41 | return keys, nil
42 | }
43 |
44 | func readNixPairs(paths []string) (map[string][]byte, error) {
45 | keys := map[string][]byte{}
46 |
47 | for _, path := range paths {
48 | raw, err := os.ReadFile(path)
49 | if err != nil {
50 | return nil, errors.WithMessagef(err, "Trying to read %q", path)
51 | }
52 |
53 | name, key, err := parseNixPair(string(raw))
54 | if err != nil {
55 | return nil, errors.WithMessagef(err, "Key parsing failed for %q", raw)
56 | }
57 |
58 | keys[name] = key
59 | }
60 |
61 | return keys, nil
62 | }
63 |
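    | // parseNixPair splits a Nix key of the form "name:base64value" (the
    | // format emitted by nix-store --generate-binary-cache-key), e.g.
    | // "cache.example.org-1:<base64>", returning the name and decoded value.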
64 | func parseNixPair(input string) (string, []byte, error) {
65 | i := strings.IndexRune(input, ':')
66 | if i < 1 {
67 | return "", nil, errors.Errorf("Key has no name part in %q", input)
68 | }
69 | name := input[0:i]
70 | encoded := input[i+1:]
71 | value, err := base64.StdEncoding.DecodeString(encoded)
72 | if err != nil {
73 | return "", nil, errors.Errorf("Key decoding failed for %q", encoded)
74 | }
75 |
76 | return name, value, nil
77 | }
78 |
--------------------------------------------------------------------------------
/img/spongix.svg:
--------------------------------------------------------------------------------
1 | <!-- SVG image markup not preserved in this dump -->
--------------------------------------------------------------------------------
/log_record.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "net/http"
5 | "time"
6 |
7 | "go.uber.org/zap"
8 | )
9 |
10 | // LogRecord wraps an http.ResponseWriter and records the response status
11 | type LogRecord struct {
12 | http.ResponseWriter
13 | status int
14 | }
15 |
16 | func (r *LogRecord) Write(p []byte) (int, error) {
17 | return r.ResponseWriter.Write(p)
18 | }
19 |
20 | // WriteHeader overrides ResponseWriter.WriteHeader to keep track of the response code
21 | func (r *LogRecord) WriteHeader(status int) {
22 | r.status = status
23 | r.ResponseWriter.WriteHeader(status)
24 | }
25 |
26 | // withHTTPLogging adds HTTP request logging to the Handler h
27 | func withHTTPLogging(log *zap.Logger) func(http.Handler) http.Handler {
28 | return func(h http.Handler) http.Handler {
29 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
30 | url := r.URL.String()
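    | // /metrics is scraped constantly, so requests to it are not logged and
    | // responses only when they fail.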
31 | isMetric := url == "/metrics"
32 |
33 | if !isMetric {
34 | log.Info("REQ",
35 | zap.String("ident", r.Host),
36 | zap.String("method", r.Method),
37 | zap.String("url", url),
38 | )
39 | }
40 |
41 | start := time.Now()
42 | record := &LogRecord{
43 | ResponseWriter: w,
44 | status: 200,
45 | }
46 | h.ServeHTTP(record, r)
47 |
48 | level := log.Info
49 | if record.status >= 500 {
50 | level = log.Error
51 | }
52 |
53 | if !(isMetric && record.status == 200) {
54 | level("RES",
55 | zap.String("ident", r.Host),
56 | zap.String("method", r.Method),
57 | zap.String("url", url),
58 | zap.Int("status_code", record.status),
59 | zap.Duration("duration", time.Since(start)),
60 | )
61 | }
62 | })
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "crypto/ed25519"
6 | "log"
7 | "net/http"
8 | "net/url"
9 | "os"
10 | "os/signal"
11 | "path/filepath"
12 | "syscall"
13 | "time"
14 |
15 | "github.com/alexflint/go-arg"
16 | "github.com/folbricht/desync"
17 | "github.com/minio/minio-go/v6"
18 | "github.com/minio/minio-go/v6/pkg/credentials"
19 | "go.uber.org/zap"
20 | )
21 |
22 | const (
23 | defaultThreads = 2
24 | )
25 |
26 | var chunkSizeAvg uint64 = 65536
27 |
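    | // Bounds handed to the desync chunker: between a quarter and four times
    | // the configured average chunk size.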
28 | func chunkSizeMin() uint64 { return chunkSizeAvg / 4 }
29 | func chunkSizeMax() uint64 { return chunkSizeAvg * 4 }
30 |
31 | func main() {
32 | // cpuprofile := "spongix.pprof"
33 | // f, err := os.Create(cpuprofile)
34 | // if err != nil {
35 | // log.Fatal(err)
36 | // }
37 | // pprof.StartCPUProfile(f)
38 | // defer pprof.StopCPUProfile()
39 |
40 | proxy := NewProxy()
41 |
42 | arg.MustParse(proxy)
43 | chunkSizeAvg = proxy.AverageChunkSize
44 |
45 | proxy.setupLogger()
46 | proxy.setupDesync()
47 | proxy.setupKeys()
48 | proxy.setupS3()
49 |
50 | go proxy.startCache()
51 | go proxy.gc()
52 | go proxy.verify()
53 |
54 | go func() {
55 | t := time.Tick(5 * time.Second)
56 | for range t {
57 | if err := proxy.log.Sync(); err != nil {
58 | if err.Error() != "sync /dev/stderr: invalid argument" {
59 | log.Printf("failed to sync zap: %s", err)
60 | }
61 | }
62 | }
63 | }()
64 |
65 | // nolint
66 | defer proxy.log.Sync()
67 |
68 | const timeout = 15 * time.Minute
69 |
70 | srv := &http.Server{
71 | Handler: proxy.router(),
72 | Addr: proxy.Listen,
73 | ReadTimeout: timeout,
74 | WriteTimeout: timeout,
75 | }
76 |
77 | sc := make(chan os.Signal, 1)
78 | signal.Notify(
79 | sc,
80 | syscall.SIGHUP,
81 | syscall.SIGINT,
82 | syscall.SIGQUIT,
83 | syscall.SIGTERM,
84 | )
85 |
86 | go func() {
87 | proxy.log.Info("Server starting", zap.String("listen", proxy.Listen))
88 | if err := srv.ListenAndServe(); err != http.ErrServerClosed {
89 | // Only log an error if it's not due to shutdown or close
90 | proxy.log.Fatal("error bringing up listener", zap.Error(err))
91 | }
92 | }()
93 |
94 | <-sc
95 | signal.Stop(sc)
96 |
97 | // Allow in-flight requests up to the full request timeout before forcing shutdown.
98 | ctxShutDown, cancel := context.WithTimeout(context.Background(), timeout)
99 | defer cancel()
100 |
101 | if err := srv.Shutdown(ctxShutDown); err != nil {
102 | proxy.log.Fatal("server shutdown failed", zap.Error(err))
103 | }
104 |
105 | proxy.log.Info("server shutdown gracefully")
106 | }
107 |
108 | type Proxy struct {
109 | BucketURL string `arg:"--bucket-url,env:BUCKET_URL" help:"Bucket URL like s3+http://127.0.0.1:9000/ncp"`
110 | BucketRegion string `arg:"--bucket-region,env:BUCKET_REGION" help:"Region the bucket is in"`
111 | Dir string `arg:"--dir,env:CACHE_DIR" help:"directory for the cache"`
112 | Listen string `arg:"--listen,env:LISTEN_ADDR" help:"Listen on this address"`
113 | SecretKeyFiles []string `arg:"--secret-key-files,required,env:NIX_SECRET_KEY_FILES" help:"Files containing your private nix signing keys"`
114 | Substituters []string `arg:"--substituters,env:NIX_SUBSTITUTERS"`
115 | TrustedPublicKeys []string `arg:"--trusted-public-keys,env:NIX_TRUSTED_PUBLIC_KEYS"`
116 | CacheInfoPriority uint64 `arg:"--cache-info-priority,env:CACHE_INFO_PRIORITY" help:"Priority in nix-cache-info"`
117 | AverageChunkSize uint64 `arg:"--average-chunk-size,env:AVERAGE_CHUNK_SIZE" help:"Chunk size will be between /4 and *4 of this value"`
118 | CacheSize uint64 `arg:"--cache-size,env:CACHE_SIZE" help:"Number of gigabytes to keep in the disk cache"`
119 | VerifyInterval time.Duration `arg:"--verify-interval,env:VERIFY_INTERVAL" help:"Time between verification runs"`
120 | GcInterval time.Duration `arg:"--gc-interval,env:GC_INTERVAL" help:"Time between store garbage collection runs"`
121 | LogLevel string `arg:"--log-level,env:LOG_LEVEL" help:"One of debug, info, warn, error, dpanic, panic, fatal"`
122 | LogMode string `arg:"--log-mode,env:LOG_MODE" help:"development or production"`
123 |
124 | // derived from the above
125 | secretKeys map[string]ed25519.PrivateKey
126 | trustedKeys map[string]ed25519.PublicKey
127 |
128 | s3Store desync.WriteStore
129 | localStore desync.WriteStore
130 |
131 | s3Index desync.IndexWriteStore
132 | localIndex desync.IndexWriteStore
133 |
134 | cacheChan chan string
135 |
136 | log *zap.Logger
137 | }
138 |
139 | func NewProxy() *Proxy {
140 | devLog, err := zap.NewDevelopment()
141 | if err != nil {
142 | panic(err)
143 | }
144 |
145 | return &Proxy{
146 | Dir: "./cache",
147 | Listen: ":7745",
148 | SecretKeyFiles: []string{},
149 | TrustedPublicKeys: []string{},
150 | Substituters: []string{},
151 | CacheInfoPriority: 50,
152 | AverageChunkSize: chunkSizeAvg,
153 | VerifyInterval: time.Hour,
154 | GcInterval: time.Hour,
155 | cacheChan: make(chan string, 10000),
156 | log: devLog,
157 | LogLevel: "debug",
158 | LogMode: "production",
159 | }
160 | }
161 |
162 | var (
163 | buildVersion = "dev"
164 | buildCommit = "dirty"
165 | )
166 |
167 | func (proxy *Proxy) Version() string {
168 | return buildVersion + " (" + buildCommit + ")"
169 | }
170 |
171 | func (proxy *Proxy) setupDir(path string) {
172 | dir := filepath.Join(proxy.Dir, path)
173 | if _, err := os.Stat(dir); err != nil {
174 | proxy.log.Debug("Creating directory", zap.String("dir", dir))
175 | if err := os.MkdirAll(dir, 0o755); err != nil {
176 | proxy.log.Fatal("couldn't create directory", zap.String("dir", dir), zap.Error(err))
177 | }
178 | }
179 | }
180 |
181 | func (proxy *Proxy) setupS3() {
182 | if proxy.BucketURL == "" {
183 | proxy.log.Warn("no bucket URL given, will not upload files")
184 | return
185 | }
186 |
187 | if proxy.BucketRegion == "" {
188 | proxy.log.Warn("no bucket region given, will not upload files")
189 | return
190 | }
191 |
192 | s3Url, err := url.Parse(proxy.BucketURL)
193 | if err != nil {
194 | proxy.log.Fatal("couldn't parse bucket url", zap.Error(err), zap.String("url", proxy.BucketURL))
195 | }
196 | creds := credentials.NewChainCredentials(
197 | []credentials.Provider{
198 | &credentials.EnvMinio{},
199 | &credentials.EnvAWS{},
200 | },
201 | )
202 |
203 | store, err := desync.NewS3Store(s3Url, creds, proxy.BucketRegion,
204 | desync.StoreOptions{
205 | N: 1,
206 | Timeout: 1 * time.Second,
207 | ErrorRetry: 0,
208 | Uncompressed: false,
209 | SkipVerify: false,
210 | }, minio.BucketLookupAuto)
211 | if err != nil {
212 | proxy.log.Fatal("failed creating s3 store",
213 | zap.Error(err),
214 | zap.String("url", s3Url.String()),
215 | zap.String("region", proxy.BucketRegion),
216 | )
217 | }
218 |
219 | proxy.s3Store = store
220 | }
221 |
222 | func (proxy *Proxy) setupKeys() {
223 | secretKeys, err := loadNixPrivateKeys(proxy.SecretKeyFiles)
224 | if err != nil {
225 | proxy.log.Fatal("failed loading private keys", zap.Error(err), zap.Strings("files", proxy.SecretKeyFiles))
226 | }
227 | proxy.secretKeys = secretKeys
228 |
229 | publicKeys, err := loadNixPublicKeys(proxy.TrustedPublicKeys)
230 | if err != nil {
231 | proxy.log.Fatal("failed loading public keys", zap.Error(err), zap.Strings("files", proxy.TrustedPublicKeys))
232 | }
233 | proxy.trustedKeys = publicKeys
234 | }
235 |
236 | func (proxy *Proxy) stateDirs() []string {
237 | return []string{"store", "index", "index/nar", "tmp", "trash/index", "oci"}
238 | }
239 |
240 | var defaultStoreOptions = desync.StoreOptions{
241 | N: 1,
242 | Timeout: 1 * time.Second,
243 | ErrorRetry: 0,
244 | Uncompressed: false,
245 | SkipVerify: false,
246 | }
247 |
248 | func (proxy *Proxy) setupDesync() {
249 | for _, name := range proxy.stateDirs() {
250 | proxy.setupDir(name)
251 | }
252 |
253 | storeDir := filepath.Join(proxy.Dir, "store")
254 | narStore, err := desync.NewLocalStore(storeDir, defaultStoreOptions)
255 | if err != nil {
256 | proxy.log.Fatal("failed creating local store", zap.Error(err), zap.String("dir", storeDir))
257 | }
258 | narStore.UpdateTimes = true
259 |
260 | indexDir := filepath.Join(proxy.Dir, "index")
261 | narIndex, err := desync.NewLocalIndexStore(indexDir)
262 | if err != nil {
263 | proxy.log.Fatal("failed creating local index", zap.Error(err), zap.String("dir", indexDir))
264 | }
265 |
266 | proxy.localStore = narStore
267 | proxy.localIndex = narIndex
268 | }
269 |
270 | func (proxy *Proxy) setupLogger() {
271 | lvl := zap.NewAtomicLevel()
272 | if err := lvl.UnmarshalText([]byte(proxy.LogLevel)); err != nil {
273 | panic(err)
274 | }
275 | development := proxy.LogMode == "development"
276 | encoding := "json"
277 | encoderConfig := zap.NewProductionEncoderConfig()
278 | if development {
279 | encoding = "console"
280 | encoderConfig = zap.NewDevelopmentEncoderConfig()
281 | }
282 |
283 | l := zap.Config{
284 | Level: lvl,
285 | Development: development,
286 | DisableCaller: false,
287 | DisableStacktrace: false,
288 | Sampling: &zap.SamplingConfig{Initial: 1, Thereafter: 2},
289 | Encoding: encoding,
290 | EncoderConfig: encoderConfig,
291 | OutputPaths: []string{"stderr"},
292 | ErrorOutputPaths: []string{"stderr"},
293 | }
294 |
295 | var err error
296 | proxy.log, err = l.Build()
297 | if err != nil {
298 | panic(err)
299 | }
300 | }
301 |
--------------------------------------------------------------------------------
/manifest_manager.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "os"
6 | "path/filepath"
7 | )
8 |
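  | // manifestManager serializes access to Docker manifests stored on disk.
  | // All reads and writes funnel through a single goroutine (manifestLoop),
  | // so concurrent requests never race on the same file.
  | //
  | // A minimal usage sketch (the path and image name are hypothetical):
  | //
  | //	mgr := newManifestManager("/var/lib/spongix/oci/manifests")
  | //	err := mgr.set("library/hello", "latest", manifest)
  | //	got, err := mgr.get("library/hello", "latest")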
9 | type manifestManager struct {
10 | c chan manifestMsg
11 | }
12 |
13 | func newManifestManager(dir string) manifestManager {
14 | return manifestManager{c: manifestLoop(dir)}
15 | }
16 |
17 | func (m manifestManager) set(name, reference string, manifest *DockerManifest) error {
18 | c := make(chan *manifestMsg)
19 | m.c <- manifestMsg{t: manifestMsgSet, name: name, reference: reference, manifest: manifest, c: c}
20 | return (<-c).err
21 | }
22 |
23 | func (m manifestManager) get(name, reference string) (*DockerManifest, error) {
24 | c := make(chan *manifestMsg)
25 | m.c <- manifestMsg{t: manifestMsgGet, name: name, reference: reference, c: c}
26 | res := <-c
27 | return res.manifest, res.err
28 | }
29 |
30 | type manifestMsgType int
31 |
32 | const (
33 | manifestMsgGet manifestMsgType = iota
34 | manifestMsgSet
35 | )
36 |
37 | type manifestMsg struct {
38 | t manifestMsgType
39 | c chan *manifestMsg
40 | manifest *DockerManifest
41 | name string
42 | reference string
43 | err error
44 | }
45 |
46 | func manifestLoop(dir string) chan manifestMsg {
47 | ch := make(chan manifestMsg)
48 | go func() {
49 | for msg := range ch {
50 | switch msg.t {
51 | case manifestMsgGet:
52 | subdir := filepath.Join(dir, msg.name)
53 |
54 | if fd, err := os.Open(filepath.Join(subdir, msg.reference)); err != nil {
55 | if os.IsNotExist(err) {
56 | msg.c <- &manifestMsg{} // missing manifest: reply with neither manifest nor error
57 | } else {
58 | msg.c <- &manifestMsg{err: err}
59 | }
60 | } else {
61 | manifest := &DockerManifest{}
62 | err := json.NewDecoder(fd).Decode(manifest)
63 | fd.Close() // release the read handle before replying
64 | if err != nil {
65 | msg.c <- &manifestMsg{err: err}
66 | } else {
67 | msg.c <- &manifestMsg{manifest: manifest}
68 | }
69 | }
70 | case manifestMsgSet:
71 | subdir := filepath.Join(dir, msg.name)
72 |
73 | if err := os.MkdirAll(subdir, 0o755); err != nil {
74 | msg.c <- &manifestMsg{err: err}
75 | } else if fd, err := os.Create(filepath.Join(subdir, msg.reference)); err != nil {
76 | msg.c <- &manifestMsg{err: err}
77 | } else if err := json.NewEncoder(fd).Encode(msg.manifest); err != nil {
78 | fd.Close()
79 | msg.c <- &manifestMsg{err: err}
80 | } else if err := fd.Close(); err != nil { // surface write errors deferred until close
81 | msg.c <- &manifestMsg{err: err}
82 | } else {
83 | msg.c <- &manifestMsg{}
84 | }
85 | default:
86 | panic(msg)
87 | }
88 | }
89 | }()
90 |
91 | return ch
92 | }
93 |
--------------------------------------------------------------------------------
/module.nix:
--------------------------------------------------------------------------------
1 | {
2 | config,
3 | lib,
4 | pkgs,
5 | ...
6 | }: let
7 | cfg = config.services.spongix;
8 | join = lib.concatStringsSep ",";
9 | in {
10 | options = {
11 | services.spongix = {
12 | enable = lib.mkEnableOption "the spongix Nix cache proxy";
13 |
14 | package = lib.mkOption {
15 | type = lib.types.package;
16 | default = pkgs.spongix;
17 | };
18 |
19 | bucketURL = lib.mkOption {
20 | type = lib.types.nullOr lib.types.str;
21 | default = null;
22 | description = "URL of the S3 Bucket.";
23 | example = "s3+http://127.0.0.1:7745/spongix";
24 | };
25 |
26 | bucketRegion = lib.mkOption {
27 | type = lib.types.nullOr lib.types.str;
28 | default = null;
29 | description = "Region of the S3 bucket. (Also required for Minio)";
30 | };
31 |
32 | cacheDir = lib.mkOption {
33 | type = lib.types.str;
34 | default = "/var/lib/spongix";
35 | description = ''
36 | Keep all cache state in this directory.
37 | '';
38 | };
39 |
40 | host = lib.mkOption {
41 | type = lib.types.str;
42 | default = "";
43 | description = ''
44 | Listen on this host. An empty value listens on all interfaces.
45 | '';
46 | };
47 |
48 | port = lib.mkOption {
49 | type = lib.types.port;
50 | default = 7745;
51 | description = ''
52 | Listen on this port.
53 | '';
54 | };
55 |
56 | secretKeyFiles = lib.mkOption {
57 | type = lib.types.attrsOf lib.types.str;
58 | default = {};
59 | description = ''
60 | An attrset of { name = path; } to files containing private keys used
61 | for signing narinfos.
62 | They may be located anywhere and will be made available by systemd.
63 | To generate a key, you can use
64 | `nix key generate-secret --key-name foo > foo.private`
65 | '';
66 | };
67 |
68 | substituters = lib.mkOption {
69 | type = lib.types.listOf lib.types.str;
70 | default = ["https://cache.nixos.org"];
71 | description = ''
72 | Upstream Nix caches to query on cache misses.
73 | '';
74 | };
75 |
76 | trustedPublicKeys = lib.mkOption {
77 | type = lib.types.listOf lib.types.str;
78 | default = ["cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="];
79 | description = ''
80 | Keys in this list are kept in narinfo files, and re-signed with the spongix key.
81 | This should include the public key of your secret key.
82 | To generate a public key from the secret, you can use
83 | `nix key convert-secret-to-public < foo.private > foo.public`
84 | '';
85 | };
86 |
87 | cacheInfoPriority = lib.mkOption {
88 | type = lib.types.ints.unsigned;
89 | default = 50;
90 | description = ''
91 | Priority in /nix-cache-info
92 | '';
93 | };
94 |
95 | averageChunkSize = lib.mkOption {
96 | type = lib.types.ints.between 48 4294967296;
97 | default = 65536;
98 | description = ''
99 | Chunk sizes will be between a quarter and four times this value
100 | '';
101 | };
102 |
103 | cacheSize = lib.mkOption {
104 | type = lib.types.ints.positive;
105 | default = 10;
106 | description = ''
107 | Number of GB to keep in the local cache
108 | '';
109 | };
110 |
111 | verifyInterval = lib.mkOption {
112 | type = lib.types.str;
113 | default = "24h";
114 | description = ''
115 | Time between verifications of local store file integrity (slow and I/O-intensive)
116 | '';
117 | };
118 |
119 | gcInterval = lib.mkOption {
120 | type = lib.types.str;
121 | default = "1h";
122 | description = ''
123 | Time between garbage collections of local store files (fast)
124 | '';
125 | };
126 |
127 | logLevel = lib.mkOption {
128 | type = lib.types.enum [
129 | "debug"
130 | "info"
131 | "warn"
132 | "error"
133 | "dpanic"
134 | "panic"
135 | "fatal"
136 | ];
137 | default = "info";
138 | };
139 |
140 | logMode = lib.mkOption {
141 | type = lib.types.enum ["production" "development"];
142 | default = "production";
143 | description = ''
144 | production mode uses JSON formatting, while development mode is more
145 | human readable.
146 | '';
147 | };
148 | };
149 | };
150 |
151 | config = lib.mkIf cfg.enable {
152 | systemd.services.spongix = {
153 | wantedBy = ["multi-user.target"];
154 |
155 | path = [config.nix.package];
156 |
157 | environment = {
158 | BUCKET_URL = cfg.bucketURL;
159 | BUCKET_REGION = cfg.bucketRegion;
160 | CACHE_DIR = cfg.cacheDir;
161 | LISTEN_ADDR = "${cfg.host}:${toString cfg.port}";
162 | NIX_SUBSTITUTERS = join cfg.substituters;
163 | NIX_TRUSTED_PUBLIC_KEYS = join cfg.trustedPublicKeys;
164 | CACHE_INFO_PRIORITY = toString cfg.cacheInfoPriority;
165 | AVERAGE_CHUNK_SIZE = toString cfg.averageChunkSize;
166 | CACHE_SIZE = toString cfg.cacheSize;
167 | VERIFY_INTERVAL = cfg.verifyInterval;
168 | GC_INTERVAL = cfg.gcInterval;
169 | LOG_LEVEL = cfg.logLevel;
170 | LOG_MODE = cfg.logMode;
171 | };
172 |
173 | script = ''
174 | set -exuo pipefail
175 | export NIX_SECRET_KEY_FILES="${
176 | join
177 | (lib.mapAttrsToList (name: value: "$CREDENTIALS_DIRECTORY/${name}")
178 | cfg.secretKeyFiles)
179 | }"
180 | exec "${cfg.package}/bin/spongix"
181 | '';
182 |
183 | serviceConfig = {
184 | User = "spongix";
185 | Group = "spongix";
186 | DynamicUser = true;
187 | StateDirectory = "spongix";
188 | WorkingDirectory = cfg.cacheDir;
189 | LoadCredential =
190 | lib.mapAttrsToList (name: value: "${name}:${value}")
191 | cfg.secretKeyFiles;
192 | ReadWritePaths = cfg.cacheDir;
193 | Restart = "on-failure";
194 | RestartSec = 5;
195 | };
196 | };
197 | };
198 | }
199 |
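  | # A minimal sketch of enabling this module (the key path is illustrative,
  | # not a default):
  | #
  | #   services.spongix = {
  | #     enable = true;
  | #     secretKeyFiles.spongix = "/run/keys/spongix.private";
  | #     substituters = ["https://cache.nixos.org"];
  | #   };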
--------------------------------------------------------------------------------
/narinfo.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "crypto/ed25519"
7 | "encoding/base64"
8 | "fmt"
9 | "io"
10 | "path/filepath"
11 | "regexp"
12 | "strconv"
13 | "strings"
14 |
15 | "github.com/pkg/errors"
16 | )
17 |
18 | type Narinfo struct {
19 | Name string `json:"name"`
20 | StorePath string `json:"store_path"`
21 | URL string `json:"url"`
22 | Compression string `json:"compression"`
23 | FileHash string `json:"file_hash"`
24 | FileSize int64 `json:"file_size"`
25 | NarHash string `json:"nar_hash"`
26 | NarSize int64 `json:"nar_size"`
27 | References []string `json:"references"`
28 | Deriver string `json:"deriver"`
29 | Sig []string `json:"sig"`
30 | CA string `json:"ca"`
31 | }
32 |
33 | /*
34 | func (proxy *Proxy) validateNarinfo(dir, path string, remove bool) error {
35 | info := &Narinfo{}
36 | f, err := os.Open(path)
37 | if err != nil {
38 | proxy.log.Error("Failed to open narinfo", zap.String("path", path), zap.Error(err))
39 | return nil
40 | }
41 |
42 | if err := info.Unmarshal(f); err != nil {
43 | proxy.log.Error("Failed to unmarshal narinfo", zap.String("path", path), zap.Error(err))
44 | if remove {
45 | os.Remove(path)
46 | }
47 | return nil
48 | }
49 |
50 | narPath := filepath.Join(dir, info.URL)
51 | stat, err := os.Stat(narPath)
52 | if err != nil {
53 | proxy.log.Error("Failed to find NAR", zap.String("nar_path", narPath), zap.String("path", path), zap.Error(err))
54 | if remove {
55 | os.Remove(path)
56 | }
57 | return nil
58 | }
59 |
60 | ssize := stat.Size()
61 |
62 | if ssize != info.FileSize {
63 | log.Printf("%q should be size %d but has %d", narPath, info.FileSize, ssize)
64 | proxy.log.Error("NAR has wrong size", zap.String("nar_path", narPath), zap.String("path", path), zap.Int64("expected", info.FileSize), zap.Int64("actual", ssize))
65 | if remove {
66 | os.Remove(path)
67 | os.Remove(narPath)
68 | }
69 | return nil
70 | }
71 |
72 | return nil
73 | }
74 | */
75 |
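  | // PrepareForStorage normalizes a narinfo before persisting it: the NAR is
  | // referenced uncompressed, signatures that don't verify are dropped, and if
  | // none survive, the narinfo is signed with our own secret keys.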
76 | func (info *Narinfo) PrepareForStorage(
77 | trustedKeys map[string]ed25519.PublicKey,
78 | secretKeys map[string]ed25519.PrivateKey,
79 | ) (io.Reader, error) {
80 | info.SanitizeNar()
81 | info.SanitizeSignatures(trustedKeys)
82 | if len(info.Sig) == 0 {
83 | for name, key := range secretKeys {
84 | info.Sign(name, key)
85 | }
86 | }
87 | return info.ToReader()
88 | }
89 |
90 | func (info *Narinfo) ToReader() (io.Reader, error) {
91 | buf := &bytes.Buffer{}
92 | err := info.Marshal(buf)
93 | return buf, err
94 | }
95 |
96 | func (info *Narinfo) Marshal(output io.Writer) error {
97 | out := bufio.NewWriter(output)
98 |
99 | write := func(format string, arg interface{}) error {
100 | _, err := out.WriteString(fmt.Sprintf(format, arg))
101 | return err
102 | }
103 |
104 | if err := write("StorePath: %s\n", info.StorePath); err != nil {
105 | return err
106 | }
107 |
108 | if err := write("URL: %s\n", info.URL); err != nil {
109 | return err
110 | }
111 |
112 | if err := write("Compression: %s\n", info.Compression); err != nil {
113 | return err
114 | }
115 |
116 | if err := write("FileHash: %s\n", info.FileHash); err != nil {
117 | return err
118 | }
119 |
120 | if err := write("FileSize: %d\n", info.FileSize); err != nil {
121 | return err
122 | }
123 |
124 | if err := write("NarHash: %s\n", info.NarHash); err != nil {
125 | return err
126 | }
127 |
128 | if err := write("NarSize: %d\n", info.NarSize); err != nil {
129 | return err
130 | }
131 |
132 | if len(info.References) > 0 {
133 | if err := write("References: %s\n", strings.Join(info.References, " ")); err != nil {
134 | return err
135 | }
136 | }
137 |
138 | if len(info.Deriver) > 0 {
139 | if err := write("Deriver: %s\n", info.Deriver); err != nil {
140 | return err
141 | }
142 | }
143 |
144 | for _, sig := range info.Sig {
145 | if _, err := out.WriteString(fmt.Sprintf("Sig: %s\n", sig)); err != nil {
146 | return err
147 | }
148 | }
149 |
150 | return out.Flush()
151 | }
152 |
153 | // TODO: replace with a validating parser
154 | func (info *Narinfo) Unmarshal(input io.Reader) error {
155 | if input == nil {
156 | return errors.New("can't unmarshal nil reader")
157 | }
158 |
159 | scanner := bufio.NewScanner(input)
160 | capacity := 1024 * 1024
161 | buf := make([]byte, 0, capacity)
162 | scanner.Buffer(buf, capacity)
163 |
164 | for scanner.Scan() {
165 | line := scanner.Text()
166 |
167 | parts := strings.SplitN(line, ": ", 2)
168 | if len(parts) != 2 {
169 | return errors.Errorf("Failed to parse line: %q", line)
170 | }
171 | key := parts[0]
172 | value := parts[1]
173 | if value == "" {
174 | continue
175 | }
176 |
177 | switch key {
178 | case "StorePath":
179 | if info.StorePath != "" {
180 | return errors.Errorf("Duplicate StorePath")
181 | }
182 | info.StorePath = value
183 | parts := strings.SplitN(filepath.Base(value), "-", 2)
184 | info.Name = parts[0]
185 | case "URL":
186 | if info.URL != "" {
187 | return errors.Errorf("Duplicate URL")
188 | }
189 | info.URL = value
190 | case "Compression":
191 | if info.Compression != "" {
192 | return errors.Errorf("Duplicate Compression")
193 | }
194 | info.Compression = value
195 | case "FileHash":
196 | if info.FileHash != "" {
197 | return errors.Errorf("Duplicate FileHash")
198 | }
199 | info.FileHash = value
200 | case "FileSize":
201 | if info.FileSize != 0 {
202 | return errors.Errorf("Duplicate FileSize")
203 | }
204 | if fileSize, err := strconv.ParseInt(value, 10, 64); err == nil {
205 | info.FileSize = fileSize
206 | } else {
207 | return err
208 | }
209 | case "NarHash":
210 | if info.NarHash != "" {
211 | return errors.Errorf("Duplicate NarHash")
212 | }
213 | info.NarHash = value
214 | case "NarSize":
215 | if info.NarSize != 0 {
216 | return errors.Errorf("Duplicate NarSize")
217 | }
218 | if narSize, err := strconv.ParseInt(value, 10, 64); err == nil {
219 | info.NarSize = narSize
220 | } else {
221 | return err
222 | }
223 | case "References":
224 | info.References = append(info.References, strings.Split(value, " ")...)
225 | case "Deriver":
226 | if info.Deriver != "" {
227 | return errors.Errorf("Duplicate Deriver")
228 | }
229 | info.Deriver = value
230 | case "Sig":
231 | info.Sig = append(info.Sig, value)
232 | case "CA":
233 | if info.CA != "" {
234 | return errors.Errorf("Duplicate CA")
235 | }
236 | info.CA = value
237 | default:
238 | return errors.Errorf("Unknown narinfo key: %q: %v", key, value)
239 | }
240 | }
241 |
242 | if err := scanner.Err(); err != nil {
243 | return errors.WithMessage(err, "Parsing narinfo")
244 | }
245 |
246 | if info.Compression == "" {
247 | info.Compression = "bzip2"
248 | }
249 |
250 | if err := info.Validate(); err != nil {
251 | return errors.WithMessage(err, "Validating narinfo")
252 | }
253 |
254 | return nil
255 | }
256 |
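  | // nixHash matches Nix's base32 alphabet, which omits the characters e, o, t and u.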
257 | var (
258 | nixHash = `[0-9a-df-np-sv-z]`
259 | validNixStorePath = regexp.MustCompile(`\A/nix/store/` + nixHash + `{32}-.+\z`)
260 | validStorePath = regexp.MustCompile(`\A` + nixHash + `{32}-.+\z`)
261 | validURL = regexp.MustCompile(`\Anar/` + nixHash + `{52}(\.drv|\.nar(\.(xz|bz2|zst|lzip|lz4|br))?)\z`)
262 | validCompression = regexp.MustCompile(`\A(|none|xz|bzip2|br|zst)\z`)
263 | validHash = regexp.MustCompile(`\Asha256:` + nixHash + `{52}\z`)
264 | validDeriver = regexp.MustCompile(`\A` + nixHash + `{32}-.+\.drv\z`)
265 | )
266 |
267 | func (info *Narinfo) Validate() error {
268 | if !validNixStorePath.MatchString(info.StorePath) {
269 | return errors.Errorf("Invalid StorePath: %q", info.StorePath)
270 | }
271 |
272 | if !validURL.MatchString(info.URL) {
273 | return errors.Errorf("Invalid URL: %q", info.URL)
274 | }
275 |
276 | if !validCompression.MatchString(info.Compression) {
277 | return errors.Errorf("Invalid Compression: %q", info.Compression)
278 | }
279 |
280 | if !validHash.MatchString(info.FileHash) {
281 | return errors.Errorf("Invalid FileHash: %q", info.FileHash)
282 | }
283 |
284 | if info.FileSize == 0 {
285 | return errors.Errorf("Invalid FileSize: %d", info.FileSize)
286 | }
287 |
288 | if !validHash.MatchString(info.NarHash) {
289 | return errors.Errorf("Invalid NarHash: %q", info.NarHash)
290 | }
291 |
292 | if info.NarSize == 0 {
293 | return errors.Errorf("Invalid NarSize: %d", info.NarSize)
294 | }
295 |
296 | for _, ref := range info.References {
297 | if !validStorePath.MatchString(ref) {
298 | return errors.Errorf("Invalid Reference: %q", ref)
299 | }
300 | }
301 |
302 | if info.Deriver != "" && !validDeriver.MatchString(info.Deriver) {
303 | return errors.Errorf("Invalid Deriver: %q", info.Deriver)
304 | }
305 |
306 | return nil
307 | }
308 |
309 | // SanitizeNar modifies the Narinfo to point at an uncompressed NAR file.
310 | // Nix signatures cover only the store path, NAR hash, NAR size and references, so they remain valid.
311 | func (info *Narinfo) SanitizeNar() {
312 | if info.Compression == "none" {
313 | return
314 | }
315 |
316 | info.FileHash = info.NarHash
317 | info.FileSize = info.NarSize
318 | info.Compression = "none"
319 |
320 | ext := filepath.Ext(info.URL)
321 | info.URL = info.URL[0 : len(info.URL)-len(ext)]
322 | }
323 |
324 | // SanitizeSignatures keeps only signatures that verify against the given public keys.
325 | func (info *Narinfo) SanitizeSignatures(publicKeys map[string]ed25519.PublicKey) {
326 | valid, _ := info.ValidInvalidSignatures(publicKeys)
327 | info.Sig = valid
328 | }
329 |
330 | // ValidInvalidSignatures partitions the signatures into valid and invalid; signatures by unknown keys end up in neither list.
331 | func (info *Narinfo) ValidInvalidSignatures(publicKeys map[string]ed25519.PublicKey) ([]string, []string) {
332 | if len(info.Sig) == 0 {
333 | return nil, nil
334 | }
335 |
336 | signMsg := info.signMsg()
337 | valid := []string{}
338 | invalid := []string{}
339 |
340 | // a signature has the form "name:base64"; anything else counts as invalid
341 | for _, sig := range info.Sig {
342 | i := strings.IndexRune(sig, ':')
343 | if i < 0 { invalid = append(invalid, sig); continue }
344 | name, sigStr := sig[0:i], sig[i+1:]
345 | signature, err := base64.StdEncoding.DecodeString(sigStr)
346 | if err != nil {
347 | invalid = append(invalid, sig)
348 | } else if key, ok := publicKeys[name]; ok {
349 | if ed25519.Verify(key, []byte(signMsg), signature) {
350 | valid = append(valid, sig)
351 | } else {
352 | invalid = append(invalid, sig)
353 | }
354 | }
355 | }
356 |
357 | return valid, invalid
358 | }
359 |
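  | // signMsg builds the canonical Nix fingerprint that signatures cover, e.g.
  | // "1;/nix/store/<hash>-foo;sha256:<narhash>;<narsize>;/nix/store/<hash>-ref1,...".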
360 | func (info *Narinfo) signMsg() string {
361 | refs := []string{}
362 | for _, ref := range info.References {
363 | refs = append(refs, "/nix/store/"+ref)
364 | }
365 |
366 | return fmt.Sprintf("1;%s;%s;%s;%s",
367 | info.StorePath,
368 | info.NarHash,
369 | strconv.FormatInt(info.NarSize, 10),
370 | strings.Join(refs, ","))
371 | }
372 |
373 | func (info *Narinfo) Sign(name string, key ed25519.PrivateKey) {
374 | signature := info.Signature(name, key)
375 | missing := true
376 |
377 | for _, sig := range info.Sig {
378 | if sig == signature {
379 | missing = false
380 | }
381 | }
382 |
383 | if missing {
384 | info.Sig = append(info.Sig, signature)
385 | }
386 | }
387 |
388 | func (info *Narinfo) Signature(name string, key ed25519.PrivateKey) string {
389 | signature := ed25519.Sign(key, []byte(info.signMsg()))
390 | return name + ":" + base64.StdEncoding.EncodeToString(signature)
391 | }
392 |
393 | func (info *Narinfo) NarHashType() string {
394 | return strings.SplitN(info.NarHash, ":", 2)[0]
395 | }
396 |
397 | func (info *Narinfo) NarHashValue() string {
398 | return strings.SplitN(info.NarHash, ":", 2)[1]
399 | }
400 |
401 | func (info *Narinfo) FileHashType() string {
402 | return strings.SplitN(info.FileHash, ":", 2)[0]
403 | }
404 |
405 | func (info *Narinfo) FileHashValue() string {
406 | return strings.SplitN(info.FileHash, ":", 2)[1]
407 | }
408 |
--------------------------------------------------------------------------------
/narinfo_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "crypto/ed25519"
6 | "strings"
7 | "testing"
8 |
9 | "github.com/smartystreets/assertions"
10 | "github.com/steinfletcher/apitest"
11 | )
12 |
13 | var validNarinfo = &Narinfo{
14 | StorePath: "/nix/store/00000000000000000000000000000000-some",
15 | URL: "nar/0000000000000000000000000000000000000000000000000000.nar.xz",
16 | Compression: "xz",
17 | FileHash: "sha256:0f54iihf02azn24vm6gky7xxpadq5693qrjzkaavbnd68shvgbd7",
18 | FileSize: 1,
19 | NarHash: "sha256:0f54iihf02azn24vm6gky7xxpadq5693qrjzkaavbnd68shvgbd7",
20 | NarSize: 1,
21 | References: []string{"00000000000000000000000000000000-some"},
22 | Deriver: "r92m816zcm8v9zjr55lmgy4pdibjbyjp-foo.drv",
23 | }
24 |
25 | func TestNarinfoMarshal(t *testing.T) {
26 | v := apitest.DefaultVerifier{}
27 |
28 | info := validNarinfo
29 | buf := bytes.Buffer{}
30 | err := info.Marshal(&buf)
31 | v.NoError(t, err)
32 |
33 | v.Equal(t, buf.String(), `StorePath: `+validNarinfo.StorePath+`
34 | URL: `+validNarinfo.URL+`
35 | Compression: `+validNarinfo.Compression+`
36 | FileHash: `+validNarinfo.FileHash+`
37 | FileSize: 1
38 | NarHash: `+validNarinfo.NarHash+`
39 | NarSize: 1
40 | References: `+strings.Join(validNarinfo.References, " ")+`
41 | Deriver: `+validNarinfo.Deriver+`
42 | `)
43 | }
44 |
45 | func TestNarinfoValidate(t *testing.T) {
46 | v := apitest.DefaultVerifier{}
47 |
48 | info := &Narinfo{
49 | Compression: "invalid",
50 | References: []string{"invalid"},
51 | }
52 |
53 | v.Equal(t, `Invalid StorePath: ""`, info.Validate().Error())
54 |
55 | info.StorePath = "/nix/store/00000000000000000000000000000000-some"
56 | v.Equal(t, `Invalid URL: ""`, info.Validate().Error())
57 |
58 | info.URL = "nar/0000000000000000000000000000000000000000000000000000.nar.xz"
59 | v.Equal(t, `Invalid Compression: "invalid"`, info.Validate().Error())
60 |
61 | info.Compression = "xz"
62 | v.Equal(t, `Invalid FileHash: ""`, info.Validate().Error())
63 |
64 | info.FileHash = "sha256:0f54iihf02azn24vm6gky7xxpadq5693qrjzkaavbnd68shvgbd7"
65 | v.Equal(t, `Invalid FileSize: 0`, info.Validate().Error())
66 |
67 | info.FileSize = 1
68 | v.Equal(t, `Invalid NarHash: ""`, info.Validate().Error())
69 |
70 | info.NarHash = "sha256:0f54iihf02azn24vm6gky7xxpadq5693qrjzkaavbnd68shvgbd7"
71 | v.Equal(t, `Invalid NarSize: 0`, info.Validate().Error())
72 |
73 | info.NarSize = 1
74 | v.Equal(t, `Invalid Reference: "invalid"`, info.Validate().Error())
75 |
76 | info.References = []string{"00000000000000000000000000000000-some"}
77 | v.Equal(t, nil, info.Validate())
78 | }
79 |
80 | func TestNarinfoVerify(t *testing.T) {
81 | a := assertions.New(t)
82 | name := "test"
83 | key := ed25519.NewKeyFromSeed(bytes.Repeat([]byte{0}, 32))
84 |
85 | publicKeys := map[string]ed25519.PublicKey{}
86 | publicKeys[name] = key.Public().(ed25519.PublicKey)
87 |
88 | info := &Narinfo{
89 | StorePath: "/nix/store/00000000000000000000000000000000-some",
90 | URL: "nar/0000000000000000000000000000000000000000000000000000.nar.xz",
91 | Compression: "xz",
92 | FileHash: "sha256:0f54iihf02azn24vm6gky7xxpadq5693qrjzkaavbnd68shvgbd7",
93 | FileSize: 1,
94 | NarHash: "sha256:0f54iihf02azn24vm6gky7xxpadq5693qrjzkaavbnd68shvgbd7",
95 | NarSize: 1,
96 | References: []string{"00000000000000000000000000000000-some"},
97 | Deriver: "r92m816zcm8v9zjr55lmgy4pdibjbyjp-foo.drv",
98 | }
99 |
100 | info.Sig = []string{}
101 | valid, invalid := info.ValidInvalidSignatures(publicKeys)
102 | a.So(valid, assertions.ShouldHaveLength, 0)
103 | a.So(invalid, assertions.ShouldHaveLength, 0)
104 |
105 | info.Sig = []string{"test:test"}
106 | valid, invalid = info.ValidInvalidSignatures(publicKeys)
107 | a.So(valid, assertions.ShouldHaveLength, 0)
108 | a.So(invalid, assertions.ShouldHaveLength, 1)
109 |
110 | info.Sig = []string{}
111 | info.Sign(name, key)
112 | valid, invalid = info.ValidInvalidSignatures(publicKeys)
113 | a.So(valid, assertions.ShouldHaveLength, 1)
114 | a.So(invalid, assertions.ShouldHaveLength, 0)
115 |
116 | // v.Equal(t, `No matching signature found in []`, info.(publicKeys).Error())
117 |
118 | // info.Sig = []string{}
119 | // v.NoError(t, info.Sign(name, key))
120 | // v.Equal(t, nil, info.Verify(publicKeys))
121 | }
122 |
123 | func TestNarinfoSanitizeNar(t *testing.T) {
124 | a := assertions.New(t)
125 | name := "test"
126 | key := ed25519.NewKeyFromSeed(bytes.Repeat([]byte{0}, 32))
127 |
128 | publicKeys := map[string]ed25519.PublicKey{}
129 | publicKeys[name] = key.Public().(ed25519.PublicKey)
130 |
131 | info := &Narinfo{
132 | StorePath: "/nix/store/00000000000000000000000000000000-some",
133 | URL: "nar/0000000000000000000000000000000000000000000000000000.nar.xz",
134 | Compression: "xz",
135 | FileHash: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
136 | FileSize: 1,
137 | NarHash: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
138 | NarSize: 2,
139 | References: []string{"00000000000000000000000000000000-some"},
140 | Deriver: "r92m816zcm8v9zjr55lmgy4pdibjbyjp-foo.drv",
141 | }
142 |
143 | info.SanitizeNar()
144 |
145 | a.So(info.FileSize, assertions.ShouldEqual, 2)
146 | a.So(info.FileHash, assertions.ShouldEqual, "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee")
147 | a.So(info.Compression, assertions.ShouldEqual, "none")
148 | a.So(info.URL, assertions.ShouldEqual, "nar/0000000000000000000000000000000000000000000000000000.nar")
149 | }
150 |
--------------------------------------------------------------------------------
/package.nix:
--------------------------------------------------------------------------------
1 | {
2 | buildGo118Module,
3 | lzma,
4 | pkg-config,
5 | inclusive,
6 | rev,
7 | }: let
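  | # `package` is instantiated twice: `final` with the real vendorSha256, and
  | # `passthru.invalidHash` with a dummy one, which makes the expected hash easy
  | # to recover from the mismatch error whenever the Go dependencies change.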
8 | final = package "sha256-NGQZqIawCOb1UPjFCSkSfPV02jMOD+MUx6b5LZyzy94=";
9 | package = vendorSha256:
10 | buildGo118Module rec {
11 | pname = "spongix";
12 | version = "2022.05.10.001";
13 | inherit vendorSha256;
14 |
15 | passthru.invalidHash =
16 | package "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
17 |
18 | src = inclusive ./. [
19 | ./testdata
20 | ./go.mod
21 | ./go.sum
22 |
23 | ./assemble.go
24 | ./assemble_test.go
25 | ./blob_manager.go
26 | ./cache.go
27 | ./docker.go
28 | ./docker_test.go
29 | ./fake.go
30 | ./gc.go
31 | ./helpers.go
32 | ./log_record.go
33 | ./main.go
34 | ./manifest_manager.go
35 | ./narinfo.go
36 | ./narinfo_test.go
37 | ./router.go
38 | ./router_test.go
39 | ./upload_manager.go
40 | ];
41 |
42 | proxyVendor = true;
43 | CGO_ENABLED = "1";
44 |
45 | ldflags = [
46 | "-s"
47 | "-w"
48 | "-X main.buildVersion=${version} -X main.buildCommit=${rev}"
49 | ];
50 | };
51 | in
52 | final
53 |
--------------------------------------------------------------------------------
/router.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "net/http"
5 | "path/filepath"
6 | "strconv"
7 |
8 | "github.com/gorilla/handlers"
9 | "github.com/gorilla/mux"
10 | "github.com/pascaldekloe/metrics"
11 | )
12 |
13 | const (
14 | mimeNarinfo = "text/x-nix-narinfo"
15 | mimeNar = "application/x-nix-nar"
16 | mimeText = "text/plain"
17 | mimeNixCacheInfo = "text/x-nix-cache-info"
18 | )
19 |
20 | func (proxy *Proxy) router() *mux.Router {
21 | r := mux.NewRouter()
22 | r.NotFoundHandler = notFound{}
23 | r.MethodNotAllowedHandler = notAllowed{}
24 | r.Use(
25 | withHTTPLogging(proxy.log),
26 | handlers.RecoveryHandler(handlers.PrintRecoveryStack(true)),
27 | )
28 |
29 | r.HandleFunc("/metrics", metrics.ServeHTTP)
30 |
31 | newDockerHandler(proxy.log, proxy.localStore, proxy.localIndex, filepath.Join(proxy.Dir, "oci"), r)
32 |
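  | // narinfo and NAR requests walk a middleware chain: local cache first, then
  | // S3, then the remote substituters; a miss everywhere falls through to
  | // serveNotFound.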
33 | // backwards compat
34 | for _, prefix := range []string{"/cache", ""} {
35 | r.HandleFunc(prefix+"/nix-cache-info", proxy.nixCacheInfo).Methods("GET")
36 |
37 | narinfo := r.Name("narinfo").Path(prefix + "/{hash:[0-9a-df-np-sv-z]{32}}.narinfo").Subrouter()
38 | narinfo.Use(
39 | proxy.withLocalCacheHandler(),
40 | proxy.withS3CacheHandler(),
41 | withRemoteHandler(proxy.log, proxy.Substituters, []string{""}, proxy.cacheChan),
42 | )
43 | narinfo.Methods("HEAD", "GET", "PUT").HandlerFunc(serveNotFound)
44 |
45 | nar := r.Name("nar").Path(prefix + "/nar/{hash:[0-9a-df-np-sv-z]{52}}{ext:\\.nar(?:\\.xz|)}").Subrouter()
46 | nar.Use(
47 | proxy.withLocalCacheHandler(),
48 | proxy.withS3CacheHandler(),
49 | withRemoteHandler(proxy.log, proxy.Substituters, []string{"", ".xz"}, proxy.cacheChan),
50 | )
51 | nar.Methods("HEAD", "GET", "PUT").HandlerFunc(serveNotFound)
52 | }
53 |
54 | return r
55 | }
56 |
57 | func (proxy *Proxy) withLocalCacheHandler() mux.MiddlewareFunc {
58 | return withCacheHandler(
59 | proxy.log,
60 | proxy.localStore,
61 | proxy.localIndex,
62 | proxy.trustedKeys,
63 | proxy.secretKeys,
64 | )
65 | }
66 |
67 | func (proxy *Proxy) withS3CacheHandler() mux.MiddlewareFunc {
68 | return withCacheHandler(
69 | proxy.log,
70 | proxy.s3Store,
71 | proxy.s3Index,
72 | proxy.trustedKeys,
73 | proxy.secretKeys,
74 | )
75 | }
76 |
77 | type notAllowed struct{}
78 |
79 | func (n notAllowed) ServeHTTP(w http.ResponseWriter, r *http.Request) {
80 | pp("*** 405", r.Method, r.URL.Path, mux.Vars(r))
81 | http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
82 | }
82 |
83 | type notFound struct{}
84 |
85 | func (n notFound) ServeHTTP(w http.ResponseWriter, r *http.Request) {
86 | serveNotFound(w, r)
87 | }
88 |
89 | func serveNotFound(w http.ResponseWriter, r *http.Request) {
90 | pp("*** 404", r.Method, r.URL.Path, mux.Vars(r))
91 | w.Header().Set(headerContentType, mimeText)
92 | w.Header().Set(headerCache, headerCacheMiss)
93 | w.WriteHeader(http.StatusNotFound)
94 | _, _ = w.Write([]byte("not found"))
95 | }
96 |
97 | // GET /nix-cache-info
98 | func (proxy *Proxy) nixCacheInfo(w http.ResponseWriter, r *http.Request) {
99 | answer(w, http.StatusOK, mimeNixCacheInfo, `StoreDir: /nix/store
100 | WantMassQuery: 1
101 | Priority: `+strconv.FormatUint(proxy.CacheInfoPriority, 10))
102 | }
103 |
--------------------------------------------------------------------------------
/router_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "crypto/ed25519"
7 | "net/http"
8 | "os"
9 | "path/filepath"
10 | "testing"
11 | "time"
12 |
13 | "github.com/folbricht/desync"
14 | "github.com/steinfletcher/apitest"
15 | "go.uber.org/zap"
16 | )
17 |
18 | var (
19 | testdata = map[string][]byte{}
20 | fNar = "/nar/0m8sd5qbmvfhyamwfv3af1ff18ykywf3zx5qwawhhp3jv1h777xz.nar"
21 | fNarXz = "/nar/0m8sd5qbmvfhyamwfv3af1ff18ykywf3zx5qwawhhp3jv1h777xz.nar.xz"
22 | fNarinfo = "/8ckxc8biqqfdwyhr0w70jgrcb4h7a4y5.narinfo"
23 | )
24 |
25 | func TestMain(m *testing.M) {
26 | for _, name := range []string{
27 | fNar, fNarXz, fNarinfo,
28 | } {
29 | content, err := os.ReadFile(filepath.Join("testdata", filepath.Base(name)))
30 | if err != nil {
31 | panic(err)
32 | }
33 |
34 | testdata[name] = content
35 | }
36 |
37 | os.Exit(m.Run())
38 | }
39 |
40 | func testProxy(t *testing.T) *Proxy {
41 | proxy := NewProxy()
42 | proxy.Substituters = []string{"http://example.com"}
43 |
44 | indexDir := filepath.Join(t.TempDir(), "index")
45 | if err := os.MkdirAll(filepath.Join(indexDir, "nar"), 0700); err != nil {
46 | panic(err)
47 | } else if proxy.localIndex, err = desync.NewLocalIndexStore(indexDir); err != nil {
48 | panic(err)
49 | }
50 |
51 | storeDir := filepath.Join(t.TempDir(), "store")
52 | if err := os.MkdirAll(storeDir, 0700); err != nil {
53 | panic(err)
54 | } else if proxy.localStore, err = desync.NewLocalStore(storeDir, defaultStoreOptions); err != nil {
55 | panic(err)
56 | }
57 |
58 | // proxy.s3Index = newFakeIndex()
59 | // proxy.s3Store = newFakeStore()
60 | proxy.Dir = t.TempDir()
61 | proxy.TrustedPublicKeys = []string{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="}
62 | proxy.setupKeys()
63 | // NOTE: comment out the next line to enable logging
64 | proxy.log = zap.NewNop()
65 | return proxy
66 | }
67 |
68 | func withS3(proxy *Proxy) *Proxy {
69 | proxy.s3Index = newFakeIndex()
70 | proxy.s3Store = newFakeStore()
71 | return proxy
72 | }
73 |
74 | func TestRouterNixCacheInfo(t *testing.T) {
75 | proxy := testProxy(t)
76 |
77 | apitest.New().
78 | Handler(proxy.router()).
79 | Get("/nix-cache-info").
80 | Expect(t).
81 | Header(headerContentType, mimeNixCacheInfo).
82 | Body(`StoreDir: /nix/store
83 | WantMassQuery: 1
84 | Priority: 50`).
85 | Status(http.StatusOK).
86 | End()
87 | }
88 |
89 | func TestRouterNarinfoHead(t *testing.T) {
90 | t.Run("not found", func(tt *testing.T) {
91 | proxy := testProxy(tt)
92 |
93 | apitest.New().
94 | Handler(proxy.router()).
95 | Method("HEAD").
96 | URL(fNarinfo).
97 | Expect(tt).
98 | Header(headerCache, headerCacheMiss).
99 | Header(headerContentType, mimeText).
100 | Body(``).
101 | Status(http.StatusNotFound).
102 | End()
103 | })
104 |
105 | t.Run("found remote", func(tt *testing.T) {
106 | proxy := testProxy(tt)
107 |
108 | apitest.New().
109 | Mocks(
110 | apitest.NewMock().
111 | Head(fNarinfo).
112 | RespondWith().
113 | Status(http.StatusOK).
114 | End(),
115 | ).
116 | Handler(proxy.router()).
117 | Method("HEAD").
118 | URL(fNarinfo).
119 | Expect(tt).
120 | Header(headerCache, headerCacheRemote).
121 | Header(headerCacheUpstream, "http://example.com"+fNarinfo).
122 | Header(headerContentType, mimeNarinfo).
123 | Body(``).
124 | Status(http.StatusOK).
125 | End()
126 | })
127 |
128 | t.Run("found local", func(tt *testing.T) {
129 | proxy := testProxy(tt)
130 | insertFake(tt, proxy.localStore, proxy.localIndex, fNarinfo)
131 |
132 | apitest.New().
133 | Handler(proxy.router()).
134 | Method("HEAD").
135 | URL(fNarinfo).
136 | Expect(tt).
137 | Header(headerCache, headerCacheHit).
138 | Header(headerContentType, mimeNarinfo).
139 | Body(``).
140 | Status(http.StatusOK).
141 | End()
142 | })
143 |
144 | t.Run("found s3", func(tt *testing.T) {
145 | proxy := withS3(testProxy(tt))
146 | insertFake(tt, proxy.s3Store, proxy.s3Index, fNarinfo)
147 |
148 | apitest.New().
149 | Handler(proxy.router()).
150 | Method("HEAD").
151 | URL(fNarinfo).
152 | Expect(tt).
153 | Header(headerCache, headerCacheHit).
154 | Header(headerContentType, mimeNarinfo).
155 | Body(``).
156 | Status(http.StatusOK).
157 | End()
158 | })
159 | }
160 |
161 | func TestRouterNarHead(t *testing.T) {
162 | t.Run("not found", func(tt *testing.T) {
163 | proxy := testProxy(tt)
164 |
165 | apitest.New().
166 | Mocks(
167 | apitest.NewMock().
168 | Head(fNar+".xz").
169 | RespondWith().
170 | Status(http.StatusNotFound).
171 | End(),
172 | apitest.NewMock().
173 | Head(fNar).
174 | RespondWith().
175 | Status(http.StatusNotFound).
176 | End()).
177 | Handler(proxy.router()).
178 | Method("HEAD").
179 | URL(fNar).
180 | Expect(tt).
181 | Header(headerCache, headerCacheMiss).
182 | Header(headerContentType, mimeText).
183 | Body(``).
184 | Status(http.StatusNotFound).
185 | End()
186 | })
187 |
188 | t.Run("found remote", func(tt *testing.T) {
189 | proxy := testProxy(tt)
190 |
191 | apitest.New().
192 | Mocks(
193 | apitest.NewMock().
194 | Head(fNar+".xz").
195 | RespondWith().
196 | Status(http.StatusOK).
197 | End(),
198 | apitest.NewMock().
199 | Head(fNar).
200 | RespondWith().
201 | Status(http.StatusNotFound).
202 | End(),
203 | ).
204 | Handler(proxy.router()).
205 | Method("HEAD").
206 | URL(fNar).
207 | Expect(tt).
208 | Header(headerCache, headerCacheRemote).
209 | Header(headerCacheUpstream, "http://example.com"+fNar+".xz").
210 | Header(headerContentType, mimeNar).
211 | Body(``).
212 | Status(http.StatusOK).
213 | End()
214 | })
215 |
216 | t.Run("found local", func(tt *testing.T) {
217 | proxy := testProxy(tt)
218 | insertFake(tt, proxy.localStore, proxy.localIndex, fNar)
219 |
220 | apitest.New().
221 | Handler(proxy.router()).
222 | Method("HEAD").
223 | URL(fNar).
224 | Expect(tt).
225 | Header(headerCache, headerCacheHit).
226 | Header(headerContentType, mimeNar).
227 | Body(``).
228 | Status(http.StatusOK).
229 | End()
230 | })
231 |
232 | t.Run("found s3", func(tt *testing.T) {
233 | proxy := withS3(testProxy(tt))
234 | insertFake(tt, proxy.s3Store, proxy.s3Index, fNar)
235 |
236 | apitest.New().
237 | Mocks(
238 | apitest.NewMock().
239 | Head(fNar).
240 | RespondWith().
241 | Status(http.StatusNotFound).
242 | End(),
243 | ).
244 | Handler(proxy.router()).
245 | Method("HEAD").
246 | URL(fNar).
247 | Expect(tt).
248 | Header(headerCache, headerCacheHit).
249 | Header(headerContentType, mimeNar).
250 | Body(``).
251 | Status(http.StatusOK).
252 | End()
253 | })
254 | }
255 |
256 | func TestRouterNarGet(t *testing.T) {
257 | t.Run("not found", func(tt *testing.T) {
258 | proxy := testProxy(tt)
259 |
260 | apitest.New().
261 | Mocks(
262 | apitest.NewMock().
263 | Get(fNar+".xz").
264 | RespondWith().
265 | Status(http.StatusNotFound).
266 | End(),
267 | apitest.NewMock().
268 | Get(fNar).
269 | RespondWith().
270 | Status(http.StatusNotFound).
271 | End(),
272 | ).
273 | Handler(proxy.router()).
274 | Method("GET").
275 | URL(fNar).
276 | Expect(tt).
277 | Header(headerCache, headerCacheMiss).
278 | Header(headerContentType, mimeText).
279 | Body(`not found`).
280 | Status(http.StatusNotFound).
281 | End()
282 | })
283 |
284 | t.Run("found remote xz", func(tt *testing.T) {
285 | proxy := testProxy(tt)
286 |
287 | apitest.New().
288 | Mocks(
289 | apitest.NewMock().
290 | Get(fNar+".xz").
291 | RespondWith().
292 | Body(string(testdata[fNarXz])).
293 | Status(http.StatusOK).
294 | End(),
295 | apitest.NewMock().
296 | Get(fNar).
297 | RespondWith().
298 | Status(http.StatusNotFound).
299 | End(),
300 | ).
301 | Handler(proxy.router()).
302 | Method("GET").
303 | URL(fNar).
304 | Expect(tt).
305 | Header(headerCache, headerCacheRemote).
306 | Header(headerCacheUpstream, "http://example.com"+fNar+".xz").
307 | Header(headerContentType, mimeNar).
308 | Body(string(testdata[fNar])).
309 | Status(http.StatusOK).
310 | End()
311 | })
312 |
313 | t.Run("found remote xz and requested xz", func(tt *testing.T) {
314 | proxy := testProxy(tt)
315 |
316 | apitest.New().
317 | Mocks(
318 | apitest.NewMock().
319 | Get(fNarXz).
320 | RespondWith().
321 | Body(string(testdata[fNarXz])).
322 | Status(http.StatusOK).
323 | End(),
324 | apitest.NewMock().
325 | Get(fNar).
326 | RespondWith().
327 | Status(http.StatusNotFound).
328 | End(),
329 | ).
330 | Handler(proxy.router()).
331 | Method("GET").
332 | URL(fNarXz).
333 | Expect(tt).
334 | Header(headerCache, headerCacheRemote).
335 | Header(headerCacheUpstream, "http://example.com"+fNar+".xz").
336 | Header(headerContentType, mimeNar).
337 | Body(string(testdata[fNarXz])).
338 | Status(http.StatusOK).
339 | End()
340 | })
341 |
342 | t.Run("found local", func(tt *testing.T) {
343 | proxy := testProxy(tt)
344 | insertFake(tt, proxy.localStore, proxy.localIndex, fNar)
345 |
346 | apitest.New().
347 | Handler(proxy.router()).
348 | Method("GET").
349 | URL(fNar).
350 | Expect(tt).
351 | Header(headerCache, headerCacheHit).
352 | Header(headerContentType, mimeNar).
353 | Body(``).
354 | Status(http.StatusOK).
355 | End()
356 | })
357 |
358 | t.Run("found s3", func(tt *testing.T) {
359 | proxy := withS3(testProxy(tt))
360 | insertFake(tt, proxy.s3Store, proxy.s3Index, fNar)
361 |
362 | apitest.New().
363 | Handler(proxy.router()).
364 | Method("GET").
365 | URL(fNar).
366 | Expect(tt).
367 | Header(headerCache, headerCacheHit).
368 | Header(headerContentType, mimeNar).
369 | Body(``).
370 | Status(http.StatusOK).
371 | End()
372 | })
373 | }
374 |
375 | func TestRouterNarinfoGet(t *testing.T) {
376 | t.Run("not found", func(tt *testing.T) {
377 | proxy := testProxy(tt)
378 |
379 | apitest.New().
380 | Handler(proxy.router()).
381 | Method("GET").
382 | URL(fNarinfo).
383 | Expect(tt).
384 | Header(headerCache, headerCacheMiss).
385 | Header(headerContentType, mimeText).
386 | Body(`not found`).
387 | Status(http.StatusNotFound).
388 | End()
389 | })
390 |
391 | t.Run("found local", func(tt *testing.T) {
392 | proxy := testProxy(tt)
393 | insertFake(tt, proxy.localStore, proxy.localIndex, fNarinfo)
394 |
395 | apitest.New().
396 | Handler(proxy.router()).
397 | Method("GET").
398 | URL(fNarinfo).
399 | Expect(tt).
400 | Header(headerCache, headerCacheHit).
401 | Header(headerContentType, mimeNarinfo).
402 | Body(string(testdata[fNarinfo])).
403 | Status(http.StatusOK).
404 | End()
405 | })
406 |
407 | t.Run("found s3", func(tt *testing.T) {
408 | proxy := withS3(testProxy(tt))
409 | insertFake(tt, proxy.s3Store, proxy.s3Index, fNarinfo)
410 |
411 | apitest.New().
412 | Handler(proxy.router()).
413 | Method("GET").
414 | URL(fNarinfo).
415 | Expect(tt).
416 | Header(headerCache, headerCacheHit).
417 | Header(headerContentType, mimeNarinfo).
418 | Body(string(testdata[fNarinfo])).
419 | Status(http.StatusOK).
420 | End()
421 | })
422 |
423 | t.Run("found remote", func(tt *testing.T) {
424 | proxy := testProxy(tt)
425 |
426 | apitest.New().
427 | EnableMockResponseDelay().
428 | Mocks(
429 | apitest.NewMock().
430 | Get(fNarinfo).
431 | RespondWith().
432 | FixedDelay((1*time.Second).Milliseconds()).
433 | Body(string(testdata[fNarinfo])).
434 | Status(http.StatusOK).
435 | End(),
436 | ).
437 | Handler(proxy.router()).
438 | Method("GET").
439 | URL(fNarinfo).
440 | Expect(tt).
441 | Header(headerCache, headerCacheRemote).
442 | Header(headerCacheUpstream, "http://example.com"+fNarinfo).
443 | Header(headerContentType, mimeNarinfo).
444 | Body(string(testdata[fNarinfo])).
445 | Status(http.StatusOK).
446 | End()
447 | })
448 |
449 | t.Run("copies remote to local", func(tt *testing.T) {
450 | proxy := testProxy(tt)
451 | go proxy.startCache()
452 | defer close(proxy.cacheChan)
453 |
454 | mockReset := apitest.NewStandaloneMocks(
455 | apitest.NewMock().
456 | Get("http://example.com" + fNarinfo).
457 | RespondWith().
458 | Body(string(testdata[fNarinfo])).
459 | Status(http.StatusOK).
460 | End(),
461 | ).End()
462 | defer mockReset()
463 |
464 | apitest.New().
465 | Mocks(
466 | apitest.NewMock().
467 | Get(fNarinfo).
468 | RespondWith().
469 | Body(string(testdata[fNarinfo])).
470 | Status(http.StatusOK).
471 | End(),
472 | ).
473 | Handler(proxy.router()).
474 | Method("GET").
475 | URL(fNarinfo).
476 | Expect(tt).
477 | Header(headerCache, headerCacheRemote).
478 | Header(headerCacheUpstream, "http://example.com"+fNarinfo).
479 | Header(headerContentType, mimeNarinfo).
480 | Body(string(testdata[fNarinfo])).
481 | Status(http.StatusOK).
482 | End()
483 |
484 | for metricRemoteCachedOk.Get()+metricRemoteCachedFail.Get() == 0 {
485 | time.Sleep(1 * time.Millisecond)
486 | }
487 |
488 | apitest.New().
489 | Handler(proxy.router()).
490 | Method("GET").
491 | URL(fNarinfo).
492 | Expect(tt).
493 | Header(headerCache, headerCacheHit).
494 | Header(headerContentType, mimeNarinfo).
495 | Body(string(testdata[fNarinfo])).
496 | Status(http.StatusOK).
497 | End()
498 | })
499 | }
500 |
501 | func TestRouterNarinfoPut(t *testing.T) {
502 | t.Run("upload success", func(tt *testing.T) {
503 | proxy := withS3(testProxy(tt))
504 |
505 | apitest.New().
506 | Handler(proxy.router()).
507 | Method("PUT").
508 | URL(fNarinfo).
509 | Body(string(testdata[fNarinfo])).
510 | Expect(tt).
511 | Header(headerContentType, mimeText).
512 | Body("ok\n").
513 | Status(http.StatusOK).
514 | End()
515 |
516 | apitest.New().
517 | Handler(proxy.router()).
518 | Method("GET").
519 | URL(fNarinfo).
520 | Expect(tt).
521 | Header(headerContentType, mimeNarinfo).
522 | Header(headerCache, headerCacheHit).
523 | Body(string(testdata[fNarinfo])).
524 | Status(http.StatusOK).
525 | End()
526 | })
527 |
528 | t.Run("upload invalid", func(tt *testing.T) {
529 | proxy := testProxy(tt)
530 |
531 | apitest.New().
532 | Handler(proxy.router()).
533 | Method("PUT").
534 | URL(fNarinfo).
535 | Body("blah").
536 | Expect(tt).
537 | Header(headerContentType, mimeText).
538 | Body(`Failed to parse line: "blah"`).
539 | Status(http.StatusBadRequest).
540 | End()
541 | })
542 |
543 | t.Run("upload unsigned", func(tt *testing.T) {
544 | proxy := testProxy(tt)
545 |
546 | apitest.New().
547 | Handler(proxy.router()).
548 | Method("PUT").
549 | URL(fNarinfo).
550 | Body("blah").
551 | Expect(tt).
552 | Header(headerContentType, mimeText).
553 | Body(`Failed to parse line: "blah"`).
554 | Status(http.StatusBadRequest).
555 | End()
556 | })
557 |
558 | t.Run("signs unsigned narinfos", func(tt *testing.T) {
559 | proxy := testProxy(tt)
560 |
561 | seed := make([]byte, ed25519.SeedSize)
562 | proxy.secretKeys["foo"] = ed25519.NewKeyFromSeed(seed)
563 |
564 | emptyInfo := &Narinfo{}
565 | if err := emptyInfo.Unmarshal(bytes.NewReader(testdata[fNarinfo])); err != nil {
566 | tt.Fatal(err)
567 | }
568 | emptyInfo.Sig = []string{}
569 | empty := &bytes.Buffer{}
570 | if err := emptyInfo.Marshal(empty); err != nil {
571 | tt.Fatal(err)
572 | }
573 |
574 | apitest.New().
575 | Handler(proxy.router()).
576 | Method("PUT").
577 | URL(fNarinfo).
578 | Body(empty.String()).
579 | Expect(tt).
580 | Header(headerContentType, mimeText).
581 | Body("ok\n").
582 | Status(http.StatusOK).
583 | End()
584 |
585 | expectInfo := &Narinfo{}
586 | if err := expectInfo.Unmarshal(bytes.NewReader(testdata[fNarinfo])); err != nil {
587 | tt.Fatal(err)
588 | }
589 | expectInfo.Sig = []string{"foo:MGrENumWZ1kbm23vCTyYrw6hRBJtLGIIpfHjpZszs2D1G1AALMKvl49T66WIhx2X02s8n/zsfUPpga2bL6PmBQ=="}
590 | expect := &bytes.Buffer{}
591 | if err := expectInfo.Marshal(expect); err != nil {
592 | tt.Fatal(err)
593 | }
594 |
595 | apitest.New().
596 | Handler(proxy.router()).
597 | Method("GET").
598 | URL(fNarinfo).
599 | Expect(tt).
600 | Header(headerCache, headerCacheHit).
601 | Header(headerContentType, mimeNarinfo).
602 | Body(expect.String()).
603 | Status(http.StatusOK).
604 | End()
605 | })
606 | }
607 |
608 | func TestRouterNarPut(t *testing.T) {
609 | t.Run("upload success", func(tt *testing.T) {
610 | proxy := withS3(testProxy(tt))
611 |
612 | apitest.New().
613 | Handler(proxy.router()).
614 | Method("PUT").
615 | URL(fNar).
616 | Body(string(testdata[fNar])).
617 | Expect(tt).
618 | Header(headerContentType, mimeText).
619 | Body("ok\n").
620 | Status(http.StatusOK).
621 | End()
622 |
623 | apitest.New().
624 | Handler(proxy.router()).
625 | Method("GET").
626 | URL(fNar).
627 | Expect(tt).
628 | Header(headerContentType, mimeNar).
629 | Header(headerCache, headerCacheHit).
630 | Body(string(testdata[fNar])).
631 | Status(http.StatusOK).
632 | End()
633 | })
634 |
635 | t.Run("upload xz success", func(tt *testing.T) {
636 | proxy := withS3(testProxy(tt))
637 |
638 | apitest.New().
639 | Handler(proxy.router()).
640 | Method("PUT").
641 | URL(fNarXz).
642 | Body(string(testdata[fNarXz])).
643 | Expect(tt).
644 | Header(headerContentType, mimeText).
645 | Body("ok\n").
646 | Status(http.StatusOK).
647 | End()
648 |
649 | apitest.New().
650 | Handler(proxy.router()).
651 | Method("GET").
652 | URL(fNar).
653 | Expect(tt).
654 | Header(headerContentType, mimeNar).
655 | Header(headerCache, headerCacheHit).
656 | Body(string(testdata[fNar])).
657 | Status(http.StatusOK).
658 | End()
659 | })
660 |
661 | t.Run("upload xz to /cache success", func(tt *testing.T) {
662 | proxy := withS3(testProxy(tt))
663 |
664 | apitest.New().
665 | Handler(proxy.router()).
666 | Method("PUT").
667 | URL("/cache"+fNarXz).
668 | Body(string(testdata[fNarXz])).
669 | Expect(tt).
670 | Header(headerContentType, mimeText).
671 | Body("ok\n").
672 | Status(http.StatusOK).
673 | End()
674 |
675 | apitest.New().
676 | Handler(proxy.router()).
677 | Method("GET").
678 | URL(fNar).
679 | Expect(tt).
680 | Header(headerContentType, mimeNar).
681 | Header(headerCache, headerCacheHit).
682 | Body(string(testdata[fNar])).
683 | Status(http.StatusOK).
684 | End()
685 | })
686 | }
687 |
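  | // insertFake chunks a testdata blob into the given store and registers its
  | // index under the request path, simulating content cached on an earlier run.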
688 | func insertFake(
689 | t *testing.T,
690 | store desync.WriteStore,
691 | index desync.IndexWriteStore,
692 | path string) {
693 | if chunker, err := desync.NewChunker(bytes.NewBuffer(testdata[path]), chunkSizeMin(), chunkSizeAvg, chunkSizeMax()); err != nil {
694 | t.Fatal(err)
695 | } else if idx, err := desync.ChunkStream(context.Background(), chunker, store, 1); err != nil {
696 | t.Fatal(err)
697 | } else if rel, err := filepath.Rel("/", path); err != nil {
698 | t.Fatal(err)
699 | } else if err := index.StoreIndex(rel, idx); err != nil {
700 | t.Fatal(err)
701 | }
702 | }
703 |
--------------------------------------------------------------------------------
/scripts/dl.sh:
--------------------------------------------------------------------------------
1 | set -exuo pipefail
2 | curl -s -v http://alpha.fritz.box:7745/nar/039sqini16vclacyy1gx421pp19fjrn4vrn6arrljpv1qkb993yh.nar > 039sqini16vclacyy1gx421pp19fjrn4vrn6arrljpv1qkb993yh.nar
3 | curl -s -v http://alpha.fritz.box:7745/nar/06ms6ksdp19zzyc6g3kl72sqrvbgf57yzfjr5pfz16bhwq4dz604.nar > 06ms6ksdp19zzyc6g3kl72sqrvbgf57yzfjr5pfz16bhwq4dz604.nar
4 | curl -s -v http://alpha.fritz.box:7745/nar/0cax356sbzwf2aknyh9ly0y9q4x92py9bcchyq3nr7ji4z2fmy1w.nar > 0cax356sbzwf2aknyh9ly0y9q4x92py9bcchyq3nr7ji4z2fmy1w.nar
5 | curl -s -v http://alpha.fritz.box:7745/nar/0ll5xsrprmi8w94m13qb7rjiw1r0h3paz8mv5952l2f4mgffrds2.nar > 0ll5xsrprmi8w94m13qb7rjiw1r0h3paz8mv5952l2f4mgffrds2.nar
6 | curl -s -v http://alpha.fritz.box:7745/nar/0m3fp46zyspmr0b4g33jyh44g1942qv35h2ik647jd3z99ryxlgc.nar > 0m3fp46zyspmr0b4g33jyh44g1942qv35h2ik647jd3z99ryxlgc.nar
7 | curl -s -v http://alpha.fritz.box:7745/nar/0nvqk2rr3phm8snxcsbcn3gv11r1jp8hvs1119fxx2q481kmn88n.nar > 0nvqk2rr3phm8snxcsbcn3gv11r1jp8hvs1119fxx2q481kmn88n.nar
8 | curl -s -v http://alpha.fritz.box:7745/nar/0pp4bciq6zs5zd69p012zcx66v2ndph165hnycsi3r3wp5v7lkxx.nar > 0pp4bciq6zs5zd69p012zcx66v2ndph165hnycsi3r3wp5v7lkxx.nar
9 | curl -s -v http://alpha.fritz.box:7745/nar/0przdrvyyg0adfi2fwyjkbp63l9fasbawa2vh62vpb9icyc0352f.nar > 0przdrvyyg0adfi2fwyjkbp63l9fasbawa2vh62vpb9icyc0352f.nar
10 | curl -s -v http://alpha.fritz.box:7745/nar/0ws3h2ld64l2kjmf4ca4lfdawkzmcsdngxqnlc7dk4j63m52avp5.nar > 0ws3h2ld64l2kjmf4ca4lfdawkzmcsdngxqnlc7dk4j63m52avp5.nar
11 | curl -s -v http://alpha.fritz.box:7745/nar/0xy98dmdcqcg6k72fqk0q1mybrkqnj0pv4nmvs3qmr5n6spja0n0.nar > 0xy98dmdcqcg6k72fqk0q1mybrkqnj0pv4nmvs3qmr5n6spja0n0.nar
12 | curl -s -v http://alpha.fritz.box:7745/nar/14yp9hlcwk0gvg6sv85v4shcl65f2pwc3kh45d09phsc2ivv0ggm.nar > 14yp9hlcwk0gvg6sv85v4shcl65f2pwc3kh45d09phsc2ivv0ggm.nar
13 | curl -s -v http://alpha.fritz.box:7745/nar/15g58m5jqjfiyyjsdm7vqqqnnqh6x3dm5fg84ri26cha4ilmbq1k.nar > 15g58m5jqjfiyyjsdm7vqqqnnqh6x3dm5fg84ri26cha4ilmbq1k.nar
14 | curl -s -v http://alpha.fritz.box:7745/nar/16x3wglqfwml44lp5cddarqb9c8i8iw35qs7zs351f4nfdpzwqsf.nar > 16x3wglqfwml44lp5cddarqb9c8i8iw35qs7zs351f4nfdpzwqsf.nar
15 | curl -s -v http://alpha.fritz.box:7745/nar/17mpf86zs82hlszxvnaalbiclr603ixzq4rlg7wlc0qnpg7iaria.nar > 17mpf86zs82hlszxvnaalbiclr603ixzq4rlg7wlc0qnpg7iaria.nar
16 | curl -s -v http://alpha.fritz.box:7745/nar/18kh31zdcxwb9sd97g8d20x33wwqijxzgvwcil506flb6w5jm4ys.nar > 18kh31zdcxwb9sd97g8d20x33wwqijxzgvwcil506flb6w5jm4ys.nar
17 | curl -s -v http://alpha.fritz.box:7745/nar/18mfij512wc5nsxzw3lbs9r5mgfzr07bnjmlx814aby13qwg977n.nar > 18mfij512wc5nsxzw3lbs9r5mgfzr07bnjmlx814aby13qwg977n.nar
18 | curl -s -v http://alpha.fritz.box:7745/nar/19g8qi949hxdh2jyi7wbz1nbknr86wdvw1qalwh9blcafqwij9xz.nar > 19g8qi949hxdh2jyi7wbz1nbknr86wdvw1qalwh9blcafqwij9xz.nar
19 | curl -s -v http://alpha.fritz.box:7745/nar/1bx6q1s64v2049cipylknvxz8b363blnr85ki3n16kq9zw11fmsw.nar > 1bx6q1s64v2049cipylknvxz8b363blnr85ki3n16kq9zw11fmsw.nar
20 | curl -s -v http://alpha.fritz.box:7745/nar/1cxw7vfkdng11c4bpj00h87i8r4yqblncspnmxqj1jp0l75gm0ni.nar > 1cxw7vfkdng11c4bpj00h87i8r4yqblncspnmxqj1jp0l75gm0ni.nar
21 | curl -s -v http://alpha.fritz.box:7745/nar/1dwsyv6rgl9aq4wa4p2i6wiy8v4i40rmv8x1nj2vpgrs1gfqxs17.nar > 1dwsyv6rgl9aq4wa4p2i6wiy8v4i40rmv8x1nj2vpgrs1gfqxs17.nar
22 | curl -s -v http://alpha.fritz.box:7745/nar/1dzabqi62wz844q6vj4pa1kkajqrcdvvldv2ll9pbbrvi3zp29m2.nar > 1dzabqi62wz844q6vj4pa1kkajqrcdvvldv2ll9pbbrvi3zp29m2.nar
23 | curl -s -v http://alpha.fritz.box:7745/nar/1f7yms2x17f9mrps3y2qkcispxxbxlhkz013vis4d3ppfdmy95ck.nar > 1f7yms2x17f9mrps3y2qkcispxxbxlhkz013vis4d3ppfdmy95ck.nar
24 | curl -s -v http://alpha.fritz.box:7745/nar/1glj03gsjr7zj8wlkc7296rfirzn77dprv85v9b5na3cyi3xhdaw.nar > 1glj03gsjr7zj8wlkc7296rfirzn77dprv85v9b5na3cyi3xhdaw.nar
25 | curl -s -v http://alpha.fritz.box:7745/nar/1gnn4r4ibh60dwis9m31y4b0iskk8jnwjah86851aib4rvm9ra21.nar > 1gnn4r4ibh60dwis9m31y4b0iskk8jnwjah86851aib4rvm9ra21.nar
26 | curl -s -v http://alpha.fritz.box:7745/nar/1h3dnfn3hzv0hyv9nn1ghgbqlhcjirij35dvjc2l0jzgcxfcscny.nar > 1h3dnfn3hzv0hyv9nn1ghgbqlhcjirij35dvjc2l0jzgcxfcscny.nar
27 | curl -s -v http://alpha.fritz.box:7745/nar/1i0kkjg4mzsax30b9g885kmacwby88ycgv0v6s3s504z8wrjghak.nar > 1i0kkjg4mzsax30b9g885kmacwby88ycgv0v6s3s504z8wrjghak.nar
28 | curl -s -v http://alpha.fritz.box:7745/nar/1j57nxfb4va85178nk8nxmm8kyfsp1738kcaay9hhgrlhkz1v1g4.nar > 1j57nxfb4va85178nk8nxmm8kyfsp1738kcaay9hhgrlhkz1v1g4.nar
29 | curl -s -v http://alpha.fritz.box:7745/nar/1l885bjv9n3czwqikqprfq8vj3x4f3msd2grwjfwm6072krjpskm.nar > 1l885bjv9n3czwqikqprfq8vj3x4f3msd2grwjfwm6072krjpskm.nar
30 | curl -s -v http://alpha.fritz.box:7745/nar/1pcrh0nxl6j1ws3b3sh266ci666h13knimj17f0m49dinc1hzdww.nar > 1pcrh0nxl6j1ws3b3sh266ci666h13knimj17f0m49dinc1hzdww.nar
31 | curl -s -v http://alpha.fritz.box:7745/nar/1rdmmkny35f5br7awawx15lqdh33zbzjh6xwql7v93mqk0d1dsjw.nar > 1rdmmkny35f5br7awawx15lqdh33zbzjh6xwql7v93mqk0d1dsjw.nar
32 | curl -s -v http://alpha.fritz.box:7745/nar/1vmkchx8ac80b2vcrhbsz80mc9dpaq988i00fjpxxancrj5xy37a.nar > 1vmkchx8ac80b2vcrhbsz80mc9dpaq988i00fjpxxancrj5xy37a.nar
33 | curl -s -v http://alpha.fritz.box:7745/nar/1xvvm2yjy1va04vh7s38lq0qajw8xvccmmj1ka7fs3nxwm59v3mn.nar > 1xvvm2yjy1va04vh7s38lq0qajw8xvccmmj1ka7fs3nxwm59v3mn.nar
34 | curl -s -v http://alpha.fritz.box:7745/nar/06ms6ksdp19zzyc6g3kl72sqrvbgf57yzfjr5pfz16bhwq4dz604.nar > 06ms6ksdp19zzyc6g3kl72sqrvbgf57yzfjr5pfz16bhwq4dz604.nar
35 |
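36 | # Each request exercises the spongix /nar endpoint on alpha.fritz.box:7745;
37 | # the last line repeats an earlier hash, presumably to verify a cache hit.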
--------------------------------------------------------------------------------
/scripts/foo.nix:
--------------------------------------------------------------------------------
1 | with import <nixpkgs> {};
2 | writeText "test1" "test1"
3 |
--------------------------------------------------------------------------------
/scripts/hook.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -euf
4 |
5 | export IFS=' '
6 |
7 | echo "Uploading paths to cache $OUT_PATHS"
8 | exec nix copy --to 's3://cache?endpoint=127.0.0.1:7070&scheme=http' $OUT_PATHS
9 |
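10 | # This mirrors the post-build-hook example from the Nix manual: Nix runs the
11 | # hook after each successful build with OUT_PATHS set to the space-separated
12 | # output store paths. To enable it, point nix.conf at this script (the path
13 | # below is an assumption):
14 | #   post-build-hook = /etc/nix/hook.sh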
--------------------------------------------------------------------------------
/scripts/infos.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -exuo pipefail
4 |
5 | urls=(
6 | )
7 |
8 | mkdir -p narinfo
9 |
10 | for url in "${urls[@]}"; do
11 | curl -s "http://127.0.0.1:7777/$url" > "narinfo/$url"
12 | done
13 |
14 | rm -f /tmp/spongix/index/nar/1s50rvmv1n79lk7nhn2w2xvzin0jd1l67bkz5c93xjrc3knl187s.nar
15 | rm -rf /tmp/spongix/store/*
16 | nix store dump-path /nix/store/cbckczjas96g9smn1g2s9kr8m18yg1pb-pdftk-3.2.1 > 1s50rvmv1n79lk7nhn2w2xvzin0jd1l67bkz5c93xjrc3knl187s.nar
17 | curl -s -v http://127.0.0.1:7777/nar/1s50rvmv1n79lk7nhn2w2xvzin0jd1l67bkz5c93xjrc3knl187s.nar > 1s50rvmv1n79lk7nhn2w2xvzin0jd1l67bkz5c93xjrc3knl187s.nar.0
18 | curl -s -v http://127.0.0.1:7777/nar/1s50rvmv1n79lk7nhn2w2xvzin0jd1l67bkz5c93xjrc3knl187s.nar > 1s50rvmv1n79lk7nhn2w2xvzin0jd1l67bkz5c93xjrc3knl187s.nar.1
19 | curl -s -v http://127.0.0.1:7777/nar/1s50rvmv1n79lk7nhn2w2xvzin0jd1l67bkz5c93xjrc3knl187s.nar > 1s50rvmv1n79lk7nhn2w2xvzin0jd1l67bkz5c93xjrc3knl187s.nar.2
20 | sha256sum 1s50rvmv1n79lk7nhn2w2xvzin0jd1l67bkz5c93xjrc3knl187s.nar*
21 |
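22 | # Fetching the same NAR three times and running sha256sum over the local
23 | # dump plus all three downloads checks that spongix reassembles and serves
24 | # byte-identical archives on repeated requests.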
--------------------------------------------------------------------------------
/scripts/run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -exuo pipefail
4 |
5 | cacheDir=/tmp/spongix
6 |
7 | [ -s skey ] || nix key generate-secret --key-name kappa > skey
8 | [ skey -ot pkey ] || nix key convert-secret-to-public < skey > pkey
9 |
10 | # rm -rf "$cacheDir"
11 |
12 | export MINIO_ACCESS_KEY=minioadmin
13 | export MINIO_SECRET_KEY=minioadmin
14 |
15 | mc mb /tmp/spongix/minio/ncp
16 |
17 | go run . \
18 | --substituters \
19 | 'https://cache.nixos.org' \
20 | 'https://hydra.iohk.io' \
21 | 'https://cachix.cachix.org' \
22 | 'https://manveru.cachix.org' \
23 | 'https://hercules-ci.cachix.org' \
24 | --trusted-public-keys \
25 | 'kappa:Ffd0MaBUBrRsMCHsQ6YMmGO+tlh7EiHRFK2YfOTSwag=' \
26 | 'cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=' \
27 | 'hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ=' \
28 | 'cachix.cachix.org-1:eWNHQldwUO7G2VkjpnjDbWwy4KQ/HNxht7H4SSoMckM=' \
29 | 'hercules-ci.cachix.org-1:ZZeDl9Va+xe9j+KqdzoBZMFJHVQ42Uu/c/1/KMC5Lw0=' \
30 | 'manveru.cachix.org-1:L5nJHSinfA2K5dDCG3KAEadwf/e3qqhuBr7yCwSksXo=' \
31 | --secret-key-files ./skey \
32 | --listen :7777 \
33 | --dir "$cacheDir" \
34 | --log-mode development \
35 | --cache-size 4 \
36 | --bucket-url 's3+http://127.0.0.1:9000/ncp' \
37 | --bucket-region eu-central-1
38 |
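39 | # Assumes a MinIO server is already listening on 127.0.0.1:9000 (matching
40 | # --bucket-url) with the default minioadmin credentials; `mc mb` above only
41 | # prepares the "ncp" bucket directory under the MinIO data root.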
--------------------------------------------------------------------------------
/test.nix:
--------------------------------------------------------------------------------
1 | {
2 | lib,
3 | pkgs,
4 | inputs,
5 | }:
6 | pkgs.nixosTest {
7 | name = "spongix";
8 |
9 | testScript = ''
10 | cache.systemctl("is-system-running --wait")
11 | cache.wait_for_unit("spongix")
12 | '';
13 |
14 | nodes = {
15 | cache = {
16 | imports = [inputs.self.nixosModules.spongix];
17 | services.spongix = {
18 | package = pkgs.spongix;
19 | cacheDir = "/cache";
20 | enable = true;
21 | };
22 | };
23 | };
24 | }
25 |
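26 | # A minimal smoke test: boot one node with the spongix module enabled and
27 | # wait for the unit to come up. Typically wired into the flake's checks;
28 | # the exact attribute path depends on flake.nix.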
--------------------------------------------------------------------------------
/testdata/0f54iihf02azn24vm6gky7xxpadq5693qrjzkaavbnd68shvgbd7.nar.xz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/spongix/df995cc9ac15a282044fe2a00244a595093b6f97/testdata/0f54iihf02azn24vm6gky7xxpadq5693qrjzkaavbnd68shvgbd7.nar.xz
--------------------------------------------------------------------------------
/testdata/0m8sd5qbmvfhyamwfv3af1ff18ykywf3zx5qwawhhp3jv1h777xz.nar:
--------------------------------------------------------------------------------
(binary NAR archive: a single regular file with contents "test"; raw bytes omitted)
--------------------------------------------------------------------------------
/testdata/0m8sd5qbmvfhyamwfv3af1ff18ykywf3zx5qwawhhp3jv1h777xz.nar.xz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/spongix/df995cc9ac15a282044fe2a00244a595093b6f97/testdata/0m8sd5qbmvfhyamwfv3af1ff18ykywf3zx5qwawhhp3jv1h777xz.nar.xz
--------------------------------------------------------------------------------
/testdata/8ckxc8biqqfdwyhr0w70jgrcb4h7a4y5.narinfo:
--------------------------------------------------------------------------------
1 | StorePath: /nix/store/8ckxc8biqqfdwyhr0w70jgrcb4h7a4y5-libunistring-0.9.10
2 | URL: nar/1n02zg7nnkfrcf7rl8z5p030hkjakry6d60mnd248fa94s0bn301.nar
3 | Compression: none
4 | FileHash: sha256:1n02zg7nnkfrcf7rl8z5p030hkjakry6d60mnd248fa94s0bn301
5 | FileSize: 1634360
6 | NarHash: sha256:1n02zg7nnkfrcf7rl8z5p030hkjakry6d60mnd248fa94s0bn301
7 | NarSize: 1634360
8 | References: 8ckxc8biqqfdwyhr0w70jgrcb4h7a4y5-libunistring-0.9.10
9 | Deriver: nq5zrwpzxs20qvl54ks3frj14qhfalqp-libunistring-0.9.10.drv
10 | Sig: cache.nixos.org-1:DTAdPmlglN2mGxmSN0QwAhVcE4x924hH5cPXFZ64ifx+fa2GipOlvjN/zvQVck9VR0K+0vlwfg/Ne2nm3WU9Bg==
11 |
--------------------------------------------------------------------------------
/treefmt.toml:
--------------------------------------------------------------------------------
1 | [formatter.go]
2 | command = "gofmt"
3 | options = ["-w"]
4 | includes = ["*.go"]
5 |
6 | [formatter.nix]
7 | command = "alejandra"
8 | includes = ["*.nix"]
9 |
--------------------------------------------------------------------------------
/upload_manager.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "time"
6 |
7 | "github.com/folbricht/desync"
8 | )
9 |
10 | type uploadManager struct {
11 | c chan uploadMsg
12 | }
13 |
14 | func newUploadManager(store desync.WriteStore, index desync.IndexWriteStore) uploadManager {
15 | return uploadManager{c: uploadLoop(store, index)}
16 | }
17 |
18 | func (m uploadManager) new(uuid string) {
19 | m.c <- uploadMsg{t: uploadMsgNew, uuid: uuid}
20 | }
21 |
22 | func (m uploadManager) get(uuid string) *dockerUpload {
23 | c := make(chan *dockerUpload)
24 | m.c <- uploadMsg{t: uploadMsgGet, uuid: uuid, c: c}
25 | return <-c
26 | }
27 |
28 | func (m uploadManager) del(uuid string) *dockerUpload {
29 | c := make(chan *dockerUpload)
30 | 	m.c <- uploadMsg{t: uploadMsgDel, uuid: uuid, c: c}
31 | return <-c
32 | }
33 |
34 | type uploadMsg struct {
35 | t uploadMsgType
36 | c chan *dockerUpload
37 | uuid string
38 | }
39 |
40 | type uploadMsgType int
41 |
42 | const (
43 | 	uploadMsgNew uploadMsgType = iota
44 | 	uploadMsgGet
45 | 	uploadMsgDel
46 | )
47 |
48 | func uploadLoop(store desync.WriteStore, index desync.IndexWriteStore) chan uploadMsg {
49 | uploads := map[string]*dockerUpload{}
50 |
51 | ch := make(chan uploadMsg, 10)
52 | go func() {
53 | for msg := range ch {
54 | switch msg.t {
55 | case uploadMsgNew:
56 | 				// start tracking the upload with an empty in-memory buffer
57 | uploads[msg.uuid] = &dockerUpload{
58 | uuid: msg.uuid,
59 | content: &bytes.Buffer{},
60 | lastModified: time.Now(),
61 | }
62 | case uploadMsgGet:
63 | 				// reply with the upload if known, nil otherwise
64 | if upload, ok := uploads[msg.uuid]; ok {
65 | msg.c <- upload
66 | } else {
67 | msg.c <- nil
68 | }
69 | case uploadMsgDel:
70 | 				// reply with the removed upload (nil if unknown), then forget it
71 | 				msg.c <- uploads[msg.uuid]
72 | 				delete(uploads, msg.uuid)
73 | default:
74 | panic(msg)
75 | }
76 | }
77 | }()
78 |
79 | return ch
80 | }
81 |
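82 | // Hypothetical usage sketch (assumed caller names; the real call sites are
83 | // presumably in docker.go):
84 | //
85 | //	m := newUploadManager(store, index)
86 | //	m.new(uuid)                  // start tracking a blob upload
87 | //	if u := m.get(uuid); u != nil {
88 | //		u.content.Write(chunk)   // buffer an incoming chunk
89 | //	}
90 | //	_ = m.del(uuid)              // finish: returns the upload and forgets it
91 | //
92 | // All map mutation happens inside uploadLoop's single goroutine, so the
93 | // manager can be shared by concurrent HTTP handlers without extra locking.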
--------------------------------------------------------------------------------