├── .github └── workflows │ ├── go.yml │ ├── golangci-lint.yml │ └── release.yml ├── .gitignore ├── .gon.hcl ├── .goreleaser.yaml ├── LICENSE ├── README.md ├── carserver ├── server.go ├── server_test.go ├── station_api_impl.go └── station_api_impl_test.go ├── carstore ├── blockstore.go ├── carstore.go ├── carstore_test.go ├── gateway_api.go ├── gateway_api_test.go ├── gateway_mount.go └── gateway_mount_test.go ├── cmd └── saturn-l2 │ ├── l1_discovery_test.go │ ├── l2_id_persistence_test.go │ └── main.go ├── go.mod ├── go.sum ├── l1interop ├── l1sseclient.go └── l1sseclient_test.go ├── logs └── logs.go ├── resources ├── resources.go └── webui │ └── package-lock.json ├── scripts └── download-webui.sh ├── station └── station.go ├── testdata ├── files │ ├── junk.dat │ ├── sample-rw-bs-v2.car │ ├── sample-v1.car │ └── sample-wrapped-v2.car └── testdata.go ├── testutils └── helpers.go └── types ├── types.go └── types_test.go /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | jobs: 10 | build-and-test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v3 14 | 15 | - name: Set up Go 16 | uses: actions/setup-go@v3 17 | with: 18 | go-version: 1.18 19 | 20 | - name: Build 21 | run: go build -v ./... 22 | 23 | - name: Test 24 | run: go test -v -race ./... -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | name: golangci-lint 2 | on: 3 | push: 4 | tags: 5 | - v* 6 | branches: 7 | - master 8 | - main 9 | pull_request: 10 | permissions: 11 | contents: read 12 | # Optional: allow read access to pull request. Use with `only-new-issues` option. 
13 | # pull-requests: read 14 | jobs: 15 | golangci: 16 | name: lint 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/setup-go@v3 20 | with: 21 | go-version: 1.18 22 | - uses: actions/checkout@v3 23 | - name: golangci-lint 24 | uses: golangci/golangci-lint-action@v3 -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: goreleaser 2 | 3 | on: 4 | push: 5 | 6 | permissions: 7 | contents: write 8 | # packages: write 9 | # issues: write 10 | 11 | jobs: 12 | goreleaser: 13 | runs-on: macos-latest 14 | steps: 15 | - 16 | name: Checkout 17 | uses: actions/checkout@v2 18 | with: 19 | fetch-depth: 0 20 | - 21 | name: Include web UI 22 | uses: robinraju/release-downloader@v1.3 23 | with: 24 | repository: filecoin-saturn/node-webui 25 | # Update tag to deploy new web UI. 26 | tag: v0.0.19 27 | fileName: saturn-webui.tar.gz 28 | out-file-path: resources/webui 29 | token: ${{ secrets.GITHUB_TOKEN }} 30 | - 31 | name: Unpack web UI archive 32 | run: | 33 | cd resources/webui 34 | tar zxvf saturn-webui.tar.gz 35 | rm saturn-webui.tar.gz 36 | - 37 | name: Fetch all tags 38 | run: git fetch --force --tags 39 | - 40 | name: Set up Go 41 | uses: actions/setup-go@v2 42 | with: 43 | go-version: 1.18 44 | - 45 | name: Install gon 46 | run: | 47 | brew tap mitchellh/gon 48 | brew install mitchellh/gon/gon 49 | - 50 | name: Install the Apple certificate and provisioning profile 51 | env: 52 | BUILD_CERTIFICATE_BASE64: ${{ secrets.BUILD_CERTIFICATE_BASE64 }} 53 | P12_PASSWORD: ${{ secrets.P12_PASSWORD }} 54 | KEYCHAIN_PASSWORD: ${{ secrets.KEYCHAIN_PASSWORD }} 55 | run: | 56 | # create variables 57 | CERTIFICATE_PATH=$RUNNER_TEMP/build_certificate.p12 58 | KEYCHAIN_PATH=$RUNNER_TEMP/app-signing.keychain-db 59 | 60 | # import certificate and provisioning profile from secrets 61 | echo -n "$BUILD_CERTIFICATE_BASE64" | base64 
--decode --output $CERTIFICATE_PATH 62 | 63 | # create temporary keychain 64 | security create-keychain -p "$KEYCHAIN_PASSWORD" $KEYCHAIN_PATH 65 | security set-keychain-settings -lut 21600 $KEYCHAIN_PATH 66 | security unlock-keychain -p "$KEYCHAIN_PASSWORD" $KEYCHAIN_PATH 67 | 68 | # import certificate to keychain 69 | security import $CERTIFICATE_PATH -P "$P12_PASSWORD" -A -t cert -f pkcs12 -k $KEYCHAIN_PATH 70 | security list-keychain -d user -s $KEYCHAIN_PATH 71 | - 72 | name: Run GoReleaser 73 | uses: goreleaser/goreleaser-action@v2 74 | with: 75 | # either 'goreleaser' (default) or 'goreleaser-pro' 76 | distribution: goreleaser 77 | version: latest 78 | args: release --rm-dist 79 | env: 80 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 81 | AC_PASSWORD: ${{ secrets.AC_PASSWORD }} 82 | # Your GoReleaser Pro key, if you are using the 'goreleaser-pro' distribution 83 | # GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} 84 | - 85 | name: Attach produced packages to Github Action 86 | uses: actions/upload-artifact@v2 87 | with: 88 | name: dist 89 | path: dist/*.* 90 | if-no-files-found: error 91 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | 17 | # Resources directory 18 | resources 19 | !resources/resources.go 20 | -------------------------------------------------------------------------------- /.gon.hcl: -------------------------------------------------------------------------------- 1 | source = ["./dist/macos-x86-64_darwin_amd64_v1/saturn-L2-node"] 2 | bundle_id = "io.filecoin.saturn.l2-node" 3 | 4 | 
apple_id { 5 | username = "oli@protocol.ai" 6 | password = "@env:AC_PASSWORD" 7 | } 8 | 9 | sign { 10 | application_identity = "Developer ID Application: Protocol Labs, Inc." 11 | } 12 | 13 | zip { 14 | output_path="./dist/L2-node_Darwin_x86_64.zip" 15 | } 16 | -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | # This is an example .goreleaser.yml file with some sensible defaults. 2 | # Make sure to check the documentation at https://goreleaser.com 3 | before: 4 | hooks: 5 | # You may remove this if you don't use go modules. 6 | - go mod tidy 7 | # you may remove this if you don't need go generate 8 | - go generate ./... 9 | builds: 10 | - id: saturn 11 | env: 12 | - CGO_ENABLED=0 13 | goos: 14 | - linux 15 | - windows 16 | ignore: 17 | - goos: windows 18 | goarch: arm64 19 | main: ./cmd/saturn-l2 20 | binary: saturn-L2-node 21 | - id: macos-x86-64 22 | env: 23 | - CGO_ENABLED=0 24 | goos: 25 | - darwin 26 | goarch: 27 | - amd64 28 | main: ./cmd/saturn-l2 29 | binary: saturn-L2-node 30 | archives: 31 | - builds: 32 | - saturn 33 | name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}" 34 | replacements: 35 | linux: Linux 36 | windows: Windows 37 | 386: i386 38 | amd64: x86_64 39 | - builds: 40 | - macos-x86-64 41 | id: macos-x86-64-zip 42 | format: zip 43 | name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}" 44 | replacements: 45 | darwin: Darwin 46 | amd64: x86_64 47 | checksum: 48 | name_template: 'checksums.txt' 49 | signs: 50 | - id: macos-x86-64 51 | ids: 52 | - macos-x86-64-zip 53 | cmd: gon 54 | args: 55 | - .gon.hcl 56 | artifacts: all 57 | snapshot: 58 | name_template: "{{ incpatch .Version }}-next" 59 | changelog: 60 | sort: asc 61 | groups: 62 | - title: Features 63 | regexp: "^.*feat[(\\w)]*:+.*$" 64 | order: 0 65 | - title: 'Bug fixes' 66 | regexp: "^.*fix[(\\w)]*:+.*$" 67 | order: 1 68 | - title: Others 69 | 
order: 999 70 | filters: 71 | exclude: 72 | - '^docs:' 73 | - '^test:' 74 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Filecoin 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Saturn L2 Node 2 | 3 | The Saturn L2 Node is a CDN node for the Saturn network that fetches and caches/persists IPLD Dags serialised as CAR files. It fetches CAR files from origin servers that can serve IPLD data such as the IPFS Gateway and Filecoin SPs. 4 | 5 | 6 | The L2 node is meant to run on NATT'd home machines. This means that the L2 implementation needs to account for: 7 | 8 | 1. 
Limited disk space available for caching CAR files -> we should never exceed the limit set by the home user. 9 | 2. Limited uplink and download bandwidths and the uplink speed will be much lower than the download speeds. 10 | 3. L2's will be NATT'd and not reachable from the public internet. 11 | 4. L2's will have flaky connectivity. 12 | 13 | We've documented the important considerations and design of the L2 node that will allow us to build a reliable, low latency and high bandwidth CDN abstraction on top of these resource-constrained home machines with flaky connectivity at https://pl-strflt.notion.site/Building-the-Saturn-L2-network-and-L1-L2-interplay-6518deda51344a9db04bd3037b270ada. 14 | 15 | The document also details the implementation path we will be taking to eventually build a robust and feature complete MVP for the Saturn L2. 16 | 17 | ## Features 18 | 19 | At present, the L2 implementation has the following features: 20 | 21 | #### Cache misses to the IPFS Gateway as an origin server 22 | 23 | - The L2 node cache misses to the IPFS Gateway as its origin server. The eventual goal is to cache miss to the Filecoin SP network and will be implemented down the road. 24 | 25 | - The L2 node follows the "cache on second miss rule". It only fetches and caches content(read CAR files) if it sees two requests for the same content in a rolling duration of 24 hours. This is well established CDN engineering wisdom and prevents disk churn on the L2s as most content is only ever requested once from a CDN node. 26 | 27 | ### Dagstore as a cache for CAR files 28 | 29 | - The L2 node uses the [dagstore](https://github.com/filecoin-project/dagstore) as a thread-safe, persistent, high-throughput and fixed sized cache for CAR files whose size we might not know upfront before we stream them from the origin server and download them. Filecoin SPs also use the dagstore for the same purpose i.e. a persistent cache for CAR files. 
30 | 31 | - The dagstore ensures that the space used to persist CAR files never exceeds the space allocated to the L2 node by the user. It does this by using an automated LRU GC algorithm to evict CAR files to make space for new CAR files when the space used by CAR files exceeds a given threshold. The LRU GC algorithm uses a quota allocation mechanism to account for the fact that the L2 node can't know the size of a CAR file upfront before streaming the entire file from the IPFS Gateway. More details can be found [here](https://github.com/filecoin-project/dagstore/pull/125). 32 | 33 | - The dagstore is source agnostic and it should be relatively easy to swap out the IPFS Gateway with the Filecoin SP network down the line without changing the L2 implementation significantly. 34 | 35 | ### HTTP API for fetching L2 node stats 36 | 37 | - The L2 node exposes an HTTP API to fetch stats that the operator/user of the L2 node might be interested in. These stats include the total amount of data downloaded by the L2 node, the amount of data served to other CDN peers by the L2 node etc etc. 38 | 39 | - Note that the L2 node only binds to the localhost loopback interface for now and so this HTTP API can only be invoked by a caller running on the same machine. 40 | 41 | ### L1 Discovery and serving CAR files to L1s 42 | 43 | - The L2 now supports interop with the Saturn L1 network. The L2 node can now serve retrievals of CAR files over HTTP for a given (root cid, optional skip offset) tuple if it already has the requested DAG. 44 | 45 | - On it's first startup, the L2 node generates an Id(`L2Id`) for itself and persists it in the local file system. For all subsequent runs, the L2 node will reuse the persisted L2Id. 46 | 47 | - On every startup, the L2 node connects to the configured L1 Discovery API (See the `L1_DISCOVERY_API_URL` environment variable below) to get back a list of L1 nodes to connect to. 
48 | 49 | - The L2 node then picks a maximum of `MAX_L1s`(configurable) L1s from those received from the Discovery API and joins 50 | the "swarm" for all those L1s. 51 | 52 | - The L2 node joins an L1's Swarm by invoking the GET `https://{L1_IP}/register/{L2Id}` registration API on the L1. The L1 should send back a 200 status code and then keep the connection alive. 53 | - The L2 node then starts reading requests for CAR files from the response stream of the registration call made above. The L1 should send a request as newline-delimited JSON. The request is currently of the form: 54 | 55 | ``` 56 | type CARTransferRequest struct { 57 | RequestId string 58 | Root string 59 | SkipOffset uint64 60 | } 61 | ``` 62 | - For each request, the L2 node serves the CAR file by invoking the POST `https://{L1_IP}/data/{Root}/{RequestId}` API on the L1. 63 | The L2 will serve the CAR file only if it already has it. The L2 makes no guarantees of sending back a POST response for each request received from an L1. 64 | If an L2 does not have a CAR file or if there's an error while serving the CAR file, the L2 will simply not send a POST or send some 65 | invalid bytes(incomplete CAR file) in the POST. The L1 should always ensure that a CAR file stream sent over POST ends with an `EOF` to ensure it has read a complete valid CAR file. 66 | - The number of concurrent requests that an L2 will serve for an L1 is configured using the `MAX_CONCURRENT_L1_REQUESTS` environment variable described below. 67 | - **Note**: The L2 node also ships with an upper bound on the number of connections it makes to a single L1(5 for now) to prevent abuse. Connections will be re-used to send responses for subsequent requests after an L1 has finished reading and closed the stream for an existing response over the connection. If all connections are busy with ongoing responses, subsequent responses will block till a connection is available. 
68 | 69 | 70 | - If an L2 does not have the requested DAG, it simply returns a 404 so the client can fetch it directly from the IPFS Gateway. This decision was taken keeping in mind that it will be faster for the client to fetch the content directly from the IPFS Gateway rather than the client downloading it from the L2 which is itself downloading the content from the IPFS Gateway. This is because the L2 clients i.e. L1 Saturn nodes have significantly superior bandwidth compared to L2s. Low L2 uplink speeds without the benefits of geo-location and without the implementation of multi-peer L2 downloads can definitely become a bottleneck for L1s in the L2 cache miss scenario. 71 | 72 | - The L2 network does NOT support parallel download of a DAG from multiple L2 nodes for now. 73 | 74 | ## Setting up the L2 node and invoking the HTTP APIs 75 | 76 | 1. Build and Install the Saturn L2 binary located at cmd/saturn-l2. 77 | ``` 78 | cd cmd/saturn-l2 79 | go build ./... 80 | ``` 81 | 82 | 2. Run the saturn-l2 binary 83 | ``` 84 | cd cmd/saturn-l2 85 | ./saturn-l2 86 | ``` 87 | 88 | Note that before running the binary, you need to configure/think about the following environment variables: 89 | 90 | 1. `PORT` 91 | PORT is the environment variable that determines the port the saturn L2 service will bind to. 92 | If this environment variable is not configured, this service will bind to any available port by default. 93 | 94 | 2. `ROOT_DIR` 95 | ROOT_DIR is the environment variable that determines the root directory of the Saturn L2 Node. 96 | All persistent state and cached CAR files will be persisted under this directory. 97 | 98 | The following state is currently persisted inside the root directory on the user's machine: 99 | 100 | a) CAR files fetched from the IPFS Gateway. This is the data that the Saturn L2 CDN 101 | node is caching. These are stored as flat files on disk. 102 | 103 | b) Internal dagstore bookkeeping state. 
The indices for the cached CARv2 files are 104 | persisted as flat files on disk and the state of each dag/shard/CAR file is persisted in a 105 | leveldb key-value store on disk. 106 | 107 | c) L2 node stats that the L2 user/Station might be interested in. These are persisted in JSON format 108 | in a leveldb key-value store on disk. 109 | 110 | 3. `MAX_L2_DISK_SPACE` 111 | MAX_L2_DISK_SPACE is the environment variable that determines the maximum disk space the L2 node 112 | can use to store cached CAR files. If this env variable is not configured, it defaults to 200GiB. 113 | Note: For efficient operation, the configured value should be greater than or equal to 200GiB. 114 | 115 | 4. `FIL_WALLET_ADDRESS` 116 | FIL_WALLET_ADDRESS is the environment variable that determines the Filecoin wallet address of the L2 user. 117 | Note: This is a mandatory environment variable -> no default. 118 | 119 | 5. `L1_DISCOVERY_API_URL` 120 | L1_DISCOVERY_API_URL is the environment variable that determines the URL of the L1 Discovery API to invoke to 121 | get back the L1 nodes this L2 node will connect and serve CAR files to. For the production environment, this is currently 122 | https://orchestrator.strn.pl/nodes. 123 | 124 | 6. `MAX_L1s` 125 | MAX_L1s is the environment variable that determines the maximum number of L1s this L2 will connect to and join the swarm for. 126 | Defaults to 100. 127 | 128 | 7. `MAX_CONCURRENT_L1_REQUESTS` 129 | MAX_CONCURRENT_L1_REQUESTS is the environment variable that determines the maximum number of requests that will be 130 | processed concurrently for a single L1. Defaults to 3. 131 | 132 | 133 | 3. Once the binary starts, it will print this to the standard output: 134 | 135 | ``` 136 | ./saturn-l2 137 | ... 138 | WebUI: http://localhost:52860/webui 139 | API: http://localhost:52860/ 140 | ... 
141 | ``` 142 | 143 | When an L2 connects/disconnects with an L1 it prints this to standard output: 144 | 145 | ``` 146 | INFO: Saturn Node is online and connected to 1 peer(s) 147 | INFO: Saturn Node is online and connected to 2 peer(s) 148 | INFO: Saturn Node is online and connected to 1 peer(s) 149 | ERROR: Saturn Node lost connection to the network 150 | ``` 151 | 152 | If you want to connect to `WebUI`, also run `./scripts/download-webui.sh`. 153 | 154 | Note that the Saturn L2 node only binds to the **localhost** loopback network interface and so will only be reachable from the same machine. 155 | In the above snippet, `52860` is the port that the Saturn L2 node binds to on the localhost interface. This port can be configured using the `PORT` environment variable as mentioned above. 156 | 157 | ### HTTP APIs 158 | 159 | 1. GET **/stats** 160 | 161 | ``` 162 | curl http://localhost:52860/stats 163 | 164 | Response: 165 | {"Version":"v0.0.0", 166 | "BytesCurrentlyStored":0, -> Total space currently taken up by the cached CAR files on the L2 user's machine. 167 | "TotalBytesUploaded":0, -> Total number of bytes uploaded/served by the L2 node to requesting peers in it's entire lifetime. 168 | "TotalBytesDownloaded":0, -> Total number of bytes downloaded by the L2 node from the IPFS Gateway/origin server in it's entire lifetime. 169 | "NContentRequests":0, -> Total number of requests received by the L2 node for content from clients in it's entire lifetime. 170 | "NContentNotFoundReqs":0, -> Total number of requests for which the L2 did NOT have a cached CAR file to serve. 171 | "NSuccessfulRetrievals":0 -> Total number of successful retrievals served by the L2 node. 172 | "NContentReqErrors":0} -> Total number of errors encountered by the L2 node while serving content to client in it's entire lifetime. 
173 | ``` 174 | 175 | **Sample cids to test with** can be found [here](https://pl-strflt.notion.site/Sample-cids-for-testing-Saturn-IPFS-Gateway-can-serve-all-these-cids-4387a7b734aa4a5fa3166d8eac7cac5e). These are cids the IPFS Gateway can serve. 176 | 177 | ## Developer's Guide 178 | 179 | ### Release process 180 | 181 | Publishing a new version is easy: 182 | 183 | 1. Create a tag 184 | 2. Push it to GitHub 185 | 186 | GitHub Actions and GoReleaser will take care of the rest. 187 | 188 | Example script: 189 | 190 | ```bash 191 | $ git checkout main 192 | $ git pull 193 | $ git tag -a -s vA.B.C 194 | $ git push origin vA.B.C 195 | ``` 196 | 197 | Replace vA.B.C with the actual version number, and please follow 198 | [Semantic Versioning](https://semver.org). 199 | 200 | You can omit `-s` if you are not signing your commits. 201 | 202 | 203 | -------------------------------------------------------------------------------- /carserver/server.go: -------------------------------------------------------------------------------- 1 | package carserver 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "io" 8 | 9 | "github.com/filecoin-project/saturn-l2/types" 10 | 11 | "github.com/filecoin-project/saturn-l2/station" 12 | 13 | "github.com/filecoin-project/saturn-l2/logs" 14 | 15 | "github.com/filecoin-project/saturn-l2/carstore" 16 | 17 | bstore "github.com/ipfs/go-ipfs-blockstore" 18 | 19 | cidlink "github.com/ipld/go-ipld-prime/linking/cid" 20 | "github.com/ipld/go-ipld-prime/storage/bsadapter" 21 | 22 | car "github.com/ipld/go-car/v2" 23 | ) 24 | 25 | // CarServer serves CAR files for a given root and selector. 
26 | type CarServer struct { 27 | cs *carstore.CarStore 28 | logger *logs.SaturnLogger 29 | spai station.StationAPI 30 | } 31 | 32 | func New(cs *carstore.CarStore, logger *logs.SaturnLogger, sapi station.StationAPI) *CarServer { 33 | return &CarServer{ 34 | cs: cs, 35 | logger: logger, 36 | spai: sapi, 37 | } 38 | } 39 | 40 | func (l *CarServer) ServeCARFile(ctx context.Context, dr *types.DagTraversalRequest, w io.Writer) error { 41 | sw := &statWriter{w: w} 42 | 43 | if err := l.cs.FetchAndWriteCAR(dr.RequestId, dr.Root, func(ro bstore.Blockstore) error { 44 | ls := cidlink.DefaultLinkSystem() 45 | bsa := bsadapter.Adapter{Wrapped: ro} 46 | ls.SetReadStorage(&bsa) 47 | 48 | _, err := car.TraverseV1(ctx, &ls, dr.Root, dr.Selector, sw) 49 | if err != nil { 50 | l.logger.LogError(dr.RequestId, "car traversal failed", err) 51 | return fmt.Errorf("car traversal failed: %w", err) 52 | } 53 | 54 | return nil 55 | }); err != nil { 56 | if errors.Is(err, carstore.ErrNotFound) { 57 | if err := l.spai.RecordRetrievalServed(ctx, sw.n, 0, 1, 0); err != nil { 58 | l.logger.LogError(dr.RequestId, "failed to record retrieval not found", err) 59 | } 60 | 61 | l.logger.Infow(dr.RequestId, "not serving CAR as CAR not found", "err", err) 62 | } else { 63 | if err := l.spai.RecordRetrievalServed(ctx, sw.n, 1, 0, 0); err != nil { 64 | l.logger.LogError(dr.RequestId, "failed to record retrieval failure", err) 65 | } 66 | 67 | l.logger.LogError(dr.RequestId, "failed to traverse and serve car", err) 68 | } 69 | 70 | return err 71 | } else if err == nil { 72 | if err := l.spai.RecordRetrievalServed(ctx, sw.n, 0, 0, 1); err != nil { 73 | l.logger.LogError(dr.RequestId, "failed to record successful retrieval", err) 74 | } 75 | } 76 | 77 | l.logger.Infow(dr.RequestId, "car transfer successful") 78 | return nil 79 | } 80 | 81 | type statWriter struct { 82 | w io.Writer 83 | n uint64 84 | } 85 | 86 | func (sw *statWriter) Write(p []byte) (n int, err error) { 87 | n, err = sw.w.Write(p) 88 | 
sw.n += uint64(n) 89 | return 90 | } 91 | -------------------------------------------------------------------------------- /carserver/server_test.go: -------------------------------------------------------------------------------- 1 | package carserver 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "errors" 8 | "io" 9 | "net/http" 10 | "net/http/httptest" 11 | "testing" 12 | "time" 13 | 14 | "github.com/filecoin-project/saturn-l2/types" 15 | 16 | "golang.org/x/sync/errgroup" 17 | 18 | "github.com/filecoin-project/saturn-l2/station" 19 | 20 | datastore "github.com/ipfs/go-datastore" 21 | dss "github.com/ipfs/go-datastore/sync" 22 | 23 | "github.com/filecoin-project/saturn-l2/logs" 24 | _ "github.com/ipld/go-ipld-prime/codec/dagcbor" 25 | 26 | cid "github.com/ipfs/go-cid" 27 | 28 | "github.com/filecoin-project/saturn-l2/testutils" 29 | 30 | "github.com/google/uuid" 31 | 32 | "github.com/filecoin-project/saturn-l2/carstore" 33 | 34 | "github.com/stretchr/testify/require" 35 | ) 36 | 37 | func TestSimpleTransfer(t *testing.T) { 38 | ctx := context.Background() 39 | csh := buildHarness(t, ctx) 40 | defer csh.Stop(t) 41 | 42 | csh.assertStationStats(t, ctx, 0, 0, 0, 0, 0, 0, 0) 43 | 44 | url := csh.carserver.URL 45 | root := csh.root1 46 | contents := csh.bz1 47 | 48 | // send the request 49 | reqBz := mkRequestWithoutSelector(t, root, 0) 50 | resp := sendHttpReq(t, url, reqBz) 51 | 52 | require.EqualValues(t, http.StatusNotFound, resp.StatusCode) 53 | 54 | // second fetch should not work 55 | resp = sendHttpReq(t, url, reqBz) 56 | require.EqualValues(t, http.StatusNotFound, resp.StatusCode) 57 | 58 | // wait till L2 has cached the data 59 | require.Eventually(t, func() bool { 60 | has, err := csh.store.IsIndexed(ctx, root) 61 | return has && err == nil 62 | }, 1*time.Second, 100*time.Millisecond) 63 | 64 | // third fetch should work 65 | resp = sendHttpReq(t, url, reqBz) 66 | require.EqualValues(t, http.StatusOK, resp.StatusCode) 67 | 68 | bz := 
readHTTPResponse(t, resp) 69 | // ensure contents match 70 | require.EqualValues(t, contents, bz) 71 | 72 | csh.assertStationStats(t, ctx, len(contents), len(contents), 3, 0, len(contents), 2, 1) 73 | } 74 | 75 | func TestParallelTransfers(t *testing.T) { 76 | t.Skip("fails on CI") 77 | ctx := context.Background() 78 | csh := buildHarness(t, ctx) 79 | defer csh.Stop(t) 80 | 81 | csh.assertStationStats(t, ctx, 0, 0, 0, 0, 0, 0, 0) 82 | 83 | url := csh.carserver.URL 84 | root1 := csh.root1 85 | root2 := csh.root2 86 | contents1 := csh.bz1 87 | contents2 := csh.bz2 88 | 89 | count := 0 90 | 91 | // send the requests so both get cached 92 | require.Eventually(t, func() bool { 93 | count++ 94 | reqBz := mkRequestWithoutSelector(t, root1, 0) 95 | resp := sendHttpReq(t, url, reqBz) 96 | if resp.StatusCode == http.StatusOK { 97 | bz := readHTTPResponse(t, resp) 98 | return bytes.Equal(contents1, bz) 99 | } 100 | return false 101 | }, 5*time.Second, 100*time.Millisecond) 102 | 103 | require.Eventually(t, func() bool { 104 | count++ 105 | reqBz := mkRequestWithoutSelector(t, root2, 0) 106 | resp := sendHttpReq(t, url, reqBz) 107 | if resp.StatusCode == http.StatusOK { 108 | bz := readHTTPResponse(t, resp) 109 | return bytes.Equal(contents2, bz) 110 | } 111 | 112 | return false 113 | }, 5*time.Second, 100*time.Millisecond) 114 | 115 | l := len(contents1) + len(contents2) 116 | csh.assertStationStats(t, ctx, l, l, count, 0, l, count-2, 2) 117 | 118 | var errg errgroup.Group 119 | 120 | // fetch 10 in parallel 121 | for i := 0; i < 10; i++ { 122 | i := i 123 | errg.Go(func() error { 124 | var root cid.Cid 125 | 126 | if i%2 == 0 { 127 | root = root2 128 | } else { 129 | root = root1 130 | } 131 | 132 | reqBz := mkRequestWithoutSelector(t, root, 0) 133 | resp := sendHttpReq(t, url, reqBz) 134 | if resp.StatusCode != http.StatusOK { 135 | return errors.New("failed") 136 | } 137 | return nil 138 | }) 139 | 140 | } 141 | require.NoError(t, errg.Wait()) 142 | 143 | time.Sleep(1 * 
time.Second) 144 | 145 | csh.assertStationStats(t, ctx, 6*l, l, count+10, 0, l, count-2, 12) 146 | } 147 | 148 | type carServerHarness struct { 149 | store *carstore.CarStore 150 | gwapi *httptest.Server 151 | carserver *httptest.Server 152 | sapi station.StationAPI 153 | root1 cid.Cid 154 | bz1 []byte 155 | root2 cid.Cid 156 | bz2 []byte 157 | } 158 | 159 | func (csh *carServerHarness) assertStationStats(t *testing.T, ctx context.Context, upload, download, reqs, errors, storage int, nNotFound int, 160 | nSuccess int) { 161 | as, err := csh.sapi.AllStats(ctx) 162 | require.NoError(t, err) 163 | require.EqualValues(t, upload, as.TotalBytesUploaded) 164 | require.EqualValues(t, reqs, as.NContentRequests) 165 | require.EqualValues(t, errors, as.NContentReqErrors) 166 | require.EqualValues(t, download, as.TotalBytesDownloaded) 167 | require.EqualValues(t, storage, as.StorageStats.BytesCurrentlyStored) 168 | require.EqualValues(t, nNotFound, as.NContentNotFoundReqs) 169 | require.EqualValues(t, nSuccess, as.NSuccessfulRetrievals) 170 | } 171 | 172 | func (csh *carServerHarness) Stop(t *testing.T) { 173 | require.NoError(t, csh.store.Stop()) 174 | csh.gwapi.Close() 175 | csh.carserver.Close() 176 | } 177 | 178 | func buildHarness(t *testing.T, ctx context.Context) *carServerHarness { 179 | carFile1 := "../testdata/files/sample-v1.car" 180 | rootcid1, bz1 := testutils.ParseCar(t, ctx, carFile1) 181 | carFile2 := "../testdata/files/sample-rw-bs-v2.car" 182 | rootcid2, bz2 := testutils.ParseCar(t, ctx, carFile2) 183 | out := make(map[string][]byte) 184 | out[rootcid1.String()] = bz1 185 | out[rootcid2.String()] = bz2 186 | 187 | temp := t.TempDir() 188 | 189 | mds := dss.MutexWrap(datastore.NewMapDatastore()) 190 | sapi := NewStationAPIImpl(mds, nil) 191 | 192 | // create the getway api with a test http server 193 | svc := testutils.GetTestServerForRoots(t, out) 194 | gwAPI := carstore.NewGatewayAPI(svc.URL, sapi, 10000000) 195 | lg := logs.NewSaturnLogger() 196 | cfg := 
carstore.Config{MaxCARFilesDiskSpace: 100000000} 197 | cs, err := carstore.New(temp, gwAPI, cfg, lg) 198 | require.NoError(t, err) 199 | sapi.SetStorageStatsFetcher(cs) 200 | require.NoError(t, cs.Start(ctx)) 201 | 202 | // create and start the car server 203 | carserver := New(cs, lg, sapi) 204 | csvc := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 205 | bz, err := io.ReadAll(r.Body) 206 | if err != nil { 207 | http.Error(w, err.Error(), http.StatusBadRequest) 208 | return 209 | } 210 | var cr types.CARTransferRequest 211 | if err := json.Unmarshal(bz, &cr); err != nil { 212 | http.Error(w, err.Error(), http.StatusBadRequest) 213 | return 214 | } 215 | dr, err := cr.ToDAGRequest() 216 | if err != nil { 217 | http.Error(w, err.Error(), http.StatusBadRequest) 218 | return 219 | } 220 | 221 | err = carserver.ServeCARFile(ctx, dr, w) 222 | if errors.Is(err, carstore.ErrNotFound) { 223 | http.Error(w, err.Error(), http.StatusNotFound) 224 | return 225 | } 226 | if err != nil { 227 | http.Error(w, err.Error(), http.StatusInternalServerError) 228 | return 229 | } 230 | 231 | })) 232 | 233 | return &carServerHarness{ 234 | store: cs, 235 | gwapi: svc, 236 | carserver: csvc, 237 | sapi: sapi, 238 | root1: rootcid1, 239 | root2: rootcid2, 240 | bz1: bz1, 241 | bz2: bz2, 242 | } 243 | } 244 | 245 | func readHTTPResponse(t *testing.T, resp *http.Response) []byte { 246 | bz, err := io.ReadAll(resp.Body) 247 | require.NoError(t, err) 248 | require.NotEmpty(t, resp) 249 | require.NoError(t, resp.Body.Close()) 250 | return bz 251 | } 252 | 253 | func mkRequestWithoutSelector(t *testing.T, root cid.Cid, offset uint64) []byte { 254 | req := types.CARTransferRequest{ 255 | Root: root.String(), 256 | RequestId: uuid.New().String(), 257 | SkipOffset: offset, 258 | } 259 | reqBz, err := json.Marshal(req) 260 | require.NoError(t, err) 261 | return reqBz 262 | } 263 | 264 | func sendHttpReq(t *testing.T, url string, body []byte) *http.Response { 265 
| hreq, err := http.NewRequest("GET", url, bytes.NewReader(body)) 266 | require.NoError(t, err) 267 | resp, err := http.DefaultClient.Do(hreq) 268 | require.NoError(t, err) 269 | require.NotEmpty(t, resp) 270 | return resp 271 | } 272 | -------------------------------------------------------------------------------- /carserver/station_api_impl.go: -------------------------------------------------------------------------------- 1 | package carserver 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "sync" 8 | 9 | "github.com/ipfs/go-datastore/namespace" 10 | 11 | datastore "github.com/ipfs/go-datastore" 12 | 13 | "github.com/filecoin-project/saturn-l2/station" 14 | ) 15 | 16 | var Version = "v0.0.0" 17 | var contentReqKey = datastore.NewKey("/content-req") 18 | var storeNameSpace = "station" 19 | 20 | type StationAPIImpl struct { 21 | ss station.StorageStatsFetcher 22 | 23 | mu sync.RWMutex 24 | ds datastore.Batching 25 | } 26 | 27 | func NewStationAPIImpl(ds datastore.Batching, ss station.StorageStatsFetcher) *StationAPIImpl { 28 | nds := namespace.Wrap(ds, datastore.NewKey(storeNameSpace)) 29 | return &StationAPIImpl{ 30 | ss: ss, 31 | ds: nds, 32 | } 33 | } 34 | 35 | func (s *StationAPIImpl) SetStorageStatsFetcher(ss station.StorageStatsFetcher) { 36 | s.ss = ss 37 | } 38 | 39 | func (s *StationAPIImpl) RecordRetrievalServed(ctx context.Context, bytesServed, nErrors, nNotFound, nSuccess uint64) error { 40 | s.mu.Lock() 41 | defer s.mu.Unlock() 42 | 43 | return s.createOrUpdateReqStatsUnlocked(ctx, func(r *station.ReqStats) { 44 | r.TotalBytesUploaded = bytesServed 45 | r.NContentRequests = 1 46 | r.NContentNotFoundReqs = nNotFound 47 | r.NContentReqErrors = nErrors 48 | r.NSuccessfulRetrievals = nSuccess 49 | }, func(r *station.ReqStats) { 50 | r.TotalBytesUploaded += bytesServed 51 | r.NContentRequests += 1 52 | r.NContentNotFoundReqs += nNotFound 53 | r.NContentReqErrors += nErrors 54 | r.NSuccessfulRetrievals += nSuccess 55 | }) 56 | } 57 | 58 | 
func (s *StationAPIImpl) RecordDataDownloaded(ctx context.Context, bytesDownloaded uint64) error { 59 | s.mu.Lock() 60 | defer s.mu.Unlock() 61 | 62 | return s.createOrUpdateReqStatsUnlocked(ctx, func(r *station.ReqStats) { 63 | r.TotalBytesDownloaded = bytesDownloaded 64 | }, func(r *station.ReqStats) { 65 | r.TotalBytesDownloaded += bytesDownloaded 66 | }) 67 | } 68 | 69 | func (s *StationAPIImpl) createOrUpdateReqStatsUnlocked(ctx context.Context, createFn func(s *station.ReqStats), 70 | updateFn func(s *station.ReqStats)) error { 71 | 72 | bz, err := s.ds.Get(ctx, contentReqKey) 73 | if err != nil && err != datastore.ErrNotFound { 74 | return fmt.Errorf("failed to get retrieval stats from datastore: %w", err) 75 | } 76 | if err == datastore.ErrNotFound { 77 | stats := station.ReqStats{} 78 | createFn(&stats) 79 | bz, err := json.Marshal(stats) 80 | if err != nil { 81 | return fmt.Errorf("failed to marshal retrieval stats to json: %w", err) 82 | } 83 | 84 | if err := s.ds.Put(ctx, contentReqKey, bz); err != nil { 85 | return fmt.Errorf("failed to put to datastore: %w", err) 86 | } 87 | if err := s.ds.Sync(ctx, contentReqKey); err != nil { 88 | return fmt.Errorf("failed to sync datsstore key: %w", err) 89 | } 90 | return nil 91 | } 92 | var stats station.ReqStats 93 | if err := json.Unmarshal(bz, &stats); err != nil { 94 | return fmt.Errorf("failed to unmarshal existing retrieval stats: %w", err) 95 | } 96 | 97 | updateFn(&stats) 98 | 99 | bz, err = json.Marshal(stats) 100 | if err != nil { 101 | return fmt.Errorf("failed to marshal retrieval stats to json: %w", err) 102 | } 103 | if err := s.ds.Put(ctx, contentReqKey, bz); err != nil { 104 | return fmt.Errorf("failed to put datastore key: %w", err) 105 | } 106 | if err := s.ds.Sync(ctx, contentReqKey); err != nil { 107 | return fmt.Errorf("failed to sync datsstore key: %w", err) 108 | } 109 | return nil 110 | } 111 | 112 | func (s *StationAPIImpl) AllStats(ctx context.Context) (station.StationStats, error) { 113 
| s.mu.RLock() 114 | defer s.mu.RUnlock() 115 | 116 | // storage stats 117 | storage, err := s.ss.Stat() 118 | if err != nil { 119 | return station.StationStats{}, fmt.Errorf("failed to fetch storage stats: %w", err) 120 | } 121 | 122 | // info 123 | info := station.RPInfo{ 124 | Version: Version, 125 | } 126 | 127 | // fetch retrieval stats 128 | bz, err := s.ds.Get(ctx, contentReqKey) 129 | if err != nil && err != datastore.ErrNotFound { 130 | return station.StationStats{}, fmt.Errorf("failed to fetch retrieval stats: %w", err) 131 | } 132 | if err == datastore.ErrNotFound { 133 | return station.StationStats{ 134 | RPInfo: info, 135 | StorageStats: storage, 136 | }, nil 137 | } 138 | 139 | var rs station.ReqStats 140 | if err := json.Unmarshal(bz, &rs); err != nil { 141 | return station.StationStats{}, fmt.Errorf("failed to unmarshal retrieval stats from json: %w", err) 142 | } 143 | 144 | return station.StationStats{ 145 | RPInfo: info, 146 | StorageStats: storage, 147 | ReqStats: rs, 148 | }, nil 149 | } 150 | 151 | var _ station.StationAPI = &StationAPIImpl{} 152 | -------------------------------------------------------------------------------- /carserver/station_api_impl_test.go: -------------------------------------------------------------------------------- 1 | package carserver 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/filecoin-project/saturn-l2/station" 8 | datastore "github.com/ipfs/go-datastore" 9 | dss "github.com/ipfs/go-datastore/sync" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestStationAPIImpl(t *testing.T) { 14 | ctx := context.Background() 15 | ds := dss.MutexWrap(datastore.NewMapDatastore()) 16 | sapi := NewStationAPIImpl(ds, &mockStorageStatsFetcher{ 17 | out: 790, 18 | }) 19 | 20 | as, err := sapi.AllStats(ctx) 21 | require.NoError(t, err) 22 | require.Equal(t, station.StationStats{RPInfo: station.RPInfo{Version: Version}, 23 | StorageStats: station.StorageStats{ 24 | BytesCurrentlyStored: 790, 25 
| }}, as) 26 | 27 | require.NoError(t, sapi.RecordDataDownloaded(ctx, 100)) 28 | as, err = sapi.AllStats(ctx) 29 | require.NoError(t, err) 30 | require.Equal(t, station.StationStats{RPInfo: station.RPInfo{Version: Version}, 31 | StorageStats: station.StorageStats{ 32 | BytesCurrentlyStored: 790, 33 | }, 34 | ReqStats: station.ReqStats{ 35 | TotalBytesDownloaded: 100, 36 | }}, as) 37 | 38 | require.NoError(t, sapi.RecordDataDownloaded(ctx, 200)) 39 | as, err = sapi.AllStats(ctx) 40 | require.NoError(t, err) 41 | require.Equal(t, station.StationStats{RPInfo: station.RPInfo{Version: Version}, 42 | StorageStats: station.StorageStats{ 43 | BytesCurrentlyStored: 790, 44 | }, 45 | ReqStats: station.ReqStats{ 46 | TotalBytesDownloaded: 300, 47 | }}, as) 48 | 49 | require.NoError(t, sapi.RecordRetrievalServed(ctx, 100, 0, 0, 1)) 50 | as, err = sapi.AllStats(ctx) 51 | require.NoError(t, err) 52 | require.Equal(t, station.StationStats{RPInfo: station.RPInfo{Version: Version}, 53 | StorageStats: station.StorageStats{ 54 | BytesCurrentlyStored: 790, 55 | }, 56 | ReqStats: station.ReqStats{ 57 | TotalBytesUploaded: 100, 58 | TotalBytesDownloaded: 300, 59 | NSuccessfulRetrievals: 1, 60 | NContentRequests: 1, 61 | NContentNotFoundReqs: 0, 62 | NContentReqErrors: 0, 63 | }}, as) 64 | 65 | require.NoError(t, sapi.RecordRetrievalServed(ctx, 500, 2, 0, 0)) 66 | as, err = sapi.AllStats(ctx) 67 | require.NoError(t, err) 68 | require.Equal(t, station.StationStats{RPInfo: station.RPInfo{Version: Version}, 69 | StorageStats: station.StorageStats{ 70 | BytesCurrentlyStored: 790, 71 | }, 72 | ReqStats: station.ReqStats{ 73 | TotalBytesUploaded: 600, 74 | NContentRequests: 2, 75 | NContentReqErrors: 2, 76 | TotalBytesDownloaded: 300, 77 | NSuccessfulRetrievals: 1, 78 | }}, as) 79 | 80 | // restart API -> we should still get the same values 81 | sapi = NewStationAPIImpl(ds, &mockStorageStatsFetcher{ 82 | out: 790, 83 | }) 84 | as, err = sapi.AllStats(ctx) 85 | require.NoError(t, err) 86 | 
require.Equal(t, station.StationStats{RPInfo: station.RPInfo{Version: Version}, 87 | StorageStats: station.StorageStats{ 88 | BytesCurrentlyStored: 790, 89 | }, 90 | ReqStats: station.ReqStats{ 91 | TotalBytesUploaded: 600, 92 | NContentRequests: 2, 93 | NContentReqErrors: 2, 94 | NSuccessfulRetrievals: 1, 95 | TotalBytesDownloaded: 300, 96 | }}, as) 97 | 98 | require.NoError(t, sapi.RecordRetrievalServed(ctx, 500, 0, 1, 1)) 99 | as, err = sapi.AllStats(ctx) 100 | require.NoError(t, err) 101 | require.Equal(t, station.StationStats{RPInfo: station.RPInfo{Version: Version}, 102 | StorageStats: station.StorageStats{ 103 | BytesCurrentlyStored: 790, 104 | }, 105 | ReqStats: station.ReqStats{ 106 | TotalBytesUploaded: 1100, 107 | NContentRequests: 3, 108 | NContentReqErrors: 2, 109 | NSuccessfulRetrievals: 2, 110 | TotalBytesDownloaded: 300, 111 | NContentNotFoundReqs: 1, 112 | }}, as) 113 | 114 | require.NoError(t, sapi.RecordRetrievalServed(ctx, 500, 0, 1, 0)) 115 | as, err = sapi.AllStats(ctx) 116 | require.NoError(t, err) 117 | require.Equal(t, station.StationStats{RPInfo: station.RPInfo{Version: Version}, 118 | StorageStats: station.StorageStats{ 119 | BytesCurrentlyStored: 790, 120 | }, 121 | ReqStats: station.ReqStats{ 122 | TotalBytesUploaded: 1600, 123 | NContentRequests: 4, 124 | NContentReqErrors: 2, 125 | NSuccessfulRetrievals: 2, 126 | TotalBytesDownloaded: 300, 127 | NContentNotFoundReqs: 2, 128 | }}, as) 129 | 130 | } 131 | 132 | type mockStorageStatsFetcher struct { 133 | out uint64 134 | } 135 | 136 | func (ms *mockStorageStatsFetcher) Stat() (station.StorageStats, error) { 137 | return station.StorageStats{ 138 | BytesCurrentlyStored: ms.out, 139 | }, nil 140 | } 141 | -------------------------------------------------------------------------------- /carstore/blockstore.go: -------------------------------------------------------------------------------- 1 | package carstore 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | 
"github.com/filecoin-project/dagstore" 8 | blocks "github.com/ipfs/go-block-format" 9 | cid "github.com/ipfs/go-cid" 10 | bstore "github.com/ipfs/go-ipfs-blockstore" 11 | ) 12 | 13 | // provides a writeable blockstore wrapper over the dagstore blockstore 14 | type blockstore struct { 15 | dagstore.ReadBlockstore 16 | } 17 | 18 | func (b *blockstore) DeleteBlock(context.Context, cid.Cid) error { 19 | return fmt.Errorf("DeleteBlock called but not implemented") 20 | } 21 | func (b *blockstore) Put(context.Context, blocks.Block) error { 22 | return fmt.Errorf("Put called but not implemented") 23 | } 24 | func (b *blockstore) PutMany(context.Context, []blocks.Block) error { 25 | return fmt.Errorf("PutMany called but not implemented") 26 | } 27 | 28 | var _ bstore.Blockstore = (*blockstore)(nil) 29 | -------------------------------------------------------------------------------- /carstore/carstore.go: -------------------------------------------------------------------------------- 1 | package carstore 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "io/fs" 8 | "os" 9 | "path/filepath" 10 | "sync" 11 | "time" 12 | 13 | "github.com/filecoin-project/saturn-l2/station" 14 | 15 | "github.com/google/uuid" 16 | 17 | logging "github.com/ipfs/go-log/v2" 18 | 19 | "github.com/filecoin-project/saturn-l2/logs" 20 | 21 | "github.com/filecoin-project/dagstore/gc" 22 | 23 | "github.com/filecoin-project/dagstore/helpers" 24 | "github.com/filecoin-project/dagstore/mount" 25 | "github.com/filecoin-project/dagstore/shard" 26 | 27 | bstore "github.com/ipfs/go-ipfs-blockstore" 28 | 29 | "github.com/filecoin-project/dagstore/index" 30 | ds "github.com/ipfs/go-datastore" 31 | levelds "github.com/ipfs/go-ds-leveldb" 32 | ldbopts "github.com/syndtr/goleveldb/leveldb/opt" 33 | 34 | "github.com/filecoin-project/dagstore" 35 | cid "github.com/ipfs/go-cid" 36 | 37 | cache "github.com/patrickmn/go-cache" 38 | ) 39 | 40 | var ( 41 | log = logging.Logger("car-store") 42 | maxConcurrentIndex 
= 3 43 | maxConcurrentReadyFetches = 3 44 | secondMissDuration = 24 * time.Hour 45 | maxRecoverAttempts = uint64(1) 46 | defaultDownloadTimeout = 45 * time.Minute 47 | nConcurrentDownloads = 3 48 | nMaxCacheMissBuffer = 20 49 | ) 50 | 51 | var ( 52 | gwScheme = "gateway" 53 | // ErrNotFound indicates that we do not have the requested CAR file in the car store 54 | ErrNotFound = errors.New("CAR not found") 55 | ) 56 | 57 | type Config struct { 58 | // Maximum size to allocate to the car files directory on disk. 59 | // defaults to 200 Gib 60 | MaxCARFilesDiskSpace int64 61 | DownloadTimeout time.Duration 62 | } 63 | 64 | type cacheMissReq struct { 65 | reqId uuid.UUID 66 | root cid.Cid 67 | } 68 | 69 | type CarStore struct { 70 | ctx context.Context 71 | cancel context.CancelFunc 72 | wg sync.WaitGroup 73 | 74 | dagst *dagstore.DAGStore 75 | failureCh chan dagstore.ShardResult 76 | traceCh chan dagstore.Trace 77 | gcCh chan dagstore.AutomatedGCResult 78 | 79 | gwAPI GatewayAPI 80 | logger *logs.SaturnLogger 81 | 82 | mu sync.Mutex 83 | cacheMissTimeCache *cache.Cache 84 | downloading map[string]struct{} 85 | 86 | transientsDir string 87 | downloadTimeout time.Duration 88 | 89 | cacheMissBuffer chan cacheMissReq 90 | cacheMissSemaphore chan struct{} 91 | } 92 | 93 | func New(rootDir string, gwAPI GatewayAPI, cfg Config, logger *logs.SaturnLogger) (*CarStore, error) { 94 | // construct the DAG Store. 
95 | registry := mount.NewRegistry() 96 | if err := registry.Register(gwScheme, mountTemplate(gwAPI)); err != nil { 97 | return nil, fmt.Errorf("failed to create dagstore registry: %w", err) 98 | } 99 | 100 | var ( 101 | transientsDir = filepath.Join(rootDir, "transients") 102 | datastoreDir = filepath.Join(rootDir, "datastore") 103 | indexDir = filepath.Join(rootDir, "index") 104 | ) 105 | 106 | dstore, err := newDatastore(datastoreDir) 107 | if err != nil { 108 | return nil, fmt.Errorf("failed to create datastore in %s: %w", datastoreDir, err) 109 | } 110 | irepo, err := index.NewFSRepo(indexDir) 111 | if err != nil { 112 | return nil, fmt.Errorf("failed to initialise dagstore index repo: %w", err) 113 | } 114 | topIndex := index.NewInverted(dstore) 115 | 116 | // The dagstore will write Shard failures to the `failureCh` here. 117 | failureCh := make(chan dagstore.ShardResult, 1) 118 | // The dagstore will write Trace events to the `traceCh` here. 119 | traceCh := make(chan dagstore.Trace, 32) 120 | // dagstore will write the automated GC trace to this channel 121 | gcCh := make(chan dagstore.AutomatedGCResult, 1) 122 | 123 | dcfg := dagstore.Config{ 124 | TransientsDir: transientsDir, 125 | IndexRepo: irepo, 126 | Datastore: dstore, 127 | MountRegistry: registry, 128 | FailureCh: failureCh, 129 | TraceCh: traceCh, 130 | TopLevelIndex: topIndex, 131 | MaxConcurrentIndex: maxConcurrentIndex, 132 | MaxConcurrentReadyFetches: maxConcurrentReadyFetches, 133 | RecoverOnStart: dagstore.RecoverOnAcquire, 134 | FetchOnStart: dagstore.FetchOnAcquire, 135 | AutomatedGCEnabled: true, 136 | AutomatedGCConfig: &dagstore.AutomatedGCConfig{ 137 | GarbeCollectionStrategy: gc.NewLRUGarbageCollector(), 138 | MaxTransientDirSize: cfg.MaxCARFilesDiskSpace, 139 | TransientsGCWatermarkHigh: 0.9, 140 | TransientsGCWatermarkLow: 0.7, 141 | AutomatedGCTraceCh: gcCh, 142 | }, 143 | } 144 | 145 | dagst, err := dagstore.NewDAGStore(dcfg) 146 | if err != nil { 147 | return nil, 
fmt.Errorf("failed to create dagstore to use for the car-store: %w", err)
	}

	// Fall back to the default download timeout unless one was configured.
	dlTimeout := defaultDownloadTimeout
	if cfg.DownloadTimeout != 0 {
		dlTimeout = cfg.DownloadTimeout
	}

	return &CarStore{
		dagst:              dagst,
		gwAPI:              gwAPI,
		logger:             logger.Subsystem("car-store"),
		failureCh:          failureCh,
		traceCh:            traceCh,
		gcCh:               gcCh,
		cacheMissTimeCache: cache.New(secondMissDuration, 5*time.Minute),
		downloading:        make(map[string]struct{}),
		transientsDir:      transientsDir,
		downloadTimeout:    dlTimeout,

		cacheMissBuffer:    make(chan cacheMissReq, nMaxCacheMissBuffer),
		cacheMissSemaphore: make(chan struct{}, nConcurrentDownloads),
	}, nil
}

// Start spins up the dagstore together with the carstore's background
// goroutines: shard recovery, trace logging, GC trace logging and the
// cache-miss download loop.
func (cs *CarStore) Start(ctx context.Context) error {
	log.Info("starting car store")
	cs.ctx, cs.cancel = context.WithCancel(ctx)

	cs.wg.Add(1)
	go dagstore.RecoverImmediately(cs.ctx, cs.dagst, cs.failureCh, maxRecoverAttempts, cs.wg.Done)

	cs.wg.Add(1)
	go cs.gcTraceLoop()

	cs.wg.Add(1)
	go cs.traceLoop()

	cs.wg.Add(1)
	go cs.cacheMissLoop()

	if err := cs.dagst.Start(ctx); err != nil {
		return err
	}
	log.Info("successfully started car store")
	return nil
}

// IsIndexed reports whether at least one dagstore shard contains the given root.
func (cs *CarStore) IsIndexed(ctx context.Context, root cid.Cid) (bool, error) {
	shards, err := cs.dagst.ShardsContainingMultihash(ctx, root.Hash())
	return len(shards) > 0, err
}

// traceLoop drains and debug-logs dagstore trace events until shutdown.
func (cs *CarStore) traceLoop() {
	defer cs.wg.Done()

	for {
		select {
		case <-cs.ctx.Done():
			return
		// Log trace events from the DAG store
		case tr := <-cs.traceCh:
			log.Debugw("trace",
				"shard-key", tr.Key.String(),
				"op-type", tr.Op.String(),
				"after", tr.After.String(),
				"disk-size", tr.TransientDirSizeCounter)
		}
	}
}

| 220 | func (cs *CarStore) gcTraceLoop() { 221 | defer cs.wg.Done() 222 | for { 223 | select { 224 | case res := <-cs.gcCh: 225 | log.Infow("shard reclaimed by automated gc", "shard", res.ReclaimedShard, 226 | "disk-size-after-reclaim", res.TransientsDirSizeAfterReclaim, "disk-size-before-reclaim", res.TransientsDirSizeBeforeReclaim, 227 | "accounted-before", res.TransientsAccountingBeforeReclaim, "accounted-after", res.TransientsAccountingAfterReclaim) 228 | case <-cs.ctx.Done(): 229 | return 230 | } 231 | } 232 | } 233 | 234 | func (cs *CarStore) Stop() error { 235 | log.Info("shutting down the carstore") 236 | // Close the DAG store 237 | if err := cs.dagst.Close(); err != nil { 238 | return fmt.Errorf("failed to close the dagstore: %w", err) 239 | } 240 | log.Info("dagstore closed") 241 | 242 | // Cancel the context 243 | cs.cancel() 244 | 245 | // Wait for the background go routine to exit 246 | log.Info("waiting for carstore background goroutines to exit") 247 | cs.wg.Wait() 248 | 249 | log.Info("successfully shut down the carstore") 250 | return nil 251 | } 252 | 253 | func (cs *CarStore) FetchAndWriteCAR(reqID uuid.UUID, root cid.Cid, writer func(bstore.Blockstore) error) error { 254 | cs.logger.Infow(reqID, "got CAR request", "root", root.String()) 255 | mh := root.Hash() 256 | 257 | // which dagstore shards have the requested root cid ? 258 | sks, err := cs.dagst.ShardsContainingMultihash(cs.ctx, mh) 259 | if err != nil && !errors.Is(err, ds.ErrNotFound) { 260 | cs.logger.LogError(reqID, "failed to lookup dagstore for shards containing the given multihash", err) 261 | return fmt.Errorf("failed to lookup dagstore for the given multihash: %w", err) 262 | } 263 | 264 | if err == nil && len(sks) != 0 { 265 | cs.logger.Infow(reqID, "found shards containing the requested cid") 266 | var sa *dagstore.ShardAccessor 267 | 268 | // among all the shards that have the requested root, select the first shard that we already have the CAR for locally. 
269 | // If we don't have the CAR locally for any of the requested shards, we will simply return NO here and 270 | // asynchronously download the CAR from the origin server using the "cache on second miss" rule. 271 | for _, sk := range sks { 272 | sa, err = helpers.AcquireShardSync(cs.ctx, cs.dagst, sk, dagstore.AcquireOpts{ 273 | NoDownload: true, 274 | }) 275 | if err == nil { 276 | break 277 | } 278 | } 279 | 280 | // if we weren't able to acquire the shard using an already existing CAR file -> execute the cache on second miss rule 281 | // and return not found here. 282 | if sa == nil { 283 | cs.logger.Infow(reqID, "failed to acquire shard with nodownload=true, will execute the cache miss code", "err", err) 284 | // block and backpressure the L1 if we have too many concurrent downloads 285 | select { 286 | case cs.cacheMissBuffer <- cacheMissReq{reqId: reqID, root: root}: 287 | cs.logger.Infow(reqID, "queued to cache miss buffer") 288 | case <-cs.ctx.Done(): 289 | return cs.ctx.Err() 290 | default: 291 | cs.logger.Infow(reqID, "dropping cache miss request as no space in buffer") 292 | } 293 | return ErrNotFound 294 | } 295 | defer sa.Close() 296 | cs.logger.Infow(reqID, "acquired shard with nodownload=true") 297 | 298 | bs, err := sa.Blockstore() 299 | if err != nil { 300 | cs.logger.LogError(reqID, "failed to get blockstore for acquired shard", err) 301 | return fmt.Errorf("failed to get blockstore for shard: %w", err) 302 | } 303 | cs.logger.Infow(reqID, "acquired blockstore for shard") 304 | 305 | return writer(&blockstore{bs}) 306 | } 307 | 308 | // do not execute the cache miss if we don't have the capacity to do so 309 | select { 310 | case cs.cacheMissBuffer <- cacheMissReq{reqId: reqID, root: root}: 311 | cs.logger.Infow(reqID, "queued to cache miss buffer") 312 | case <-cs.ctx.Done(): 313 | return cs.ctx.Err() 314 | default: 315 | cs.logger.Infow(reqID, "dropping cache miss request as no space in buffer") 316 | } 317 | 318 | return ErrNotFound 319 | } 
320 | 321 | func (cs *CarStore) cacheMissLoop() { 322 | defer cs.wg.Done() 323 | 324 | for { 325 | select { 326 | case req := <-cs.cacheMissBuffer: 327 | cs.logger.Infow(req.reqId, "dequeued request from cache miss buffer") 328 | cs.executeCacheMiss(req.reqId, req.root) 329 | case <-cs.ctx.Done(): 330 | return 331 | } 332 | } 333 | } 334 | 335 | func (cs *CarStore) executeCacheMiss(reqID uuid.UUID, root cid.Cid) { 336 | cs.mu.Lock() 337 | defer cs.mu.Unlock() 338 | 339 | mhkey := root.Hash().String() 340 | 341 | _, found := cs.cacheMissTimeCache.Get(mhkey) 342 | // add the key to our cache miss timecache no matter what 343 | // if the key already exists in the timecache -> this will simply give a bump to it's longevity in the time cache 344 | cs.cacheMissTimeCache.Add(mhkey, struct{}{}, cache.DefaultExpiration) // nolint:errcheck 345 | 346 | // if this is the very first cache miss for this key, there's nothing to do here. 347 | if !found { 348 | cs.logger.Infow(reqID, "first cache miss for given root, not downloading it") 349 | return 350 | } 351 | cs.logger.Infow(reqID, "more than one cache miss for given root, downloading and caching it") 352 | 353 | // if we're in the process of downloading and caching the key -> there's nothing to do here. 354 | if _, ok := cs.downloading[mhkey]; ok { 355 | cs.logger.Infow(reqID, "download already in progress for given root, returning") 356 | return 357 | } 358 | 359 | // if we have seen a cache miss for this key before and we're not already downloading and caching it -> do it ! 
360 | mnt := &GatewayMount{RootCID: root, API: cs.gwAPI} 361 | cs.downloading[mhkey] = struct{}{} 362 | 363 | select { 364 | case cs.cacheMissSemaphore <- struct{}{}: 365 | cs.logger.Infow(reqID, "acquired cache miss semaphore") 366 | cs.wg.Add(1) 367 | go func(mhkey string) { 368 | defer func() { 369 | <-cs.cacheMissSemaphore 370 | cs.logger.Infow(reqID, "released cache miss semaphore") 371 | cs.mu.Lock() 372 | delete(cs.downloading, mhkey) 373 | cs.mu.Unlock() 374 | cs.wg.Done() 375 | }() 376 | 377 | ctx, cancel := context.WithDeadline(cs.ctx, time.Now().Add(cs.downloadTimeout)) 378 | defer cancel() 379 | sa, err := helpers.RegisterAndAcquireSync(ctx, cs.dagst, keyFromCIDMultihash(root), mnt, dagstore.RegisterOpts{}, dagstore.AcquireOpts{}) 380 | if err == nil { 381 | cs.logger.Infow(reqID, "successfully downloaded and cached given root") 382 | sa.Close() 383 | } else { 384 | cs.logger.LogError(reqID, "download failed as failed to register/acquire shard", err) 385 | } 386 | 387 | }(mhkey) 388 | case <-cs.ctx.Done(): 389 | return 390 | } 391 | } 392 | 393 | func (cs *CarStore) Stat() (station.StorageStats, error) { 394 | var out station.StorageStats 395 | 396 | err := filepath.WalkDir(cs.transientsDir, func(path string, d fs.DirEntry, err error) error { 397 | if err != nil { 398 | return err 399 | } 400 | if d.IsDir() { 401 | return nil 402 | } 403 | 404 | fi, err := d.Info() 405 | if err != nil { 406 | return err 407 | } 408 | out.BytesCurrentlyStored += uint64(fi.Size()) 409 | return nil 410 | }) 411 | 412 | return out, err 413 | } 414 | 415 | // newDatastore creates a datastore under the given base directory 416 | // for dagstore metadata. 417 | func newDatastore(dir string) (ds.Batching, error) { 418 | // Create the datastore directory if it doesn't exist yet. 
419 | if err := os.MkdirAll(dir, 0755); err != nil { 420 | return nil, fmt.Errorf("failed to create directory %s: %w", dir, err) 421 | } 422 | 423 | // Create a new LevelDB datastore 424 | dstore, err := levelds.NewDatastore(dir, &levelds.Options{ 425 | Compression: ldbopts.NoCompression, 426 | NoSync: false, 427 | Strict: ldbopts.StrictAll, 428 | ReadOnly: false, 429 | }) 430 | if err != nil { 431 | return nil, fmt.Errorf("failed to open datastore: %w", err) 432 | } 433 | return dstore, nil 434 | } 435 | 436 | func keyFromCIDMultihash(c cid.Cid) shard.Key { 437 | return shard.KeyFromString(c.Hash().String()) 438 | } 439 | -------------------------------------------------------------------------------- /carstore/carstore_test.go: -------------------------------------------------------------------------------- 1 | package carstore 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "sync" 8 | "testing" 9 | "time" 10 | 11 | "golang.org/x/sync/errgroup" 12 | 13 | "github.com/ipld/go-ipld-prime/codec/dagcbor" 14 | "github.com/ipld/go-ipld-prime/multicodec" 15 | 16 | "github.com/filecoin-project/saturn-l2/station" 17 | 18 | cid "github.com/ipfs/go-cid" 19 | 20 | "github.com/filecoin-project/dagstore" 21 | 22 | "github.com/filecoin-project/saturn-l2/logs" 23 | "github.com/google/uuid" 24 | 25 | "github.com/filecoin-project/saturn-l2/testutils" 26 | bstore "github.com/ipfs/go-ipfs-blockstore" 27 | 28 | "github.com/stretchr/testify/require" 29 | ) 30 | 31 | var defaultMaxSize = int64(200 * 1073741824) // 200 Gib 32 | 33 | func init() { 34 | multicodec.RegisterEncoder(0x71, dagcbor.Encode) 35 | multicodec.RegisterDecoder(0x71, dagcbor.Decode) 36 | } 37 | 38 | func TestPersistentCache(t *testing.T) { 39 | ctx := context.Background() 40 | 41 | carv1File := "../testdata/files/sample-v1.car" 42 | rootcid, bz := testutils.ParseCar(t, ctx, carv1File) 43 | svc := testutils.GetTestServer(t, rootcid.String(), bz) 44 | defer svc.Close() 45 | csh := newCarStoreHarness(t, 
svc.URL, Config{MaxCARFilesDiskSpace: defaultMaxSize}) 46 | reqID := uuid.New() 47 | 48 | csh.assertStorageStats(t, station.StorageStats{BytesCurrentlyStored: 0}) 49 | // first hit -> not found 50 | csh.fetchAndAssertNotFound(reqID, rootcid) 51 | fmt.Println("\n data downloaded after first hit is", csh.ms.nDownloaded()) 52 | 53 | // second hit -> not found 54 | csh.fetchAndAssertNotFound(reqID, rootcid) 55 | fmt.Println("\n data downloaded after second hit is", csh.ms.nDownloaded()) 56 | 57 | // wait till l2 has fetched and cached it 58 | csh.assertAvailable(t, ctx, rootcid) 59 | fmt.Println("\n data downloaded after caching is", csh.ms.nDownloaded()) 60 | 61 | // third hit -> found 62 | csh.fetchAndAssertFound(ctx, reqID, rootcid) 63 | require.EqualValues(t, len(bz), csh.ms.nDownloaded()) 64 | fmt.Println("\n data downloaded is after third hit", csh.ms.nDownloaded()) 65 | 66 | // fourth hit -> found 67 | csh.fetchAndAssertFound(ctx, reqID, rootcid) 68 | require.EqualValues(t, len(bz), csh.ms.nDownloaded()) 69 | fmt.Println("\n data downloaded after fetching success is", csh.ms.nDownloaded()) 70 | 71 | // wait for shard to become reclaimable again 72 | require.Eventually(t, func() bool { 73 | si, err := csh.cs.dagst.GetShardInfo(keyFromCIDMultihash(rootcid)) 74 | return err == nil && si.ShardState == dagstore.ShardStateAvailable 75 | }, 50*time.Second, 200*time.Millisecond) 76 | 77 | csh.assertStorageStats(t, station.StorageStats{BytesCurrentlyStored: uint64(len(bz))}) 78 | 79 | // run dagstore GC -> CAR file is removed 80 | res, err := csh.cs.dagst.GC(ctx) 81 | require.NoError(t, err) 82 | require.Len(t, res.Shards, 1) 83 | csh.assertStorageStats(t, station.StorageStats{BytesCurrentlyStored: 0}) 84 | 85 | fmt.Println("\n data downloaded after dagstore GC is", csh.ms.nDownloaded()) 86 | 87 | // fetch car -> fails as we do not have it but will trigger a fetch again 88 | csh.fetchAndAssertNotFound(reqID, rootcid) 89 | fmt.Println("\n data downloaded after fetch 
trigger fails is", csh.ms.nDownloaded()) 90 | 91 | // fetch car -> works now as car file was downloaded in the previous fetch 92 | require.Eventually(t, func() bool { 93 | err = csh.cs.FetchAndWriteCAR(reqID, rootcid, func(_ bstore.Blockstore) error { 94 | return nil 95 | }) 96 | fmt.Println("\n data downloaded after a cycle is", csh.ms.nDownloaded()) 97 | return err == nil 98 | }, 50*time.Second, 200*time.Millisecond) 99 | 100 | require.NoError(t, csh.cs.Stop()) 101 | require.EqualValues(t, 2*len(bz), csh.ms.nDownloaded()) 102 | 103 | csh.assertStorageStats(t, station.StorageStats{BytesCurrentlyStored: uint64(len(bz))}) 104 | } 105 | 106 | func TestPersistentCacheConcurrent(t *testing.T) { 107 | ctx := context.Background() 108 | carv1File := "../testdata/files/sample-v1.car" 109 | rootcid, bz := testutils.ParseCar(t, ctx, carv1File) 110 | svc := testutils.GetTestServer(t, rootcid.String(), bz) 111 | defer svc.Close() 112 | csh := newCarStoreHarness(t, svc.URL, Config{MaxCARFilesDiskSpace: defaultMaxSize}) 113 | 114 | // send 100 concurrent requests 115 | csh.fetchNAsyNC(rootcid, 100) 116 | 117 | csh.assertAvailable(t, ctx, rootcid) 118 | 119 | // fetch shard 100 times -> should work 120 | var errg errgroup.Group 121 | for i := 0; i < 100; i++ { 122 | errg.Go(func() error { 123 | return csh.cs.FetchAndWriteCAR(uuid.New(), rootcid, func(_ bstore.Blockstore) error { 124 | return nil 125 | }) 126 | }) 127 | 128 | } 129 | require.NoError(t, errg.Wait()) 130 | require.EqualValues(t, len(bz), csh.ms.nDownloaded()) 131 | 132 | csh.assertStorageStats(t, station.StorageStats{BytesCurrentlyStored: uint64(len(bz))}) 133 | } 134 | 135 | func TestPersistentCacheMultipleParallelRequests(t *testing.T) { 136 | ctx := context.Background() 137 | carFile1 := "../testdata/files/sample-v1.car" 138 | rootcid1, bz1 := testutils.ParseCar(t, ctx, carFile1) 139 | 140 | carFile2 := "../testdata/files/sample-rw-bs-v2.car" 141 | rootcid2, bz2 := testutils.ParseCar(t, ctx, carFile2) 142 | 143 | 
out := make(map[string][]byte) 144 | out[rootcid1.String()] = bz1 145 | out[rootcid2.String()] = bz2 146 | 147 | svc := testutils.GetTestServerForRoots(t, out) 148 | defer svc.Close() 149 | 150 | csh := newCarStoreHarness(t, svc.URL, Config{MaxCARFilesDiskSpace: defaultMaxSize}) 151 | // send 100 concurrent requests 152 | csh.fetchNAsyNC(rootcid1, 100) 153 | // send 100 concurrent requests 154 | csh.fetchNAsyNC(rootcid2, 100) 155 | 156 | csh.assertAvailable(t, ctx, rootcid1) 157 | csh.assertAvailable(t, ctx, rootcid2) 158 | 159 | roots := []cid.Cid{rootcid1, rootcid2} 160 | 161 | // fetch shard 100 times -> should work 162 | var errg errgroup.Group 163 | for i := 0; i < 100; i++ { 164 | i := i 165 | errg.Go(func() error { 166 | return csh.cs.FetchAndWriteCAR(uuid.New(), roots[i%2], func(bs bstore.Blockstore) error { 167 | blk, err := bs.Get(ctx, roots[i%2]) 168 | if err != nil { 169 | return err 170 | } 171 | if blk == nil { 172 | return errors.New("not found") 173 | } 174 | 175 | return nil 176 | }) 177 | }) 178 | 179 | } 180 | require.NoError(t, errg.Wait()) 181 | 182 | csh.assertStorageStats(t, station.StorageStats{BytesCurrentlyStored: uint64(len(bz1) + len(bz2))}) 183 | require.EqualValues(t, len(bz1)+len(bz2), csh.ms.nDownloaded()) 184 | } 185 | 186 | func TestMountFetchErrorConcurrent(t *testing.T) { 187 | ctx := context.Background() 188 | carv1File := "../testdata/files/sample-v1.car" 189 | rootcid, _ := testutils.ParseCar(t, ctx, carv1File) 190 | svc := testutils.GetTestErrorServer(t) 191 | defer svc.Close() 192 | csh := newCarStoreHarness(t, svc.URL, Config{MaxCARFilesDiskSpace: defaultMaxSize}) 193 | 194 | // send 100 concurrent requests 195 | csh.fetchNAsyNC(rootcid, 100) 196 | 197 | // fetch 100 times -> all fail and no panic 198 | errCh := make(chan error, 100) 199 | 200 | for i := 0; i < 100; i++ { 201 | go func() { 202 | errCh <- csh.cs.FetchAndWriteCAR(uuid.New(), rootcid, func(_ bstore.Blockstore) error { 203 | return nil 204 | }) 205 | }() 206 | 
} 207 | 208 | for i := 0; i < 100; i++ { 209 | err := <-errCh 210 | require.EqualError(t, err, ErrNotFound.Error()) 211 | } 212 | 213 | require.EqualValues(t, 0, csh.ms.nDownloaded()) 214 | csh.assertStorageStats(t, station.StorageStats{BytesCurrentlyStored: 0}) 215 | } 216 | 217 | func TestDownloadTimeout(t *testing.T) { 218 | ctx := context.Background() 219 | carv1File := "../testdata/files/sample-v1.car" 220 | 221 | rootcid, _ := testutils.ParseCar(t, ctx, carv1File) 222 | 223 | svc := testutils.GetTestHangingServer(t) 224 | csh := newCarStoreHarness(t, svc.URL, Config{MaxCARFilesDiskSpace: defaultMaxSize, DownloadTimeout: 1 * time.Millisecond}) 225 | 226 | reqID := uuid.New() 227 | // first try -> not found 228 | csh.fetchAndAssertNotFound(reqID, rootcid) 229 | 230 | // second try -> not found 231 | csh.fetchAndAssertNotFound(reqID, rootcid) 232 | 233 | time.Sleep(3 * time.Second) 234 | 235 | // still errors out 236 | csh.fetchAndAssertNotFound(reqID, rootcid) 237 | require.EqualValues(t, 0, csh.ms.nDownloaded()) 238 | csh.assertStorageStats(t, station.StorageStats{BytesCurrentlyStored: 0}) 239 | } 240 | 241 | func (csh *carstoreHarness) assertAvailable(t *testing.T, ctx context.Context, c cid.Cid) { 242 | require.Eventually(t, func() bool { 243 | ks, err := csh.cs.dagst.ShardsContainingMultihash(ctx, c.Hash()) 244 | return err == nil && len(ks) == 1 245 | }, 50*time.Second, 200*time.Millisecond) 246 | } 247 | 248 | func (csh *carstoreHarness) fetchNAsyNC(rootCid cid.Cid, n int) { 249 | var wg sync.WaitGroup 250 | for i := 0; i < n; i++ { 251 | wg.Add(1) 252 | go func() { 253 | defer wg.Done() 254 | csh.cs.FetchAndWriteCAR(uuid.New(), rootCid, func(_ bstore.Blockstore) error { // nolint: errcheck 255 | return nil 256 | }) 257 | }() 258 | } 259 | wg.Wait() 260 | } 261 | 262 | type carstoreHarness struct { 263 | t *testing.T 264 | cs *CarStore 265 | ms *mockStationAPI 266 | } 267 | 268 | func newCarStoreHarness(t *testing.T, apiurl string, cfg Config) 
*carstoreHarness { 269 | sapi := &mockStationAPI{} 270 | lg := logs.NewSaturnLogger() 271 | 272 | ctx := context.Background() 273 | temp := t.TempDir() 274 | 275 | cs, err := New(temp, NewGatewayAPI(apiurl, sapi, 100000000), cfg, lg) 276 | require.NoError(t, err) 277 | require.NoError(t, cs.Start(ctx)) 278 | 279 | return &carstoreHarness{ 280 | cs: cs, 281 | t: t, 282 | ms: sapi, 283 | } 284 | } 285 | 286 | func (csh *carstoreHarness) fetchAndAssertNotFound(reqID uuid.UUID, rootCid cid.Cid) { 287 | err := csh.cs.FetchAndWriteCAR(reqID, rootCid, func(_ bstore.Blockstore) error { 288 | return nil 289 | }) 290 | require.EqualValues(csh.t, err, ErrNotFound) 291 | } 292 | 293 | func (csh *carstoreHarness) fetchAndAssertFound(ctx context.Context, reqID uuid.UUID, rootCid cid.Cid) { 294 | err := csh.cs.FetchAndWriteCAR(reqID, rootCid, func(bs bstore.Blockstore) error { 295 | blk, err := bs.Get(ctx, rootCid) 296 | if err != nil { 297 | return err 298 | } 299 | if blk == nil { 300 | return errors.New("empty root") 301 | } 302 | return nil 303 | }) 304 | require.NoError(csh.t, err) 305 | } 306 | 307 | func (csh *carstoreHarness) assertStorageStats(t *testing.T, ess station.StorageStats) { 308 | ss, err := csh.cs.Stat() 309 | require.NoError(t, err) 310 | require.Equal(t, ess, ss) 311 | } 312 | -------------------------------------------------------------------------------- /carstore/gateway_api.go: -------------------------------------------------------------------------------- 1 | package carstore 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | 10 | "github.com/filecoin-project/saturn-l2/station" 11 | 12 | "github.com/filecoin-project/dagstore/mount" 13 | cid "github.com/ipfs/go-cid" 14 | ) 15 | 16 | var ( 17 | // ErrDownloadTooLarge means that the file being downloaded from the IPFS Gateway is larger than the 18 | // maximum size allowed. 
19 | ErrDownloadTooLarge = errors.New("download is too large") 20 | ) 21 | 22 | type GatewayAPI interface { 23 | Fetch(ctx context.Context, rootCID cid.Cid) (mount.Reader, error) 24 | } 25 | 26 | var _ GatewayAPI = (*gatewayAPI)(nil) 27 | 28 | type gatewayAPI struct { 29 | client *http.Client 30 | baseURL string 31 | sApi station.StationAPI 32 | maxDownloadPerRequest uint64 33 | } 34 | 35 | func NewGatewayAPI(baseURL string, sApi station.StationAPI, maxDownloadPerRequest uint64) *gatewayAPI { 36 | client := &http.Client{ 37 | Timeout: defaultDownloadTimeout, 38 | } 39 | return &gatewayAPI{ 40 | maxDownloadPerRequest: maxDownloadPerRequest, 41 | client: client, 42 | baseURL: baseURL, 43 | sApi: sApi, 44 | } 45 | } 46 | 47 | func (g *gatewayAPI) Fetch(ctx context.Context, rootCID cid.Cid) (mount.Reader, error) { 48 | req, err := http.NewRequestWithContext(ctx, "GET", g.baseURL, nil) 49 | if err != nil { 50 | return nil, fmt.Errorf("failed to create http request: %w", err) 51 | } 52 | q := req.URL.Query() 53 | q.Add("arg", rootCID.String()) 54 | req.URL.RawQuery = q.Encode() 55 | 56 | resp, err := g.client.Do(req) 57 | if err != nil { 58 | return nil, fmt.Errorf("failed to execute http request: %w", err) 59 | } 60 | if resp.StatusCode != http.StatusOK { 61 | return nil, fmt.Errorf("http req failed: code: %d, status: '%s'", resp.StatusCode, resp.Status) 62 | } 63 | 64 | return &GatewayReader{ 65 | ctx: ctx, 66 | ReadCloser: resp.Body, 67 | sapi: g.sApi, 68 | maxDownloadPerRequest: g.maxDownloadPerRequest, 69 | }, nil 70 | } 71 | 72 | var _ mount.Reader = (*GatewayReader)(nil) 73 | 74 | type GatewayReader struct { 75 | ctx context.Context 76 | 77 | io.ReadCloser 78 | io.ReaderAt 79 | io.Seeker 80 | n uint64 81 | 82 | sapi station.StationAPI 83 | 84 | maxDownloadPerRequest uint64 85 | } 86 | 87 | func (gw *GatewayReader) Read(p []byte) (int, error) { 88 | n, err := gw.ReadCloser.Read(p) 89 | gw.n += uint64(n) 90 | if gw.n >= gw.maxDownloadPerRequest { 91 | return n, 
ErrDownloadTooLarge 92 | } 93 | return n, err 94 | } 95 | 96 | func (gw *GatewayReader) Close() error { 97 | fmt.Println("\n Recoding downloaded data", gw.n) 98 | if err := gw.sapi.RecordDataDownloaded(gw.ctx, gw.n); err != nil { 99 | log.Errorw("failed to record download stats", "err", err) 100 | return err 101 | } 102 | 103 | return nil 104 | } 105 | -------------------------------------------------------------------------------- /carstore/gateway_api_test.go: -------------------------------------------------------------------------------- 1 | package carstore 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "net/http" 8 | "net/http/httptest" 9 | "testing" 10 | 11 | "github.com/filecoin-project/saturn-l2/testutils" 12 | 13 | cid "github.com/ipfs/go-cid" 14 | "github.com/stretchr/testify/require" 15 | ) 16 | 17 | var ( 18 | defaultURL = "https://ipfs.io/api/v0/dag/export" // nolint 19 | root = "QmfMYyn8LUWEfRXfijKFjBAshSsPVRUgwLZzsD7kcTtX1A" 20 | ) 21 | 22 | func TestGatewayAPI(t *testing.T) { 23 | ctx := context.Background() 24 | 25 | bz := []byte("hello") 26 | svc := testutils.GetTestServer(t, root, bz) 27 | defer svc.Close() 28 | 29 | gw := NewGatewayAPI(svc.URL, nil, 10000) 30 | 31 | c, err := cid.Decode(root) 32 | require.NoError(t, err) 33 | 34 | rd, err := gw.Fetch(ctx, c) 35 | require.NoError(t, err) 36 | require.NotEmpty(t, rd) 37 | 38 | out, err := io.ReadAll(rd) 39 | require.NoError(t, err) 40 | require.EqualValues(t, bz, out) 41 | } 42 | 43 | func TestGatewayAPIFailure(t *testing.T) { 44 | svc := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 45 | w.WriteHeader(http.StatusInternalServerError) 46 | })) 47 | defer svc.Close() 48 | 49 | ctx := context.Background() 50 | gw := NewGatewayAPI(svc.URL, nil, 10000) 51 | 52 | c, err := cid.Decode(root) 53 | require.NoError(t, err) 54 | 55 | rd, err := gw.Fetch(ctx, c) 56 | require.Error(t, err) 57 | require.Empty(t, rd) 58 | } 59 | 60 | func TestIPFSGateway(t 
*testing.T) { 61 | t.Skip("e2e test with IPFS Gateway") 62 | ctx := context.Background() 63 | gw := &gatewayAPI{ 64 | baseURL: defaultURL, 65 | } 66 | 67 | c, err := cid.Decode(root) 68 | require.NoError(t, err) 69 | 70 | rd, err := gw.Fetch(ctx, c) 71 | require.NoError(t, err) 72 | require.NotEmpty(t, rd) 73 | 74 | bz, err := io.ReadAll(rd) 75 | require.NoError(t, err) 76 | require.NotEmpty(t, bz) 77 | } 78 | 79 | func TestDownloadFailsIfTooLarge(t *testing.T) { 80 | ctx := context.Background() 81 | 82 | bz := []byte("hello") 83 | svc := testutils.GetTestServer(t, root, bz) 84 | defer svc.Close() 85 | 86 | gw := NewGatewayAPI(svc.URL, nil, 1) 87 | 88 | c, err := cid.Decode(root) 89 | require.NoError(t, err) 90 | 91 | rd, err := gw.Fetch(ctx, c) 92 | require.NoError(t, err) 93 | require.NotEmpty(t, rd) 94 | 95 | _, err = io.ReadAll(rd) 96 | require.True(t, errors.Is(err, ErrDownloadTooLarge)) 97 | } 98 | -------------------------------------------------------------------------------- /carstore/gateway_mount.go: -------------------------------------------------------------------------------- 1 | package carstore 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/url" 7 | 8 | "github.com/filecoin-project/dagstore/mount" 9 | cid "github.com/ipfs/go-cid" 10 | ) 11 | 12 | type GatewayMount struct { 13 | API GatewayAPI 14 | RootCID cid.Cid 15 | } 16 | 17 | func (g *GatewayMount) Serialize() *url.URL { 18 | return &url.URL{ 19 | Host: g.RootCID.String(), 20 | } 21 | } 22 | 23 | func (g *GatewayMount) Deserialize(u *url.URL) error { 24 | rootCID, err := cid.Decode(u.Host) 25 | if err != nil { 26 | return fmt.Errorf("failed to parse root cid from host '%s': %w", u.Host, err) 27 | } 28 | g.RootCID = rootCID 29 | return nil 30 | } 31 | 32 | func (g *GatewayMount) Fetch(ctx context.Context) (mount.Reader, error) { 33 | return g.API.Fetch(ctx, g.RootCID) 34 | } 35 | 36 | func (g *GatewayMount) Info() mount.Info { 37 | return mount.Info{ 38 | Kind: mount.KindRemote, 39 | 
AccessSequential: true, 40 | AccessSeek: false, 41 | AccessRandom: false, 42 | } 43 | } 44 | 45 | func (g *GatewayMount) Close() error { 46 | return nil 47 | } 48 | 49 | func (l *GatewayMount) Stat(ctx context.Context) (mount.Stat, error) { 50 | // TODO: Size. 51 | return mount.Stat{ 52 | Exists: true, 53 | Ready: true, 54 | }, nil 55 | } 56 | 57 | func mountTemplate(api GatewayAPI) *GatewayMount { 58 | return &GatewayMount{API: api} 59 | } 60 | -------------------------------------------------------------------------------- /carstore/gateway_mount_test.go: -------------------------------------------------------------------------------- 1 | package carstore 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "sync" 7 | "testing" 8 | 9 | "github.com/filecoin-project/saturn-l2/station" 10 | 11 | "github.com/filecoin-project/saturn-l2/testutils" 12 | 13 | cid "github.com/ipfs/go-cid" 14 | "github.com/stretchr/testify/require" 15 | ) 16 | 17 | func TestGatewayMount(t *testing.T) { 18 | bz := []byte("hello") 19 | 20 | svc := testutils.GetTestServer(t, root, bz) 21 | defer svc.Close() 22 | 23 | gwAPI := NewGatewayAPI(svc.URL, &mockStationAPI{}, 10000) 24 | ctx := context.Background() 25 | c, err := cid.Decode(root) 26 | require.NoError(t, err) 27 | gm := &GatewayMount{ 28 | RootCID: c, 29 | API: gwAPI, 30 | } 31 | 32 | u := gm.Serialize() 33 | require.NotEmpty(t, u) 34 | 35 | gm2 := &GatewayMount{ 36 | API: gwAPI, 37 | } 38 | require.NoError(t, gm2.Deserialize(u)) 39 | 40 | rd, err := gm2.Fetch(ctx) 41 | require.NoError(t, err) 42 | require.NotEmpty(t, rd) 43 | out, err := io.ReadAll(rd) 44 | require.NoError(t, err) 45 | require.NotEmpty(t, out) 46 | require.EqualValues(t, out, bz) 47 | } 48 | 49 | type mockStationAPI struct { 50 | station.StationAPI 51 | 52 | mu sync.Mutex 53 | downloaded uint64 54 | } 55 | 56 | func (m *mockStationAPI) RecordDataDownloaded(_ context.Context, bytesDownloaded uint64) error { 57 | m.mu.Lock() 58 | defer m.mu.Unlock() 59 | 60 | m.downloaded += 
bytesDownloaded 61 | return nil 62 | } 63 | 64 | func (m *mockStationAPI) nDownloaded() uint64 { 65 | m.mu.Lock() 66 | defer m.mu.Unlock() 67 | 68 | return m.downloaded 69 | } 70 | -------------------------------------------------------------------------------- /cmd/saturn-l2/l1_discovery_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | var ( 14 | addr1 = &L1Addr{ 15 | Id: "1", 16 | Ip: "1.1.1.1", 17 | Distance: 1.0, 18 | Weight: 1, 19 | } 20 | addr2 = &L1Addr{ 21 | Id: "2", 22 | Ip: "2.2.2.2", 23 | Distance: 1.0, 24 | Weight: 1, 25 | } 26 | addr3 = &L1Addr{ 27 | Id: "3", 28 | Ip: "3.3.3.3", 29 | Distance: 1.0, 30 | Weight: 1, 31 | } 32 | ) 33 | 34 | func TestL1Discovery(t *testing.T) { 35 | ctx := context.Background() 36 | svc := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 37 | addrs := []L1Addr{*addr1, *addr2, *addr3} 38 | bz, _ := json.Marshal(addrs) 39 | 40 | _, err := w.Write(bz) 41 | if err != nil { 42 | http.Error(w, err.Error(), http.StatusInternalServerError) 43 | } 44 | })) 45 | 46 | cfg := config{ 47 | L1DiscoveryAPIUrl: svc.URL, 48 | MaxL1Connections: 100, 49 | } 50 | l1s, err := getNearestL1sWithRetry(ctx, cfg, 1) 51 | require.NoError(t, err) 52 | require.Len(t, l1s, 3) 53 | 54 | cfg.MaxL1Connections = 1 55 | l1s, err = getNearestL1sWithRetry(ctx, cfg, 1) 56 | require.NoError(t, err) 57 | require.Len(t, l1s, 1) 58 | require.EqualValues(t, addr1.Ip, l1s[0]) 59 | } 60 | 61 | func TestL1DiscoveryFailure(t *testing.T) { 62 | svc := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 63 | if _, err := w.Write([]byte("failure")); err != nil { 64 | http.Error(w, err.Error(), http.StatusInternalServerError) 65 | } 66 | })) 67 | 68 | cfg := config{ 69 | 
L1DiscoveryAPIUrl: svc.URL, 70 | MaxL1Connections: 100, 71 | } 72 | l1s, err := getNearestL1sWithRetry(context.Background(), cfg, 1) 73 | require.Error(t, err) 74 | require.Empty(t, l1s) 75 | } 76 | -------------------------------------------------------------------------------- /cmd/saturn-l2/l2_id_persistence_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/google/uuid" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestL2IdIsPersisted(t *testing.T) { 11 | dir := t.TempDir() 12 | id, err := readL2IdIfExists(dir) 13 | require.Error(t, err) 14 | require.EqualValues(t, uuid.UUID{}, id) 15 | 16 | id, err = createAndPersistL2Id(dir) 17 | require.NoError(t, err) 18 | require.NotEqualValues(t, uuid.UUID{}, id) 19 | 20 | id2, err := readL2IdIfExists(dir) 21 | require.NoError(t, err) 22 | require.EqualValues(t, id, id2) 23 | } 24 | -------------------------------------------------------------------------------- /cmd/saturn-l2/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net" 10 | "net/http" 11 | "net/url" 12 | "os" 13 | "os/signal" 14 | "path/filepath" 15 | "runtime" 16 | "strconv" 17 | "strings" 18 | "sync" 19 | "syscall" 20 | "time" 21 | 22 | "github.com/jpillora/backoff" 23 | 24 | "go.uber.org/atomic" 25 | 26 | "github.com/filecoin-project/saturn-l2/l1interop" 27 | 28 | "github.com/google/uuid" 29 | 30 | logging "github.com/ipfs/go-log/v2" 31 | 32 | "github.com/filecoin-project/saturn-l2/station" 33 | 34 | "github.com/filecoin-project/saturn-l2/carserver" 35 | "github.com/filecoin-project/saturn-l2/carstore" 36 | "github.com/filecoin-project/saturn-l2/logs" 37 | ds "github.com/ipfs/go-datastore" 38 | levelds "github.com/ipfs/go-ds-leveldb" 39 | ldbopts "github.com/syndtr/goleveldb/leveldb/opt" 40 | 
	address "github.com/filecoin-project/go-address"
	"github.com/gorilla/mux"

	"github.com/filecoin-project/saturn-l2/resources"
)

// L1Addr is a single entry in the L1 Discovery API response: one candidate L1
// node this L2 could connect to.
type L1Addr struct {
	Id       string  `json:"id"`
	Ip       string  `json:"ip"`
	Distance float64 `json:"distance"`
	Weight   uint64  `json:"weight"`
}

// L1IPAddrs is the list of L1 IP addresses this L2 node will serve.
type L1IPAddrs []string

var log = logging.Logger("saturn-l2")

const (
	// PORT_ENV_VAR is the environment variable that determines the port the saturn L2 service will bind to.
	// If this environment variable is not configured, this service will bind to any available port.
	PORT_ENV_VAR = "PORT"

	// ROOT_DIR_ENV_VAR is the environment variable that determines the root directory of the Saturn L2 Node.
	// All persistent state and cached CAR files will be persisted under this directory. Defaults to $HOME/.saturn.
	ROOT_DIR_ENV_VAR = "ROOT_DIR"

	// MAX_DISK_SPACE_VAR configures the environment variable that determines the maximum disk space the L2 node can use to
	// store cached CAR files. If this env variable is not configured, it defaults to 200GiB.
	MAX_DISK_SPACE_VAR = "MAX_L2_DISK_SPACE"

	// FIL_ADDRESS_VAR is the environment variable holding the operator's FIL wallet address (required).
	FIL_ADDRESS_VAR = "FIL_WALLET_ADDRESS"

	// L1_DISCOVERY_URL_VAR configures the environment variable that determines the URL of the L1 Discovery API to invoke to
	// get back the L1 nodes this L2 node will connect and serve CAR files to.
	// Defaults to `defaultL1DiscoveryURL`.
	L1_DISCOVERY_URL_VAR = "L1_DISCOVERY_API_URL"

	// MAX_L1s_VAR configures the environment variable that determines the maximum
	// number of L1s this L2 will connect to and join the swarm for. Defaults to 100.
	MAX_L1s_VAR = "MAX_L1s"

	// MAX_CONCURRENT_L1_REQUESTS_VAR configures the environment variable that determines the maximum
	// number of CAR file requests that will be processed concurrently for a single L1. defaults to 3.
	MAX_CONCURRENT_L1_REQUESTS_VAR = "MAX_CONCURRENT_L1_REQUESTS"

	// TEST_L1_IPS_VAR configures the environment variable that determines the L1 IP Addresses
	// that this L2 node will join the swarm for and serve CAR files to. This environment variable accepts a comma
	// separated list of L1 IP addresses.
	// If this environment variable is set, the `L1_DISCOVERY_URL` environment variable becomes a no-op.
	TEST_L1_IPS_VAR = "TEST_L1_IPS"
)

var (
	// gateway endpoint used to download CAR files for cache misses
	gateway_base_url = "https://ipfs.io/api/v0/dag/export"

	defaultMaxDiskSpace = uint64(200 * 1073741824) // 200 GiB

	// file the L2 Node Id will be persisted to.
	idFile = ".l2Id"

	// 1 MiB
	maxL1DiscoveryResponseSize = int64(1048576)

	defaultMaxL1ConcurrentRequests = uint64(3)

	// default maximum of the number of L1s this L2 node will connect to
	defaultMaxL1s = uint64(100)

	// timeout of the request we make to discover L1s
	l1_discovery_timeout = 5 * time.Minute

	// number of maximum connections to a single L1
	maxConnsPerL1 = 5

	// we are okay having upto 500 long running idle connections with L1s
	totalMaxIdleL1Conns = 500

	// in-activity timeout before we close an idle connection to an L1
	idleL1ConnTimeout = 30 * time.Minute

	// DNS Hostname of Saturn L1 Nodes for the L1 Test network.
	saturn_l1_hostName = "strn-test.pl"

	defaultL1DiscoveryURL = "https://orchestrator.strn-test.pl/nodes"

	// how often the connectivity logger re-checks the connected-L1 count
	checkL1ConnectivityInterval = 5 * time.Second

	// retry/backoff parameters for the orchestrator discovery call
	maxL1DiscoveryAttempts = float64(10)
	maxL1DiscoveryBackoff  = 60 * time.Second
	minL1DiscoveryBackoff  = 2 * time.Second

	maxDownloadPerRequest = uint64(2147483648) // 2 GiB
)

// config is the fully-resolved runtime configuration of the L2 node,
// assembled from environment variables by mkConfig.
type config struct {
	Port                    int
	FilAddr                 string `json:"fil_wallet_address"`
	MaxDiskSpace            uint64
	RootDir                 string
	L1DiscoveryAPIUrl       string
	MaxL1Connections        int
	MaxConcurrentL1Requests int
	UseTestL1IPAddrs        bool
	TestL1IPAddr            L1IPAddrs
	MaxDownloadPerRequest   uint64
}

// main wires together the whole L2 node: config parsing, L2 id persistence,
// L1 discovery, the CAR server, one SSE client per L1, and the local HTTP API
// (config / webui / stats). Shutdown is driven by SIGINT/SIGTERM via cleanup.
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var carserver *CARServer
	var srv *http.Server
	var l1wg sync.WaitGroup
	var l1Clients []*l1interop.L1SseClient

	// cleanup tears everything down in dependency order and exits the process.
	cleanup := func() {
		log.Info("shutting down all threads")
		cancel()

		// shut down the car server
		if carserver != nil {
			log.Info("shutting down the CAR server")
			if err := carserver.Stop(ctx); err != nil {
				log.Errorw("failed to stop car server", "err", err)
			}
		}

		// shut down all l1 clients
		for _, lc := range l1Clients {
			lc := lc
			log.Infow("closing connection with l1", "l1", lc.L1Addr)
			lc.Stop()
			log.Infow("finished closing connection with l1", "l1", lc.L1Addr)
		}

		// wait for all l1 connections to be torn down
		log.Info("waiting for all l1 connections to be torn down")
		l1wg.Wait()
		log.Info("finished tearing down all l1 connections")

		// shut down the http server
		if srv != nil {
			log.Info("shutting down the http server")
			_ = srv.Close()
		}
		os.Exit(0)
	}

	logging.SetAllLoggers(logging.LevelInfo)
	if err := logging.SetLogLevel("dagstore", "ERROR"); err != nil {
		panic(err)
	}
	// build app context: run cleanup on SIGINT/SIGTERM
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-c
		log.Info("detected shutdown signal, will cleanup...")
		cleanup()
	}()

	// build L2 config
	cfg, err := mkConfig()
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to build config: %s", err.Error())
		os.Exit(2)
	}
	log.Infow("parsed config", "cfg", cfg)

	// generate L2 UUID if this is the first run
	l2Id, err := readL2IdIfExists(cfg.RootDir)
	if err != nil {
		l2Id, err = createAndPersistL2Id(cfg.RootDir)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to write L2 Id to file: %s", err.Error())
			os.Exit(2)
		}
	}
	log.Infow("read l2 node Id", "l2Id", l2Id)

	cfgJson, err := json.Marshal(cfg)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to serialise config: %s\n", err.Error())
		os.Exit(2)
	}

	// get the Nearest L1s by talking to the orchestrator
	var l1IPAddrs L1IPAddrs
	log.Info("waiting to discover L1s...")

	if cfg.UseTestL1IPAddrs {
		// test override: skip discovery entirely
		l1IPAddrs = cfg.TestL1IPAddr
	} else {
		l1IPAddrs, err = getNearestL1sWithRetry(ctx, cfg, maxL1DiscoveryAttempts)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to get nearest L1s to connect to: %s\n", err.Error())
			os.Exit(2)
		}
	}
	log.Infow("discovered L1s", "l1 IP Addrs", strings.Join(l1IPAddrs, ", "))
	fmt.Println("INFO: Saturn Node was able to connect to the Orchestrator and will now start connecting to the Saturn network...")

	// build the saturn logger
	logger := logs.NewSaturnLogger()

	// build a robust http client to use to connect and serve CAR files to L1s
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.MaxIdleConns = totalMaxIdleL1Conns
	tr.MaxConnsPerHost = maxConnsPerL1
	tr.MaxIdleConnsPerHost = maxConnsPerL1 // number of maximum idle connections to a single L1
	tr.IdleConnTimeout = idleL1ConnTimeout
	// L1s are dialled by IP; pin SNI/verification to the L1 DNS name instead
	tr.TLSClientConfig.ServerName = saturn_l1_hostName
	l1HttpClient := &http.Client{
		Transport: tr,
	}

	// build and start the CAR server
	carserver, err = buildCarServer(cfg, logger)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to build car server: %s", err.Error())
		os.Exit(2)
	}
	if err := carserver.Start(ctx); err != nil {
		fmt.Fprintf(os.Stderr, "failed to start car server: %s", err.Error())
		os.Exit(2)
	}

	// Connect and register with all L1s and start serving their requests.
	// Each client signals failedL1Ch exactly once when it gives up for good.
	nConnectedL1s := atomic.NewUint64(0)
	failedL1Ch := make(chan struct{}, len(l1IPAddrs))

	for _, l1ip := range l1IPAddrs {
		l1ip := l1ip
		l1client := l1interop.New(l2Id.String(), l1HttpClient, logger, carserver.server, l1ip, cfg.MaxConcurrentL1Requests)
		l1Clients = append(l1Clients, l1client)

		l1wg.Add(1)
		go func(l1ip string) {
			defer l1wg.Done()
			defer func() {
				failedL1Ch <- struct{}{}
			}()

			if err := l1client.Start(nConnectedL1s); err != nil {
				if !errors.Is(err, context.Canceled) {
					log.Errorw("terminated connection attempts with l1", "l1", l1ip, "err", err)
				}
			}
		}(l1ip)
	}

	// if we fail to connect to any of the L1s after exhausting all retries; error out and exit.
	go func() {
		for i := 0; i < len(l1IPAddrs); i++ {
			select {
			case <-failedL1Ch:
			case <-ctx.Done():
				return
			}
		}
		log.Error("failed to connect to any of the L1s after exhausting all attempts; shutting down")
		fmt.Println("ERROR: Saturn node failed to connect to the network and has exhausted all retry attempts")
		os.Exit(2)
	}()

	// start go-routine to log L1 connectivity
	go logL1Connectivity(ctx, nConnectedL1s)

	// local HTTP API: /config, /webui, /stats
	m := mux.NewRouter()
	m.PathPrefix("/config").Handler(http.HandlerFunc(configHandler(cfgJson)))
	m.PathPrefix("/webui").Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		webuiHandler(cfg, w, r)
	}))

	m.PathPrefix("/stats").Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ss, err := carserver.sapi.AllStats(r.Context())
		if err != nil {
			http.Error(w, "internal error", http.StatusInternalServerError)
			return
		}
		bz, err := json.Marshal(ss)
		if err != nil {
			http.Error(w, "failed to marshal stats to json", http.StatusInternalServerError)
			return
		}
		w.WriteHeader(http.StatusOK)
		if _, err := w.Write(bz); err != nil {
			http.Error(w, "failed to write stats to response", http.StatusInternalServerError)
		}
	}))

	srv = &http.Server{
		Handler: m,
	}

	// Port 0 lets the OS pick a free port; the actual port is logged below.
	nl, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", cfg.Port))
	if err != nil {
		fmt.Fprintf(os.Stderr, "cannot start the webserver: %s\n", err.Error())
		os.Exit(4)
	}

	port := nl.Addr().(*net.TCPAddr).Port
	log.Infof("Server listening on %v", nl.Addr())
	fmt.Printf("WebUI: http://localhost:%d/webui\n", port)
	fmt.Printf("API: http://localhost:%d/\n", port)

	if err := srv.Serve(nl); err != http.ErrServerClosed {
		fmt.Fprintf(os.Stderr, "error shutting down the server: %s",
err.Error())
	}
}

// logL1Connectivity periodically reports how many L1s this node is connected to,
// printing a line whenever the count changes (or an error line while it is zero).
func logL1Connectivity(ctx context.Context, nConnectedL1s *atomic.Uint64) {
	ticker := time.NewTicker(checkL1ConnectivityInterval)
	defer ticker.Stop()

	lastNConnected := uint64(0)

	// get to the first connectivity event as fast as possible
	go func() {
		for {
			if ctx.Err() != nil {
				return
			}
			nConnected := nConnectedL1s.Load()
			if nConnected != 0 {
				fmt.Printf("INFO: Saturn Node is online and connected to %d peers\n", nConnected)
				return
			}
			// fix: the original loop had no pause and busy-spun a core at 100%
			// until the first L1 connected; poll instead.
			time.Sleep(100 * time.Millisecond)
		}
	}()

	for {
		select {
		case <-ticker.C:
			nConnected := nConnectedL1s.Load()

			// if we are still not connected to any peers -> log it.
			if nConnected == 0 {
				fmt.Print("ERROR: Saturn Node is not able to connect to the network\n")
			} else {
				if nConnected != lastNConnected {
					fmt.Printf("INFO: Saturn Node is online and connected to %d peers\n", nConnected)
				}
			}

			lastNConnected = nConnected
		case <-ctx.Done():
			return
		}
	}
}

// mkConfig assembles the node configuration from environment variables,
// validating each value and falling back to documented defaults.
func mkConfig() (config, error) {
	// parse port
	var port int
	portStr := os.Getenv(PORT_ENV_VAR)
	if portStr == "" {
		port = 0
	} else {
		var err error
		port, err = strconv.Atoi(portStr)
		if err != nil {
			return config{}, fmt.Errorf("failed to parse port value %s: %w", portStr, err)
		}
	}

	// parse FIL address
	filAddr := os.Getenv(FIL_ADDRESS_VAR)
	if filAddr == "" {
		// fix: error strings should not be capitalized, punctuated or end in a
		// newline (go vet / staticcheck ST1005)
		return config{}, fmt.Errorf("no %s provided; please set the environment variable", FIL_ADDRESS_VAR)
	}
	if _, err := address.NewFromString(filAddr); err != nil {
		return config{}, fmt.Errorf("invalid %s format: %w", FIL_ADDRESS_VAR, err)
	}

	// parse max disk space
	maxDiskSpace, err := readIntEnvVar(MAX_DISK_SPACE_VAR, defaultMaxDiskSpace)
	if err != nil {
		return config{}, fmt.Errorf("failed to parse max disk space env var: %w", err)
	}

	// parse root directory
	rootDirStr, err := getRootDir()
	if err != nil {
		return config{}, err
	}
	if _, err := os.Stat(rootDirStr); err != nil {
		if err := os.MkdirAll(rootDirStr, 0777); err != nil {
			return config{}, fmt.Errorf("failed to create default root dir %s, err=%w", rootDirStr, err)
		}
		log.Infow("create default l2 root directory", "dir", rootDirStr)
	}
	log.Infof("Using root dir %s\n", rootDirStr)

	// L1 addressing: either an explicit test IP list, or the discovery API URL
	var l1IPAddrs L1IPAddrs
	var useL1IPAddrs bool
	var durl string
	l1IpStr, exists := os.LookupEnv(TEST_L1_IPS_VAR)
	if exists {
		ips, err := parseL1IPs(l1IpStr)
		if err != nil {
			return config{}, fmt.Errorf("failed to parse L1 IPs environment variable: %w", err)
		}
		l1IPAddrs = ips
		useL1IPAddrs = true
	} else {
		// parse L1 Discovery API URL
		durl, exists = os.LookupEnv(L1_DISCOVERY_URL_VAR)
		if !exists {
			durl = defaultL1DiscoveryURL
		}
		if _, err := url.Parse(durl); err != nil {
			return config{}, fmt.Errorf("l1 discovery api url is invalid, failed to parse, err=%w", err)
		}
	}

	// parse max number of l1s to connect to
	maxL1Conns, err := readIntEnvVar(MAX_L1s_VAR, defaultMaxL1s)
	if err != nil {
		return config{}, fmt.Errorf("failed to parse MAX_L1_CONNECTIONS_VAR env var: %w", err)
	}

	// parse max number of concurrent L1 requests to serve
	maxConcurrentL1Reqs, err :=
readIntEnvVar(MAX_CONCURRENT_L1_REQUESTS_VAR, defaultMaxL1ConcurrentRequests) 463 | if err != nil { 464 | return config{}, fmt.Errorf("failed to parse MAX_CONCURRENT_L1_REQUESTS_VAR env var: %w", err) 465 | } 466 | 467 | return config{ 468 | Port: port, 469 | FilAddr: filAddr, 470 | MaxDiskSpace: maxDiskSpace, 471 | RootDir: rootDirStr, 472 | L1DiscoveryAPIUrl: durl, 473 | MaxL1Connections: int(maxL1Conns), 474 | MaxConcurrentL1Requests: int(maxConcurrentL1Reqs), 475 | UseTestL1IPAddrs: useL1IPAddrs, 476 | TestL1IPAddr: l1IPAddrs, 477 | MaxDownloadPerRequest: uint64(maxDownloadPerRequest), 478 | }, nil 479 | } 480 | 481 | func parseL1IPs(l1IPsStr string) (L1IPAddrs, error) { 482 | l1IPAddrs := strings.Split(l1IPsStr, ",") 483 | if len(l1IPAddrs) == 0 || (len(l1IPAddrs) == 1 && len(l1IPAddrs[0]) == 0) { 484 | return nil, errors.New("need at least one valid L1 IP address") 485 | } 486 | 487 | for _, s := range l1IPAddrs { 488 | if ip := net.ParseIP(s); ip == nil { 489 | return nil, fmt.Errorf("l1 IP is invalid, ip=%s", ip) 490 | } 491 | } 492 | 493 | return l1IPAddrs, nil 494 | } 495 | 496 | func getNearestL1sWithRetry(ctx context.Context, cfg config, maxL1DiscoveryAttempts float64) (L1IPAddrs, error) { 497 | backoff := &backoff.Backoff{ 498 | Min: minL1DiscoveryBackoff, 499 | Max: maxL1DiscoveryBackoff, 500 | Factor: 2, 501 | Jitter: true, 502 | } 503 | 504 | fmt.Println("INFO: Saturn Node will try to connect to the Saturn Orchestrator...") 505 | 506 | for { 507 | l1Addrs, err := getNearestL1s(ctx, cfg) 508 | if err == nil { 509 | return l1Addrs, nil 510 | } 511 | 512 | // if we've exhausted the maximum number of connection attempts with the L1, return. 
513 | if backoff.Attempt() > maxL1DiscoveryAttempts { 514 | log.Errorw("exhausted all attempts to get L1s from orchestrator; not retrying", "err", err) 515 | return nil, err 516 | } 517 | 518 | log.Errorw("failed to get L1s from orchestrator; will retry", "err", err) 519 | fmt.Println("INFO: Saturn Node is unable to connect to the Orchestrator, retrying....") 520 | 521 | // backoff and wait before making a new request to the orchestrator. 522 | duration := backoff.Duration() 523 | bt := time.NewTimer(duration) 524 | defer bt.Stop() 525 | select { 526 | case <-bt.C: 527 | log.Infow("back-off complete, retrying request to orchestrator", 528 | "backoff time", duration.String()) 529 | case <-ctx.Done(): 530 | return nil, ctx.Err() 531 | } 532 | } 533 | } 534 | 535 | func getNearestL1s(ctx context.Context, cfg config) (L1IPAddrs, error) { 536 | l1IPAddrs := []string{} 537 | client := &http.Client{ 538 | Timeout: l1_discovery_timeout, 539 | } 540 | 541 | req, err := http.NewRequest(http.MethodGet, cfg.L1DiscoveryAPIUrl, nil) 542 | if err != nil { 543 | return nil, fmt.Errorf("failed to create request to L1 Discovery API") 544 | } 545 | req = req.WithContext(ctx) 546 | 547 | resp, err := client.Do(req) 548 | if err != nil { 549 | return nil, fmt.Errorf("failed to call l1 discovery API: %w", err) 550 | } 551 | defer resp.Body.Close() 552 | 553 | rd := io.LimitReader(resp.Body, maxL1DiscoveryResponseSize) 554 | l1addrs, err := io.ReadAll(rd) 555 | if err != nil { 556 | return nil, fmt.Errorf("failed to read l1 discovery response: %w", err) 557 | } 558 | 559 | var l1Addrs []L1Addr 560 | if err := json.Unmarshal(l1addrs, &l1Addrs); err != nil { 561 | return nil, fmt.Errorf("failed to unmarshal l1 addresses: %w", err) 562 | } 563 | 564 | for _, s := range l1Addrs { 565 | if ip := net.ParseIP(s.Ip); ip == nil { 566 | return nil, fmt.Errorf("l1 IP returned by L1 Discovery API is invalid, ip=%s", ip) 567 | } 568 | l1IPAddrs = append(l1IPAddrs, s.Ip) 569 | } 570 | 571 | if 
cfg.MaxL1Connections < len(l1IPAddrs) { 572 | l1IPAddrs = l1IPAddrs[:cfg.MaxL1Connections] 573 | } 574 | 575 | return l1IPAddrs, nil 576 | } 577 | 578 | type CARServer struct { 579 | server *carserver.CarServer 580 | store *carstore.CarStore 581 | sapi station.StationAPI 582 | } 583 | 584 | func (cs *CARServer) Start(ctx context.Context) error { 585 | return cs.store.Start(ctx) 586 | } 587 | 588 | func (cs *CARServer) Stop(_ context.Context) error { 589 | return cs.store.Stop() 590 | } 591 | 592 | func buildCarServer(cfg config, logger *logs.SaturnLogger) (*CARServer, error) { 593 | dss, err := newDatastore(filepath.Join(cfg.RootDir, "statestore")) 594 | if err != nil { 595 | return nil, fmt.Errorf("failed to create state datastore: %w", err) 596 | } 597 | 598 | sapi := carserver.NewStationAPIImpl(dss, nil) 599 | gwApi := carstore.NewGatewayAPI(gateway_base_url, sapi, cfg.MaxDownloadPerRequest) 600 | carStoreConfig := carstore.Config{ 601 | MaxCARFilesDiskSpace: int64(cfg.MaxDiskSpace), 602 | } 603 | store, err := carstore.New(cfg.RootDir, gwApi, carStoreConfig, logger) 604 | if err != nil { 605 | return nil, fmt.Errorf("failed to create car store: %w", err) 606 | } 607 | sapi.SetStorageStatsFetcher(store) 608 | 609 | cs := carserver.New(store, logger, sapi) 610 | return &CARServer{ 611 | server: cs, 612 | store: store, 613 | sapi: sapi, 614 | }, nil 615 | } 616 | 617 | func webuiHandler(cfg config, w http.ResponseWriter, r *http.Request) { 618 | rootDir := "webui" 619 | path := strings.TrimPrefix(r.URL.Path, "/") 620 | 621 | if path == rootDir { 622 | targetUrl := fmt.Sprintf("/%s/address/%s", rootDir, cfg.FilAddr) 623 | statusCode := 303 // See Other (a temporary redirect) 624 | http.Redirect(w, r, targetUrl, statusCode) 625 | return 626 | } 627 | 628 | _, err := resources.WebUI.Open(path) 629 | if path == rootDir || os.IsNotExist(err) { 630 | // file does not exist, serve index.html 631 | index, err := resources.WebUI.ReadFile(rootDir + "/index.html") 632 | if 
err != nil { 633 | http.Error(w, err.Error(), http.StatusInternalServerError) 634 | return 635 | } 636 | w.Header().Set("Content-Type", "text/html; charset=utf-8") 637 | w.WriteHeader(http.StatusOK) 638 | if _, err := w.Write(index); err != nil { 639 | http.Error(w, err.Error(), http.StatusInternalServerError) 640 | } 641 | return 642 | } else if err != nil { 643 | http.Error(w, err.Error(), http.StatusInternalServerError) 644 | return 645 | } 646 | 647 | // otherwise, use http.FileServer to serve the static dir 648 | http.FileServer(http.FS(resources.WebUI)).ServeHTTP(w, r) 649 | } 650 | 651 | func configHandler(conf []byte) func(http.ResponseWriter, *http.Request) { 652 | return func(w http.ResponseWriter, r *http.Request) { 653 | w.Header().Set("Content-Type", "application/json; charset=utf-8") 654 | w.WriteHeader(http.StatusOK) 655 | if _, err := w.Write(conf); err != nil { 656 | http.Error(w, err.Error(), http.StatusInternalServerError) 657 | } 658 | } 659 | } 660 | 661 | // newDatastore creates a datastore under the given base directory 662 | // for dagstore metadata. 663 | func newDatastore(dir string) (ds.Batching, error) { 664 | // Create the datastore directory if it doesn't exist yet. 
665 | if err := os.MkdirAll(dir, 0755); err != nil { 666 | return nil, fmt.Errorf("failed to create directory %s: %w", dir, err) 667 | } 668 | 669 | // Create a new LevelDB datastore 670 | dstore, err := levelds.NewDatastore(dir, &levelds.Options{ 671 | Compression: ldbopts.NoCompression, 672 | NoSync: false, 673 | Strict: ldbopts.StrictAll, 674 | ReadOnly: false, 675 | }) 676 | if err != nil { 677 | return nil, fmt.Errorf("failed to open datastore: %w", err) 678 | } 679 | return dstore, nil 680 | } 681 | 682 | func getRootDir() (string, error) { 683 | if dir := os.Getenv(ROOT_DIR_ENV_VAR); dir != "" { 684 | abs, _ := filepath.Abs(dir) 685 | return abs, nil 686 | } 687 | 688 | if runtime.GOOS == "windows" { 689 | if localAppData := os.Getenv("LOCALAPPDATA"); localAppData != "" { 690 | return localAppData + "/saturn", nil 691 | } 692 | 693 | return "", errors.New("invalid Windows environment: LOCALAPPDATA is not set") 694 | } 695 | 696 | if home := os.Getenv("HOME"); home != "" { 697 | return home + "/.saturn", nil 698 | } 699 | 700 | return "", errors.New("invalid environment: HOME is not set") 701 | } 702 | 703 | func createAndPersistL2Id(root string) (uuid.UUID, error) { 704 | path := idFilePath(root) 705 | _ = os.Remove(path) 706 | l2Id := uuid.New() 707 | if err := os.WriteFile(path, []byte(l2Id.String()), 0644); err != nil { 708 | return uuid.UUID{}, err 709 | } 710 | return l2Id, nil 711 | } 712 | 713 | // returns the l2 id by reading it from the id file if it exists, otherwise returns error. 
714 | func readL2IdIfExists(root string) (uuid.UUID, error) { 715 | path := idFilePath(root) 716 | _, err := os.Stat(path) 717 | if err != nil { 718 | return uuid.UUID{}, err 719 | } 720 | 721 | bz, err := os.ReadFile(path) 722 | if err != nil { 723 | return uuid.UUID{}, err 724 | } 725 | 726 | u, err := uuid.Parse(string(bz)) 727 | if err != nil { 728 | return uuid.UUID{}, err 729 | } 730 | 731 | return u, nil 732 | } 733 | 734 | func idFilePath(rootDir string) string { 735 | return filepath.Join(rootDir, idFile) 736 | } 737 | 738 | func readIntEnvVar(name string, defaultVal uint64) (uint64, error) { 739 | valStr := os.Getenv(name) 740 | if valStr == "" { 741 | return defaultVal, nil 742 | } 743 | 744 | val, err := strconv.ParseUint(valStr, 10, 64) 745 | if err != nil { 746 | return 0, fmt.Errorf("failed to parse environment variable %s as integer: %w", valStr, err) 747 | } 748 | if val <= 0 { 749 | return 0, errors.New("integer environment variable must be positive") 750 | } 751 | 752 | return val, nil 753 | } 754 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/filecoin-project/saturn-l2 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/filecoin-project/dagstore v0.5.3-0.20220930091127-95a7d39bc17e 7 | github.com/filecoin-project/go-address v1.0.0 8 | github.com/google/uuid v1.3.0 9 | github.com/gorilla/mux v1.7.4 10 | github.com/ipfs/go-block-format v0.0.3 11 | github.com/ipfs/go-cid v0.1.0 12 | github.com/ipfs/go-datastore v0.5.1 13 | github.com/ipfs/go-ds-leveldb v0.5.0 14 | github.com/ipfs/go-ipfs-blockstore v1.2.0 15 | github.com/ipfs/go-log/v2 v2.5.1 16 | github.com/ipld/go-car/v2 v2.5.1 17 | github.com/ipld/go-ipld-prime v0.16.0 18 | github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 19 | github.com/jpillora/backoff v1.0.0 20 | github.com/patrickmn/go-cache v2.1.0+incompatible 21 | 
github.com/stretchr/testify v1.7.0 22 | github.com/syndtr/goleveldb v1.0.0 23 | go.uber.org/atomic v1.9.0 24 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c 25 | ) 26 | 27 | require ( 28 | github.com/benbjohnson/clock v1.3.0 // indirect 29 | github.com/davecgh/go-spew v1.1.1 // indirect 30 | github.com/gogo/protobuf v1.3.2 // indirect 31 | github.com/golang/snappy v0.0.3 // indirect 32 | github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect 33 | github.com/hashicorp/golang-lru v0.5.4 // indirect 34 | github.com/ipfs/bbloom v0.0.4 // indirect 35 | github.com/ipfs/go-blockservice v0.3.0 // indirect 36 | github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect 37 | github.com/ipfs/go-ipfs-exchange-interface v0.1.0 // indirect 38 | github.com/ipfs/go-ipfs-util v0.0.2 // indirect 39 | github.com/ipfs/go-ipld-cbor v0.0.6 // indirect 40 | github.com/ipfs/go-ipld-format v0.3.0 // indirect 41 | github.com/ipfs/go-ipld-legacy v0.1.1 // indirect 42 | github.com/ipfs/go-log v1.0.5 // indirect 43 | github.com/ipfs/go-merkledag v0.6.0 // indirect 44 | github.com/ipfs/go-metrics-interface v0.0.1 // indirect 45 | github.com/ipfs/go-peertaskqueue v0.7.1 // indirect 46 | github.com/ipfs/go-verifcid v0.0.1 // indirect 47 | github.com/ipld/go-codec-dagpb v1.3.2 // indirect 48 | github.com/jbenet/goprocess v0.1.4 // indirect 49 | github.com/klauspost/cpuid/v2 v2.0.12 // indirect 50 | github.com/libp2p/go-libp2p v0.19.2 // indirect 51 | github.com/libp2p/go-libp2p-core v0.15.1 // indirect 52 | github.com/libp2p/go-libp2p-record v0.1.3 // indirect 53 | github.com/libp2p/go-libp2p-testing v0.9.2 // indirect 54 | github.com/mattn/go-isatty v0.0.14 // indirect 55 | github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect 56 | github.com/minio/sha256-simd v1.0.0 // indirect 57 | github.com/mr-tron/base58 v1.2.0 // indirect 58 | github.com/multiformats/go-base32 v0.0.4 // indirect 59 | github.com/multiformats/go-base36 v0.1.0 // indirect 60 | 
github.com/multiformats/go-multiaddr v0.5.0 // indirect 61 | github.com/multiformats/go-multibase v0.0.3 // indirect 62 | github.com/multiformats/go-multicodec v0.5.0 // indirect 63 | github.com/multiformats/go-multihash v0.1.0 // indirect 64 | github.com/multiformats/go-varint v0.0.6 // indirect 65 | github.com/onsi/ginkgo v1.16.5 // indirect 66 | github.com/opentracing/opentracing-go v1.2.0 // indirect 67 | github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect 68 | github.com/pmezard/go-difflib v1.0.0 // indirect 69 | github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect 70 | github.com/smartystreets/assertions v1.0.1 // indirect 71 | github.com/spaolacci/murmur3 v1.1.0 // indirect 72 | github.com/warpfork/go-testmark v0.9.0 // indirect 73 | github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect 74 | github.com/whyrusleeping/cbor-gen v0.0.0-20220302191723-37c43cae8e14 // indirect 75 | go.uber.org/multierr v1.8.0 // indirect 76 | go.uber.org/zap v1.21.0 // indirect 77 | golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect 78 | golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 // indirect 79 | golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 // indirect 80 | golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect 81 | golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect 82 | google.golang.org/protobuf v1.28.0 // indirect 83 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect 84 | lukechampine.com/blake3 v1.1.7 // indirect 85 | ) 86 | -------------------------------------------------------------------------------- /l1interop/l1sseclient.go: -------------------------------------------------------------------------------- 1 | package l1interop 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "net/http" 11 | "sync" 12 | "time" 13 | 14 | "github.com/filecoin-project/saturn-l2/carstore" 15 | 16 | 
"go.uber.org/atomic" 17 | 18 | "github.com/filecoin-project/saturn-l2/logs" 19 | 20 | "github.com/filecoin-project/saturn-l2/types" 21 | 22 | logging "github.com/ipfs/go-log/v2" 23 | 24 | "github.com/jpillora/backoff" 25 | ) 26 | 27 | var ( 28 | // 5s, 7s, 11s, 16s, 25s, 38s, 1m, 1m30s, 2m, 3m, 7m, 10m, 10m, 10m, 10m 29 | minBackOff = 5 * time.Second 30 | maxBackOff = 10 * time.Minute 31 | factor = 1.5 32 | maxReconnectAttempts = 15 33 | maxPostResponseSize = int64(102400) // 100 Kib 34 | 35 | log = logging.Logger("l1-interop") 36 | ) 37 | 38 | var ( 39 | l1RegisterURL = "https://%s/register/%s" 40 | l1PostURL = "https://%s/data/%s?requestId=%s" 41 | ) 42 | 43 | type L1SseClient struct { 44 | ctx context.Context 45 | cancelF context.CancelFunc 46 | 47 | L1Addr string 48 | 49 | client *http.Client 50 | l2Id string 51 | 52 | minBackOffWait time.Duration 53 | maxBackoffWait time.Duration 54 | backOffFactor float64 55 | maxReconnectAttempts float64 56 | 57 | cs carServer 58 | logger *logs.SaturnLogger 59 | 60 | wg sync.WaitGroup 61 | 62 | semaphore chan struct{} 63 | } 64 | 65 | type carServer interface { 66 | ServeCARFile(ctx context.Context, dr *types.DagTraversalRequest, w io.Writer) error 67 | } 68 | 69 | func New(l2Id string, client *http.Client, logger *logs.SaturnLogger, cs carServer, l1Addr string, maxConcurrentReqs int) *L1SseClient { 70 | ctx, cancel := context.WithCancel(context.Background()) 71 | return &L1SseClient{ 72 | ctx: ctx, 73 | cancelF: cancel, 74 | client: client, 75 | l2Id: l2Id, 76 | minBackOffWait: minBackOff, 77 | maxBackoffWait: maxBackOff, 78 | backOffFactor: factor, 79 | maxReconnectAttempts: float64(maxReconnectAttempts), 80 | logger: logger, 81 | cs: cs, 82 | L1Addr: l1Addr, 83 | semaphore: make(chan struct{}, maxConcurrentReqs), 84 | } 85 | } 86 | 87 | func (l *L1SseClient) Start(nConnectedl1s *atomic.Uint64) error { 88 | backoff := &backoff.Backoff{ 89 | Min: l.minBackOffWait, 90 | Max: l.maxBackoffWait, 91 | Factor: factor, 92 | 
Jitter: true, 93 | } 94 | 95 | l1url := fmt.Sprintf(l1RegisterURL, l.L1Addr, l.l2Id) 96 | 97 | var resp *http.Response 98 | 99 | for { 100 | if resp != nil && resp.Body != nil { 101 | lr := io.LimitReader(resp.Body, maxPostResponseSize) 102 | _, _ = io.Copy(io.Discard, lr) 103 | resp.Body.Close() 104 | } 105 | // if context has already been cancelled, return immediately 106 | if l.ctx.Err() != nil { 107 | return l.ctx.Err() 108 | } 109 | 110 | // construct an http register request to send to the L1 with the given context 111 | req, err := http.NewRequest("GET", l1url, nil) 112 | if err != nil { 113 | return fmt.Errorf("failed to create http req: %w", err) 114 | } 115 | req = req.WithContext(l.ctx) 116 | 117 | doBackOffFn := func() error { 118 | // if we've exhausted the maximum number of connection attempts with the L1, return. 119 | if backoff.Attempt() > l.maxReconnectAttempts { 120 | log.Errorw("failed to connect to l1; exhausted max attempts", "l1", l.L1Addr, "err", err) 121 | return fmt.Errorf("failed to connect to l1 after exhausting max attempts, l1: %s, err: %w", l.L1Addr, err) 122 | } 123 | 124 | // backoff and wait before making a new connection attempt to the L1. 
125 | duration := backoff.Duration() 126 | bt := time.NewTimer(duration) 127 | defer bt.Stop() 128 | select { 129 | case <-bt.C: 130 | log.Infow("back-off complete, retrying http register request to l1", 131 | "backoff time", duration.String(), "l1 Addr", l.L1Addr) 132 | case <-l.ctx.Done(): 133 | log.Errorw("did not retry http request: context cancelled", "err", l.ctx.Err(), "l1", l.L1Addr) 134 | return l.ctx.Err() 135 | } 136 | return nil 137 | } 138 | 139 | // make an http connection with keep alive 140 | resp, err = l.client.Do(req) 141 | if err != nil { 142 | if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { 143 | return l.ctx.Err() 144 | } 145 | log.Errorw("failed to send register request to l1; will backoff and retry", "l1", l.L1Addr, "err", err) 146 | 147 | if err := doBackOffFn(); err != nil { 148 | return err 149 | } 150 | continue 151 | } 152 | defer resp.Body.Close() 153 | 154 | // return immediately if we got a 4xx response status code from the L1. 
155 | if resp.StatusCode/100 == 4 { 156 | log.Errorw("http registration request to L1 failed with non-retryable status code; returning", 157 | "status code", resp.StatusCode, "l1", l.L1Addr) 158 | return fmt.Errorf("terminating http request: received %d response from L1", resp.StatusCode) 159 | } 160 | 161 | // if we got anything other than a 200 for the registration -> retry 162 | if resp.StatusCode != http.StatusOK { 163 | log.Errorw("http registration request to l1 got invalid status code; will backoff and retry registration", 164 | "code", resp.StatusCode, "l1", l.L1Addr) 165 | if err := doBackOffFn(); err != nil { 166 | return err 167 | } 168 | continue 169 | } 170 | 171 | // we've registered successfully -> reset the backoff counter 172 | backoff.Reset() 173 | n := nConnectedl1s.Inc() 174 | log.Infow("new L1 connection established", "l1", l.L1Addr, "nL1sConnected", n) 175 | 176 | // we've successfully connected to the L1, start reading new line delimited json requests for CAR files 177 | scanner := bufio.NewScanner(resp.Body) 178 | for scanner.Scan() { 179 | reqJSON := scanner.Text() 180 | if len(reqJSON) == 0 { 181 | continue 182 | } 183 | 184 | log.Infow("received request from L1", "l1", l.L1Addr, "json", reqJSON) 185 | 186 | var carReq types.CARTransferRequest 187 | if err := json.Unmarshal([]byte(reqJSON), &carReq); err != nil { 188 | nConnectedl1s.Dec() 189 | return fmt.Errorf("could not unmarshal l1 request: req=%s, err=%w", reqJSON, err) 190 | } 191 | 192 | dr, err := carReq.ToDAGRequest() 193 | if err != nil { 194 | nConnectedl1s.Dec() 195 | return fmt.Errorf("could not parse car transfer request,err=%w", err) 196 | } 197 | 198 | l.logger.Infow(dr.RequestId, "parsed CAR transfer request received from L1", "l1", l.L1Addr, "req", dr) 199 | 200 | log.Debugw("will try to acquire semaphore for l1 request", "l1", l.L1Addr) 201 | select { 202 | case l.semaphore <- struct{}{}: 203 | log.Debugw("successfully acquired semaphore for l1 request", "l1", l.L1Addr) 
204 | case <-l.ctx.Done(): 205 | nConnectedl1s.Dec() 206 | return l.ctx.Err() 207 | } 208 | 209 | l.wg.Add(1) 210 | go func() { 211 | defer l.wg.Done() 212 | defer func() { 213 | select { 214 | case <-l.semaphore: 215 | log.Debugw("successfully released semaphore for l1 request", "l1", l.L1Addr) 216 | case <-l.ctx.Done(): 217 | return 218 | } 219 | }() 220 | 221 | if err := l.sendCarResponse(l.ctx, l.L1Addr, dr); err != nil { 222 | if !errors.Is(err, carstore.ErrNotFound) { 223 | l.logger.Errorw(dr.RequestId, "failed to send CAR file to L1 using Post", "err", err, "l1", l.L1Addr) 224 | } else { 225 | l.logger.Infow(dr.RequestId, "not sending CAR over POST", "err", err, "l1", l.L1Addr) 226 | } 227 | } 228 | }() 229 | } 230 | 231 | if err := scanner.Err(); err != nil { 232 | log.Errorw("error while reading l1 requests; will reconnect and retry", "err", err) 233 | } 234 | 235 | n = nConnectedl1s.Dec() 236 | log.Infow("lost connection to L1", "l1", l.L1Addr, "nL1sConnected", n) 237 | } 238 | } 239 | 240 | func (l *L1SseClient) Stop() { 241 | l.cancelF() 242 | l.wg.Wait() 243 | } 244 | 245 | func (l *L1SseClient) sendCarResponse(ctx context.Context, l1Addr string, dr *types.DagTraversalRequest) error { 246 | respUrl := fmt.Sprintf(l1PostURL, l1Addr, dr.Root.String(), dr.RequestId.String()) 247 | 248 | prd, pw := io.Pipe() 249 | defer prd.Close() 250 | defer pw.Close() 251 | 252 | l.wg.Add(1) 253 | go func() { 254 | defer l.wg.Done() 255 | err := l.cs.ServeCARFile(ctx, dr, pw) 256 | _ = pw.CloseWithError(err) 257 | }() 258 | 259 | req, err := http.NewRequest(http.MethodPost, respUrl, prd) 260 | if err != nil { 261 | _ = prd.CloseWithError(err) 262 | return fmt.Errorf("failed to create http post request to send back car to L1; url=%s; err=%w", respUrl, err) 263 | } 264 | req = req.WithContext(ctx) 265 | 266 | resp, err := l.client.Do(req) 267 | if err != nil { 268 | _ = prd.CloseWithError(err) 269 | return fmt.Errorf("failed to send http post request with CAR to 
L1;url=%s, err=%w", respUrl, err) 270 | } 271 | defer resp.Body.Close() 272 | _ = prd.Close() 273 | 274 | lr := io.LimitReader(resp.Body, maxPostResponseSize) 275 | _, _ = io.Copy(io.Discard, lr) 276 | 277 | if resp.StatusCode != http.StatusOK { 278 | return fmt.Errorf("got status code %d from L1 for POST url %s , expected %d", resp.StatusCode, respUrl, http.StatusOK) 279 | } 280 | 281 | l.logger.Infow(dr.RequestId, "successfully sent CAR file to L1", "l1", l1Addr, "url", respUrl) 282 | return nil 283 | } 284 | -------------------------------------------------------------------------------- /l1interop/l1sseclient_test.go: -------------------------------------------------------------------------------- 1 | package l1interop 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/tls" 7 | "encoding/json" 8 | "fmt" 9 | "io" 10 | "net/http" 11 | "net/http/httptest" 12 | "sync" 13 | "testing" 14 | "time" 15 | 16 | "go.uber.org/atomic" 17 | 18 | "github.com/stretchr/testify/require" 19 | 20 | cid "github.com/ipfs/go-cid" 21 | 22 | "github.com/filecoin-project/saturn-l2/carstore" 23 | 24 | "github.com/filecoin-project/saturn-l2/types" 25 | _ "github.com/ipld/go-ipld-prime/codec/dagcbor" 26 | 27 | "github.com/filecoin-project/saturn-l2/testutils" 28 | 29 | "github.com/filecoin-project/saturn-l2/logs" 30 | "github.com/google/uuid" 31 | "github.com/gorilla/mux" 32 | ) 33 | 34 | func TestSingleL1Simple(t *testing.T) { 35 | l2Id := uuid.New().String() 36 | 37 | h := buildHarness(t, l2Id, 1) 38 | h.Start() 39 | defer h.Stop() 40 | 41 | req := uuid.New() 42 | h.sendReq(req, h.cid1, 0) 43 | 44 | require.Eventually(t, func() bool { 45 | return h.hasRecievedCAR(req.String(), h.cid1.String(), 0) 46 | }, 5*time.Second, 200*time.Millisecond) 47 | 48 | require.EqualValues(t, 1, h.nL1sConnected.Load()) 49 | 50 | } 51 | 52 | func TestL1ConcurrentRequests(t *testing.T) { 53 | l2Id := uuid.New().String() 54 | 55 | h := buildHarness(t, l2Id, 1) 56 | h.Start() 57 | defer h.Stop() 58 | 59 | // 
send 50 requests and assert we get back 20 responses 60 | var wg sync.WaitGroup 61 | var reqs []string 62 | for i := 0; i < 50; i++ { 63 | req := uuid.New() 64 | reqs = append(reqs, req.String()) 65 | var cid cid.Cid 66 | if i%2 == 0 { 67 | cid = h.cid1 68 | } else { 69 | cid = h.cid2 70 | } 71 | 72 | wg.Add(1) 73 | go func() { 74 | defer wg.Done() 75 | h.sendReq(req, cid, 0) 76 | }() 77 | } 78 | 79 | wg.Wait() 80 | 81 | require.Eventually(t, func() bool { 82 | for i := 0; i < 50; i++ { 83 | id := reqs[i] 84 | var cid cid.Cid 85 | if i%2 == 0 { 86 | cid = h.cid1 87 | } else { 88 | cid = h.cid2 89 | } 90 | 91 | ok := h.hasRecievedCAR(id, cid.String(), 0) 92 | if !ok { 93 | return false 94 | } 95 | 96 | } 97 | return true 98 | }, 5*time.Second, 200*time.Millisecond) 99 | 100 | require.EqualValues(t, 1, h.nL1sConnected.Load()) 101 | } 102 | 103 | func TestMultipleL1ConcurrentRequests(t *testing.T) { 104 | l2Id := uuid.New().String() 105 | nL1s := 2 106 | 107 | h := buildHarness(t, l2Id, nL1s) 108 | h.Start() 109 | defer h.Stop() 110 | 111 | // send 50 requests and assert we get back 20 responses 112 | var wg sync.WaitGroup 113 | var reqs []string 114 | for i := 0; i < 50; i++ { 115 | i := i 116 | req := uuid.New() 117 | reqs = append(reqs, req.String()) 118 | var cid cid.Cid 119 | if i%2 == 0 { 120 | cid = h.cid1 121 | } else { 122 | cid = h.cid2 123 | } 124 | 125 | wg.Add(1) 126 | go func() { 127 | defer wg.Done() 128 | h.sendReq(req, cid, i%nL1s) 129 | }() 130 | } 131 | 132 | wg.Wait() 133 | 134 | require.Eventually(t, func() bool { 135 | for i := 0; i < 50; i++ { 136 | id := reqs[i] 137 | var cid cid.Cid 138 | if i%2 == 0 { 139 | cid = h.cid1 140 | } else { 141 | cid = h.cid2 142 | } 143 | 144 | ok := h.hasRecievedCAR(id, cid.String(), i%nL1s) 145 | if !ok { 146 | return false 147 | } 148 | 149 | } 150 | return true 151 | }, 10*time.Second, 200*time.Millisecond) 152 | 153 | require.EqualValues(t, 2, h.nL1sConnected.Load()) 154 | } 155 | 156 | type l1State struct { 
157 | svc *httptest.Server 158 | 159 | requests chan types.CARTransferRequest 160 | 161 | mu sync.Mutex 162 | gotPosts map[string][]byte 163 | } 164 | 165 | type l1Harness struct { 166 | cid1 cid.Cid 167 | cid2 cid.Cid 168 | 169 | emu sync.Mutex 170 | expectedContent map[string][]byte 171 | 172 | t *testing.T 173 | ctx context.Context 174 | cancelF context.CancelFunc 175 | 176 | mu sync.Mutex 177 | l1Clients map[int]*L1SseClient 178 | l1s map[int]*l1State 179 | 180 | nL1sConnected *atomic.Uint64 181 | } 182 | 183 | func (h *l1Harness) Start() { 184 | for _, l1 := range h.l1s { 185 | l1 := l1 186 | l1.svc.StartTLS() 187 | } 188 | 189 | for _, l1Client := range h.l1Clients { 190 | l1Client := l1Client 191 | go l1Client.Start(h.nL1sConnected) // nolint 192 | } 193 | } 194 | 195 | func (h *l1Harness) Stop() { 196 | h.cancelF() 197 | 198 | h.mu.Lock() 199 | defer h.mu.Unlock() 200 | 201 | for _, l1 := range h.l1s { 202 | l1 := l1 203 | l1.svc.Close() 204 | } 205 | 206 | for _, l1Client := range h.l1Clients { 207 | l1Client := l1Client 208 | l1Client.Stop() 209 | } 210 | } 211 | 212 | func (h *l1Harness) hasRecievedCAR(reqId string, root string, l1 int) bool { 213 | h.mu.Lock() 214 | l1S := h.l1s[l1] 215 | l1S.mu.Lock() 216 | 217 | actualBz, ok := l1S.gotPosts[root+reqId] 218 | l1S.mu.Unlock() 219 | h.mu.Unlock() 220 | if !ok { 221 | return false 222 | } 223 | 224 | h.emu.Lock() 225 | expectedBz, ok := h.expectedContent[root] 226 | h.emu.Unlock() 227 | if !ok { 228 | return false 229 | } 230 | 231 | return bytes.Equal(expectedBz, actualBz) 232 | } 233 | 234 | func (h *l1Harness) sendReq(reqId uuid.UUID, root cid.Cid, l1 int) { 235 | h.mu.Lock() 236 | defer h.mu.Unlock() 237 | 238 | l1S := h.l1s[l1] 239 | 240 | l1S.requests <- types.CARTransferRequest{ 241 | RequestId: reqId.String(), 242 | Root: root.String(), 243 | } 244 | } 245 | 246 | func buildHarness(t *testing.T, l2Id string, nL1s int) *l1Harness { 247 | hCtx, cancel := context.WithCancel(context.Background()) 248 
| carFile1 := "../testdata/files/sample-v1.car" 249 | rootcid1, bz1 := testutils.ParseCar(t, hCtx, carFile1) 250 | carFile2 := "../testdata/files/sample-rw-bs-v2.car" 251 | rootcid2, bz2 := testutils.ParseCar(t, hCtx, carFile2) 252 | 253 | tr := &http.Transport{ 254 | TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, 255 | } 256 | client := &http.Client{ 257 | Transport: tr, 258 | } 259 | lg := logs.NewSaturnLogger() 260 | 261 | h := &l1Harness{ 262 | cancelF: cancel, 263 | cid1: rootcid1, 264 | cid2: rootcid2, 265 | ctx: hCtx, 266 | t: t, 267 | expectedContent: make(map[string][]byte), 268 | l1s: make(map[int]*l1State), 269 | l1Clients: make(map[int]*L1SseClient), 270 | nL1sConnected: atomic.NewUint64(0), 271 | } 272 | h.expectedContent[rootcid1.String()] = bz1 273 | h.expectedContent[rootcid2.String()] = bz2 274 | ms := newMockCarServer(t, hCtx) 275 | 276 | // Build the L1 HTTP Server 277 | for i := 0; i < nL1s; i++ { 278 | l1 := &l1State{ 279 | gotPosts: make(map[string][]byte), 280 | requests: make(chan types.CARTransferRequest, 100), 281 | } 282 | 283 | mu := mux.NewRouter() 284 | mu.HandleFunc("/data/{root}", func(w http.ResponseWriter, r *http.Request) { 285 | vars := mux.Vars(r) 286 | rootCid := vars["root"] 287 | 288 | vs := r.URL.Query() 289 | requestId := vs.Get("requestId") 290 | 291 | bz, err := io.ReadAll(r.Body) 292 | if err != nil { 293 | return 294 | } 295 | r.Body.Close() 296 | l1.mu.Lock() 297 | l1.gotPosts[rootCid+requestId] = bz 298 | l1.mu.Unlock() 299 | }) 300 | 301 | mu.HandleFunc("/register/{l2Id}", func(w http.ResponseWriter, r *http.Request) { 302 | w.WriteHeader(http.StatusOK) 303 | vars := mux.Vars(r) 304 | l2Idr := vars["l2Id"] 305 | if l2Idr != l2Id { 306 | return 307 | } 308 | for { 309 | select { 310 | case cr := <-l1.requests: 311 | bz, err := json.Marshal(cr) 312 | if err != nil { 313 | return 314 | } 315 | if _, err := w.Write(bz); err != nil { 316 | fmt.Println("ERROR when writing to L2", err.Error()) 317 | return 318 | } 
319 | if _, err := w.Write([]byte("\n")); err != nil { 320 | fmt.Println("ERROR when writing to L2", err.Error()) 321 | return 322 | } 323 | if f, ok := w.(http.Flusher); ok { 324 | f.Flush() 325 | } 326 | case <-hCtx.Done(): 327 | return 328 | } 329 | } 330 | }) 331 | 332 | svc := httptest.NewUnstartedServer(mu) 333 | l1.svc = svc 334 | 335 | h.l1Clients[i] = New(l2Id, client, lg, ms, svc.Listener.Addr().String(), 3) 336 | h.l1s[i] = l1 337 | } 338 | 339 | return h 340 | } 341 | 342 | type mockCarServer struct { 343 | cars map[string][]byte 344 | } 345 | 346 | func newMockCarServer(t *testing.T, ctx context.Context) *mockCarServer { 347 | carFile1 := "../testdata/files/sample-v1.car" 348 | rootcid1, bz1 := testutils.ParseCar(t, ctx, carFile1) 349 | carFile2 := "../testdata/files/sample-rw-bs-v2.car" 350 | rootcid2, bz2 := testutils.ParseCar(t, ctx, carFile2) 351 | 352 | m := make(map[string][]byte) 353 | 354 | m[rootcid1.String()] = bz1 355 | m[rootcid2.String()] = bz2 356 | 357 | return &mockCarServer{ 358 | cars: m, 359 | } 360 | } 361 | 362 | func (mc *mockCarServer) ServeCARFile(ctx context.Context, dr *types.DagTraversalRequest, w io.Writer) error { 363 | r := dr.Root.String() 364 | 365 | bz, ok := mc.cars[r] 366 | if !ok { 367 | return carstore.ErrNotFound 368 | } 369 | if _, err := w.Write(bz); err != nil { 370 | return err 371 | } 372 | return nil 373 | } 374 | -------------------------------------------------------------------------------- /logs/logs.go: -------------------------------------------------------------------------------- 1 | package logs 2 | 3 | import ( 4 | "github.com/google/uuid" 5 | logging "github.com/ipfs/go-log/v2" 6 | ) 7 | 8 | var base = logging.Logger("saturn-l2") 9 | 10 | type SaturnLogger struct { 11 | logger *logging.ZapEventLogger 12 | subsystem string 13 | } 14 | 15 | func NewSaturnLogger() *SaturnLogger { 16 | return &SaturnLogger{ 17 | logger: base, 18 | } 19 | } 20 | 21 | func (s *SaturnLogger) Subsystem(name string) 
*SaturnLogger { 22 | return &SaturnLogger{ 23 | logger: logging.Logger(s.subsystem + name), 24 | subsystem: name, 25 | } 26 | } 27 | 28 | func (s *SaturnLogger) Debugw(reqID uuid.UUID, msg string, kvs ...interface{}) { 29 | kvs = paramsWithReqID(reqID, kvs...) 30 | s.logger.Debugw(msg, kvs...) 31 | } 32 | 33 | func (s *SaturnLogger) Infow(reqID uuid.UUID, msg string, kvs ...interface{}) { 34 | kvs = paramsWithReqID(reqID, kvs...) 35 | s.logger.Infow(msg, kvs...) 36 | } 37 | 38 | func (s *SaturnLogger) Warnw(reqID uuid.UUID, msg string, kvs ...interface{}) { 39 | kvs = paramsWithReqID(reqID, kvs...) 40 | s.logger.Warnw(msg, kvs...) 41 | } 42 | 43 | func (s *SaturnLogger) Errorw(reqID uuid.UUID, errMsg string, kvs ...interface{}) { 44 | kvs = paramsWithReqID(reqID, kvs...) 45 | 46 | s.logger.Errorw(errMsg, kvs...) 47 | } 48 | 49 | func (s *SaturnLogger) LogError(reqID uuid.UUID, errMsg string, err error) { 50 | s.Errorw(reqID, errMsg, "err", err.Error()) 51 | } 52 | 53 | func paramsWithReqID(reqID uuid.UUID, kvs ...interface{}) []interface{} { 54 | kvs = append([]interface{}{"id", reqID}, kvs...) 
55 | return kvs 56 | } 57 | -------------------------------------------------------------------------------- /resources/resources.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "embed" 5 | _ "embed" 6 | ) 7 | 8 | // webui folder is empty during local development, embed resources.go 9 | // so go doesn't complain about "no embeddable files" 10 | // 11 | //go:embed webui resources.go 12 | var WebUI embed.FS 13 | -------------------------------------------------------------------------------- /resources/webui/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "webui", 3 | "lockfileVersion": 2, 4 | "requires": true, 5 | "packages": {} 6 | } 7 | -------------------------------------------------------------------------------- /scripts/download-webui.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | OUTDIR=$(dirname $0)/../resources/webui/ 4 | 5 | echo "⇣ Downloading webui dist to $OUTDIR ⇣" 6 | 7 | mkdir -p $OUTDIR 8 | 9 | # Downloads latest release 10 | url=$(curl -s https://api.github.com/repos/filecoin-saturn/node-webui/releases/latest | \ 11 | jq -r '.assets[] | select(.name|match("saturn-webui.tar.gz$")) | .browser_download_url') 12 | 13 | curl -L $url | tar -zx -C $OUTDIR 14 | -------------------------------------------------------------------------------- /station/station.go: -------------------------------------------------------------------------------- 1 | package station 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | type StationAPI interface { 8 | RecordRetrievalServed(ctx context.Context, bytesServed, nErrors, nNotFound, nSuccess uint64) error 9 | AllStats(ctx context.Context) (StationStats, error) 10 | RecordDataDownloaded(ctx context.Context, bytesDownloaded uint64) error 11 | } 12 | 13 | type StorageStatsFetcher interface { 14 | Stat() 
(StorageStats, error) 15 | } 16 | 17 | type StationStats struct { 18 | RPInfo 19 | StorageStats 20 | ReqStats 21 | } 22 | 23 | type RPInfo struct { 24 | Version string 25 | } 26 | 27 | type StorageStats struct { 28 | BytesCurrentlyStored uint64 29 | } 30 | 31 | type ReqStats struct { 32 | TotalBytesUploaded uint64 33 | TotalBytesDownloaded uint64 34 | NContentRequests uint64 35 | NContentNotFoundReqs uint64 36 | NSuccessfulRetrievals uint64 37 | 38 | NContentReqErrors uint64 39 | } 40 | -------------------------------------------------------------------------------- /testdata/files/junk.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-saturn/L2-node/75912be52ce81c0a4365cce6de5b0701cac52a56/testdata/files/junk.dat -------------------------------------------------------------------------------- /testdata/files/sample-rw-bs-v2.car: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-saturn/L2-node/75912be52ce81c0a4365cce6de5b0701cac52a56/testdata/files/sample-rw-bs-v2.car -------------------------------------------------------------------------------- /testdata/files/sample-v1.car: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-saturn/L2-node/75912be52ce81c0a4365cce6de5b0701cac52a56/testdata/files/sample-v1.car -------------------------------------------------------------------------------- /testdata/files/sample-wrapped-v2.car: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-saturn/L2-node/75912be52ce81c0a4365cce6de5b0701cac52a56/testdata/files/sample-wrapped-v2.car -------------------------------------------------------------------------------- /testdata/testdata.go: -------------------------------------------------------------------------------- 1 | package testdata 
2 | 3 | import ( 4 | "bytes" 5 | "embed" 6 | "fmt" 7 | 8 | "github.com/ipfs/go-cid" 9 | "github.com/ipld/go-car/v2" 10 | ) 11 | 12 | const ( 13 | FSPathCarV1 = "files/sample-v1.car" 14 | FSPathCarV2 = "files/sample-wrapped-v2.car" 15 | FSPathCarV22 = "files/sample-rw-bs-v2.car" 16 | FSPathJunk = "files/junk.dat" 17 | 18 | RootPathCarV1 = "testdata/files/sample-v1.car" 19 | RootPathCarV2 = "testdata/files/sample-wrapped-v2.car" 20 | RootPathJunk = "testdata/files/funk.dat" 21 | ) 22 | 23 | var ( 24 | //go:embed files/* 25 | FS embed.FS 26 | 27 | CarV1 []byte 28 | CarV2 []byte 29 | Junk []byte 30 | 31 | // RootCID is the root CID of the carv2 for testing. 32 | RootCID cid.Cid 33 | ) 34 | 35 | func init() { 36 | var err error 37 | CarV1, err = FS.ReadFile(FSPathCarV1) 38 | if err != nil { 39 | panic(err) 40 | } 41 | 42 | CarV2, err = FS.ReadFile(FSPathCarV2) 43 | if err != nil { 44 | panic(err) 45 | } 46 | 47 | Junk, err = FS.ReadFile(FSPathJunk) 48 | if err != nil { 49 | panic(err) 50 | } 51 | 52 | reader, err := car.NewReader(bytes.NewReader(CarV2)) 53 | if err != nil { 54 | panic(fmt.Errorf("failed to parse carv2: %w", err)) 55 | } 56 | defer reader.Close() 57 | 58 | roots, err := reader.Roots() 59 | if err != nil { 60 | panic(fmt.Errorf("failed to obtain carv2 roots: %w", err)) 61 | } 62 | if len(roots) == 0 { 63 | panic("carv2 has no roots") 64 | } 65 | RootCID = roots[0] 66 | } 67 | -------------------------------------------------------------------------------- /testutils/helpers.go: -------------------------------------------------------------------------------- 1 | package testutils 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | "time" 10 | 11 | car "github.com/ipld/go-car/v2" 12 | cidlink "github.com/ipld/go-ipld-prime/linking/cid" 13 | "github.com/ipld/go-ipld-prime/storage/bsadapter" 14 | selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" 15 | 16 | cid "github.com/ipfs/go-cid" 17 | 
carv2bs "github.com/ipld/go-car/v2/blockstore" 18 | "github.com/stretchr/testify/require" 19 | ) 20 | 21 | func ParseCar(t *testing.T, ctx context.Context, path string) (cid.Cid, []byte) { 22 | from, err := carv2bs.OpenReadOnly(path) 23 | require.NoError(t, err) 24 | rts, err := from.Roots() 25 | require.NoError(t, err) 26 | 27 | ls := cidlink.DefaultLinkSystem() 28 | bsa := bsadapter.Adapter{Wrapped: from} 29 | ls.SetReadStorage(&bsa) 30 | 31 | w := bytes.NewBuffer(nil) 32 | _, err = car.TraverseV1(ctx, &ls, rts[0], selectorparse.CommonSelector_ExploreAllRecursively, w) 33 | require.NoError(t, err) 34 | 35 | require.NoError(t, from.Close()) 36 | 37 | return rts[0], w.Bytes() 38 | } 39 | 40 | func GetTestServerFor(t *testing.T, ctx context.Context, path string) (cid.Cid, []byte, *httptest.Server) { 41 | root, contents := ParseCar(t, ctx, path) 42 | return root, contents, GetTestServer(t, root.String(), contents) 43 | } 44 | 45 | func GetTestHangingServer(t *testing.T) *httptest.Server { 46 | return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 47 | time.Sleep(24 * time.Hour) 48 | })) 49 | } 50 | 51 | func GetTestServerForRoots(t *testing.T, out map[string][]byte) *httptest.Server { 52 | return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 53 | q := r.URL.Query() 54 | v := q.Get("arg") 55 | if len(v) == 0 { 56 | http.Error(w, "invalid arg", http.StatusBadRequest) 57 | return 58 | } 59 | bz, ok := out[v] 60 | if !ok { 61 | http.Error(w, "invalid arg", http.StatusBadRequest) 62 | return 63 | } 64 | _, err := w.Write(bz) 65 | if err != nil { 66 | http.Error(w, err.Error(), http.StatusInternalServerError) 67 | } 68 | })) 69 | } 70 | 71 | func GetTestServer(t *testing.T, root string, out []byte) *httptest.Server { 72 | return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 73 | time.Sleep(1 * time.Second) 74 | q := r.URL.Query() 75 | v := q.Get("arg") 76 | if 
len(v) == 0 || v != root { 77 | http.Error(w, "invalid arg", http.StatusBadRequest) 78 | return 79 | } 80 | if _, err := w.Write(out); err != nil { 81 | http.Error(w, err.Error(), http.StatusInternalServerError) 82 | } 83 | })) 84 | } 85 | 86 | func GetTestErrorServer(t *testing.T) *httptest.Server { 87 | return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 88 | http.Error(w, "bad req", http.StatusInternalServerError) 89 | })) 90 | } 91 | -------------------------------------------------------------------------------- /types/types.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "fmt" 5 | 6 | selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" 7 | 8 | "github.com/google/uuid" 9 | 10 | cid "github.com/ipfs/go-cid" 11 | ipld "github.com/ipld/go-ipld-prime" 12 | ) 13 | 14 | // CARTransferRequest is the request sent by the client to transfer a CAR file 15 | // for the given root and selector. 16 | type CARTransferRequest struct { 17 | RequestId string 18 | Root string 19 | SkipOffset uint64 20 | } 21 | 22 | func (c *CARTransferRequest) ToDAGRequest() (*DagTraversalRequest, error) { 23 | rootCid, err := cid.Decode(c.Root) 24 | if err != nil { 25 | return nil, fmt.Errorf("failed to parse cid: %w", err) 26 | } 27 | 28 | // use the default "select all" selector for now. 
29 | sel := selectorparse.CommonSelector_ExploreAllRecursively 30 | 31 | reqId, err := uuid.Parse(c.RequestId) 32 | if err != nil { 33 | return nil, fmt.Errorf("failed to parse uuid: %w", err) 34 | } 35 | 36 | return &DagTraversalRequest{ 37 | RequestId: reqId, 38 | Root: rootCid, 39 | Selector: sel, 40 | }, nil 41 | } 42 | 43 | type DagTraversalRequest struct { 44 | RequestId uuid.UUID 45 | Root cid.Cid 46 | Selector ipld.Node 47 | } 48 | -------------------------------------------------------------------------------- /types/types_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | cid "github.com/ipfs/go-cid" 8 | 9 | "github.com/google/uuid" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | var root = "QmfMYyn8LUWEfRXfijKFjBAshSsPVRUgwLZzsD7kcTtX1A" 14 | 15 | func TestCarTransferRequest(t *testing.T) { 16 | c, err := cid.Decode(root) 17 | require.NoError(t, err) 18 | 19 | tcs := map[string]struct { 20 | cr CARTransferRequest 21 | isError bool 22 | }{ 23 | "invalid cid": { 24 | cr: CARTransferRequest{ 25 | RequestId: uuid.New().String(), 26 | Root: "test", 27 | }, 28 | isError: true, 29 | }, 30 | "invalid uuid": { 31 | cr: CARTransferRequest{ 32 | RequestId: "blah", 33 | Root: c.String(), 34 | }, 35 | isError: true, 36 | }, 37 | "valid request": { 38 | cr: CARTransferRequest{ 39 | Root: c.String(), 40 | RequestId: uuid.New().String(), 41 | }, 42 | isError: false, 43 | }, 44 | } 45 | 46 | for name, tc := range tcs { 47 | t.Run(name, func(t *testing.T) { 48 | bz, err := json.Marshal(tc.cr) 49 | require.NoError(t, err) 50 | 51 | var cr CARTransferRequest 52 | require.NoError(t, json.Unmarshal(bz, &cr)) 53 | require.EqualValues(t, cr.Root, tc.cr.Root) 54 | 55 | dr, err := tc.cr.ToDAGRequest() 56 | if tc.isError { 57 | require.Error(t, err) 58 | require.Nil(t, dr) 59 | } else { 60 | require.NoError(t, err) 61 | require.NotNil(t, dr) 62 | 
require.EqualValues(t, tc.cr.RequestId, dr.RequestId.String()) 63 | require.EqualValues(t, tc.cr.Root, dr.Root.String()) 64 | } 65 | }) 66 | } 67 | } 68 | --------------------------------------------------------------------------------