├── .prettierignore
├── .github
│   ├── CODEOWNERS
│   └── workflows
│       ├── dependabot-auto-merge.yml
│       ├── ci.yml
│       ├── dependabot-auto-approve-minor.yml
│       └── publish.yml
├── FUNDING.json
├── eslint.config.js
├── test.js
├── README.md
├── test
│   ├── drand-client.test.js
│   ├── ipni-client.test.js
│   ├── miner-info.test.js
│   ├── http-assertions.test.js
│   ├── tasker.test.js
│   ├── integration.js
│   ├── multiaddr.test.js
│   └── spark.js
├── main.js
├── package.json
├── lib
│   ├── constants.js
│   ├── activity-state.js
│   ├── http-assertions.js
│   ├── miner-info.js
│   ├── drand-client.js
│   ├── ipni-client.js
│   ├── multiaddr.js
│   ├── tasker.js
│   └── spark.js
├── release.sh
├── deps.ts
├── manual-check.js
├── .gitignore
└── LICENSE

--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
/vendor

--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
* @bajtos @juliangruber

--------------------------------------------------------------------------------
/FUNDING.json:
--------------------------------------------------------------------------------
{
  "drips": {
    "filecoin": {
      "ownedBy": "0xDe946319e3dBA67b58bd771de01AF8aCCafcDA9d"
    }
  }
}

--------------------------------------------------------------------------------
/eslint.config.js:
--------------------------------------------------------------------------------
import neostandard from 'neostandard'

export default neostandard({
  noStyle: true, // Disable style-related rules, we use Prettier
  ts: true,
  ignores: ['vendor/**', 'deps.ts'],
})

--------------------------------------------------------------------------------
/test.js:
--------------------------------------------------------------------------------
import './test/drand-client.test.js'
import './test/http-assertions.test.js'
import './test/ipni-client.test.js'
import './test/miner-info.test.js'
import './test/multiaddr.test.js'
import './test/tasker.test.js'

import './test/integration.js'
import './test/spark.js'

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# spark

SP Retrieval Checker Module

> [!CAUTION]
>
> **This repository is no longer maintained.**
>
> Filecoin Spark and Checker Network continue to operate in a permissioned architecture.
> See the [announcement](https://x.com/FilecoinCDN/status/1932472254245298504) for more details.

--------------------------------------------------------------------------------
/test/drand-client.test.js:
--------------------------------------------------------------------------------
import { test } from 'zinnia:test'
import { assertEquals } from 'zinnia:assert'
import { getRandomnessForSparkRound } from '../lib/drand-client.js'

test('getRandomnessForSparkRound', async () => {
  const randomness = await getRandomnessForSparkRound(4111111)
  assertEquals(
    randomness,
    'fc90e50dcdf20886b56c038b30fa921a5e57c532ea448dadcc209e44eec0445e',
  )
})

--------------------------------------------------------------------------------
/main.js:
--------------------------------------------------------------------------------
/* global Zinnia */

Zinnia.activity.error(
  'Spark update: Filecoin Station and Checker Network programmes ended. The node is no longer contributing to the network, and there will be no further rewards. Thank you for your participation!',
)
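
// There is no more work to do; sleep forever so the runtime keeps showing the notice above.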
while (true) {
  await new Promise((resolve) => setTimeout(resolve, 60_000))
}

// import Spark from './lib/spark.js'
// const spark = new Spark()
// await spark.run()

--------------------------------------------------------------------------------
/.github/workflows/dependabot-auto-merge.yml:
--------------------------------------------------------------------------------
name: Dependabot auto-merge
on: pull_request

permissions:
  contents: write
  pull-requests: write

jobs:
  dependabot:
    runs-on: ubuntu-latest
    if: ${{ github.actor == 'dependabot[bot]' }}
    steps:
      - name: Authenticate cli with a PAT
        run: echo "${{ secrets.DEPENDABOT_TOKEN }}" | gh auth login --with-token
      - name: Enable auto-merge for Dependabot PRs
        run: gh pr merge --auto --squash "$PR_URL"
        env:
          PR_URL: ${{github.event.pull_request.html_url}}

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "spark-checker",
  "version": "0.1.0",
  "type": "module",
  "files": [
    "main.js",
    "lib/",
    "vendor/",
    "README.md",
    "LICENSE",
    "deps.ts"
  ],
  "scripts": {
    "lint": "eslint && prettier --check .",
    "lint:fix": "eslint --fix && prettier --write ."
  },
  "prettier": "@checkernetwork/prettier-config",
  "devDependencies": {
    "@checkernetwork/prettier-config": "^1.0.0",
    "eslint": "^9.23.0",
    "neostandard": "^0.12.1",
    "prettier": "^3.5.3"
  }
}

--------------------------------------------------------------------------------
/lib/constants.js:
--------------------------------------------------------------------------------
export const SPARK_VERSION = '1.20.0'
export const MAX_CAR_SIZE = 200 * 1024 * 1024 // 200 MB
export const APPROX_ROUND_LENGTH_IN_MS = 20 * 60_000 // 20 minutes
export const MAX_JITTER_BETWEEN_TASKS_IN_MS = 10_000 // 10 seconds
export const RPC_URL = 'https://api.node.glif.io/'
export const RPC_AUTH = 'KZLIUb9ejreYOm-mZFM3UNADE0ux6CrHjxnS2D2Qgb8='
export const MINER_TO_PEERID_CONTRACT_ADDRESS =
  '0x14183aD016Ddc83D638425D6328009aa390339Ce' // Contract address on the Filecoin EVM
export const MAX_REQUEST_DURATION_MS = 90_000
export const OFFLINE_RETRY_DELAY_MS = 5_000 // 5 seconds

--------------------------------------------------------------------------------
/release.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -e

git diff --quiet HEAD || (echo "please revert the uncommitted changes"; exit 1)

SPARK_VERSION="${1?Missing required argument: semver}"

(echo "$SPARK_VERSION" | grep -Eq '^\d+\.\d+\.\d+$') || {
  echo Invalid version string \'$SPARK_VERSION\'. Must be MAJOR.MINOR.PATCH
  exit 1
}

sed -i '' -e "s/SPARK_VERSION = .*/SPARK_VERSION = '$SPARK_VERSION'/" lib/constants.js
git add lib/constants.js
git commit -m v"$SPARK_VERSION"
git tag -s v"$SPARK_VERSION" -m v"$SPARK_VERSION"
git push
git push origin v"$SPARK_VERSION"
open https://github.com/filecoin-station/spark/releases/new?tag=v"$SPARK_VERSION"

--------------------------------------------------------------------------------
/test/ipni-client.test.js:
--------------------------------------------------------------------------------
import { test } from 'zinnia:test'
import { assertEquals } from 'zinnia:assert'
import { queryTheIndex } from '../lib/ipni-client.js'

const KNOWN_CID = 'bafkreih25dih6ug3xtj73vswccw423b56ilrwmnos4cbwhrceudopdp5sq'
const FRISBEE_PEER_ID = '12D3KooWC8gXxg9LoJ9h3hy3jzBkEAxamyHEQJKtRmAuBuvoMzpr'

test('query advertised CID', async () => {
  const result = await queryTheIndex(KNOWN_CID, FRISBEE_PEER_ID)
  assertEquals(result, {
    indexerResult: 'OK',
    provider: {
      address: '/dns/frisbii.fly.dev/tcp/443/https',
      protocol: 'http',
    },
  })
})

test('ignore advertisements from other miners', async () => {
  const result = await queryTheIndex(KNOWN_CID, '12D3KooWsomebodyelse')
  assertEquals(result.indexerResult, 'NO_VALID_ADVERTISEMENT')
})

--------------------------------------------------------------------------------
/lib/activity-state.js:
--------------------------------------------------------------------------------
/* global Zinnia */

// Create activity events when we become healthy or produce errors
export class ActivityState {
  #healthy = null

  onOutdatedClient() {
    this.onError(
      'SPARK is outdated. Please upgrade Filecoin Station to the latest version.',
    )
  }

  onError(msg) {
    if (this.#healthy === null || this.#healthy) {
      this.#healthy = false
      Zinnia.activity.error(msg ?? 'SPARK failed reporting retrieval')
    }
  }

  onHealthy() {
    if (this.#healthy === null) {
      this.#healthy = true
      Zinnia.activity.info('SPARK started reporting retrievals')
    } else if (!this.#healthy) {
      this.#healthy = true
      Zinnia.activity.info('SPARK retrieval reporting resumed')
    }
  }

  // Expose the healthy state
  isHealthy() {
    return this.#healthy === true
  }
}

--------------------------------------------------------------------------------
/lib/http-assertions.js:
--------------------------------------------------------------------------------
import { AssertionError } from 'zinnia:assert'

export { assertOkResponse } from '../vendor/deno-deps.js'

/**
 * @param {Response} res
 * @param {string} [errorMsg]
 */
export async function assertRedirectResponse(res, errorMsg) {
  if ([301, 302, 303, 304, 307, 308].includes(res.status)) {
    const location = res.headers.get('location')
    if (!location) {
      const msg =
        (errorMsg ? errorMsg + ' ' : '') +
        'The server response is missing the Location header. Headers found:\n' +
        Array.from(res.headers.keys()).join('\n')
      throw new AssertionError(msg)
    }
    return
  }

  let body
  try {
    body = await res.text()
  } catch {}
  const err = new Error(
    `${errorMsg ?? 'Server did not respond with redirect'} (${res.status}): ${body?.trimEnd()}`,
  )
  err.statusCode = res.status
  err.serverMessage = body
  throw err
}

--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
name: CI
on:
  push:
    branches: [main]
  pull_request:

env:
  ZINNIA_VERSION: v0.20.2
jobs:
  test-linux:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run:
          curl -L https://github.com/filecoin-station/zinnia/releases/download/${{
          env.ZINNIA_VERSION }}/zinnia-linux-x64.tar.gz | tar -xz
      - run: ./zinnia run test.js

  test-windows:
    runs-on: windows-latest
    steps:
      - uses: actions/checkout@v4
      - uses: robinraju/release-downloader@v1.11
        with:
          repository: 'filecoin-station/zinnia'
          tag: ${{ env.ZINNIA_VERSION }}
          fileName: zinnia-windows-x64.zip
          extract: true
          token: ${{ secrets.GITHUB_TOKEN }}
      - run: ./zinnia.exe run test.js

  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
      - run: npm ci
      - run: npm run lint

--------------------------------------------------------------------------------
/.github/workflows/dependabot-auto-approve-minor.yml:
--------------------------------------------------------------------------------
name: Dependabot auto-approve minor updates
on: pull_request

permissions:
  pull-requests: write

jobs:
  dependabot:
    runs-on: ubuntu-latest
    if: ${{ github.actor == 'dependabot[bot]' }}
    strategy:
      matrix:
        dependencyStartsWith:
          - '@checkernetwork/prettier-config'
          - neostandard
          - prettier
          - typescript
    steps:
      - name: Dependabot metadata
        id: metadata
        uses: dependabot/fetch-metadata@v2
        with:
          github-token: '${{ secrets.GITHUB_TOKEN }}'
      - name: Approve a PR
        if: ${{startsWith(steps.metadata.outputs.dependency-names, matrix.dependencyStartsWith) && (steps.metadata.outputs.update-type == 'version-update:semver-patch' || steps.metadata.outputs.update-type == 'version-update:semver-minor')}}
        run: gh pr review --approve "$PR_URL"
        env:
          PR_URL: ${{github.event.pull_request.html_url}}
          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}

--------------------------------------------------------------------------------
/lib/miner-info.js:
--------------------------------------------------------------------------------
import { RPC_URL, RPC_AUTH } from './constants.js'
import {
  getIndexProviderPeerId as getPeerId,
  MINER_TO_PEERID_CONTRACT_ADDRESS,
  MINER_TO_PEERID_CONTRACT_ABI,
  ethers,
} from '../vendor/deno-deps.js'

// Initialize contract
const fetchRequest = new ethers.FetchRequest(RPC_URL)
fetchRequest.setHeader('Authorization', `Bearer ${RPC_AUTH}`)
const provider = new ethers.JsonRpcProvider(fetchRequest)
const smartContractClient = new ethers.Contract(
  MINER_TO_PEERID_CONTRACT_ADDRESS,
  MINER_TO_PEERID_CONTRACT_ABI,
  provider,
)

/**
 * @param {string} minerId - The ID of the miner.
 * @param {object} options - Options for the function.
 * @param {number} options.maxAttempts - The maximum number of attempts to fetch
 *   the peer ID.
 * @returns {Promise<string>} The peer ID of the miner.
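 * @throws If the peer ID cannot be fetched after the configured number of attempts.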
25 | */ 26 | export async function getIndexProviderPeerId( 27 | minerId, 28 | { maxAttempts = 5 } = {}, 29 | ) { 30 | try { 31 | const { peerId, source } = await getPeerId(minerId, smartContractClient, { 32 | rpcUrl: RPC_URL, 33 | rpcAuth: RPC_AUTH, 34 | maxAttempts, 35 | signal: AbortSignal.timeout(60_000), 36 | }) 37 | console.log(`Peer ID fetched from ${source}.`) 38 | return peerId 39 | } catch (err) { 40 | console.error(err) 41 | throw err 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /test/miner-info.test.js: -------------------------------------------------------------------------------- 1 | import { test } from 'zinnia:test' 2 | import { 3 | assertMatch, 4 | AssertionError, 5 | assert, 6 | assertEquals, 7 | } from 'zinnia:assert' 8 | import { getIndexProviderPeerId } from '../lib/miner-info.js' 9 | 10 | const KNOWN_MINER_ID = 'f0142637' 11 | 12 | test('get peer id of a known miner', async () => { 13 | const result = await getIndexProviderPeerId(KNOWN_MINER_ID) 14 | assertMatch(result, /^12D3KooW/) 15 | }) 16 | 17 | test('get peer id of a miner that does not exist', async () => { 18 | try { 19 | const result = await getIndexProviderPeerId('f010', { maxAttempts: 1 }) 20 | throw new AssertionError( 21 | `Expected "getIndexProviderPeerId()" to fail, but it resolved with "${result}" instead.`, 22 | ) 23 | } catch (err) { 24 | assert(err instanceof Error, 'Expected error to be an instance of Error') 25 | assert( 26 | err.message 27 | .toString() 28 | .includes('Error fetching index provider PeerID for miner f010'), 29 | ) 30 | assert( 31 | err.cause.toString().includes('Error fetching PeerID for miner f010'), 32 | ) 33 | } 34 | }) 35 | 36 | test('getIndexProviderPeerId returns correct peer id for miner f03303347', async () => { 37 | const peerId = await getIndexProviderPeerId('f03303347') 38 | 39 | assert(typeof peerId === 'string', 'Expected peerId to be a string') 40 | assertEquals(peerId, '12D3KooWJ91c6xQshrNe7QAXPFAaeRrHWq2UrgXGPf8UmMZMwyZ5') 41 | }) 42 | -------------------------------------------------------------------------------- /test/http-assertions.test.js: -------------------------------------------------------------------------------- 1 | import { test } from 'zinnia:test' 2 | import { 3 | AssertionError, 4 | assertStringIncludes, 5 | assertRejects, 6 | } from 'zinnia:assert' 7 | import { assertRedirectResponse } from '../lib/http-assertions.js' 8 | 9 | test('assertRedirectResponse - 302', async () => { 10 | const responseMock = { 11 | status: 302, 12 | headers: new Headers({ location: '/new-location' }), 13 | async text() { 14 | throw new AssertionError('res.text() should not have been called') 15 | }, 16 | } 17 | 18 | await assertRedirectResponse(responseMock) 19 | }) 20 | 21 | test('assertRedirectResponse - mission Location header', async () => { 22 | const responseMock = { 23 | status: 302, 24 | headers: new Headers(), 25 | async text() { 26 | throw new AssertionError('res.text() should not have been called') 27 | }, 28 | } 29 | 30 | const err = await assertRejects(() => assertRedirectResponse(responseMock)) 31 | assertStringIncludes(err.message, 'Location') 32 | }) 33 | 34 | test('assertRedirectResponse - not redirect', async () => { 35 | const responseMock = { 36 | status: 200, 37 | headers: new Headers(), 38 | async text() { 39 | return 'RESPONSE BODY' 40 | }, 41 | } 42 | 43 | const err = await assertRejects(() => 44 | assertRedirectResponse(responseMock, 'NOT REDIRECT'), 45 | ) 46 | 
  assertStringIncludes(err.message, 'NOT REDIRECT')
  assertStringIncludes(err.message, 'RESPONSE BODY')
})

--------------------------------------------------------------------------------
/test/tasker.test.js:
--------------------------------------------------------------------------------
/* global Zinnia */

import { test } from 'zinnia:test'
import { assertEquals } from 'zinnia:assert'
import { getStationKey, getTaskKey, pickTasks } from '../lib/tasker.js'

const RANDOMNESS =
  'fc90e50dcdf20886b56c038b30fa921a5e57c532ea448dadcc209e44eec0445e'

test('getTaskKey', async () => {
  const key = await getTaskKey({ cid: 'bafyone', minerId: 'f0123' }, RANDOMNESS)
  assertEquals(
    key,
    19408172415633384483144889917969030396168570904487614072975030553911283422991n,
  )
})

test('getStationKey', async () => {
  const key = await getStationKey(Zinnia.stationId)
  assertEquals(
    key,
    15730389902218173522122968096857080019341969656147255283496861606681823756880n,
  )
})

test('pickTasksForNode', async () => {
  const allTasks = [
    { cid: 'bafyone', minerId: 'f010' },
    { cid: 'bafyone', minerId: 'f020' },
    { cid: 'bafyone', minerId: 'f030' },
    { cid: 'bafyone', minerId: 'f040' },

    { cid: 'bafytwo', minerId: 'f010' },
    { cid: 'bafytwo', minerId: 'f020' },
    { cid: 'bafytwo', minerId: 'f030' },
    { cid: 'bafytwo', minerId: 'f040' },
  ]

  const selectedTasks = await pickTasks({
    tasks: allTasks,
    stationId: 'some-station-id',
    randomness: RANDOMNESS,
    maxTasksPerRound: 3,
  })

  assertEquals(selectedTasks, [
    { cid: 'bafyone', minerId: 'f020' },
    { cid: 'bafyone', minerId: 'f010' },
    { cid: 'bafytwo', minerId: 'f020' },
  ])
})

--------------------------------------------------------------------------------
/deps.ts:
--------------------------------------------------------------------------------
// 3rd-party dependencies from Denoland
//
// Run the following script after making change in this file:
//   deno bundle deps.ts vendor/deno-deps.js
//
// You must use a 1.x version of Deno, e.g. v1.43.1
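// (the `deno bundle` subcommand used above was removed in Deno 2.x releases)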

export { encodeHex } from 'https://deno.land/std@0.203.0/encoding/hex.ts'
export { decodeBase64 } from 'https://deno.land/std@0.203.0/encoding/base64.ts'
export { decode as decodeVarint } from 'https://deno.land/x/varint@v2.0.0/varint.ts'
export { retry } from 'https://deno.land/std@0.203.0/async/retry.ts'

// Deno Bundle does not support npm dependencies, we have to load them via CDN
export { ethers } from 'https://cdn.jsdelivr.net/npm/ethers@6.13.5/dist/ethers.min.js'
export {
  getIndexProviderPeerId,
  MINER_TO_PEERID_CONTRACT_ADDRESS,
  MINER_TO_PEERID_CONTRACT_ABI,
} from 'https://cdn.jsdelivr.net/npm/index-provider-peer-id@1.0.1/index.js/+esm'
export { CarBlockIterator } from 'https://cdn.skypack.dev/@ipld/car@5.3.2/?dts'
export {
  UnsupportedHashError,
  HashMismatchError,
  validateBlock,
} from 'https://cdn.skypack.dev/@web3-storage/car-block-validator@1.2.0/?dts'
// cdn.skypack.dev cannot resolve import from @noble/hashes
// jsdelivr.net seems to work better, it's also recommended by drand-client
export {
  fetchBeaconByTime,
  HttpChainClient,
  HttpCachingChain,
} from 'https://cdn.jsdelivr.net/npm/drand-client@1.2.6/index.js/+esm'

export { assertOkResponse } from 'https://cdn.skypack.dev/assert-ok-response@1.0.0/?dts'
import pRetry from 'https://cdn.skypack.dev/p-retry@6.2.1/?dts'
export { pRetry }

--------------------------------------------------------------------------------
/lib/drand-client.js:
--------------------------------------------------------------------------------
import {
  fetchBeaconByTime,
  HttpChainClient,
  HttpCachingChain,
} from '../vendor/deno-deps.js'

// See https://docs.filecoin.io/networks/mainnet#genesis
const FIL_MAINNET_GENESIS_TS = new Date('2020-08-24T22:00:00Z').getTime()
const FIL_MAINNET_BLOCK_TIME = 30_000 // 30 seconds

/** @type {import('https://cdn.skypack.dev/drand-client@1.2.6/?dts').ChainOptions} */
const DRAND_OPTIONS = {
  // FIXME: beacon verification does not work when using drand-client via CDN :(
  // Without verification, we are blindly trusting https://api.drand.sh/ to provide honest responses.
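  // (Quicknet beacons are still signed, so responses could be verified out of band against the public key below.)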
  // See https://github.com/filecoin-station/spark/issues/86
  disableBeaconVerification: true,
  noCache: false,
  chainVerificationParams: {
    // quicknet
    chainHash:
      '52db9ba70e0cc0f6eaf7803dd07447a1f5477735fd3f661792ba94600c84e971',
    publicKey:
      '83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a',
  },
}

const DRAND_URL = `https://api2.drand.sh/${DRAND_OPTIONS.chainVerificationParams.chainHash}`
const chain = new HttpCachingChain(DRAND_URL, DRAND_OPTIONS)
const client = new HttpChainClient(chain, DRAND_OPTIONS)

/** @param {number} roundStartEpoch */
export async function getRandomnessForSparkRound(roundStartEpoch) {
  const roundStartedAt =
    roundStartEpoch * FIL_MAINNET_BLOCK_TIME + FIL_MAINNET_GENESIS_TS
  const beacon = await fetchBeaconByTime(client, roundStartedAt)
  return beacon.randomness
}

--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
name: Publish
on:
  release:
    types: [released]
jobs:
  publish:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: curl -L ${{ github.event.release.tarball_url }} > source.tar.gz
      - uses: filecoin-station/publish-zinnia-module-action@v0.2.0
        id: publish
        with:
          source: source.tar.gz
          w3up-private-key: ${{ secrets.W3UP_PRIVATE_KEY }}
          w3up-proof: ${{ secrets.W3UP_PROOF }}
          w3name-private-key: ${{ secrets.W3NAME_PRIVATE_KEY }}
          w3name-revision: ${{ secrets.W3NAME_REVISION }}
      - if: failure()
        uses: slackapi/slack-github-action@v2.0.0
        with:
          method: chat.postMessage
          token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload: |
            {
              "channel": "alerts",
              "text": "Publishing `${{ github.event.repository.name }}` failed",
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":warning: *<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Publishing `${{ github.event.repository.name }}` failed>*"
                  }
                }
              ]
            }
      - uses: slackapi/slack-github-action@v1.27.0
        with:
          channel-id: filecoin-slack-spark
          payload: |
            {
              "text": "SPARK checker version ${{ github.event.release.tag_name }} released",
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": "<${{ github.server_url }}/${{ github.repository }}/tree/${{ github.event.release.tag_name }}|Source code> published to IPFS as (tar.gz)"
                  }
                }
              ]
            }
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_SPARK_RELEASES_TOKEN }}

--------------------------------------------------------------------------------
/test/integration.js:
--------------------------------------------------------------------------------
import Spark from '../lib/spark.js'

import { assert, assertEquals } from 'zinnia:assert'
import { test } from 'zinnia:test'

const KNOWN_CID = 'bafkreih25dih6ug3xtj73vswccw423b56ilrwmnos4cbwhrceudopdp5sq'
const OUR_FAKE_MINER_ID = 'f01spark'
const FRISBEE_PEER_ID = '12D3KooWC8gXxg9LoJ9h3hy3jzBkEAxamyHEQJKtRmAuBuvoMzpr'

test('integration', async () => {
  const spark = new Spark()
  const measurementId = await spark.nextRetrieval()
  const res = await fetch(
    `https://api.filspark.com/measurements/${measurementId}`,
  )
  assert(res.ok)
  const retrieval = await res.json()
  assert(retrieval.indexerResult)
  assert(retrieval.finishedAt)
})

test('retrieval check for our CID', async () => {
  const minersChecked = []
  const getIndexProviderPeerId = async (minerId) => {
    minersChecked.push(minerId)
    return FRISBEE_PEER_ID
  }
  const spark = new Spark({ getIndexProviderPeerId })
  spark.getRetrieval = async () => ({
    cid: KNOWN_CID,
    minerId: OUR_FAKE_MINER_ID,
  })

  const measurementId = await spark.nextRetrieval()
  const res = await fetch(
    `https://api.filspark.com/measurements/${measurementId}`,
  )
  assert(res.ok)
  const m = await res.json()
  const assertProp = (prop, expectedValue) =>
    assertEquals(m[prop], expectedValue, prop)

  assertEquals(minersChecked, [OUR_FAKE_MINER_ID])

  assertProp('cid', KNOWN_CID)
  assertProp('minerId', OUR_FAKE_MINER_ID)
  assertProp('providerId', FRISBEE_PEER_ID)
  assertProp('indexerResult', 'OK')
  assertProp('providerAddress', '/dns/frisbii.fly.dev/tcp/443/https')
  assertProp('protocol', 'http')
  assertProp('timeout', false)
  assertProp('statusCode', 200)
  // Note: frisbii.fly.io doesn't support HEAD requests yet
  // https://github.com/CheckerNetwork/frisbii-on-fly/issues/3
  assertProp('headStatusCode', 405)
  assertProp('byteLength', 200)
  assertProp('carTooLarge', false)
  // TODO - spark-api does not record this field yet
  // assertProp('carChecksum', '122069f03061f7ad4c14a5691b7e96d3ddd109023a6539a0b4230ea3dc92050e7136')
})

test('can execute manual check for our CID', async () => {
  await import('../manual-check.js')
})

--------------------------------------------------------------------------------
/test/multiaddr.test.js:
--------------------------------------------------------------------------------
import { test } from 'zinnia:test'
import { assertEquals, assertThrows } from 'zinnia:assert'
import { multiaddrToHttpUrl } from '../lib/multiaddr.js'

const HAPPY_CASES = [
  ['/ip4/127.0.0.1/tcp/80/http', 'http://127.0.0.1'],
  ['/ip4/127.0.0.1/tcp/8080/http', 'http://127.0.0.1:8080'],
  ['/ip4/127.0.0.1/tcp/443/https', 'https://127.0.0.1'],
  ['/ip4/127.0.0.1/tcp/8080/https', 'https://127.0.0.1:8080'],
  ['/dns/meridian.space/tcp/8080/http', 'http://meridian.space:8080'],
  ['/dns4/meridian.space/tcp/8080/http', 'http://meridian.space:8080'],
  ['/dns6/meridian.space/tcp/8080/http', 'http://meridian.space:8080'],
  [
    '/dns/meridian.space/https/http-path/%2Fipni-provider%2FproviderID',
    'https://meridian.space/ipni-provider/providerID',
  ],
  ['/dns/meridian.space/https/http-path/', 'https://meridian.space'],
  ['/dns/meridian.space/https/http-path', 'https://meridian.space'],
  ['/dns/meridian.space/https', 'https://meridian.space'],
  ['/dns/meridian.space/http', 'http://meridian.space'],
  ['/ip4/127.0.0.1/http', 'http://127.0.0.1'],
  ['/ip4/127.0.0.1/https', 'https://127.0.0.1'],
]

for (const [multiaddr, expectedUri] of HAPPY_CASES) {
  test(`parse ${multiaddr}`, () => {
    const uri = multiaddrToHttpUrl(multiaddr)
    assertEquals(uri, expectedUri)
  })
}

const ERROR_CASES = [
  [
    '/ip4/127.0.0.1/tcp/80',
    'Cannot parse "/ip4/127.0.0.1/tcp/80": unsupported scheme "undefined"',
"/ip4/127.0.0.1/tcp/80": unsupported scheme "undefined"', 36 | ], 37 | [ 38 | '/ip4/127.0.0.1/udp/90', 39 | 'Cannot parse "/ip4/127.0.0.1/udp/90": unsupported protocol "udp"', 40 | ], 41 | [ 42 | '/ip4/127.0.0.1/tcp/8080/http/p2p/pubkey', 43 | 'Cannot parse "/ip4/127.0.0.1/tcp/8080/http/p2p/pubkey": too many parts', 44 | ], 45 | // NOTE: This is a valid multiaddr value that we decided to not support yet. 46 | [ 47 | '/dns/meridian.space/tcp/8080/http/http-path/%2Fipni-provider%2FproviderID', 48 | 'Cannot parse "/dns/meridian.space/tcp/8080/http/http-path/%2Fipni-provider%2FproviderID": unsupported scheme "tcp"', 49 | ], 50 | [ 51 | '/dns/meridian.space/http/http-path/invalid%path', 52 | 'Cannot parse "/dns/meridian.space/http/http-path/invalid%path": unsupported http path', 53 | ], 54 | ] 55 | 56 | for (const [multiaddr, expectedError] of ERROR_CASES) { 57 | test(`parse ${multiaddr}`, () => { 58 | const err = assertThrows(() => multiaddrToHttpUrl(multiaddr)) 59 | assertEquals(err.message, expectedError) 60 | }) 61 | } 62 | -------------------------------------------------------------------------------- /manual-check.js: -------------------------------------------------------------------------------- 1 | // 2 | // Usage: 3 | // zinnia run manual-check.js 4 | // 5 | 6 | import Spark, { getRetrievalUrl } from './lib/spark.js' 7 | import { getIndexProviderPeerId as defaultGetIndexProvider } from './lib/miner-info.js' 8 | 9 | // The task to check, replace with your own values 10 | const task = { 11 | cid: 'bafkreih25dih6ug3xtj73vswccw423b56ilrwmnos4cbwhrceudopdp5sq', 12 | minerId: 'f0frisbii', 13 | } 14 | 15 | const getIndexProviderPeerId = (minerId) => 16 | minerId === 'f0frisbii' 17 | ? '12D3KooWC8gXxg9LoJ9h3hy3jzBkEAxamyHEQJKtRmAuBuvoMzpr' 18 | : defaultGetIndexProvider(minerId) 19 | 20 | // Run the check 21 | const spark = new Spark({ getIndexProviderPeerId }) 22 | const stats = { ...task, indexerResult: null, statusCode: null, byteLength: 0 } 23 | await spark.executeRetrievalCheck(task, stats) 24 | console.log('Measurement: %o', stats) 25 | 26 | if (stats.providerAddress && stats.statusCode !== 200) { 27 | console.log('\nThe retrieval failed.') 28 | switch (stats.protocol) { 29 | case 'graphsync': 30 | console.log('You can get more details by running Lassie manually:\n') 31 | console.log( 32 | ' lassie fetch -o /dev/null -vv --dag-scope block --protocols graphsync --providers %s %s', 33 | JSON.stringify(stats.providerAddress), 34 | task.cid, 35 | ) 36 | console.log( 37 | '\nHow to install Lassie: https://github.com/filecoin-project/lassie?tab=readme-ov-file#installation', 38 | ) 39 | break 40 | case 'http': 41 | try { 42 | const url = getRetrievalUrl( 43 | stats.protocol, 44 | stats.providerAddress, 45 | task.cid, 46 | ) 47 | console.log( 48 | 'You can get more details by requesting the following URL yourself:\n', 49 | ) 50 | console.log(' %s', url) 51 | console.log('\nE.g. using `curl`:') 52 | console.log(' curl -i %s', JSON.stringify(url)) 53 | console.log('\nYou can also test the retrieval using Lassie:\n') 54 | console.log( 55 | ' lassie fetch -o /dev/null -vv --dag-scope block --protocols http --providers %s %s', 56 | JSON.stringify(stats.providerAddress), 57 | task.cid, 58 | ) 59 | console.log( 60 | '\nHow to install Lassie: https://github.com/filecoin-project/lassie?tab=readme-ov-file#installation', 61 | ) 62 | } catch (err) { 63 | console.log( 64 | 'The provider address %j cannot be converted to a URL: %s', 65 | stats.providerAddress, 66 | err.message ?? 
        )
      }
      break
  }
}

--------------------------------------------------------------------------------
/lib/ipni-client.js:
--------------------------------------------------------------------------------
import {
  decodeBase64,
  decodeVarint,
  pRetry,
  assertOkResponse,
} from '../vendor/deno-deps.js'

/**
 * @param {string} cid
 * @param {string} providerId
 * @returns {Promise<{
 *   indexerResult: string
 *   provider?: { address: string; protocol: string }
 * }>}
 */
export async function queryTheIndex(cid, providerId) {
  let providerResults
  try {
    providerResults = await pRetry(() => getRetrievalProviders(cid), {
      retries: 5,
      shouldRetry: (error) => {
        return error.statusCode && error.statusCode >= 500
      },
      onFailedAttempt: (error) => {
        console.error(error)
        console.error('IPNI query failed, retrying...')
      },
    })
    console.log('IPNI returned %s provider results', providerResults.length)
  } catch (err) {
    console.error('IPNI query failed.', err)
    return {
      indexerResult:
        typeof err.statusCode === 'number'
          ? `ERROR_${err.statusCode}`
          : 'ERROR_FETCH',
    }
  }

  let graphsyncProvider
  for (const p of providerResults) {
    if (p.Provider.ID !== providerId) continue

    const [protocolCode] = decodeVarint(decodeBase64(p.Metadata))
    const protocol = {
      0x900: 'bitswap',
      0x910: 'graphsync',
      0x0920: 'http',
      4128768: 'graphsync',
    }[protocolCode]

    const address = p.Provider.Addrs[0]
    if (!address) continue

    switch (protocol) {
      case 'http':
        return {
          indexerResult: 'OK',
          provider: { address, protocol },
        }

      case 'graphsync':
        if (!graphsyncProvider) {
          graphsyncProvider = {
            address: `${address}/p2p/${p.Provider.ID}`,
            protocol,
          }
        }
    }
  }
  if (graphsyncProvider) {
    console.log('HTTP protocol is not advertised, falling back to Graphsync.')
    return {
      indexerResult: 'HTTP_NOT_ADVERTISED',
      provider: graphsyncProvider,
    }
  }

  console.log(
    'All advertisements are from other miners or for unsupported protocols.',
  )
  return { indexerResult: 'NO_VALID_ADVERTISEMENT' }
}

async function getRetrievalProviders(cid) {
  const url = `https://cid.contact/cid/${encodeURIComponent(cid)}`
  const res = await fetch(url)
  await assertOkResponse(res)

  const result = await res.json()
  return result.MultihashResults.flatMap((r) => r.ProviderResults)
}

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)
web_modules/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional stylelint cache
.stylelintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# vuepress v2.x temp and cache directory
.temp
.cache

# Docusaurus cache and generated files
.docusaurus

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*

--------------------------------------------------------------------------------
/lib/multiaddr.js:
--------------------------------------------------------------------------------
/**
 * @param {string} addr Multiaddr, e.g. `/ip4/127.0.0.1/tcp/80/http`
 * @returns {string} Parsed URI, e.g. `http://127.0.0.1:80`
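 * @throws {Error} When the multiaddr cannot be mapped to an HTTP(S) URL; the error's `code` property names the reason.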
 */
export function multiaddrToHttpUrl(addr) {
  const [multiAddr, httpPathMultiAddr] = addr.split('/http-path')
  const [, hostType, hostValue, ...multiAddrParts] = multiAddr.split('/')
  let scheme, path, rest, port
  if (addr.includes('/http-path')) {
    ;[scheme, ...rest] = multiAddrParts
    try {
      // Remove leading slash and parse URI-encoded path
      // See https://github.com/multiformats/multiaddr/blob/cab92e8e6da2e70c5f1b8aa59976e71e6922b392/protocols/http-path.md#usage
      path = decodeURIComponent(httpPathMultiAddr.substring(1))
    } catch (err) {
      throw Object.assign(
        new Error(`Cannot parse "${addr}": unsupported http path`, {
          cause: err,
        }),
        { code: 'INVALID_HTTP_PATH' },
      )
    } // Handle HTTP/HTTPs addresses using the default port
  } else if (multiAddrParts[0] === 'http' || multiAddrParts[0] === 'https') {
    ;[scheme, ...rest] = multiAddrParts
  } else {
    let ipProtocol
    ;[ipProtocol, port, scheme, ...rest] = multiAddrParts

    if (ipProtocol !== 'tcp') {
      throw Object.assign(
        new Error(
          `Cannot parse "${addr}": unsupported protocol "${ipProtocol}"`,
        ),
        { code: 'UNSUPPORTED_MULTIADDR_PROTO' },
      )
    }
  }

  if (scheme !== 'http' && scheme !== 'https') {
    throw Object.assign(
      new Error(`Cannot parse "${addr}": unsupported scheme "${scheme}"`),
      {
        code: 'UNSUPPORTED_MULTIADDR_SCHEME',
      },
    )
  }

  if (rest.length) {
    throw Object.assign(new Error(`Cannot parse "${addr}": too many parts`), {
      code: 'MULTIADDR_HAS_TOO_MANY_PARTS',
    })
  }

  let url = `${scheme}://${getUriHost(hostType, hostValue)}`
  if (port) url += getUriPort(scheme, port)
  if (path) url += path
  return url
}

function getUriHost(hostType, hostValue) {
  switch (hostType) {
    case 'ip4':
    case 'dns':
    case 'dns4':
    case 'dns6':
      return hostValue
    case 'ip6':
      // See https://superuser.com/a/367788/135774:
      // According to RFC2732, literal IPv6 addresses should be put inside square brackets in URLs
      return `[${hostValue}]`
  }

  throw Object.assign(
    new Error(`Unsupported multiaddr host type "${hostType}"`),
    {
      code: 'UNSUPPORTED_MULTIADDR_HOST_TYPE',
    },
  )
}

function getUriPort(scheme, port) {
  if (scheme === 'http' && port === '80') return ''
  if (scheme === 'https' && port === '443') return ''
  return `:${port}`
}

--------------------------------------------------------------------------------
/lib/tasker.js:
--------------------------------------------------------------------------------
/* global Zinnia */

import { ActivityState } from './activity-state.js'
import { encodeHex } from '../vendor/deno-deps.js'
import { assertOkResponse, assertRedirectResponse } from './http-assertions.js'
import { getRandomnessForSparkRound } from './drand-client.js'
import { assertEquals, assertInstanceOf } from 'zinnia:assert'

/** @typedef {{ cid: string; minerId: string }} RetrievalTask */
/** @typedef {RetrievalTask & { key: bigint }} KeyedRetrievalTask */

export class Tasker {
  #lastRoundUrl
  /** @type {RetrievalTask[]} */
  #remainingRoundTasks
  #fetch
  #activity

  /**
   * @param {object} args
   * @param {globalThis.fetch} args.fetch
   * @param {ActivityState} args.activityState
   */
  constructor({
    fetch = globalThis.fetch,
    activityState = new ActivityState(),
  } = {}) {
    this.#fetch = fetch
    this.#activity = activityState

    this.maxTasksPerRound = 360

    // TODO: persist these two values across module restarts
    // Without persistence, after the Spark module is restarted, it will start executing the same
    // retrieval tasks we have already executed
    this.#lastRoundUrl = 'unknown'
    this.#remainingRoundTasks = []
  }

  /** @returns {RetrievalTask | undefined} */
  async next() {
    await this.#updateCurrentRound()
    return this.#remainingRoundTasks.pop()
  }

  async #updateCurrentRound() {
    console.log('Checking the current SPARK round...')
    let res = await this.#fetch('https://api.filspark.com/rounds/current', {
      method: 'GET',
      headers: { 'Content-Type': 'application/json' },
      redirect: 'manual',
      signal: AbortSignal.timeout(10_000),
    })

    await assertRedirectResponse(
      res,
      'Failed to find the URL of the current SPARK round',
    )
    const roundUrl = res.headers.get('location')
    this.#activity.onHealthy()
    if (roundUrl === this.#lastRoundUrl) {
      console.log('Round did not change since the last iteration')
      return
    }

    console.log('Fetching round details at location %s', roundUrl)
    res = await this.#fetch(`https://api.filspark.com${roundUrl}`, {
      method: 'GET',
      headers: { 'Content-Type': 'application/json' },
      signal: AbortSignal.timeout(10_000),
    })
    await assertOkResponse(res, 'Failed to fetch the current SPARK round')
    const { retrievalTasks, maxTasksPerNode, ...round } = await res.json()
    console.log('Current SPARK round:', round)
    console.log('  %s max tasks per round', maxTasksPerNode ?? '')
    console.log('  %s retrieval tasks', retrievalTasks.length)
    this.maxTasksPerRound = maxTasksPerNode

    const randomness = await getRandomnessForSparkRound(round.startEpoch)
    console.log('  randomness: %s', randomness)

    this.#remainingRoundTasks = await pickTasks({
      tasks: retrievalTasks,
      maxTasksPerRound: this.maxTasksPerRound,
      randomness,
      stationId: Zinnia.stationId,
    })

    this.#lastRoundUrl = roundUrl
  }
}

const textEncoder = new TextEncoder()

/**
 * @param {RetrievalTask} task
 * @param {string} randomness
 * @returns {Promise<bigint>}
 */
export async function getTaskKey(task, randomness) {
  assertEquals(typeof task, 'object', 'task must be an object')
  assertEquals(typeof task.cid, 'string', 'task.cid must be a string')
  assertEquals(typeof task.minerId, 'string', 'task.minerId must be a string')
  assertEquals(typeof randomness, 'string', 'randomness must be a string')

  const data = [task.cid, task.minerId, randomness].join('\n')
  const hash = await crypto.subtle.digest('sha-256', textEncoder.encode(data))
  return BigInt('0x' + encodeHex(hash))
}

/** @param {string} stationId */
export async function getStationKey(stationId) {
  assertEquals(typeof stationId, 'string', 'stationId must be a string')

  const hash = await crypto.subtle.digest(
    'sha-256',
    textEncoder.encode(stationId),
  )
  return BigInt('0x' + encodeHex(hash))
}

/**
 * @param {object} args
 * @param {RetrievalTask[]} args.tasks
 * @param {string} args.stationId
 * @param {string} args.randomness
 * @param {number} args.maxTasksPerRound
 * @returns {Promise<RetrievalTask[]>}
 */
export async function pickTasks({
  tasks,
  stationId,
  randomness,
  maxTasksPerRound,
}) {
  assertInstanceOf(tasks, Array, 'tasks must be an array')
  assertEquals(typeof stationId, 'string', 'stationId must be a string')
  assertEquals(typeof randomness, 'string', 'randomness must be a string')
  assertEquals(
    typeof maxTasksPerRound,
    'number',
    'maxTasksPerRound must be a number',
  )

  const keyedTasks = await Promise.all(
    tasks.map(async (t) => ({ ...t, key: await getTaskKey(t, randomness) })),
  )
  const stationKey = await getStationKey(stationId)

  /**
   * @param {{ key: bigint }} a
   * @param {{ key: bigint }} b
   * @returns {number}
   */
  const comparator = (a, b) => {
    const ad = a.key ^ stationKey
    const bd = b.key ^ stationKey
    return ad > bd ? 1 : ad < bd ? -1 : 0
  }

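  // Rendezvous-style selection: rank tasks by the XOR distance between each
  // task key and this station's key, then keep the closest maxTasksPerRound.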
  keyedTasks.sort(comparator)
  keyedTasks.splice(maxTasksPerRound)

  return keyedTasks.map(({ key, ...t }) => t)
}

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The contents of this repository are Copyright (c) corresponding authors and
contributors, licensed under the `Permissive License Stack` meaning either of:

- Apache-2.0 Software License: https://www.apache.org/licenses/LICENSE-2.0
  ([...4tr2kfsq](https://dweb.link/ipfs/bafkreiankqxazcae4onkp436wag2lj3ccso4nawxqkkfckd6cg4tr2kfsq))

- MIT Software License: https://opensource.org/licenses/MIT
  ([...vljevcba](https://dweb.link/ipfs/bafkreiepofszg4gfe2gzuhojmksgemsub2h4uy2gewdnr35kswvljevcba))

You may not use the contents of this repository except in compliance
with one of the listed Licenses. For an extended clarification of the
intent behind the choice of Licensing please refer to
https://protocol.ai/blog/announcing-the-permissive-license-stack/

Unless required by applicable law or agreed to in writing, software
distributed under the terms listed in this notice is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See each License for the specific language
governing permissions and limitations under that License.


`SPDX-License-Identifier: Apache-2.0 OR MIT`

Verbatim copies of both licenses are included below:

<details>
<summary>Apache-2.0 Software License</summary>

```
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS
```
</details>
207 | 208 |
MIT Software License 209 | 210 | ``` 211 | Permission is hereby granted, free of charge, to any person obtaining a copy 212 | of this software and associated documentation files (the "Software"), to deal 213 | in the Software without restriction, including without limitation the rights 214 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 215 | copies of the Software, and to permit persons to whom the Software is 216 | furnished to do so, subject to the following conditions: 217 | 218 | The above copyright notice and this permission notice shall be included in 219 | all copies or substantial portions of the Software. 220 | 221 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 222 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 223 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 224 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 225 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 226 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 227 | THE SOFTWARE. 228 | ``` 229 |
230 | -------------------------------------------------------------------------------- /lib/spark.js: -------------------------------------------------------------------------------- 1 | /* global Zinnia */ 2 | 3 | import { ActivityState } from './activity-state.js' 4 | import { 5 | SPARK_VERSION, 6 | MAX_CAR_SIZE, 7 | APPROX_ROUND_LENGTH_IN_MS, 8 | MAX_JITTER_BETWEEN_TASKS_IN_MS, 9 | MAX_REQUEST_DURATION_MS, 10 | OFFLINE_RETRY_DELAY_MS, 11 | } from './constants.js' 12 | import { queryTheIndex } from './ipni-client.js' 13 | import { assertOkResponse } from './http-assertions.js' 14 | import { getIndexProviderPeerId as defaultGetIndexProvider } from './miner-info.js' 15 | import { multiaddrToHttpUrl } from './multiaddr.js' 16 | import { Tasker } from './tasker.js' 17 | 18 | import { 19 | CarBlockIterator, 20 | encodeHex, 21 | HashMismatchError, 22 | UnsupportedHashError, 23 | validateBlock, 24 | } from '../vendor/deno-deps.js' 25 | 26 | const sleep = (dt) => new Promise((resolve) => setTimeout(resolve, dt)) 27 | 28 | export default class Spark { 29 | #fetch 30 | #getIndexProviderPeerId 31 | #activity = new ActivityState() 32 | #tasker 33 | 34 | constructor({ 35 | fetch = globalThis.fetch, 36 | getIndexProviderPeerId = defaultGetIndexProvider, 37 | } = {}) { 38 | this.#fetch = fetch 39 | this.#getIndexProviderPeerId = getIndexProviderPeerId 40 | this.#tasker = new Tasker({ 41 | fetch: this.#fetch, 42 | activityState: this.#activity, 43 | }) 44 | } 45 | 46 | async getRetrieval() { 47 | const retrieval = await this.#tasker.next() 48 | if (retrieval) { 49 | console.log({ retrieval }) 50 | } 51 | return retrieval 52 | } 53 | 54 | async executeRetrievalCheck(retrieval, stats) { 55 | console.log( 56 | `Calling Filecoin JSON-RPC to get PeerId of miner ${retrieval.minerId}`, 57 | ) 58 | try { 59 | const peerId = await this.#getIndexProviderPeerId(retrieval.minerId) 60 | console.log(`Found peer id: ${peerId}`) 61 | stats.providerId = peerId 62 | } catch (err) { 63 | // There are three common error cases: 64 | // 1. We are offline 65 | // 2. The JSON RPC provider is down 66 | // 3. JSON RPC errors like when Miner ID is not a known actor 67 | // There isn't much we can do in the first two cases. We can notify the user that we are not 68 | // performing any jobs and wait until the problem is resolved. 
69 | // The third case should not happen unless we made a mistake, so we want to learn about it 70 | if (err.name === 'FilecoinRpcError') { 71 | // TODO: report the error to Sentry 72 | console.error( 73 | 'The error printed below was not expected, please report it on GitHub:', 74 | ) 75 | console.error('https://github.com/filecoin-station/spark/issues/new') 76 | } 77 | // Abort the check, no measurement should be recorded 78 | throw err 79 | } 80 | 81 | console.log( 82 | `Querying IPNI to find retrieval providers for ${retrieval.cid}`, 83 | ) 84 | const { indexerResult, provider } = await queryTheIndex( 85 | retrieval.cid, 86 | stats.providerId, 87 | ) 88 | stats.indexerResult = indexerResult 89 | 90 | const providerFound = 91 | indexerResult === 'OK' || indexerResult === 'HTTP_NOT_ADVERTISED' 92 | if (!providerFound) return 93 | 94 | stats.protocol = provider.protocol 95 | stats.providerAddress = provider.address 96 | 97 | await this.fetchCAR( 98 | provider.protocol, 99 | provider.address, 100 | retrieval.cid, 101 | stats, 102 | ) 103 | if (stats.protocol === 'http') { 104 | await this.testHeadRequest(provider.address, retrieval.cid, stats) 105 | } 106 | } 107 | 108 | async fetchCAR( 109 | protocol, 110 | address, 111 | cid, 112 | stats, 113 | { maxRequestDurationMs = MAX_REQUEST_DURATION_MS } = {}, 114 | ) { 115 | // Abort if no progress was made for 60 seconds 116 | const controller = new AbortController() 117 | const { signal } = controller 118 | 119 | let requestIdleTimeout 120 | 121 | const resetTimeout = () => { 122 | if (requestIdleTimeout) { 123 | clearTimeout(requestIdleTimeout) 124 | } 125 | requestIdleTimeout = setTimeout(() => { 126 | stats.timeout = true 127 | controller.abort() 128 | }, 60_000) 129 | } 130 | 131 | const maxDurationTimeout = setTimeout(() => { 132 | stats.timeout = true 133 | controller.abort() 134 | }, maxRequestDurationMs) 135 | 136 | // WebCrypto API does not support streams yet, the hashing function requires entire data 137 | // to be provided at once. See https://github.com/w3c/webcrypto/issues/73 138 | const carBuffer = new ArrayBuffer(0, { maxByteLength: MAX_CAR_SIZE }) 139 | const carBytes = new Uint8Array(carBuffer) 140 | 141 | stats.startAt = new Date() 142 | 143 | try { 144 | const url = getRetrievalUrl(protocol, address, cid) 145 | console.log(`Fetching: ${url}`) 146 | 147 | resetTimeout() 148 | const res = await this.#fetch(url, { signal }) 149 | stats.statusCode = res.status 150 | 151 | if (res.ok) { 152 | resetTimeout() 153 | for await (const value of res.body) { 154 | if (stats.firstByteAt === null) { 155 | stats.firstByteAt = new Date() 156 | } 157 | stats.byteLength += value.byteLength 158 | 159 | // We want to limit how large content we are willing to download. 160 | // 1. To make sure we don't spend too much time (and network bandwidth) on a single task, 161 | // so that we can complete more tasks per round 162 | // 2. Until we have streaming hashes, we need to keep the entire payload in memory, and so 163 | // we need to put an upper limit on how much memory we consume. 
164 | if (stats.byteLength > MAX_CAR_SIZE) { 165 | stats.carTooLarge = true 166 | break 167 | } 168 | 169 | const offset = carBuffer.byteLength 170 | carBuffer.resize(offset + value.byteLength) 171 | carBytes.set(value, offset) 172 | 173 | resetTimeout() 174 | } 175 | 176 | if (!stats.carTooLarge) { 177 | await verifyContent(cid, carBytes) 178 | 179 | const digest = await crypto.subtle.digest('sha-256', carBytes) 180 | // 12 is the code for sha2-256 181 | // 20 is the digest length (32 bytes = 256 bits) 182 | stats.carChecksum = '1220' + encodeHex(digest) 183 | } 184 | } else { 185 | console.error( 186 | 'Retrieval failed with status code %s: %s', 187 | res.status, 188 | (await res.text()).trimEnd(), 189 | ) 190 | } 191 | } catch (err) { 192 | console.error(`Failed to fetch ${cid} from ${address} using ${protocol}`) 193 | console.error(err) 194 | if (!stats.statusCode || stats.statusCode === 200) { 195 | stats.statusCode = mapErrorToStatusCode(err) 196 | } 197 | } finally { 198 | clearTimeout(requestIdleTimeout) 199 | clearTimeout(maxDurationTimeout) 200 | } 201 | 202 | stats.endAt = new Date() 203 | } 204 | 205 | async testHeadRequest(address, cid, stats) { 206 | const url = getRetrievalUrl('http', address, cid) 207 | console.log(`Testing HEAD request: ${url}`) 208 | try { 209 | const res = await this.#fetch(url, { 210 | method: 'HEAD', 211 | headers: { 212 | Accept: 'application/vnd.ipld.raw', 213 | }, 214 | signal: AbortSignal.timeout(10_000), 215 | }) 216 | stats.headStatusCode = res.status 217 | } catch (err) { 218 | console.error(`Failed to make HEAD request to ${address} for ${cid}`) 219 | console.error(err) 220 | stats.headStatusCode = mapErrorToStatusCode(err) 221 | } 222 | } 223 | 224 | async submitMeasurement(task, stats) { 225 | console.log('Submitting measurement...') 226 | const payload = { 227 | sparkVersion: SPARK_VERSION, 228 | zinniaVersion: Zinnia.versions.zinnia, 229 | ...task, 230 | ...stats, 231 | participantAddress: Zinnia.walletAddress, 232 | stationId: Zinnia.stationId, 233 | } 234 | console.log('%o', payload) 235 | const res = await this.#fetch('https://api.filspark.com/measurements', { 236 | method: 'POST', 237 | body: JSON.stringify(payload), 238 | headers: { 239 | 'Content-Type': 'application/json', 240 | }, 241 | signal: AbortSignal.timeout(10_000), 242 | }) 243 | await assertOkResponse(res, 'Failed to submit measurement') 244 | const { id } = await res.json() 245 | console.log('Measurement submitted (id: %s)', id) 246 | return id 247 | } 248 | 249 | async nextRetrieval() { 250 | const retrieval = await this.getRetrieval() 251 | if (!retrieval) { 252 | console.log( 253 | 'Completed all tasks for the current round. 
Waiting for the next round to start.', 254 | ) 255 | return 256 | } 257 | 258 | const stats = newStats() 259 | 260 | await this.executeRetrievalCheck(retrieval, stats) 261 | 262 | const measurementId = await this.submitMeasurement(retrieval, { ...stats }) 263 | Zinnia.jobCompleted() 264 | return measurementId 265 | } 266 | 267 | async run() { 268 | while (true) { 269 | const started = Date.now() 270 | try { 271 | await this.nextRetrieval() 272 | this.#activity.onHealthy() 273 | } catch (err) { 274 | this.handleRunError(err) 275 | } 276 | const duration = Date.now() - started 277 | const isHealthy = this.#activity.isHealthy() 278 | 279 | const delay = calculateDelayBeforeNextTask({ 280 | isHealthy, 281 | roundLengthInMs: APPROX_ROUND_LENGTH_IN_MS, 282 | maxJitterInMs: MAX_JITTER_BETWEEN_TASKS_IN_MS, 283 | maxTasksPerRound: this.#tasker.maxTasksPerRound, 284 | lastTaskDurationInMs: duration, 285 | }) 286 | 287 | if (delay > 0) { 288 | console.log( 289 | `${isHealthy ? 'Online' : 'Offline'} – sleeping for ${Math.round(delay / 1000)}s before next task.`, 290 | ) 291 | await sleep(delay) 292 | console.log() // add an empty line to visually delimit logs from different tasks 293 | } 294 | } 295 | } 296 | 297 | handleRunError(err) { 298 | if (err.statusCode === 400 && err.serverMessage === 'OUTDATED CLIENT') { 299 | this.#activity.onOutdatedClient() 300 | } else { 301 | this.#activity.onError() 302 | } 303 | console.error(err) 304 | } 305 | } 306 | 307 | /** 308 | * @param {object} args 309 | * @param {number} args.roundLengthInMs 310 | * @param {number} [args.maxJitterInMs=0] Default is `0` 311 | * @param {number} args.maxTasksPerRound 312 | * @param {number} args.lastTaskDurationInMs 313 | */ 314 | export function calculateDelayBeforeNextTask({ 315 | isHealthy = true, 316 | roundLengthInMs, 317 | maxJitterInMs = 0, 318 | maxTasksPerRound, 319 | lastTaskDurationInMs, 320 | }) { 321 | if (!isHealthy) return OFFLINE_RETRY_DELAY_MS 322 | 323 | const baseDelay = roundLengthInMs / maxTasksPerRound 324 | const delay = baseDelay - lastTaskDurationInMs 325 | const base = Math.min(delay, 60_000) 326 | 327 | // Introduce some jitter to avoid all clients querying cid.contact at the same time 328 | const jitter = Math.round(Math.random() * maxJitterInMs) 329 | 330 | return base + jitter 331 | } 332 | 333 | export function newStats() { 334 | return { 335 | timeout: false, 336 | startAt: null, 337 | firstByteAt: null, 338 | endAt: null, 339 | carTooLarge: false, 340 | byteLength: 0, 341 | carChecksum: null, 342 | statusCode: null, 343 | headStatusCode: null, 344 | } 345 | } 346 | 347 | export function getRetrievalUrl(protocol, address, cid) { 348 | if (protocol === 'http') { 349 | const baseUrl = multiaddrToHttpUrl(address) 350 | return `${baseUrl}/ipfs/${cid}?dag-scope=block` 351 | } 352 | 353 | const searchParams = new URLSearchParams({ 354 | // See https://github.com/filecoin-project/lassie/blob/main/docs/HTTP_SPEC.md#dag-scope-request-query-parameter 355 | // Only the root block at the end of the path is returned after blocks required to verify the specified path segments. 
356 | 'dag-scope': 'block', 357 | protocols: protocol, 358 | providers: address, 359 | }) 360 | return `ipfs://${cid}?${searchParams.toString()}` 361 | } 362 | 363 | /** 364 | * @param {string} cid 365 | * @param {Uint8Array} carBytes 366 | */ 367 | async function verifyContent(cid, carBytes) { 368 | let reader 369 | try { 370 | reader = await CarBlockIterator.fromBytes(carBytes) 371 | } catch (err) { 372 | throw Object.assign(err, { code: 'CANNOT_PARSE_CAR_BYTES' }) 373 | } 374 | 375 | for await (const block of reader) { 376 | if (block.cid.toString() !== cid.toString()) { 377 | throw Object.assign( 378 | new Error(`Unexpected block CID ${block.cid}. Expected: ${cid}`), 379 | { 380 | code: 'UNEXPECTED_CAR_BLOCK', 381 | }, 382 | ) 383 | } 384 | 385 | await validateBlock(block) 386 | } 387 | } 388 | 389 | function mapErrorToStatusCode(err) { 390 | // 7xx codes for multiaddr parsing errors 391 | switch (err.code) { 392 | case 'UNSUPPORTED_MULTIADDR_HOST_TYPE': 393 | return 701 394 | case 'UNSUPPORTED_MULTIADDR_PROTO': 395 | return 702 396 | case 'UNSUPPORTED_MULTIADDR_SCHEME': 397 | return 703 398 | case 'MULTIADDR_HAS_TOO_MANY_PARTS': 399 | return 704 400 | case 'INVALID_HTTP_PATH': 401 | return 705 402 | } 403 | 404 | // 9xx for content verification errors 405 | if (err instanceof UnsupportedHashError) { 406 | return 901 407 | } else if (err instanceof HashMismatchError) { 408 | return 902 409 | } else if (err.code === 'UNEXPECTED_CAR_BLOCK') { 410 | return 903 411 | } else if (err.code === 'CANNOT_PARSE_CAR_BYTES') { 412 | return 904 413 | } 414 | 415 | // 8xx errors for network connection errors 416 | // Unfortunately, the Fetch API does not support programmatic detection of various error 417 | // conditions. We have to check the error message text. 
418 | if (err.message.includes('dns error')) { 419 | return 801 420 | } else if (err.message.includes('tcp connect error')) { 421 | return 802 422 | } 423 | 424 | // Fallback code for unknown errors 425 | return 600 426 | } 427 | -------------------------------------------------------------------------------- /test/spark.js: -------------------------------------------------------------------------------- 1 | /* global Zinnia */ 2 | 3 | import Spark, { calculateDelayBeforeNextTask, newStats } from '../lib/spark.js' 4 | import { test } from 'zinnia:test' 5 | import { 6 | assertInstanceOf, 7 | assertEquals, 8 | assertArrayIncludes, 9 | assertNotEquals, 10 | assertLessOrEqual, 11 | assertGreaterOrEqual, 12 | } from 'zinnia:assert' 13 | import { SPARK_VERSION, OFFLINE_RETRY_DELAY_MS } from '../lib/constants.js' 14 | 15 | const KNOWN_CID = 'bafkreih25dih6ug3xtj73vswccw423b56ilrwmnos4cbwhrceudopdp5sq' 16 | 17 | test('getRetrieval', async () => { 18 | const round = { 19 | roundId: '123', 20 | startEpoch: 4111111, 21 | maxTasksPerNode: 1, 22 | retrievalTasks: [ 23 | { 24 | cid: 'bafkreidysaugf7iuvemebpzwxxas5rctbyiryykagup2ygkojmx7ag64gy', 25 | minerId: 'f010', 26 | }, 27 | { 28 | cid: 'QmUMpWycKJ7GUDJp9GBRX4qWUFUePUmHzri9Tm1CQHEzbJ', 29 | minerId: 'f020', 30 | }, 31 | ], 32 | } 33 | const requests = [] 34 | const fetch = async (url, allOpts) => { 35 | const { signal, ...opts } = allOpts 36 | requests.push({ url, opts }) 37 | if (url === 'https://api.filspark.com/rounds/current') { 38 | const headers = new Headers() 39 | headers.set('location', '/rounds/meridian/0x84607/115') 40 | return { 41 | status: 302, 42 | ok: false, 43 | headers, 44 | } 45 | } 46 | 47 | return { 48 | status: 200, 49 | ok: true, 50 | async json() { 51 | return round 52 | }, 53 | } 54 | } 55 | const spark = new Spark({ fetch }) 56 | const retrieval = await spark.getRetrieval() 57 | assertArrayIncludes( 58 | round.retrievalTasks.map(JSON.stringify), 59 | [retrieval].map(JSON.stringify), 60 | ) 61 | assertEquals(requests, [ 62 | { 63 | url: 'https://api.filspark.com/rounds/current', 64 | opts: { 65 | method: 'GET', 66 | redirect: 'manual', 67 | headers: { 'Content-Type': 'application/json' }, 68 | }, 69 | }, 70 | { 71 | url: 'https://api.filspark.com/rounds/meridian/0x84607/115', 72 | opts: { 73 | headers: { 74 | 'Content-Type': 'application/json', 75 | }, 76 | method: 'GET', 77 | }, 78 | }, 79 | ]) 80 | }) 81 | 82 | test('testHeadRequest', async () => { 83 | const requests = [] 84 | const spark = new Spark({ 85 | fetch: async (url, { method, headers }) => { 86 | requests.push({ url: url.toString(), method, headers }) 87 | return { 88 | status: 200, 89 | } 90 | }, 91 | }) 92 | const stats = {} 93 | await spark.testHeadRequest( 94 | '/dns/frisbii.fly.dev/tcp/443/https', 95 | KNOWN_CID, 96 | stats, 97 | ) 98 | assertEquals(stats.headStatusCode, 200) 99 | assertEquals(requests, [ 100 | { 101 | url: `https://frisbii.fly.dev/ipfs/${KNOWN_CID}?dag-scope=block`, 102 | method: 'HEAD', 103 | headers: { Accept: 'application/vnd.ipld.raw' }, 104 | }, 105 | ]) 106 | }) 107 | 108 | test('testHeadRequest - with statusCode=500', async () => { 109 | const requests = [] 110 | const spark = new Spark({ 111 | fetch: async (url, { method }) => { 112 | requests.push({ url: url.toString(), method }) 113 | return { 114 | status: 500, 115 | } 116 | }, 117 | }) 118 | const stats = {} 119 | await spark.testHeadRequest( 120 | '/dns/frisbii.fly.dev/tcp/443/https', 121 | KNOWN_CID, 122 | stats, 123 | ) 124 | assertEquals(stats.headStatusCode, 500) 125 | 
assertEquals(requests, [ 126 | { 127 | url: `https://frisbii.fly.dev/ipfs/${KNOWN_CID}?dag-scope=block`, 128 | method: 'HEAD', 129 | }, 130 | ]) 131 | }) 132 | 133 | test('testHeadRequest - with network failure', async () => { 134 | const requests = [] 135 | const spark = new Spark({ 136 | fetch: async (url, { method }) => { 137 | requests.push({ url: url.toString(), method }) 138 | throw new Error() 139 | }, 140 | }) 141 | const stats = {} 142 | await spark.testHeadRequest( 143 | '/dns/frisbii.fly.dev/tcp/443/https', 144 | KNOWN_CID, 145 | stats, 146 | ) 147 | assertEquals(stats.headStatusCode, 600) 148 | assertEquals(requests, [ 149 | { 150 | url: `https://frisbii.fly.dev/ipfs/${KNOWN_CID}?dag-scope=block`, 151 | method: 'HEAD', 152 | }, 153 | ]) 154 | }) 155 | 156 | test('fetchCAR - http', async () => { 157 | const requests = [] 158 | const spark = new Spark({ 159 | fetch: async (url) => { 160 | requests.push(url.toString()) 161 | return fetch(url) 162 | }, 163 | }) 164 | const stats = newStats() 165 | await spark.fetchCAR( 166 | 'http', 167 | '/dns/frisbii.fly.dev/tcp/443/https', 168 | KNOWN_CID, 169 | stats, 170 | ) 171 | assertEquals(stats.statusCode, 200, 'stats.statusCode') 172 | assertEquals(stats.timeout, false, 'stats.timeout') 173 | assertInstanceOf(stats.startAt, Date) 174 | assertInstanceOf(stats.firstByteAt, Date) 175 | assertInstanceOf(stats.endAt, Date) 176 | assertEquals(stats.carTooLarge, false, 'stats.carTooLarge') 177 | assertEquals(stats.byteLength, 200, 'stats.byteLength') 178 | assertEquals( 179 | stats.carChecksum, 180 | '122069f03061f7ad4c14a5691b7e96d3ddd109023a6539a0b4230ea3dc92050e7136', 181 | 'stats.carChecksum', 182 | ) 183 | assertEquals(requests, [ 184 | `https://frisbii.fly.dev/ipfs/${KNOWN_CID}?dag-scope=block`, 185 | ]) 186 | }) 187 | 188 | /* Fixme: Find an active deal on a reliable graphsync provider 189 | test('fetchCAR - graphsync', async () => { 190 | // This test relies on data stored as part of a Filecoin deal which will eventually expire. 191 | // Also the storage provider may decide to stop serving Graphsync retrievals. 192 | // When that happens, this test will start failing, and we will need to find different 193 | // content that can be retrieved over Graphsync. 194 | // Hopefully, we will no longer support Graphsync by that time. 
195 | const cid = 'bafybeiepi56qxfcwqgpstg25r6sonig7y3pzd37lwambzmlcmbnujjri4a' 196 | const addr = '/dns/f010479.twinquasar.io/tcp/42002/p2p/12D3KooWHKeaNCnYByQUMS2n5PAZ1KZ9xKXqsb4bhpxVJ6bBJg5V' 197 | 198 | const requests = [] 199 | const spark = new Spark({ 200 | fetch: async (url) => { 201 | requests.push(url.toString()) 202 | return fetch(url) 203 | } 204 | }) 205 | const stats = newStats() 206 | await spark.fetchCAR('graphsync', addr, cid, stats) 207 | assertEquals(stats.statusCode, 200, 'stats.statusCode') 208 | assertEquals(stats.timeout, false, 'stats.timeout') 209 | assertInstanceOf(stats.startAt, Date) 210 | assertInstanceOf(stats.firstByteAt, Date) 211 | assertInstanceOf(stats.endAt, Date) 212 | assertEquals(stats.carTooLarge, false, 'stats.carTooLarge') 213 | assertEquals(stats.byteLength, 217, 'stats.byteLength') 214 | assertEquals(stats.carChecksum, '1220a8d765159d8829f2bca7df05e5cd46eb88bdaa30905d3d08c6295562ea072f0f', 'stats.carChecksum') 215 | assertEquals(requests, [`ipfs://${cid}?dag-scope=block&protocols=graphsync&providers=${encodeURIComponent(addr)}`]) 216 | }) 217 | */ 218 | 219 | /* Disabled as long as we are fetching the top-level block only 220 | test('fetchCAR exceeding MAX_CAR_SIZE', async () => { 221 | const fetch = async url => { 222 | return { 223 | status: 200, 224 | ok: true, 225 | body: (async function * () { 226 | const data = new Uint8Array(MAX_CAR_SIZE + 1) 227 | data.fill(11, 0, -1) 228 | yield data 229 | })() 230 | } 231 | } 232 | const spark = new Spark({ fetch }) 233 | const stats = newStats() 234 | await spark.fetchCAR('http', '/ip4/127.0.0.1/tcp/80/http', 'bafy', stats) 235 | assertEquals(stats.timeout, false) 236 | assertEquals(stats.carTooLarge, true) 237 | assertEquals(stats.byteLength, MAX_CAR_SIZE + 1) 238 | assertEquals(stats.carChecksum, null) 239 | assertEquals(stats.statusCode, 200) 240 | }) 241 | */ 242 | 243 | test('fetchCAR fails with statusCode=701 (unsupported host type)', async () => { 244 | const spark = new Spark() 245 | const stats = newStats() 246 | await spark.fetchCAR('http', '/ip99/1.2.3.4.5/tcp/80/http', KNOWN_CID, stats) 247 | assertEquals(stats.statusCode, 701, 'stats.statusCode') 248 | }) 249 | 250 | test('fetchCAR fails with statusCode=702 (protocol is not tcp)', async () => { 251 | const spark = new Spark() 252 | const stats = newStats() 253 | await spark.fetchCAR('http', '/ip4/1.2.3.4/udp/80/http', KNOWN_CID, stats) 254 | assertEquals(stats.statusCode, 702, 'stats.statusCode') 255 | }) 256 | 257 | test('fetchCAR fails with statusCode=703 (scheme is not http/https) - multiaddr without http-path', async () => { 258 | const spark = new Spark() 259 | const stats = newStats() 260 | await spark.fetchCAR('http', '/ip4/1.2.3.4/tcp/80/ldap', KNOWN_CID, stats) 261 | assertEquals(stats.statusCode, 703, 'stats.statusCode') 262 | }) 263 | 264 | test('fetchCAR fails with statusCode=703 (scheme is not supported) - multiaddr with http-path', async () => { 265 | const spark = new Spark() 266 | const stats = newStats() 267 | await spark.fetchCAR( 268 | 'http', 269 | '/dns/meridian.space/tcp/8080/http/http-path/%2Fipni-provider%2FproviderID', 270 | KNOWN_CID, 271 | stats, 272 | ) 273 | assertEquals(stats.statusCode, 703, 'stats.statusCode') 274 | }) 275 | 276 | test('fetchCAR fails with statusCode=704 (multiaddr has too many parts)', async () => { 277 | const spark = new Spark() 278 | const stats = newStats() 279 | await spark.fetchCAR( 280 | 'http', 281 | '/ip4/1.2.3.4/tcp/80/http/p2p/pubkey', 282 | KNOWN_CID, 283 | stats, 284 | ) 285 | 
assertEquals(stats.statusCode, 704, 'stats.statusCode') 286 | }) 287 | 288 | test('fetchCAR fails with statusCode=705 (multiaddr has invalid http-path)', async () => { 289 | const spark = new Spark() 290 | const stats = newStats() 291 | await spark.fetchCAR( 292 | 'http', 293 | '/dns/meridian.space/http/http-path/invalid%path', 294 | KNOWN_CID, 295 | stats, 296 | ) 297 | assertEquals(stats.statusCode, 705, 'stats.statusCode') 298 | }) 299 | 300 | test('fetchCAR fails with statusCode=801 (DNS error)', async () => { 301 | const spark = new Spark() 302 | const stats = newStats() 303 | await spark.fetchCAR( 304 | 'http', 305 | '/dns/invalid.example.com/tcp/80/http', 306 | KNOWN_CID, 307 | stats, 308 | ) 309 | assertEquals(stats.statusCode, 801, 'stats.statusCode') 310 | }) 311 | 312 | test('fetchCAR fails with statusCode=802 (TCP connection refused)', async () => { 313 | const spark = new Spark() 314 | const stats = newStats() 315 | await spark.fetchCAR('http', '/ip4/127.0.0.1/tcp/79/http', KNOWN_CID, stats) 316 | assertEquals(stats.statusCode, 802, 'stats.statusCode') 317 | }) 318 | 319 | test('fetchCAR fails with statusCode=802 (TCP connection refused)', async () => { 320 | const spark = new Spark() 321 | const stats = newStats() 322 | await spark.fetchCAR('http', '/ip4/127.0.0.1/tcp/79/http', KNOWN_CID, stats) 323 | assertEquals(stats.statusCode, 802, 'stats.statusCode') 324 | }) 325 | 326 | // TODO: 327 | // statusCode=901 - unsupported hash algorithm 328 | 329 | test('fetchCAR fails with statusCode=902 (hash mismatch)', async () => { 330 | const spark = new Spark({ 331 | fetch: async (url) => { 332 | const res = await fetch(url) 333 | return { 334 | status: res.status, 335 | ok: res.ok, 336 | body: (async function* () { 337 | const bytes = new Uint8Array(await res.arrayBuffer()) 338 | // manipulate one byte inside the CAR block 339 | bytes[bytes.length - 1] = bytes[bytes.length - 1] ^ 0x88 340 | yield bytes 341 | })(), 342 | } 343 | }, 344 | }) 345 | const stats = newStats() 346 | await spark.fetchCAR( 347 | 'http', 348 | '/dns/frisbii.fly.dev/tcp/443/https', 349 | KNOWN_CID, 350 | stats, 351 | ) 352 | assertEquals(stats.statusCode, 902, 'stats.statusCode') 353 | }) 354 | 355 | test('fetchCAR fails with statusCode=903 (unexpected CAR block)', async () => { 356 | const spark = new Spark({ 357 | // Fetch the root block of a different CID 358 | fetch: (_url) => 359 | fetch( 360 | 'https://frisbii.fly.dev/ipfs/bafkreih5zasorm4tlfga4ztwvm2dlnw6jxwwuvgnokyt3mjamfn3svvpyy?dag-scope=block', 361 | ), 362 | }) 363 | const stats = newStats() 364 | await spark.fetchCAR('http', '/ip4/127.0.0.1/tcp/80/http', KNOWN_CID, stats) 365 | assertEquals(stats.statusCode, 903, 'stats.statusCode') 366 | }) 367 | 368 | test('fetchCAR fails with statusCode=904 (cannot parse CAR)', async () => { 369 | const spark = new Spark({ 370 | fetch: async (_url) => { 371 | return { 372 | status: 200, 373 | ok: true, 374 | body: (async function* () { 375 | yield new Uint8Array([1, 2, 3]) 376 | })(), 377 | } 378 | }, 379 | }) 380 | const stats = newStats() 381 | await spark.fetchCAR('http', '/ip4/127.0.0.1/tcp/80/http', KNOWN_CID, stats) 382 | assertEquals(stats.statusCode, 904, 'stats.statusCode') 383 | }) 384 | 385 | test('submitRetrieval', async () => { 386 | const requests = [] 387 | const fetch = async (url, allOpts) => { 388 | const { signal, ...opts } = allOpts 389 | requests.push({ url, opts }) 390 | return { 391 | status: 200, 392 | ok: true, 393 | async json() { 394 | return { id: 123 } 395 | }, 396 | } 397 | } 398 | 
const spark = new Spark({ fetch }) 399 | await spark.submitMeasurement({ cid: 'bafytest' }, {}) 400 | assertEquals(requests, [ 401 | { 402 | url: 'https://api.filspark.com/measurements', 403 | opts: { 404 | method: 'POST', 405 | body: JSON.stringify({ 406 | sparkVersion: SPARK_VERSION, 407 | zinniaVersion: Zinnia.versions.zinnia, 408 | cid: 'bafytest', 409 | participantAddress: Zinnia.walletAddress, 410 | stationId: Zinnia.stationId, 411 | }), 412 | headers: { 'Content-Type': 'application/json' }, 413 | }, 414 | }, 415 | ]) 416 | }) 417 | 418 | test('calculateDelayBeforeNextTask() returns value based on average task duration', () => { 419 | const delay = calculateDelayBeforeNextTask({ 420 | lastTaskDurationInMs: 3_000, 421 | 422 | // one task every 10 seconds (on average) 423 | roundLengthInMs: 60_000, 424 | maxTasksPerRound: 6, 425 | }) 426 | assertEquals(delay, 7_000) 427 | }) 428 | 429 | test('calculateDelayBeforeNextTask() handles zero tasks per round', () => { 430 | const delay = calculateDelayBeforeNextTask({ 431 | maxTasksPerRound: 0, 432 | // the values below are not important 433 | roundLengthInMs: 12345, 434 | lastTaskDurationInMs: 12, 435 | }) 436 | assertEquals(delay, 60_000) 437 | }) 438 | 439 | test('calculateDelayBeforeNextTask() handles one task per round', () => { 440 | const delay = calculateDelayBeforeNextTask({ 441 | roundLengthInMs: 20 * 60_000, 442 | maxTasksPerRound: 1, 443 | lastTaskDurationInMs: 1_000, 444 | }) 445 | assertEquals(delay, 60_000) 446 | }) 447 | 448 | test('calculateDelayBeforeNextTask() introduces random jitter', () => { 449 | const getDelay = () => 450 | calculateDelayBeforeNextTask({ 451 | lastTaskDurationInMs: 3_000, 452 | 453 | // one task every 10 seconds (on average) 454 | roundLengthInMs: 60_000, 455 | maxTasksPerRound: 6, 456 | 457 | // jitter up to 1 second 458 | maxJitterInMs: 1_000, 459 | }) 460 | 461 | const delay1 = getDelay() 462 | const delay2 = getDelay() 463 | 464 | assertGreaterOrEqual(delay1, 7_000) 465 | assertLessOrEqual(delay1, 7_000 + 1_000) 466 | 467 | assertNotEquals( 468 | delay1, 469 | delay2, 470 | `Expected delay values to be different because of jitter. Actual value: ${delay1}`, 471 | ) 472 | assertLessOrEqual( 473 | Math.abs(delay1 - delay2), 474 | 1_000, 475 | `expected delay values to be within 1 second of each other. Actual values: ${delay1} <> ${delay2}`, 476 | ) 477 | }) 478 | 479 | test('calculateDelayBeforeNextTask() introduces random jitter for zero tasks in round', () => { 480 | const getDelay = () => 481 | calculateDelayBeforeNextTask({ 482 | maxTasksPerRound: 0, 483 | 484 | // jitter up to 1 second 485 | maxJitterInMs: 1_000, 486 | 487 | // the values below are not important 488 | roundLengthInMs: 12345, 489 | lastTaskDurationInMs: 12, 490 | }) 491 | 492 | const delay1 = getDelay() 493 | const delay2 = getDelay() 494 | 495 | assertNotEquals( 496 | delay1, 497 | delay2, 498 | `Expected delay values to be different because of jitter. Actual value: ${delay1}`, 499 | ) 500 | assertLessOrEqual( 501 | Math.abs(delay1 - delay2), 502 | 1_000, 503 | `expected delay values to be within 1 second of each other. 
Actual values: ${delay1} <> ${delay2}`, 504 | ) 505 | }) 506 | 507 | test('calculateDelayBeforeNextTask() returns OFFLINE_RETRY_DELAY_MS when offline', () => { 508 | const delay = calculateDelayBeforeNextTask({ 509 | isHealthy: false, 510 | roundLengthInMs: 20 * 60_000, 511 | maxJitterInMs: 10_000, 512 | maxTasksPerRound: 1, 513 | lastTaskDurationInMs: 1000, 514 | }) 515 | 516 | assertEquals( 517 | delay, 518 | OFFLINE_RETRY_DELAY_MS, 519 | `Expected delay to match OFFLINE_RETRY_DELAY_MS (${OFFLINE_RETRY_DELAY_MS}) when offline`, 520 | ) 521 | }) 522 | 523 | test('fetchCAR triggers timeout after long retrieval', async () => { 524 | const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms)) 525 | const fetch = async (_url, { signal }) => { 526 | return { 527 | status: 200, 528 | ok: true, 529 | body: (async function* () { 530 | while (true) { 531 | if (signal.aborted) { 532 | throw new DOMException('Aborted', 'AbortError') 533 | } 534 | yield new Uint8Array([0]) 535 | await sleep(500) 536 | } 537 | })(), 538 | } 539 | } 540 | 541 | const spark = new Spark({ fetch }) 542 | const stats = newStats() 543 | 544 | await spark.fetchCAR( 545 | 'http', 546 | '/dns/example.com/tcp/80/http', 547 | KNOWN_CID, 548 | stats, 549 | { 550 | maxRequestDurationMs: 0, 551 | }, 552 | ) 553 | 554 | assertEquals(stats.timeout, true) 555 | }) 556 | --------------------------------------------------------------------------------
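
A few closing notes on the exported helpers in lib/spark.js. getRetrievalUrl() builds a direct gateway URL for HTTP providers and a Lassie-style ipfs:// URL for every other transport. Below is a minimal sketch of both shapes, reusing the frisbii.fly.dev multiaddr and KNOWN_CID from test/spark.js; the graphsync address is a made-up placeholder:

```js
import { getRetrievalUrl } from './lib/spark.js'

const cid = 'bafkreih25dih6ug3xtj73vswccw423b56ilrwmnos4cbwhrceudopdp5sq'

// http → direct gateway URL, fetching only the root block
getRetrievalUrl('http', '/dns/frisbii.fly.dev/tcp/443/https', cid)
// 'https://frisbii.fly.dev/ipfs/<cid>?dag-scope=block'

// any other protocol → delegated ipfs:// URL carrying transport hints
const addr = '/dns/provider.example/tcp/42002/p2p/12D3KooWexample' // placeholder
getRetrievalUrl('graphsync', addr, cid)
// `ipfs://<cid>?dag-scope=block&protocols=graphsync&providers=${encodeURIComponent(addr)}`
```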
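
The carChecksum recorded by fetchCAR() is a hex-encoded multihash, not a bare SHA-256 digest: 0x12 identifies sha2-256 in the multihash table and 0x20 is the digest length (32 bytes), hence the '1220' prefix. A standalone sketch of the same construction follows; the repository itself uses encodeHex from vendor/deno-deps.js, so the inline hex conversion here is for illustration only:

```js
// Compute the same '1220…' multihash string that fetchCAR() stores
// in stats.carChecksum. WebCrypto cannot hash a stream, which is why
// fetchCAR() buffers the entire CAR payload before hashing.
async function sha256Multihash(carBytes) {
  const digest = await crypto.subtle.digest('sha-256', carBytes)
  const hex = Array.from(new Uint8Array(digest), (b) =>
    b.toString(16).padStart(2, '0'),
  ).join('')
  return '1220' + hex
}
```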
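
Finally, the pacing arithmetic in calculateDelayBeforeNextTask(): the base pause is the round length divided by the per-round task quota, minus the duration of the task that just finished, capped at 60 seconds so that idle checkers still poll for new rounds roughly once a minute; jitter is added on top of the capped value. A worked example matching the one-task-per-round test above:

```js
import { calculateDelayBeforeNextTask } from './lib/spark.js'

// A 20-minute round with a single task would yield a pause of
// 1_200_000 − 1_000 = 1_199_000 ms, but the 60_000 ms cap wins.
const delay = calculateDelayBeforeNextTask({
  roundLengthInMs: 20 * 60_000,
  maxTasksPerRound: 1,
  lastTaskDurationInMs: 1_000,
})
console.log(delay) // 60_000 (maxJitterInMs defaults to 0, so no jitter)
```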