├── .config.json ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── config.yml │ └── open_an_issue.md └── workflows │ ├── build.yml │ ├── generated-pr.yml │ ├── linter.yml │ └── stale.yml ├── .gitignore ├── .markdownlint.json ├── ARCHITECTURE.md ├── BITSWAP.md ├── DWEB_ADDRESSING.md ├── IMPORTERS_EXPORTERS.md ├── IPIP ├── 0000-template.md ├── 0001-lightweight-improvement-proposal-process.md ├── 0002-gateway-redirects-file.md ├── 0288-gateway-tar-response-format.md ├── 0328-gateway-json-cbor-response-format.md └── 0337-delegated-routing-http-api.md ├── IPIP_PROCESS.md ├── IPNS.md ├── KEYCHAIN.md ├── KEYSTORE.md ├── MERKLE_DAG.md ├── Makefile ├── README.md ├── REPO.md ├── REPO_FS.md ├── UNIXFS.md ├── http-gateways ├── DNSLINK_GATEWAY.md ├── PATH_GATEWAY.md ├── README.md ├── REDIRECTS_FILE.md ├── SUBDOMAIN_GATEWAY.md └── TRUSTLESS_GATEWAY.md ├── img ├── components │ ├── components.001.jpg │ ├── components.002.jpg │ ├── components.003.jpg │ ├── components.004.jpg │ ├── components.005.jpg │ └── components.key ├── dex-graphs │ ├── arch.monopic │ └── arch.txt ├── ip.waist.png ├── ipfs-resolve │ ├── ipfs-resolve.gif │ ├── resolve-gif.001.jpg │ ├── resolve-gif.002.jpg │ ├── resolve-gif.003.jpg │ ├── resolve-gif.004.jpg │ ├── resolve-gif.005.jpg │ ├── resolve-gif.006.jpg │ ├── resolve-gif.007.jpg │ └── resolve-gif.008.jpg ├── ipfs-splash-lg.png ├── ipfs-splash.png ├── ipfs-stack.png ├── mdag.waist.png └── spec.key ├── ipip-template.md ├── ipns ├── IPNS.md ├── IPNS_PUBSUB.md └── README.md ├── package-lock.json ├── package.json ├── routing └── ROUTING_V1_HTTP.md ├── src ├── _includes │ ├── footer.html │ ├── head.html │ ├── header.html │ ├── ipips-list.html │ └── list.html ├── architecture │ ├── index.html │ └── principles.md ├── bitswap-protocol.md ├── compact-denylist-format.md ├── css │ ├── index.css │ └── specs.css ├── exchange │ └── index.html ├── http-gateways │ ├── dnslink-gateway.md │ ├── index.html │ ├── libp2p-gateway.md │ ├── path-gateway.md │ ├── subdomain-gateway.md │ ├── trustless-gateway.md │ └── web-redirects-file.md ├── img │ ├── ipns-overview.png │ ├── watermark-proposal.svg │ └── watermark-ratified.svg ├── index.html ├── ipips │ ├── index.html │ ├── ipip-0001.md │ ├── ipip-0002.md │ ├── ipip-0288.md │ ├── ipip-0328.md │ ├── ipip-0337.md │ ├── ipip-0351.md │ ├── ipip-0379.md │ ├── ipip-0383.md │ ├── ipip-0386.md │ ├── ipip-0402.md │ ├── ipip-0410.md │ ├── ipip-0412.md │ ├── ipip-0417.md │ ├── ipip-0428.md │ └── ipip-0484.md ├── ipns │ ├── index.html │ ├── ipns-pubsub-router.md │ └── ipns-record.md ├── meta │ ├── code-of-conduct.md │ ├── index.html │ ├── ipip-process.md │ └── spec-for-specs.md └── routing │ ├── http-routing-v1.md │ └── index.html ├── template.html └── template.md /.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "input": "src", 3 | "output": "out", 4 | "template": "template.html", 5 | "baseURL": "https://specs.ipfs.tech", 6 | "github": { 7 | "repository": "ipfs/specs" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # This is a CODEOWNERS file 2 | # Each line is a file pattern followed by one or more owners. 3 | # Order is important; the last matching pattern takes the most precedence. 
4 | # See: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners 5 | 6 | # global IPIP 7 | IPIP/ @ipfs/specs-stewards 8 | src/ipips/ @ipfs/specs-stewards 9 | 10 | # Selected Spec Stewards can be defined below to be automatically requested for 11 | # review when someone opens a pull request that modifies area of their 12 | # interest. 13 | 14 | http-gateways/ @lidel 15 | src/http-gateways/ @lidel -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Getting Help on IPFS 4 | url: https://ipfs.io/help 5 | about: All information about how and where to get help on IPFS. 6 | - name: IPFS Official Forum 7 | url: https://discuss.ipfs.io 8 | about: Please post general questions, support requests, and discussions here. 9 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/open_an_issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Open an issue 3 | about: Only for actionable issues relevant to this repository. 4 | title: '' 5 | labels: need/triage 6 | assignees: '' 7 | 8 | --- 9 | 20 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build and Deploy 2 | 3 | # Explicitly declare permissions 4 | permissions: 5 | contents: read 6 | pull-requests: write 7 | statuses: write 8 | 9 | on: 10 | push: 11 | branches: 12 | - main 13 | pull_request: 14 | branches: 15 | - main 16 | 17 | env: 18 | BUILD_PATH: 'out' 19 | 20 | concurrency: 21 | group: ${{ github.workflow }}-${{ github.ref }} 22 | cancel-in-progress: true # Cancel in progress runs if a new run is started 23 | 24 | jobs: 25 | build-and-deploy: 26 | runs-on: ubuntu-latest 27 | outputs: 28 | cid: ${{ steps.deploy.outputs.cid }} 29 | steps: 30 | - name: Checkout code 31 | uses: actions/checkout@v4 32 | 33 | - name: Setup Node.js 34 | uses: actions/setup-node@v4 35 | with: 36 | node-version: '20' 37 | cache: 'npm' 38 | 39 | - name: Install dependencies 40 | run: npm ci --prefer-offline --no-audit --progress=false 41 | 42 | - name: Build project 43 | run: make website 44 | 45 | - name: Upload static files as artifact 46 | id: upload-artifact 47 | uses: actions/upload-pages-artifact@v3 48 | with: 49 | path: ${{ env.BUILD_PATH }} 50 | 51 | - uses: ipfs/ipfs-deploy-action@v1 52 | name: Deploy to IPFS Mirror Providers 53 | id: deploy 54 | with: 55 | path-to-deploy: ${{ env.BUILD_PATH }} 56 | cluster-url: "/dnsaddr/ipfs-websites.collab.ipfscluster.io" 57 | cluster-user: ${{ secrets.CLUSTER_USER }} 58 | cluster-password: ${{ secrets.CLUSTER_PASSWORD }} 59 | storacha-key: ${{ secrets.STORACHA_KEY }} 60 | storacha-proof: ${{ secrets.STORACHA_PROOF }} 61 | #TODO pinata-jwt-token: ${{ secrets.PINATA_JWT_TOKEN }} 62 | github-token: ${{ github.token }} 63 | 64 | # TODO: right now, DNSLink is controlled by Fleek, and we use ipfs/ipfs-deploy-action for PR previews 65 | #- name: Update DNSLink 66 | # if: false # TODO github.ref == 'refs/heads/main' # only update DNSLink for main branch 67 | # uses: ipfs/dnslink-action@v0.1 68 | # with: 69 | # cid: ${{ steps.deploy.outputs.cid }} 70 | # dnslink_domain: 'specs.ipfs.tech' 71 | # 
cf_record_id: ${{ secrets.CF_RECORD_ID }} 72 | # cf_zone_id: ${{ secrets.CF_ZONE_ID }} 73 | # cf_auth_token: ${{ secrets.CF_AUTH_TOKEN }} 74 | # github_token: ${{ github.token }} 75 | # set_github_status: true 76 | 77 | 78 | gh-pages: 79 | runs-on: 'ubuntu-latest' 80 | needs: build-and-deploy 81 | if: github.ref == 'refs/heads/main' # only deploy to gh-pages for main branch 82 | permissions: 83 | pages: write # to deploy to Pages 84 | id-token: write # to verify the deployment originates from an appropriate source 85 | environment: 86 | name: 'github-pages' 87 | url: ${{ steps.deployment.outputs.page_url }} 88 | steps: 89 | - name: Deploy to GitHub Pages 90 | id: deployment 91 | uses: actions/deploy-pages@v4 92 | -------------------------------------------------------------------------------- /.github/workflows/generated-pr.yml: -------------------------------------------------------------------------------- 1 | name: Close Generated PRs 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/linter.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | on: 3 | workflow_dispatch: 4 | push: 5 | 6 | jobs: 7 | #markdownlint: 8 | # runs-on: ubuntu-latest 9 | # steps: 10 | # - uses: actions/checkout@v2 11 | # - uses: xt0rted/markdownlint-problem-matcher@b643b0751c371f357690337d4549221347c0e1bc # v1.0 12 | # - run: npx markdownlint **/*.md --ignore node_modules 13 | super-linter: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout Code 17 | uses: actions/checkout@v3 18 | with: 19 | # Full git history is needed to get a proper list of changed files within `super-linter` 20 | fetch-depth: 0 21 | - name: Lint 22 | uses: super-linter/super-linter/slim@v7 23 | env: 24 | LINTER_RULES_PATH: '.' 
25 | MARKDOWN_CONFIG_FILE: .markdownlint.json 26 | VALIDATE_ALL_CODEBASE: false 27 | VALIDATE_MARKDOWN: true 28 | DEFAULT_BRANCH: main 29 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close Stale Issues 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | out/ 2 | node_modules/ 3 | super-linter.log 4 | -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "single-h1": false, 3 | "ul-style": false, 4 | "no-bare-urls": false, 5 | "no-duplicate-heading": false, 6 | "no-emphasis-as-header": false, 7 | "fenced-code-language": false, 8 | "blanks-around-lists": false, 9 | "single-trailing-newline": false, 10 | "link-fragments": false, 11 | "line-length": false 12 | } 13 | -------------------------------------------------------------------------------- /ARCHITECTURE.md: -------------------------------------------------------------------------------- 1 | # ![](https://img.shields.io/badge/status-wip-orange.svg?style=flat-square) IPFS Architecture Overview 2 | 3 | 4 | **Authors(s)**: 5 | - [Juan Benet](https://github.com/jbenet) 6 | - [David Dias](https://github.com/daviddias) 7 | 8 | **Maintainer(s)**: 9 | - N/A 10 | 11 | * * * 12 | 13 | **Abstract** 14 | 15 | This spec document defines the IPFS protocol stack, the subsystems, the interfaces, and how it all fits together. It delegates non-interface details to other specs as much as possible. This is meant as a top-level view of the protocol and how the system fits together. 16 | 17 | Note, this document is not meant to be an introduction of the concepts in IPFS and is not recommended as a first pass to understanding how IPFS works. For that, please refer to the [IPFS paper](https://github.com/ipfs/papers/raw/master/ipfs-cap2pfs/ipfs-p2p-file-system.pdf). 18 | 19 | # Table of Contents 20 | 21 | - 1. IPFS and the Merkle DAG 22 | - 2. Nodes and Network Model 23 | - 3. The Stack 24 | - 4. Applications and data structures -- on top of IPFS 25 | - 5. Lifetime of fetching an object 26 | - 6. IPFS User Interfaces 27 | 28 | # 1. IPFS and the Merkle DAG 29 | 30 | At the heart of IPFS is the MerkleDAG, a directed acyclic graph whose links are hashes. 
This gives all objects in IPFS useful properties: 31 | 32 | - authenticated: content can be hashed and verified against the link 33 | - permanent: once fetched, objects can be cached forever 34 | - universal: any data structure can be represented as a merkledag 35 | - decentralized: objects can be created by anyone, without centralized writers 36 | 37 | In turn, these yield properties for the system as a whole: 38 | 39 | - links are content addressed 40 | - objects can be served by untrusted agents 41 | - objects can be cached permanently 42 | - objects can be created and used offline 43 | - networks can be partitioned and merged 44 | - any data structure can be modelled and distributed 45 | - (todo: list more) 46 | 47 | IPFS is a stack of network protocols that organize agent networks to create, publish, distribute, serve, and download merkledags. It is the authenticated, decentralized, permanent web. 48 | 49 | 50 | # 2. Nodes and Network Model 51 | 52 | The IPFS network uses PKI based identity. An "ipfs node" is a program that can find, publish, and replicate merkledag objects. Its identity is defined by a private key. Specifically: 53 | 54 | ``` 55 | privateKey, publicKey := keygen() 56 | nodeID := multihash(publicKey) 57 | ``` 58 | 59 | See more in the [IPFS keystore spec](https://github.com/ipfs/specs/blob/master/KEYSTORE.md). 60 | 61 | ## 2.1 multihash and upgradeable hashing 62 | 63 | All hashes in ipfs are encoded with [multihash](https://github.com/jbenet/multihash/), a self-describing hash format. The actual hash function used depends on security requirements. The cryptosystem of IPFS is upgradeable, meaning that as hash functions are broken, networks can shift to stronger hashes. There is no free lunch, as objects may need to be rehashed, or links duplicated. But ensuring that tools built do not assume a pre-defined length of hash digest means tools that work with today's hash functions will also work with tomorrows longer hash functions too. 64 | 65 | As of this writing, IPFS nodes _must_ support: 66 | 67 | ``` 68 | sha2-256 69 | sha2-512 70 | sha3 71 | ``` 72 | 73 | 74 | # 3. The Stack 75 | 76 | IPFS has a stack of modular protocols. Each layer may have multiple implementations, all in different modules. This spec will only address the interfaces between the layers, and briefly mention possible implementations. Details are left to the other specs. 77 | 78 | IPFS has five layers: 79 | 80 | - **naming** - a self-certifying PKI namespace (IPNS) 81 | - **merkledag** - data structure format (thin waist) 82 | - **exchange** - block transport and replication 83 | - **routing** - locating peers and objects 84 | - **network** - establishing connections between peers 85 | 86 | ![](img/ipfs-stack.png) 87 | 88 | These are briefly described bottom-up. 89 | 90 | ## 3.1 Network 91 | 92 | The **network** provides point-to-point transports (reliable and unreliable) between any two IPFS nodes in the network. It handles: 93 | - NAT traversal - hole punching, port mapping, and relay 94 | - supports multiple transports - TCP, SCTP, UTP, ... 95 | - supports encryption, signing, or clear communications 96 | - multi-multiplexes -multiplexes connections, streams, protocols, peers, ... 97 | 98 | See more in the [libp2p specs](https://github.com/libp2p/specs). 
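As a concrete illustration of how this transport flexibility surfaces in addressing, peers are reached via self-describing multiaddrs (see the multiformats projects linked from the README). The snippet below is purely illustrative: the IP address, DNS name, and peer ID are invented placeholders.

```go
package main

import "fmt"

func main() {
	// Each multiaddr self-describes the full transport stack used to reach a
	// peer, so new transports can be added without changing the addressing
	// scheme. All values below are made up for illustration.
	addrs := []string{
		"/ip4/203.0.113.7/tcp/4001/p2p/12D3KooWExamplePeer",          // plain TCP
		"/ip4/203.0.113.7/udp/4001/quic-v1/p2p/12D3KooWExamplePeer",  // QUIC over UDP
		"/dns4/node.example.com/tcp/443/wss/p2p/12D3KooWExamplePeer", // secure WebSockets
	}
	for _, a := range addrs {
		fmt.Println(a)
	}
}
```

Because the address itself names each protocol layer, a node can advertise several transports at once and peers can dial whichever one they support.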
99 | 100 | ## 3.2 Routing -- finding peers and data 101 | 102 | The IPFS **Routing** layer serves two important purposes: 103 | - **peer routing** -- to find other nodes 104 | - **content routing** -- to find data published to ipfs 105 | 106 | The Routing System is an interface that is satisfied by various kinds of implementations. For example: 107 | 108 | - **DHTs:** perhaps the most common, DHTs can be used to create a semi-persistent routing record distributed cache in the network. 109 | - **mdns:** used to find services advertised locally. `mdns` (or `dnssd`) is a local discovery service. We will be using it. 110 | - **snr:** supernode routing is a delegated routing system: it delegates to one of a set of supernodes. This is roughly like federated routing. 111 | - **dns:** ipfs routing could even happen over dns. 112 | 113 | See more in the [libp2p specs](https://github.com/libp2p/specs). 114 | 115 | ## 3.3 Block Exchange -- transferring content-addressed data 116 | 117 | The IPFS **Block Exchange** takes care of negotiating bulk data transfers. Once nodes know each other -- and are connected -- the exchange protocols govern how the transfer of content-addressed blocks occurs. 118 | 119 | The Block Exchange is an interface that is satisfied by various kinds of implementations. For example: 120 | 121 | - **Bitswap:** our main protocol for exchanging data. It is a generalization 122 | of BitTorrent to work with arbitrary (and not known a priori) DAGs. 123 | - **HTTP:** a simple exchange can be implemented with HTTP clients and servers. 124 | 125 | ## 3.4. Merkledag -- making sense of data 126 | 127 | [As discussed above](#IPFS-and-the-Merkle-DAG), the IPFS **merkledag** (also known as IPLD - InterPlanetary Linked Data) is the data structure at the heart of IPFS. It is an [acyclic directed graph](http://en.wikipedia.org/wiki/Directed_acyclic_graph) whose edges are hashes. Another name for it is the merkleweb. 128 | 129 | The merkledag data structure is: 130 | 131 | ```protobuf 132 | message MDagLink { 133 | bytes Hash = 1; // multihash of the target object 134 | string Name = 2; // utf string name. should be unique per object 135 | uint64 Tsize = 3; // cumulative size of target object 136 | } 137 | 138 | message MDagNode { 139 | MDagLink Links = 2; // refs to other objects 140 | bytes Data = 1; // opaque user data 141 | } 142 | ``` 143 | 144 | The merkledag is the "thin waist" of authenticated data structures. It is a minimal set of information needed to represent + transfer arbitrary authenticated data structures. More complex data structures are implemented on top of the merkledag, such as: 145 | 146 | - **git** and other version control systems 147 | - **bitcoin** and other blockchains 148 | - **unixfs**, a content-addressed unix filesystem 149 | 150 | See more in the [IPLD spec](https://ipld.io/specs/). 151 | 152 | ## 3.4.1 Merkledag Paths 153 | 154 | The merkledag is enough to resolve paths: 155 | 156 | ``` 157 | /ipfs/QmdpMvUptHuGysVn6mj69K53EhitFd2LzeHCmHrHasHjVX/test/foo 158 | ``` 159 | 160 | - (a) Would first fetch + resolve `QmdpMvUptHuGysVn6mj69K53EhitFd2LzeHCmHrHasHjVX` 161 | - (b) Then look into the links of (a), find the hash for `test`, and resolve it 162 | - (c) Then look into the links of (b), find the hash for `foo`, and resolve it 163 | 164 | See more in the [path resolution spec](https://github.com/ipld/specs/blob/master/data-model-layer/paths.md). 
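In pseudocode, the resolution in steps (a) through (c) is a simple link-walking loop. The sketch below is a simplified illustration only; `Node`, `Link`, and the `fetch` callback are assumptions standing in for whatever block-fetching machinery an implementation provides.

```go
package dag

import "fmt"

// Link and Node are simplified stand-ins for merkledag objects.
type Link struct {
	Name string // link name, e.g. "test" or "foo"
	Hash string // multihash of the target object
}

type Node struct {
	Links []Link
	Data  []byte
}

// resolvePath fetches the root object by its multihash (a), then repeatedly
// looks up the next path segment in the current object's link table and
// fetches the linked object (b), (c), ...
func resolvePath(fetch func(hash string) (*Node, error), root string, segments []string) (*Node, error) {
	node, err := fetch(root)
	if err != nil {
		return nil, err
	}
	for _, name := range segments {
		next := ""
		for _, l := range node.Links {
			if l.Name == name {
				next = l.Hash
				break
			}
		}
		if next == "" {
			return nil, fmt.Errorf("no link named %q", name)
		}
		if node, err = fetch(next); err != nil {
			return nil, err
		}
	}
	return node, nil
}
```

Section 5 below walks through the same example path end to end.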
165 | 166 | ![](img/ipfs-resolve/ipfs-resolve.gif) 167 | 168 | ## 3.5 Naming -- PKI namespace and mutable pointers 169 | 170 | IPFS is mostly concerned with content-addressed data, which by nature is immutable: changing an object would change its hash -- and thus its address, making it a _different_ object altogether. (Think of it as a copy-on-write filesystem). 171 | 172 | The IPFS **naming** layer -- or IPNS -- handles the creation of: 173 | - mutable pointers to objects 174 | - human-readable names 175 | 176 | IPNS is based on [SFS](http://en.wikipedia.org/wiki/Self-certifying_File_System). It is a PKI namespace -- a name is simply the hash of a public key. Whoever controls the private key controls the name. Records are signed by the private key and distributed anywhere (in IPFS, via the routing system). This is an egalitarian way to assign mutable names in the internet at large, without any centralization whatsoever, or certificate authorities. 177 | 178 | See more in the [IPNS spec](https://github.com/ipfs/specs/blob/master/IPNS.md). 179 | 180 | # 4. Applications and Data Structures -- on top of IPFS 181 | 182 | The stack described so far is enough to represent arbitrary data structures and replicate them across the internet. It is also enough to build and deploy decentralized websites. 183 | 184 | Applications and data structures on top of IPFS are represented as merkledags. Users can create arbitrary data structures that extend the merkledag and deploy them to the rest of the world using any of the tools that understand IPFS. 185 | 186 | See more in the [IPLD data structures specs](https://github.com/ipld/specs/tree/master/data-structures). 187 | 188 | ## 4.1 unixfs -- representing traditional files 189 | 190 | The unix filesystem abstractions -- files and directories -- are the main way people conceive of files in the internet. In IPFS, `unixfs` is a data structure that represents unix files on top of IPFS. We need a separate data structure to carry over information like: 191 | 192 | - whether the object represents a file or directory. 193 | - total sizes, minus indexing overhead 194 | 195 | See more in the [unixfs spec](https://github.com/ipfs/specs/blob/master/UNIXFS.md). 196 | 197 | ## 5. Lifetime of fetching an object. 198 | 199 | Suppose we ask an IPFS node to retrieve 200 | 201 | ``` 202 | /ipfs/QmdpMvUptHuGysVn6mj69K53EhitFd2LzeHCmHrHasHjVX/test/foo 203 | ``` 204 | 205 | The IPFS node first splits the path into components (discarding the `ipfs` prefix): 206 | 207 | ``` 208 | [ "QmdpMvUptHuGysVn6mj69K53EhitFd2LzeHCmHrHasHjVX", "test", "foo" ] 209 | ``` 210 | 211 | Then, the IPFS node resolves the components. 212 | The first component in an `/ipfs/...` path is always a multihash. 213 | The rest are names of links, to be resolved into multihashes. 214 | 215 | # 6. IPFS User Interfaces 216 | 217 | IPFS is not just a protocol. It is also a toolset. IPFS implementations include various tools for working with the merkledag, how to publish something, how to name something, etc. These interfaces may be critical to the survival of an implementation, or the project as a whole. These interfaces govern how people use IPFS, thus careful attention must be given to their design and implementation. 
Examples: 218 | 219 | - The [IPFS api](https://docs.ipfs.io/reference/api/http/) - an HTTP service 220 | - The [IPFS cli](https://docs.ipfs.io/reference/api/cli/) - a unix cli 221 | - The [IPFS libs](https://github.com/ipfs/ipfs#http-client-libraries) - implementations in various languages 222 | - The IPFS gateways - nodes on the internet that serve IPFS content over HTTP 223 | 224 | * * * 225 | 226 | # WIP Stack Dump: 227 | 228 | - How the layers fit together 229 | - How they call on each other 230 | - Mention all the ports 231 | - Mention all the interfaces with the user 232 | - Mention gateways 233 | -------------------------------------------------------------------------------- /BITSWAP.md: -------------------------------------------------------------------------------- 1 | # Bitswap 2 | 3 | Moved to https://specs.ipfs.tech/bitswap-protocol/ 4 | -------------------------------------------------------------------------------- /DWEB_ADDRESSING.md: -------------------------------------------------------------------------------- 1 | # ![](https://img.shields.io/badge/status-wip-orange.svg?style=flat-square) Addressing on the Decentralized Web 2 | 3 | **Author(s)**: 4 | - [Lars Gierth](mailto:lgierth@ipfs.io) 5 | 6 | **Maintainer(s)**: 7 | - 8 | 9 | * * * 10 | 11 | **Abstract** 12 | 13 | This document is largely incomplete. 14 | 15 | 16 | # Table of contents: 17 | - Introduction 18 | - The precarious web 19 | - Link competition and link rot 20 | - The addressing rift 21 | - DWeb Addressing 22 | - Namespaces 23 | - /ipfs -- immutable data 24 | - /ipns -- mutable pointers 25 | - Addressing data from other content-addressed systems 26 | - Network addressing 27 | - Interoperability 28 | - DWeb Addressing with HTTP 29 | - ipfs:// and ipns:// URL schemes 30 | - dweb: URI scheme 31 | - Content Security Policy / Origins 32 | - Appendix 33 | - DWeb Maturity Model 34 | - FAQ 35 | - Implementations 36 | - Future Work 37 | - Related work 38 | 39 | ## Introduction 40 | 41 | Location-based addressing is a centralizing vector on the web. It lets links rot and drives copies of content into mutual competition. 42 | 43 | This document describes a content-based addressing model which provides permanent links that don't rot and are cryptographically verifiable. The result is a more cooperative, resilient, and performant web. 44 | -------------------------------------------------------------------------------- /IMPORTERS_EXPORTERS.md: -------------------------------------------------------------------------------- 1 | # ![](https://img.shields.io/badge/status-wip-orange.svg?style=flat-square) Data Importers & Exporters 2 | 3 | **Author(s)**: 4 | - David Dias 5 | - Juan Benet 6 | 7 | * * * 8 | 9 | **Abstract** 10 | 11 | The IPFS Data Importing spec describes the several importing mechanisms used by IPFS, which can also be reused by other systems. An importing mechanism is composed of one or more chunkers and data format layouts. 12 | 13 | There have been lots of discussions around this topic; some of them are here: 14 | 15 | - https://github.com/ipfs/notes/issues/204 16 | - https://github.com/ipfs/notes/issues/216 17 | - https://github.com/ipfs/notes/issues/205 18 | - https://github.com/ipfs/notes/issues/144 19 | 20 | # Table of contents 21 | 22 | - [Introduction]() 23 | - [Requirements]() 24 | - [Architecture]() 25 | - [Interfaces]() 26 | - [Implementations]() 27 | - [References]() 28 | 29 | ## Introduction 30 | 31 | Importing data into IPFS can be done in a variety of ways.
These are use-case specific, produce different data structures, produce different graph topologies, and so on. They are not strictly needed in an IPFS implementation, but they definitely make it more useful. 32 | 33 | These data importing primitives are really just tools on top of IPLD, meaning that they can be generic and separate from IPFS itself. 34 | 35 | Essentially, data importing is divided into two parts: 36 | 37 | - Layouts - The graph topologies in which data is going to be structured and represented; these can include: 38 | - balanced graphs, simpler to implement 39 | - trickledag, a custom graph optimized for seeking 40 | - live stream 41 | - database indices 42 | - and so on 43 | - Splitters - The chunking algorithms applied to each file; these can be: 44 | - fixed size chunking (also known as dumb chunking) 45 | - rabin fingerprinting 46 | - dedicated format chunking, which requires knowledge of the format and typically only works with certain types of files (e.g. video, audio, images, etc) 47 | - special data structures chunking; formats like tar, pdf, doc, container and/or vm images fall into this category 48 | 49 | ### Goals 50 | 51 | - Have a set of primitives to digest, chunk and parse files, so that different chunkers can be replaced/added without any trouble. 52 | 53 | ## Requirements 54 | 55 | This is a set of requirements (or guidelines) describing the expectations that a layout or a splitter needs to fulfill: 56 | 57 | - a layout should expose an encoder/decoder-like API, that is, one able to convert data to its format and convert it back to the original format 58 | - a layout should contain a clear, unambiguous representation of the data that gets converted to its format 59 | - a layout can leverage one or more splitting strategies, applying the best strategy depending on the data format (dedicated format chunking) 60 | - a splitter can be: 61 | - agnostic - chunks any data format in the same way 62 | - dedicated - only able to chunk specific data formats 63 | - a splitter should also expose an encoder/decoder-like API 64 | - a splitter, once fed with data, should yield chunks to be added to a layout or to another splitter 65 | - an importer is an aggregate of layouts and splitters 66 | 67 | ## Architecture 68 | 69 | ```bash 70 | ┌───────────┐ ┌──────────┐ 71 | ┌──────┐ │ │ │ │ ┌────────────────┐ 72 | │ DATA │━━━━━▶│ chunker │━━━━━━━▶│ layout │━━━━━━━▶│ DATA formatted │ 73 | └──────┘ │ │ │ │ └────────────────┘ 74 | └───────────┘ └──────────┘ 75 | ▲ ▲ 76 | └─────────────────────────────────┘ 77 | Importer 78 | ``` 79 | 80 | - `chunkers or splitters` algorithms that read a stream and produce a series of chunks; for our purposes they should be deterministic on the stream. They are divided into: 81 | - `universal chunkers` which work on any stream given to them (e.g. size, rabin, etc); they should work roughly equally well across inputs (a minimal fixed-size splitter is sketched below) 82 | - `specific chunkers` which work on specific types of files (tar splitter, mp4 splitter, etc); special purpose but super useful for big files and special types of data 83 | - `layouts or topologies` graph topologies (e.g. balanced vs trickledag vs ext4, ... etc) 84 | - `importer` is a process that reads in some data (single file, set of files, archive, db, etc) and outputs a dag; it may use many chunkers and many layouts.
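To make the splitter concept concrete, below is a minimal sketch of the simplest universal chunker, fixed-size ("dumb") chunking. The `Splitter` interface and constructor are hypothetical, shown only to suggest the shape such an API could take.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// Splitter is a hypothetical interface: given a stream, deterministically
// yield a series of chunks.
type Splitter interface {
	NextChunk() ([]byte, error) // returns io.EOF once the stream is exhausted
}

// fixedSizeSplitter implements "dumb chunking": it cuts the stream into
// equally sized pieces regardless of content.
type fixedSizeSplitter struct {
	r    io.Reader
	size int
}

func NewFixedSizeSplitter(r io.Reader, size int) Splitter {
	return &fixedSizeSplitter{r: r, size: size}
}

func (s *fixedSizeSplitter) NextChunk() ([]byte, error) {
	buf := make([]byte, s.size)
	n, err := io.ReadFull(s.r, buf)
	if n > 0 {
		return buf[:n], nil // a short final chunk is still a valid chunk
	}
	if err == io.ErrUnexpectedEOF {
		err = io.EOF
	}
	return nil, err
}

func main() {
	s := NewFixedSizeSplitter(bytes.NewReader([]byte("hello interplanetary file system")), 8)
	for {
		chunk, err := s.NextChunk()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", chunk)
	}
}
```

Content-defined chunkers such as rabin fingerprinting could satisfy the same interface, but they pick chunk boundaries from the data itself, which improves deduplication across similar inputs.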
85 | 86 | ## Interfaces 87 | 88 | #### splitters 89 | 90 | #### layout 91 | 92 | #### importer 93 | 94 | ## Implementations 95 | 96 | #### chunker 97 | 98 | - go-chunk https://github.com/jbenet/go-chunk 99 | 100 | #### layout 101 | 102 | #### importer 103 | 104 | ## References 105 | -------------------------------------------------------------------------------- /IPIP/0000-template.md: -------------------------------------------------------------------------------- 1 | Moved to [`../ipip-template.md`](../ipip-template.md). 2 | -------------------------------------------------------------------------------- /IPIP/0001-lightweight-improvement-proposal-process.md: -------------------------------------------------------------------------------- 1 | # IPIP 0001: Lightweight Improvement Process for IPFS Specifications 2 | 3 | Moved to https://specs.ipfs.tech/ipips/ipip-0001/ 4 | -------------------------------------------------------------------------------- /IPIP/0002-gateway-redirects-file.md: -------------------------------------------------------------------------------- 1 | # IPIP 0002: _redirects File Support on Web Gateways 2 | 3 | Moved to https://specs.ipfs.tech/ipips/ipip-0002/ -------------------------------------------------------------------------------- /IPIP/0288-gateway-tar-response-format.md: -------------------------------------------------------------------------------- 1 | # IPIP-288: TAR Response Format on HTTP Gateways 2 | 3 | Moved to https://specs.ipfs.tech/ipips/ipip-0288/ 4 | -------------------------------------------------------------------------------- /IPIP/0328-gateway-json-cbor-response-format.md: -------------------------------------------------------------------------------- 1 | # IPIP-328: JSON and CBOR Response Formats on HTTP Gateways 2 | 3 | Moved to https://specs.ipfs.tech/ipips/ipip-0328/ 4 | -------------------------------------------------------------------------------- /IPIP/0337-delegated-routing-http-api.md: -------------------------------------------------------------------------------- 1 | # IPIP-337: Delegated Content Routing HTTP API 2 | 3 | Moved to https://specs.ipfs.tech/ipips/ipip-0337/ 4 | -------------------------------------------------------------------------------- /IPIP_PROCESS.md: -------------------------------------------------------------------------------- 1 | # IPIP: Improvement Process for IPFS Specifications 2 | 3 | Moved to https://specs.ipfs.tech/meta/ipip-process/ 4 | -------------------------------------------------------------------------------- /IPNS.md: -------------------------------------------------------------------------------- 1 | # IPNS Specs Moved 2 | 3 | Moved to [./ipns](./ipns/) 4 | -------------------------------------------------------------------------------- /KEYCHAIN.md: -------------------------------------------------------------------------------- 1 | # ![](https://img.shields.io/badge/status-wip-orange.svg?style=flat-square) The Keychain 2 | 3 | **Authors(s)**: 4 | - [Juan Benet](github.com/jbenet) 5 | 6 | * * * 7 | 8 | **Abstract** 9 | 10 | This document presents _The Keychain_, a distributed merkle-linked data structure that links cryptographic keys, identities, signatures, certificates, ciphertexts, proofs, and other objects. 11 | 12 | The idea of _The Keychain_ is to provide a common construction for managing and distributing cryptographic keys and artifacts. It is similar to a Public Key Infrastructure, but goes further into binding objects together. 
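For orientation before the type definitions below, the following sketch shows how a signature object can bind a key to the object it signs purely through merkle-links. It is a simplified illustration: the `Link` representation, the placeholder content addresses, and the signature bytes are all hypothetical.

```go
package main

import "fmt"

// Link is a stand-in for a merkle-link: the content address (hash) of another
// object in the keychain DAG.
type Link string

// Signature is a simplified version of the Signature type defined in the
// Types section below: it binds a public key to the object it signs.
type Signature struct {
	Key       Link   // merkle-link to the signer's public key object
	Algorithm Link   // merkle-link to the signing algorithm descriptor
	Signee    Link   // merkle-link to the object being signed
	Bytes     []byte // raw signature bytes
}

func main() {
	// Hypothetical content addresses; real ones would be multihashes of the
	// serialized Key and data objects.
	alicePubKey := Link("Qm...alice-public-key")
	document := Link("Qm...some-document")

	sig := Signature{
		Key:       alicePubKey,
		Algorithm: Link("Qm...ed25519-descriptor"),
		Signee:    document,
		Bytes:     []byte{0xde, 0xad, 0xbe, 0xef}, // placeholder, not a real signature
	}

	// Because every field is a merkle-link, fetching the Signature object is
	// enough to locate, and then verify, both the key and the signed object.
	fmt.Printf("signature over %s by key %s\n", sig.Signee, sig.Key)
}
```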
13 | 14 | # Table of Contents 15 | 16 | TODO 17 | 18 | ## Types 19 | 20 | ```go 21 | // Identity represents an entity that can prove possession of keys. 22 | // It is meant to map to People, Groups, Processes, etc. It is 23 | // essentially a Prover 24 | type Identity struct { 25 | Name string // does not need to be unique. 26 | } 27 | 28 | // Key represents a cryptographic key 29 | type Key struct { 30 | Algorithm Link // the algorithm used to generate the key 31 | Encoding Link // the encoding used to store the key 32 | Bytes Data // the raw key bytes 33 | } 34 | 35 | // KeyPair represents a pair of keys 36 | type KeyPair struct { 37 | Public Link // the public key 38 | Secret Link // the secret key 39 | } 40 | 41 | // Signature represents a digital signature over another object. 42 | type Signature struct { 43 | Key Link // the key used to verify this signature (PublicKey) 44 | Algorithm Link // the algorithm used to sign the signee 45 | Encoding Link // the encoding the sig is serialized with 46 | Signee Link // the object the key is signing 47 | Bytes Data // the raw signature bytes 48 | } 49 | 50 | // Ciphertext represents encrypted data 51 | type Encryption struct { 52 | Decryptor Link // the identity able to decrypt the encryption 53 | Ciphertext Link // the encrypted data 54 | } 55 | ``` 56 | 57 | 58 | ## Proof Types 59 | 60 | ```go 61 | // ProofOfControl proves a certain key is under control of a prover. 62 | var ProofOfControl = "proof-of-control" 63 | 64 | // ProofOfWork proves an amount of work was expended by a prover. 65 | var ProofOfWork = "proof-of-work" 66 | 67 | // ProofOfStorage proves certain data is possessed by prover. 68 | var ProofOfStorage = "proof-of-storage" 69 | 70 | // ProofOfRetrievability proves certain data is possessed by 71 | // _and retrievable from_ a prover. 72 | var ProofOfRetrievability = "proof-of-retrievability" 73 | ``` 74 | 75 | ## diagrams 76 | 77 | ![](https://www.evernote.com/l/AMZm3JN_2TJIL5frkmLYPf71oeA7qaOUiVEB/image.png) 78 | 79 | ![](https://www.evernote.com/l/AMacVgdLVAhPc5EOuvFZKHOhhd9VNcUq9zAB/image.png) 80 | -------------------------------------------------------------------------------- /KEYSTORE.md: -------------------------------------------------------------------------------- 1 | # Keystore 2 | 3 | Moved to https://github.com/ipfs/kubo/blob/master/docs/specifications/keystore.md 4 | -------------------------------------------------------------------------------- /MERKLE_DAG.md: -------------------------------------------------------------------------------- 1 | # ![](https://img.shields.io/badge/status-deprecated-red.svg?style=flat-square) The merkledag 2 | 3 | **This spec has been deprecated in favor of [IPLD](https://github.com/ipld/specs/).** It offers a clearer description of how to link different kinds of hash-based structures (e.g. linking a file in IPFS to a commit in Git), has a more generalized and flexible format, and uses a JSON-compatible representation, among other improvements. 4 | 5 | **Authors(s)**: 6 | - [Juan Benet](https://github.com/jbenet) 7 | - [Jeromy Johnson](https://github.com/whyrusleeping) 8 | 9 | * * * 10 | 11 | **Abstract** 12 | 13 | The _ipfs merkledag_ is a directed acyclic graph whose edges are merkle-links. This means that links to objects can authenticate the objects themselves, and that every object contains a secure representation of its children. 14 | 15 | This is a powerful primitive for distributed systems computations. 
The merkledag simplifies distributed protocols by providing an append-only authenticated data structure. Parties can communicate and exchange secure references (merkle-links) to objects. The references are enough to verify the correctness of the object at a later time, which allows the objects themselves to be served over untrusted channels. Merkledags also allow the branching of a data structure and subsequent merging, as in the version control system git. More generally, merkledags simplify the construction of Secure [CRDTs](http://en.wikipedia.org/wiki/Conflict-free_replicated_data_type), which enable distributed, convergent, commutative computation in an authenticated, secure way. 16 | 17 | ## Table of Contents 18 | 19 | TODO 20 | 21 | ## Definitions 22 | 23 | - `hash` - throughout this document, the word `hash` refers specifically to cryptographic hash functions, such as sha3. 24 | - `dag` - directed acyclic graph 25 | - `merkle-link` - a link (graph edge) between two objects, which is (a) represented by the hash of the target object, and (b) embedded in the source object. merkle-links construct graphs (dags) whose links are content-addressed, and authenticated. 26 | - `merkledag` - the merkledag is a directed acyclic graph whose links are merkle-links (hashes of the content). It is a hash tree, and (under a very loose definition) a Merkle tree. Alternative names: the merkle-web, the merkle-forest, the merkle-chain. 27 | - `multihash` - the [multihash](https://github.com/jbenet/multihash) format / protocol. 28 | - `ipfs object` - a node in the ipfs merkledag. It represents a singular entity. 29 | - `merkledag format` - the format that ipfs objects are expressed with. 30 | - `link segment` or `link table` - the part of the merkledag format that expresses links to other objects. 31 | - `data segment` - the part of the merkledag format that expresses non-link object data. 32 | - `protobuf` - [protocol buffers](https://developers.google.com/protocol-buffers/), a serialization encoding. 33 | - `multicodec` - a self-describing, generalized serialization format. 34 | 35 | 36 | ## The Format 37 | 38 | The IPFS merkledag format is very simple. It serves as a thin waist for more complex applications and data structure transports. Therefore, it aims to be as simple and small as possible. 39 | 40 | The format has two parts, the logical format, and the serialized format. 41 | 42 | ### Logical Format 43 | 44 | The merkledag format defines two parts, `Nodes` and `Links` between nodes. `Nodes` embed `Links` in their `Link Segment` (or link table). 45 | 46 | A node is divided in two parts: 47 | - a `Link Segment` which contains all the links. 48 | - a `Data Segment` which contains the object data. 49 | 50 | Instead of following previous approaches to merkledags, which place data mostly at the edges, the IPFS merkledag adapts the format of the HTTP web: every path endpoint is an object with _both_ links and data. (this is fundamentally different from UNIX files, in which objects have _either_ links (directories) _or_ data (files).). 
51 | 52 | The logical format -- in protobuf -- looks like this: 53 | 54 | ```proto3 55 | // An IPFS MerkleDAG Link 56 | message MerkleLink { 57 | bytes Hash = 1; // multihash of the target object 58 | string Name = 2; // utf string name 59 | uint64 Tsize = 3; // cumulative size of target object 60 | 61 | // user extensions start at 50 62 | } 63 | 64 | // An IPFS MerkleDAG Node 65 | message MerkleNode { 66 | repeated MerkleLink Links = 2; // refs to other objects 67 | bytes Data = 1; // opaque user data 68 | 69 | // user extensions start at 50 70 | } 71 | ``` 72 | 73 | ### Serialized Format 74 | 75 | 76 | (TODO remove this? use only protobuf?) 77 | ~~The logical representation is serialized into raw bytes using `multicodec`, a self-describing format that abstracts between serialization frameworks. That way, we can use the ipfs merkledag with various marshaling and serialization formats.~~ 78 | 79 | The logical representation is serialized into raw bytes using _protocol buffers_, a serialization format. 80 | 81 | ## Discussion 82 | 83 | ### Real World Examples 84 | 85 | Many successful distributed systems employ specialized merkledags at their core: 86 | - Merkle trees -- a special case of the merkledag -- are a well known cryptographic technique used to authenticate large amounts of data. The original use case involved one-time lamport signatures. 87 | - SFS-RO turns a unix filesystem into a merkledag, constructing a secure, distributed filesystem. 88 | - git uses a merkledag to enable distributed version control and source code management. Other DVCSes, such as mercurial and monotone, also feature a merkledag. 89 | - plan9 uses a merkledag to construct its snapshotting filesystems -- Fossil and Venti. 90 | - bittorrent uses a merkledag to provide secure and short infohash links to its downloadable torrents. 91 | - Google Wave -- a distributed communications platform -- used a merkledag to construct its commutative operational transforms, and enable convergent distributed collaboration. This functionality has since been folded into Google Docs. 92 | - bitcoin uses a merkledag to construct the blockchain, a shared append-only ledger with convergent distributed consensus. 93 | - Tahoe-LAFS uses a merkledag to construct a secure, distributed, capability filesystem based on the least-authority principle. 94 | 95 | (NOTE: please suggest other systems to reference here.) 96 | 97 | ### Thin Waist of Data Structures 98 | 99 | At its core, IPFS provides the merkledag as a primitive (or "internet layer") to build sophisticated applications easily. It is a "thin-waist" for secure, distributed applications, which -- by agreeing to follow the common format -- can then run across any replication, routing, and transport protocols. To draw an analogy, this is like the "thin-waist" IP provided to connect hosts across medium-specific networks. 100 | 101 | ![](mdag.waist.png) 102 | 103 | ![](ip.waist.png) 104 | 105 | This kind of modularity enables complicated and powerful applications to be built with little effort on top of a common base. All the complexity of authentication, distribution, replication, routing, and transport can be pulled in from other protocols and tools. This type of modularity is what made the layered internet -- the TCP/IP stack -- so tremendously powerful. 106 | 107 | ### Web of Data Structures 108 | 109 | In a sense, IPFS is a "web of data-structures", with the merkledag as the common denominator. 
Agreeing upon a format allows linking different authenticated data structures to each other, enabling sophisticated distributed applications to easily construct, distribute, and link their data. 110 | 111 | ### Linked Data 112 | 113 | The merkledag is a type of Linked-Data. The links do not follow the standard URI format, and instead opt for a more general and flexible UNIX filesystem path format, but the power is all there. One can trivially map formats like JSON-LD directly onto IPFS (IPFS-LD), making IPFS applications capable of using the full-power of the semantic web. 114 | 115 | A powerful result of content (and identity) addressing is that linked data definitions can be distributed directly with the content itself, and do not need to be served from the original location. This enables the creation of Linked Data definitions, specs, and applications which can operate faster (no need to fetch it over the network), disconnected, or even completely offline. 116 | 117 | ## Merkledag Notation 118 | 119 | To facilitate the definition of other data structures and protocols, we define a notation to express merkledag data structures. This defines their logical representation, and also a format specification (when using the ipfs merkledag format). 120 | 121 | #### ~~ WIP / TODO ~~ 122 | 123 | ``` 124 | tree node { 125 | links { 126 | 127 | } 128 | 129 | data { 130 | 131 | } 132 | } 133 | 134 | commit node { 135 | "parent" repeated link; // links to the parent commit 136 | "author" link; // link to the author of commit 137 | "" 138 | } 139 | ``` 140 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: install 2 | 3 | all: website 4 | 5 | clean: 6 | rm -rf ./out 7 | 8 | install: 9 | @if [ ! -d "node_modules" ] || ! git diff --quiet package-lock.json; then \ 10 | npm ci --prefer-offline --no-audit --no-fund --progress=false; \ 11 | fi 12 | 13 | website: clean install 14 | npx spec-generator -c .config.json 15 | 16 | watch: clean install 17 | npx spec-generator -c .config.json -w 18 | 19 | superlinter: 20 | docker run --rm -e VALIDATE_ALL_CODEBASE=false -e RUN_LOCAL=true -e VALIDATE_MARKDOWN=true -e MARKDOWN_CONFIG_FILE=".markdownlint.json" -e LINTER_RULES_PATH="." -e DEFAULT_BRANCH='main' -v $(shell pwd):/tmp/lint ghcr.io/super-linter/super-linter:slim-v7 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # IPFS Specifications 2 | 3 | > This repository contains the specs for the IPFS Protocol and associated subsystems. 4 | 5 | - [Documentation and Community](#documentation-and-community) 6 | - [Understanding badges](#understanding-the-meaning-of-the-spec-badges-and-their-lifecycle) 7 | - [Index](#index) 8 | - [Contribute](#contribute) 9 | - [InterPlanetary Improvement Process (IPIP)](#interplanetary-improvement-process-ipip) 10 | 11 | ## Documentation and Community 12 | 13 | Looking for user support? 14 | 15 | See [Documentation](https://docs.ipfs.io), 16 | [Discussion Forums](https://discuss.ipfs.io/), and other 17 | [Community Resources](https://docs.ipfs.io/community/) instead. 
18 | 19 | ## Understanding the meaning of the spec badges and their lifecycle 20 | 21 | We use the following label system to identify the state of each spec: 22 | 23 | - ![wip](https://img.shields.io/badge/status-wip-orange.svg?style=flat-square) - A work-in-progress, possibly to describe an idea before actually committing to a full draft of the spec. 24 | - ![draft](https://img.shields.io/badge/status-draft-yellow.svg?style=flat-square) - A draft that is ready to review. It should be implementable. 25 | - ![reliable](https://img.shields.io/badge/status-reliable-green.svg?style=flat-square) - A spec that has been adopted (implemented) and can be used as a reference point to learn how the system works. 26 | - ![stable](https://img.shields.io/badge/status-stable-brightgreen.svg?style=flat-square) - We consider this spec to close to final, it might be improved but the system it specifies should not change fundamentally. 27 | - ![permanent](https://img.shields.io/badge/status-permanent-blue.svg?style=flat-square) - This spec will not change. 28 | - ![deprecated](https://img.shields.io/badge/status-deprecated-red.svg?style=flat-square) - This spec is no longer in use. 29 | 30 | Nothing in this spec repository is `permanent` or even `stable` yet. Most of the subsystems are still a `draft` or in `reliable` state. 31 | 32 | ## Index 33 | 34 | The specs contained in this and related repositories are: 35 | 36 | - **IPFS Protocol:** 37 | - [IPFS Guide](https://docs.ipfs.tech/) - to start your IPFS journey 38 | - [Protocol Architecture Overview](./ARCHITECTURE.md) - the top-level spec and the stack 39 | - **User Interface (aka Public APIs):** 40 | - [HTTP Gateways](https://specs.ipfs.tech/http-gateways/) - implementation agnostic interfaces for accessing content-addressed data over HTTP 41 | - [Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) - implementation agnostic interfaces for content/peer/IPNS routing over HTTP 42 | - IPFS implementations may provide additional HTTP interfaces, for example: 43 | - [Kubo RPC at /api/v0](https://docs.ipfs.tech/reference/kubo/rpc/) 44 | - **Data Formats:** 45 | - [IPLD](https://ipld.io/specs/) - InterPlanetary Linked Data. 46 | - [DAG-CBOR](https://ipld.io/docs/codecs/known/dag-cbor/) - binary format, supporting the complete IPLD Data Model, with excellent performance, and suitable for any job. 47 | - [DAG-JSON](https://ipld.io/docs/codecs/known/dag-json/) - human-readable format, supporting almost the complete IPLD Data Model, and very convenient for interoperability, development, and debugging. 48 | - [DAG-PB](https://ipld.io/docs/codecs/known/dag-pb/) - a binary format for specific limited structures of data, which is highly used in IPFS and [UnixFS](./UNIXFS.md). 49 | - [CAR](https://ipld.io/specs/transport/car/) - transport format used to store content addressable objects in the form of IPLD block data as a sequence of bytes; typically as an [application/vnd.ipld.car](https://www.iana.org/assignments/media-types/application/vnd.ipld.car) file with a `.car` extension 50 | - Self Describing Formats ([multiformats](http://github.com/multiformats/multiformats)): 51 | - [multihash](https://github.com/multiformats/multihash) - self-describing hash digest format. 52 | - [multiaddr](https://github.com/multiformats/multiaddr) - self-describing addressing format. 53 | - [multicodec](https://github.com/multiformats/multicodec) - self-describing protocol/encoding streams (note: a file is a stream). 
54 | - [multistream](https://github.com/multiformats/multistream) - multistream is a format -- or simple protocol -- for disambiguating, and layering streams. It is extremely simple. 55 | - **Files and Directories:** 56 | - [UnixFS](./UNIXFS.md) 57 | - Related userland concepts (external docs): 58 | - [MFS, Mutable File System, or the Files API](https://docs.ipfs.tech/concepts/file-systems/#mutable-file-system-mfs) 59 | - **Storage Layer:** 60 | - [Pinning Service API](https://ipfs.github.io/pinning-services-api-spec/) 61 | - [Repo](./REPO.md) - IPFS node local repository spec 62 | - [FileSystem Repo](./REPO_FS.md) - IPFS node local repository spec 63 | - **Block Exchanges:** 64 | - [Bitswap](./BITSWAP.md) - BitTorrent-inspired exchange 65 | - **Key Management:** 66 | - [KeyStore](./KEYSTORE.md) - Key management on IPFS 67 | - [KeyChain](./KEYCHAIN.md) - Distribution of cryptographic Artifacts 68 | - **Networking layer:** 69 | - [libp2p](https://github.com/libp2p/specs) - libp2p is a modular and extensible network stack, built and use by IPFS, but that it can be reused as a standalone project. Covers: 70 | - **Records, Naming and Record Systems:** 71 | - [IPNS](https://specs.ipfs.tech/ipns/) - InterPlanetary Naming System 72 | - [IPNS Record Creation and Verification](https://specs.ipfs.tech/ipns/ipns-pubsub-router/) 73 | - [IPNS over PubSub](https://specs.ipfs.tech/ipns/ipns-pubsub-router/) 74 | - [DNSLink](https://dnslink.dev) - mapping DNS names to IPFS content paths 75 | - [DNSAddr](https://github.com/multiformats/multiaddr/blob/master/protocols/DNSADDR.md) - mapping DNS names to libp2p multiaddrs 76 | - **Other/related/included:** 77 | - [PDD](https://github.com/ipfs/pdd) - Protocol Driven Development 78 | 79 | ## Contribute 80 | 81 | [![contribute](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) 82 | 83 | Suggestions, contributions, criticisms are welcome. Though please make sure to familiarize yourself deeply with IPFS, the models it adopts, and the principles it follows. 84 | This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). 85 | 86 | ### InterPlanetary Improvement Process (IPIP) 87 | 88 | - Want to propose a change to an existing specification? 89 | - Or add a new protocol? 
90 | 91 | See: 92 | - [IPIP: Improvement Process for IPFS Specifications](https://specs.ipfs.tech/meta/ipip-process/) 93 | - List of [ratified IPIPs](https://specs.ipfs.tech/ipips/) 94 | - List of [open IPIPs](https://github.com/ipfs/specs/pulls?q=is%3Apr+is%3Aopen+ipip+sort%3Aupdated-desc) 95 | -------------------------------------------------------------------------------- /REPO.md: -------------------------------------------------------------------------------- 1 | # Repository 2 | 3 | Moved to https://github.com/ipfs/kubo/blob/master/docs/specifications/repository.md 4 | -------------------------------------------------------------------------------- /REPO_FS.md: -------------------------------------------------------------------------------- 1 | # Repository FS 2 | 3 | Moved to https://github.com/ipfs/kubo/blob/master/docs/specifications/repository_fs.md 4 | -------------------------------------------------------------------------------- /http-gateways/DNSLINK_GATEWAY.md: -------------------------------------------------------------------------------- 1 | # DNSLink Gateway Specification 2 | 3 | Moved to https://specs.ipfs.tech/http-gateways/dnslink-gateway/ 4 | -------------------------------------------------------------------------------- /http-gateways/PATH_GATEWAY.md: -------------------------------------------------------------------------------- 1 | # Path Gateway Specification 2 | 3 | Moved to https://specs.ipfs.tech/http-gateways/path-gateway/ 4 | -------------------------------------------------------------------------------- /http-gateways/README.md: -------------------------------------------------------------------------------- 1 | # Specification for HTTP Gateways 2 | 3 | Moved to https://specs.ipfs.tech/http-gateways/ 4 | -------------------------------------------------------------------------------- /http-gateways/REDIRECTS_FILE.md: -------------------------------------------------------------------------------- 1 | # _redirects File Specification 2 | 3 | Moved to https://specs.ipfs.tech/http-gateways/web-redirects-file/ 4 | -------------------------------------------------------------------------------- /http-gateways/SUBDOMAIN_GATEWAY.md: -------------------------------------------------------------------------------- 1 | # Subdomain Gateway Specification 2 | 3 | Moved to https://specs.ipfs.tech/http-gateways/subdomain-gateway/ 4 | -------------------------------------------------------------------------------- /http-gateways/TRUSTLESS_GATEWAY.md: -------------------------------------------------------------------------------- 1 | # Trustless Gateway Specification 2 | 3 | Moved to https://specs.ipfs.tech/http-gateways/trustless-gateway/ 4 | -------------------------------------------------------------------------------- /img/components/components.001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/components/components.001.jpg -------------------------------------------------------------------------------- /img/components/components.002.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/components/components.002.jpg -------------------------------------------------------------------------------- /img/components/components.003.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/components/components.003.jpg -------------------------------------------------------------------------------- /img/components/components.004.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/components/components.004.jpg -------------------------------------------------------------------------------- /img/components/components.005.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/components/components.005.jpg -------------------------------------------------------------------------------- /img/components/components.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/components/components.key -------------------------------------------------------------------------------- /img/dex-graphs/arch.monopic: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/dex-graphs/arch.monopic -------------------------------------------------------------------------------- /img/dex-graphs/arch.txt: -------------------------------------------------------------------------------- 1 | ┌───────────┐ ┌──────────┐ 2 | ┌──────┐ │ │ │ │ ┌────────────────┐ 3 | │ DATA │━━━━━▶│ chunker │━━━━━━━▶│ layout │━━━━━━━▶│ DATA formatted │ 4 | └──────┘ │ │ │ │ └────────────────┘ 5 | └───────────┘ └──────────┘ 6 | ▲ ▲ 7 | └─────────────────────────────────┘ 8 | Importer -------------------------------------------------------------------------------- /img/ip.waist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ip.waist.png -------------------------------------------------------------------------------- /img/ipfs-resolve/ipfs-resolve.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-resolve/ipfs-resolve.gif -------------------------------------------------------------------------------- /img/ipfs-resolve/resolve-gif.001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-resolve/resolve-gif.001.jpg -------------------------------------------------------------------------------- /img/ipfs-resolve/resolve-gif.002.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-resolve/resolve-gif.002.jpg -------------------------------------------------------------------------------- /img/ipfs-resolve/resolve-gif.003.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-resolve/resolve-gif.003.jpg -------------------------------------------------------------------------------- /img/ipfs-resolve/resolve-gif.004.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-resolve/resolve-gif.004.jpg -------------------------------------------------------------------------------- /img/ipfs-resolve/resolve-gif.005.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-resolve/resolve-gif.005.jpg -------------------------------------------------------------------------------- /img/ipfs-resolve/resolve-gif.006.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-resolve/resolve-gif.006.jpg -------------------------------------------------------------------------------- /img/ipfs-resolve/resolve-gif.007.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-resolve/resolve-gif.007.jpg -------------------------------------------------------------------------------- /img/ipfs-resolve/resolve-gif.008.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-resolve/resolve-gif.008.jpg -------------------------------------------------------------------------------- /img/ipfs-splash-lg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-splash-lg.png -------------------------------------------------------------------------------- /img/ipfs-splash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-splash.png -------------------------------------------------------------------------------- /img/ipfs-stack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/ipfs-stack.png -------------------------------------------------------------------------------- /img/mdag.waist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/mdag.waist.png -------------------------------------------------------------------------------- /img/spec.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/img/spec.key -------------------------------------------------------------------------------- /ipip-template.md: -------------------------------------------------------------------------------- 1 | --- 2 | # IPIP number should match its pull request number. After you open a PR, 3 | # please update title and update the filename to `ipip0000`. 
4 | title: "IPIP-0000: InterPlanetary Improvement Proposal Template" 5 | date: YYYY-MM-DD 6 | ipip: proposal 7 | editors: 8 | - name: Your Name 9 | relatedIssues: 10 | - link to issue 11 | order: 0000 12 | tags: ['ipips'] 13 | --- 14 | 15 | ## Summary 16 | 17 | 18 | This is the suggested template for new IPIPs. 19 | 20 | ## Motivation 21 | 22 | AKA Problem Statement 23 | 24 | Clearly explain why the existing protocol specification is inadequate 25 | to address the problem that the IPIP solves. 26 | 27 | ## Detailed design 28 | 29 | AKA Solution Proposal 30 | 31 | Describe the proposed solution and list all changes made to the specs repository. 32 | 33 | The resulting specification should be detailed enough to allow competing, 34 | interoperable implementations. 35 | 36 | When modifying an existing specification file, this section should provide a 37 | summary of changes. When adding new specification files, list all of them. 38 | 39 | ## Design rationale 40 | 41 | The rationale fleshes out the specification by describing what motivated 42 | the design and why particular design decisions were made. 43 | 44 | Provide evidence of rough consensus and working code within the community, 45 | and discuss important objections or concerns raised during discussion. 46 | 47 | ### User benefit 48 | 49 | How will end users benefit from this work? 50 | 51 | ### Compatibility 52 | 53 | Explain the upgrade considerations for existing implementations. 54 | 55 | ### Security 56 | 57 | Explain the security implications/considerations relevant to the proposed change. 58 | 59 | ### Alternatives 60 | 61 | Describe alternate designs that were considered and related work. 62 | 63 | ## Test fixtures 64 | 65 | List relevant CIDs. Describe how implementations can use them to determine 66 | specification compliance. This section can be skipped if IPIP does not deal 67 | with the way IPFS handles content-addressed data, or the modified specification 68 | file already includes this information. 69 | 70 | ### Copyright 71 | 72 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 
73 | -------------------------------------------------------------------------------- /ipns/IPNS.md: -------------------------------------------------------------------------------- 1 | # Inter-Planetary Naming System 2 | 3 | Moved to https://specs.ipfs.tech/ipns/ipns-record/ 4 | -------------------------------------------------------------------------------- /ipns/IPNS_PUBSUB.md: -------------------------------------------------------------------------------- 1 | # IPNS PubSub Router 2 | 3 | Moved to https://specs.ipfs.tech/ipns/ipns-pubsub-router/ 4 | -------------------------------------------------------------------------------- /ipns/README.md: -------------------------------------------------------------------------------- 1 | # IPNS Specifications 2 | 3 | Moved to https://specs.ipfs.tech/ipns/ 4 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-specs-website", 3 | "version": "1.0.0", 4 | "description": "> This repository contains the specs for the IPFS Protocol and associated subsystems.", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "", 11 | "private": true, 12 | "devDependencies": { 13 | "spec-generator": "^1.5.0" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /routing/ROUTING_V1_HTTP.md: -------------------------------------------------------------------------------- 1 | # Routing V1 HTTP API 2 | 3 | Moved to https://specs.ipfs.tech/routing/http-routing-v1/ 4 | -------------------------------------------------------------------------------- /src/_includes/footer.html: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /src/_includes/head.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | {% if title != 'Home' %}{{ title }} | {% endif %}IPFS Standards 7 | {% if description %}{% endif %} 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /src/_includes/header.html: -------------------------------------------------------------------------------- 1 | {% include 'head.html' %} 2 | 3 | 9 | 10 |
11 |
12 |

{{ title }}

13 |

{{ description }}

14 |
15 |
16 | -------------------------------------------------------------------------------- /src/_includes/ipips-list.html: -------------------------------------------------------------------------------- 1 |
2 | {% assign sortedPosts = collections.ipips | sortByOrder | reverse %} 3 | {%- for post in sortedPosts -%} 4 |
{{ post.data.title }}
5 | {%- endfor -%} 6 |
7 | -------------------------------------------------------------------------------- /src/_includes/list.html: -------------------------------------------------------------------------------- 1 |
2 | {% assign sortedPosts = posts | sortByOrder %} 3 | {%- for post in sortedPosts -%} 4 |
{{ post.data.title }}
5 |
{{ post.data.description }}
6 | {%- endfor -%} 7 |
-------------------------------------------------------------------------------- /src/architecture/index.html: -------------------------------------------------------------------------------- 1 | --- 2 | title: Architecture 3 | description: | 4 | These documents define the architectural principles that IPFS is built upon, and can be used as tools to evaluate 5 | implementations and applications of IPFS. 6 | --- 7 | 8 | {% include 'header.html' %} 9 | 10 |
11 | {% include 'list.html', posts: collections.architecture %} 12 |
13 | 14 | {% include 'footer.html' %} 15 | -------------------------------------------------------------------------------- /src/bitswap-protocol.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Bitswap Protocol 3 | description: > 4 | Bitswap is a libp2p data exchange protocol for finding, sending and receiving content 5 | addressed blocks of data. It attempts to acquire blocks from the p2p network 6 | that have been requested by the client, and also send blocks to others who 7 | want them. 8 | date: 2022-08-26 9 | maturity: reliable 10 | editors: 11 | - name: Juan Benet 12 | github: jbenet 13 | affiliation: 14 | name: Protocol Labs 15 | url: https://protocol.ai/ 16 | - name: Jeromy Johnson 17 | github: whyrusleeping 18 | affiliation: 19 | name: Protocol Labs 20 | url: https://protocol.ai/ 21 | - name: David Dias 22 | github: daviddias 23 | affiliation: 24 | name: Protocol Labs 25 | url: https://protocol.ai/ 26 | - name: Adin Schmahmann 27 | github: aschmahmann 28 | affiliation: 29 | name: Protocol Labs 30 | url: https://protocol.ai/ 31 | tags: ['exchange', 'routing'] 32 | order: 1 33 | --- 34 | 35 | Bitswap is a libp2p data exchange protocol for sending and receiving content 36 | addressed blocks of data. 37 | 38 | Bitswap has two primary jobs: 39 | 40 | 1. Attempt to acquire blocks from the network that have been requested by the client. 41 | 2. Send blocks in its possession to other peers who want them. 42 | 43 | Secondary job (since [v1.2.0](#bitswap-1-2-0)) is to act as a basic content routing system for querying connected peers. 44 | 45 | ## Introduction 46 | 47 | Bitswap is a message-based protocol, as opposed to request-response. All messages 48 | contain wantlists, and/or blocks. Upon receiving a wantlist, a Bitswap server SHOULD 49 | eventually process and respond to the requester with either information about the 50 | block or the block itself. Upon receiving blocks, the client SHOULD send a `Cancel` 51 | notification to peers that have asked for the data, signifying that the client no 52 | longer wants the block. 53 | 54 | Bitswap aims to be a simple protocol, so that implementations can balance aspects 55 | such as throughput, latency, fairness, memory usage, etc. for their specific 56 | requirements. 57 | 58 | ## Bitswap Protocol Versions 59 | 60 | There are multiple Bitswap versions and more may evolve over time. We give brief 61 | overviews as to the changes behind each protocol version: 62 | 63 | - `/ipfs/bitswap/1.0.0` - Initial version 64 | - `/ipfs/bitswap/1.1.0` - Support [CIDv1](https://docs.ipfs.io/concepts/glossary/#cid-v1) 65 | - `/ipfs/bitswap/1.2.0` - Support Wantlist `Have`'s and `Have`/`DontHave` responses 66 | 67 | ## Block Sizes 68 | 69 | Bitswap implementations MUST support sending and receiving individual blocks of 70 | sizes less than or equal to 2MiB. Handling blocks larger than 2MiB is not recommended 71 | so as to keep compatibility with implementations which only support up to 2MiB. 72 | 73 | ## Bitswap 1.0.0 74 | 75 | ### Bitswap 1.0.0: Interaction Pattern 76 | 77 | Given that a client *C* wants to fetch data from some server *S*: 78 | 79 | 1. *C* sends a message to *S* for the blocks it wants, via a stream `s_want` 80 | 1. *C* MAY either send a complete wantlist, or an update to an outstanding wantlist 81 | 2. *C* MAY reuse this stream to send new wants 82 | 2. *S* sends back blocks on a stream `s_receive`. *S* MAY reuse this stream to send 83 | back subsequent responses. 84 | 1. 
*S* SHOULD respect the relative priority of wantlist requests from *C*, with 85 | wants that have higher `priority` values being responded to first. 86 | 3. When *C* no longer needs a block it previously asked for, it SHOULD send a `Cancel` 87 | message for that block to all peers from which it has not received a response 88 | about that block 89 | 90 | ### Bitswap 1.0.0: Message 91 | 92 | A single Bitswap message MAY contain any of the following content: 93 | 94 | 1. The sender’s wantlist. This wantlist MAY either be the sender’s complete wantlist 95 | or just the changes to the sender’s wantlist that the receiver needs to know. 96 | 2. Data blocks. These are meant to be blocks that the receiver has requested (i.e., 97 | blocks that are on the receiver’s wantlist as far as the sender is aware at the 98 | time of sending). 99 | 100 | #### Bitswap 1.0.0: Wire Format 101 | 102 | The wire format for Bitswap is simply a stream of Bitswap messages. The following 103 | protobuf describes the form of these messages. Note: all protobufs are described 104 | using [proto3](https://protobuf.dev/programming-guides/proto3/) syntax. 105 | 106 | ```protobuf 107 | message Message { 108 | message Wantlist { 109 | message Entry { 110 | bytes block = 1; // the block key, i.e. a CIDv0 111 | int32 priority = 2; // the priority (normalized). default to 1 112 | bool cancel = 3; // whether this revokes an entry 113 | } 114 | 115 | repeated Entry entries = 1; // a list of wantlist entries 116 | bool full = 2; // whether this is the full wantlist. default to false 117 | } 118 | 119 | Wantlist wantlist = 1; 120 | repeated bytes blocks = 2; 121 | } 122 | ``` 123 | 124 | ### Bitswap 1.0.0: Protocol Format 125 | 126 | All protocol messages sent over a stream are prefixed with the message length in 127 | bytes, encoded as an unsigned variable length integer as defined by the multiformats 128 | [unsigned-varint] spec. 129 | 130 | All protocol messages MUST be less than or equal to 4MiB in size. 131 | 132 | ## Bitswap 1.1.0 133 | 134 | Bitswap 1.1.0 introduces a `payload` field to the protobuf message and deprecates the 135 | existing 'blocks' field. The 'payload' field is an array of pairs of CID prefixes 136 | and block data. The CID prefixes are used to ensure the correct codecs and hash 137 | functions are used to handle the block on the receiving end. 138 | 139 | It is otherwise identical to 1.0.0. 140 | 141 | ### Bitswap 1.1.0: Wire Format 142 | 143 | ```protobuf 144 | message Message { 145 | message Entry { 146 | bytes block = 1; // CID of the block 147 | int32 priority = 2; // the priority (normalized). default to 1 148 | bool cancel = 3; // whether this revokes an entry 149 | } 150 | 151 | repeated Entry entries = 1; // a list of wantlist entries 152 | bool full = 2; // whether this is the full wantlist. default to false 153 | } 154 | 155 | message Block { 156 | bytes prefix = 1; // CID prefix (all of the CID components except for the digest of the multihash) 157 | bytes data = 2; 158 | } 159 | 160 | Wantlist wantlist = 1; 161 | repeated Block payload = 3; 162 | } 163 | ``` 164 | 165 | ## Bitswap 1.2.0 166 | 167 | Bitswap 1.2.0 extends the Bitswap 1.1.0 protocol with the three changes: 168 | 169 | 1. Being able to ask if a peer has the data, not just to send the data. 170 | 2. A peer can respond that it does not have some data rather than just not responding. 171 | 3. Nodes can indicate on messages how much data they have queued to send to the peer 172 | they are sending the message to. 
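As a non-normative illustration of these request types, the Go sketch below builds a `Have` want with `sendDontHave` set and applies the unsigned-varint length prefix and 4MiB cap required by the Protocol Format section above. The struct, constant, and function names are illustrative stand-ins rather than the protobuf-generated types a real implementation (such as `boxo/bitswap`) would use, and the serialized payload is a placeholder for an actual protobuf-encoded `Message`.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Illustrative stand-ins for the Bitswap 1.2.0 wantlist entry fields;
// real implementations use types generated from the protobuf schema below.
type WantType int32

const (
	WantBlock WantType = 0 // ask the peer to send the block itself
	WantHave  WantType = 1 // only ask whether the peer has the block
)

type WantlistEntry struct {
	Block        []byte   // binary CID of the wanted block
	Priority     int32    // higher-priority wants are answered first
	WantType     WantType // Block or Have
	SendDontHave bool     // ask for an explicit DontHave when the peer lacks the block
}

// frame prepends the unsigned-varint length prefix carried by every Bitswap
// message on a libp2p stream and enforces the 4MiB protocol message limit.
func frame(serialized []byte) ([]byte, error) {
	const maxMessageSize = 4 << 20
	if len(serialized) > maxMessageSize {
		return nil, fmt.Errorf("message is %d bytes, above the 4MiB limit", len(serialized))
	}
	prefix := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(prefix, uint64(len(serialized)))
	return append(prefix[:n], serialized...), nil
}

func main() {
	entry := WantlistEntry{
		Block:        []byte{ /* CID bytes of the wanted block */ },
		Priority:     1,
		WantType:     WantHave, // presence query instead of a full block request
		SendDontHave: true,     // peer should answer DontHave if it lacks the block
	}
	fmt.Printf("wantlist entry: %+v\n", entry)

	// In a real client this would be the protobuf encoding of a Message
	// containing the entry above.
	serialized := []byte("placeholder for a protobuf-encoded Message")
	framed, err := frame(serialized)
	if err != nil {
		panic(err)
	}
	fmt.Printf("framed message: %d bytes\n", len(framed))
}
```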
173 | 174 | ### Bitswap 1.2.0: Interaction Pattern 175 | 176 | Given that a client *C* wants to fetch data from some server *S*: 177 | 178 | 1. *C* opens a stream `s_want` to *S* and sends a message for the blocks it wants: 179 | 1. *C* MAY either send a complete wantlist, or an update to an outstanding wantlist. 180 | 2. *C* MAY reuse this stream to send new wants. 181 | 3. For each of the items in the wantlist *C* MAY ask if *S* has the block 182 | (i.e. a `Have` request) or for *S* to send the block (i.e. a `Block` request). 183 | *C* MAY also ask *S* to send back a `DontHave` message in the event it doesn't 184 | have the block. 185 | 2. *S* responds back on a stream `s_receive`. *S* MAY reuse this stream to send 186 | back subsequent responses: 187 | 1. If *C* sends *S* a `Have` request for data *S* has (and is willing to give 188 | to *C*) it SHOULD respond with a `Have`, although it MAY instead respond 189 | with the block itself (e.g. if the block is very small). 190 | 2. If *C* sends *S* a `Have` request for data *S* does not have (or has but 191 | is not willing to give to *C*) and *C* has requested for `DontHave` responses 192 | then *S* SHOULD respond with `DontHave`. 193 | 3. *S* MAY choose to include the number of bytes that are pending to be sent 194 | to *C* in the response message. 195 | 4. *S* SHOULD respect the relative priority of wantlist requests from *C*, 196 | with wants that have higher `priority` values being responded to first. 197 | 3. When *C* no longer needs a block it previously asked for it SHOULD send a 198 | `Cancel` message for that request to any peers that have not already responded 199 | about that particular block. It SHOULD particularly send `Cancel` messages for 200 | `Block` requests (as opposed to `Have` requests) that have not yet been answered. 201 | 202 | :::note 203 | 204 | `Have`'s and `Have`/`DontHave` responses enable Bitswap to be used as 205 | bare-bones content routing system for connected peers. 206 | 207 | ::: 208 | 209 | ### Bitswap 1.2.0: Wire Format 210 | 211 | ```protobuf 212 | message Message { 213 | message Wantlist { 214 | enum WantType { 215 | Block = 0; 216 | Have = 1; 217 | } 218 | 219 | message Entry { 220 | bytes block = 1; // CID of the block 221 | int32 priority = 2; // the priority (normalized). default to 1 222 | bool cancel = 3; // whether this revokes an entry 223 | WantType wantType = 4; // Note: defaults to enum 0, ie Block 224 | bool sendDontHave = 5; // Note: defaults to false 225 | } 226 | 227 | repeated Entry entries = 1; // a list of wantlist entries 228 | bool full = 2; // whether this is the full wantlist. 
default to false 229 | } 230 | message Block { 231 | bytes prefix = 1; // CID prefix (all of the CID components except for the digest of the multihash) 232 | bytes data = 2; 233 | } 234 | 235 | enum BlockPresenceType { 236 | Have = 0; 237 | DontHave = 1; 238 | } 239 | message BlockPresence { 240 | bytes cid = 1; 241 | BlockPresenceType type = 2; 242 | } 243 | 244 | Wantlist wantlist = 1; 245 | repeated Block payload = 3; 246 | repeated BlockPresence blockPresences = 4; 247 | int32 pendingBytes = 5; 248 | } 249 | ``` 250 | 251 | ## Implementations 252 | 253 | - GO: [`boxo/bitswap`](https://github.com/ipfs/boxo/tree/main/bitswap) 254 | - JS: [js-ipfs-bitswap](https://github.com/ipfs/js-ipfs-bitswap) 255 | 256 | [unsigned-varint]: https://github.com/multiformats/unsigned-varint 257 | -------------------------------------------------------------------------------- /src/css/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | padding: 0; 4 | font-size: 1rem; 5 | line-height: 1.5; 6 | font-family: var(--body-family); 7 | font-weight: 300; 8 | } 9 | 10 | nav { 11 | background: var(--ipfs-gradient-0-background-image); 12 | } 13 | nav > div { 14 | max-width: var(--max-page-width); 15 | margin: 0 auto; 16 | padding: 1rem 1rem; 17 | } 18 | nav img, 19 | nav a { 20 | vertical-align: middle; 21 | } 22 | nav a { 23 | color: var(--ipfs-off-white); 24 | text-decoration: none; 25 | /* font-size: var(--size-h5); */ 26 | font-weight: var(--weight-semi-bold); 27 | } 28 | 29 | header { 30 | background: var(--ipfs-brand-gradient); 31 | } 32 | header > div { 33 | max-width: var(--max-page-width); 34 | margin: 0 auto; 35 | padding: 3rem 1rem; 36 | } 37 | header h1 { 38 | font-size: var(--size-h2); 39 | font-weight: var(--weight-semi-bold); 40 | margin-top: 0; 41 | line-height: 1; 42 | color: var(--ipfs-off-white); 43 | } 44 | header img.logo { 45 | margin-bottom: 2rem; 46 | } 47 | header p { 48 | color: var(--ipfs-off-white); 49 | max-width: var(--max-readable-width); 50 | } 51 | 52 | main { 53 | max-width: var(--max-page-width); 54 | margin: 4rem auto 0; 55 | padding: 0 1rem; 56 | } 57 | 58 | .basic-grid { 59 | display: grid; 60 | column-gap: 4rem; 61 | row-gap: 4rem; 62 | grid-template-columns: repeat(1, minmax(0,1fr)); 63 | } 64 | @media (min-width: 640px) { 65 | .basic-grid { 66 | grid-template-columns: repeat(2, minmax(0,1fr)); 67 | } 68 | } 69 | 70 | h2.pitch-title { 71 | color: var(--fg); 72 | text-align: center; 73 | margin-top: 9rem; 74 | font-weight: var(--weight-regular); 75 | font-size: var(--size-h2); 76 | margin-bottom: 1rem; 77 | line-height: 1; 78 | } 79 | .pitch-subtitle { 80 | text-align: center; 81 | font-size: var(--size-h5); 82 | font-weight: var(--weight-light); 83 | margin-bottom: 4rem; 84 | line-height: 1.2; 85 | } 86 | .pitch { 87 | background: var(--stark-bg); 88 | color: var(--stark-fg); 89 | padding: 0 1rem; 90 | } 91 | .pitch h3 { 92 | text-align: center; 93 | margin-bottom: 1rem; 94 | } 95 | .pitch h3::after { 96 | width: 3rem; 97 | height: 0.5rem; 98 | background: var(--ipfs-teal); 99 | display: block; 100 | content: " "; 101 | margin: 0.5rem auto; 102 | } 103 | 104 | main > section { 105 | margin-top: 9rem; 106 | } 107 | h2 { 108 | font-size: var(--size-h2); 109 | font-weight: var(--weight-semi-bold); 110 | margin-bottom: 1rem; 111 | } 112 | main > section section { 113 | background: var(--stark-bg); 114 | color: var(--stark-fg); 115 | padding: 0 1rem; 116 | } 117 | h3 { 118 | font-size: var(--size-h3); 119 | 
font-weight: var(--weight-semi-bold); 120 | margin-bottom: 1rem; 121 | } 122 | h3 a { 123 | color: inherit; 124 | } 125 | 126 | a { 127 | color: var(--highlight-colour); 128 | transition: color 0.2s; 129 | } 130 | a:hover { 131 | color: var(--ipfs-yellow); 132 | } 133 | dt { 134 | font-style: normal; 135 | font-weight: bold; 136 | } 137 | dt a { 138 | text-decoration-color: var(--ipfs-stone-grey); 139 | display: block; 140 | } 141 | dd { 142 | font-style: normal; 143 | margin-left: 1rem; 144 | margin-bottom: .6rem; 145 | } 146 | 147 | 148 | footer { 149 | margin-top: 4rem; 150 | text-align: center; 151 | color: var(--ipfs-ash-grey); 152 | background: var(--stark-bg); 153 | border-top: 1px solid var(--ipfs-jade); 154 | } 155 | footer > div { 156 | max-width: var(--max-page-width); 157 | margin: 0 auto; 158 | padding: 1rem 1rem 3rem 1rem; 159 | } 160 | footer a { 161 | text-decoration: none; 162 | } 163 | footer img { 164 | vertical-align: text-bottom; 165 | } 166 | -------------------------------------------------------------------------------- /src/css/specs.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --size-mono: 0.9em; 3 | } 4 | 5 | body { 6 | max-width: var(--max-readable-width); 7 | margin: 0 auto; 8 | padding: 0 0 0 0; 9 | line-height: 1.5; 10 | } 11 | 12 | .watermark { 13 | display: none; 14 | position: fixed; 15 | top: 0; 16 | left: 0; 17 | width: 100%; 18 | height: 100%; 19 | z-index: -999; 20 | pointer-events: none; 21 | } 22 | 23 | .ipip-proposal .watermark { 24 | display: block; 25 | background: url('/img/watermark-proposal.svg'); 26 | } 27 | 28 | .ipip-ratified .watermark { 29 | display: block; 30 | background: url('/img/watermark-ratified.svg'); 31 | } 32 | 33 | #ipseity-back-to-root { 34 | margin-bottom: 2rem; 35 | background: var(--standard-gradient); 36 | padding-left: 1rem; 37 | font-weight: 600; 38 | font-size: 0.9rem; 39 | } 40 | 41 | #ipseity-back-to-root a { 42 | color: var(--ipfs-off-white); 43 | text-decoration: none; 44 | transition: color 0.2s; 45 | } 46 | 47 | #ipseity-back-to-root a:hover { 48 | color: var(--ipfs-yellow); 49 | } 50 | 51 | header { 52 | background-image: url(/img/ipfs-standards.svg); 53 | background-repeat: no-repeat; 54 | background-size: 100px; 55 | background-position-x: right; 56 | margin-bottom: 3rem; 57 | border-bottom: 0.25rem solid var(--ipfs-turquoise); 58 | padding-bottom: 2rem; 59 | } 60 | 61 | header dt { 62 | font-weight: var(--weight-regular); 63 | text-decoration: underline; 64 | } 65 | 66 | header dd img { 67 | opacity: 0.5; 68 | transition: opacity .5s; 69 | background: var(--contrast-bg); 70 | border-radius: 2px; 71 | vertical-align: sub; 72 | } 73 | 74 | header dd img:hover { 75 | opacity: 1; 76 | } 77 | 78 | header dd a { 79 | text-decoration: none; 80 | } 81 | 82 | h1 { 83 | max-width: calc(100% - (100px + 1rem)); 84 | font-weight: var(--weight-semi-bold); 85 | line-height: 1; 86 | margin: 0.15rem 0 0 0; 87 | } 88 | 89 | h2, h3, h4, h5, h6 { 90 | margin: 3rem 0 0 0; 91 | font-weight: var(--weight-semi-bold); 92 | } 93 | 94 | :is(h2, h3, h4, h5, h6) bdi.secno { 95 | color: var(--ipfs-ash-grey); 96 | } 97 | 98 | p#last-modified { 99 | color: var(--ipfs-ash-grey); 100 | margin-top: 0rem; 101 | font-size: 1.6rem; 102 | } 103 | 104 | p { 105 | margin: 1rem 0; 106 | } 107 | 108 | img { 109 | max-width: 100%; 110 | } 111 | 112 | .header-wrapper { 113 | display: flex; 114 | align-items: baseline; 115 | } 116 | 117 | .header-wrapper + section > .header-wrapper > :is(h3, h4, h5, 
h6) { 118 | margin-top: 1rem; 119 | } 120 | 121 | .self-link::after { 122 | color: var(--ipfs-ash-grey); 123 | } 124 | 125 | .self-link:hover::after { 126 | color: var(--ipfs-yellow); 127 | } 128 | 129 | @media only screen and (max-width: 816px) { 130 | body { 131 | margin-left: 1rem; 132 | margin-right: 1rem; 133 | font-size: 1rem; 134 | } 135 | 136 | #ipseity-back-to-root { 137 | margin: 0 -1rem 2rem -1rem; 138 | } 139 | 140 | header { 141 | background-size: 50px; 142 | } 143 | 144 | p#last-modified { 145 | font-size: 1.2rem; 146 | } 147 | } 148 | 149 | @media print { 150 | body { 151 | font-size: 1rem; 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /src/exchange/index.html: -------------------------------------------------------------------------------- 1 | --- 2 | title: Exchange 3 | description: | 4 | Exchange is the way to for sending and receiving content-addressed blocks of data. 5 | --- 6 | 7 | {% include 'header.html' %} 8 | 9 |
10 | {% include 'list.html', posts: collections.exchange %} 11 |
12 | 13 | {% include 'footer.html' %} 14 | -------------------------------------------------------------------------------- /src/http-gateways/dnslink-gateway.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: DNSLink Gateway Specification 3 | description: > 4 | Defines how to utilize the HTTP Host header to serve a content path from a 5 | DNSLink record as a website under a particular DNS name. 6 | date: 2022-11-09 7 | maturity: reliable 8 | editors: 9 | - name: Marcin Rataj 10 | github: lidel 11 | url: https://lidel.org/ 12 | affiliation: 13 | name: Protocol Labs 14 | url: https://protocol.ai/ 15 | - name: Thibault Meunier 16 | github: thibmeu 17 | affiliation: 18 | name: Cloudflare 19 | url: https://cloudflare.com/ 20 | tags: ['httpGateways', 'webHttpGateways'] 21 | order: 4 22 | --- 23 | 24 | DNSLink Gateway is an extension of :cite[path-gateway] that enables hosting a 25 | specific content path under a specific DNS name. 26 | 27 | This document describes the delta between :cite[path-gateway] and this gateway type. 28 | 29 | In short: 30 | 31 | - HTTP request includes a valid [DNSLink](https://dnslink.dev/) name in `Host` header 32 | - gateway decides if DNSlink name is allowed 33 | - gateway resolves DNSLink to an immutable content root identified by a CID 34 | - HTTP response includes the data for the CID 35 | 36 | # HTTP API 37 | 38 | ## `GET /[{path}][?{params}]` 39 | 40 | Downloads data at specified path under the content path for DNSLink name provided in `Host` header. 41 | 42 | - `path` – optional path to a file or a directory under the content root sent in `Host` HTTP header 43 | - Example: if `Host: example.com` then the content path to resolve is `/ipns/example.com/{path}` 44 | 45 | ## `HEAD /[{path}][?{params}]` 46 | 47 | Same as GET, but does not return any payload. 48 | 49 | # HTTP Request 50 | 51 | Below MUST be implemented **in addition** to "HTTP Request" of :cite[path-gateway]. 52 | 53 | ## Request headers 54 | 55 | ### `Host` (request header) 56 | 57 | Defines the [DNSLink](https://docs.ipfs.io/concepts/glossary/#dnslink) name 58 | to RECURSIVELY resolve into an immutable `/ipfs/{cid}/` prefix that should 59 | be prepended to the `path` before the final IPFS content path resolution 60 | is performed. 61 | 62 | Implementations MUST ensure DNSLink resolution is safe and correct: 63 | 64 | - each DNSLink may include an additional path segment, which MUST be preserved 65 | - each DNSLink may point at other DNSLink, which means there MUST be a hard 66 | recursion limit (e.g. 32) and HTTP 400 Bad Request error MUST be returned 67 | when the limit is reached. 68 | 69 | **Example: resolving an advanced DNSLink chain** 70 | 71 | To illustrate, given DNSLink records: 72 | 73 | - `_dnslink.a.example.com` TXT record: `dnslink=/ipns/b.example.net/path-b` 74 | - `_dnslink.b.example.net` TXT record: `dnslink=/ipfs/bafy…qy3k/path-c` 75 | 76 | HTTP client sends `GET /path-a` request with `Host: a.example.com` header 77 | which recursively resolves all DNSLinks and produces the final immutable 78 | content path: 79 | 80 | 1. `Host` header + `/path-a` → `/ipns/a.example.net/path-a` 81 | 2. Resolving DNSlink at `a.example.net` replaces `/ipns/a.example.net` with `/ipns/b.example.net/path-b` 82 | 3. Resolving DNSlink at `b.example.net` replaces `/ipns/b.example.net` with `/ipfs/bafy…qy3k/path-c` 83 | 4. 
The immutable content path is `/ipfs/bafy…qy3k/path-c/path-b/path-a` 84 | 85 | # HTTP Response 86 | 87 | Same as "HTTP Response" of :cite[path-gateway]. 88 | 89 | # Appendix: notes for implementers 90 | 91 | ## Leveraging DNS for content routing 92 | 93 | - It is a good idea to publish 94 | [DNSAddr](https://github.com/multiformats/multiaddr/blob/master/protocols/DNSADDR.md) 95 | TXT records with known content providers for the data behind a DNSLink. IPFS 96 | clients will be able to detect DNSAddr and preconnect to known content 97 | providers, removing the need for expensive DHT lookup. 98 | 99 | ## Redirects, single-page applications, and custom 404s 100 | 101 | DNSLink Gateway implementations SHOULD include `_redirects` file support 102 | defined in :cite[web-redirects-file]. 103 | -------------------------------------------------------------------------------- /src/http-gateways/index.html: -------------------------------------------------------------------------------- 1 | --- 2 | title: HTTP Gateways 3 | description: | 4 | IPFS Gateway acts as a bridge between traditional HTTP clients and IPFS. Through the gateway, users can download files, 5 | directories and other IPLD data stored in IPFS as if they were stored in a traditional web server. 6 | --- 7 | 8 | {% include 'header.html' %} 9 | 10 |
11 |

HTTP

12 |

13 | Low-level gateways that expose IPFS resources over the HTTP protocol. 14 |

15 | {% include 'list.html', posts: collections.lowLevelHttpGateways %} 16 |

Web

17 |

18 | Designed for website hosting and improved interoperability with web browsers and 19 | origin-based security model. 20 |

21 | {% include 'list.html', posts: collections.webHttpGateways %} 22 |
23 | 24 | {% include 'footer.html' %} 25 | -------------------------------------------------------------------------------- /src/http-gateways/libp2p-gateway.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: libp2p+HTTP Transport Gateway Specification 3 | description: > 4 | Describes how HTTP Gateway semantics can be used over libp2p transports and how libp2p can coexist with other HTTP services on the same host. 5 | date: 2025-03-06 6 | maturity: draft 7 | editors: 8 | - name: Adin Schmahmann 9 | github: aschmahmann 10 | affiliation: 11 | name: Shipyard 12 | url: https://ipshipyard.com 13 | - name: Marcin Rataj 14 | github: lidel 15 | affiliation: 16 | name: Shipyard 17 | url: https://ipshipyard.com 18 | xref: 19 | - path-gateway 20 | - trustless-gateway 21 | tags: ['httpGateways', 'lowLevelHttpGateways', 'exchange', 'transport'] 22 | order: 3 23 | --- 24 | 25 | This specification describes how HTTP Gateway semantics 26 | and APIs can be used over [libp2p](https://github.com/libp2p/specs) transports, 27 | and how libp2p can coexist with other HTTP services on the same host. 28 | 29 | # libp2p HTTP Protocols Manifest 30 | 31 | The [libp2p+HTTP specification](https://github.com/libp2p/specs/blob/master/http/README.md) 32 | describes how to use libp2p with HTTP semantics over stream transports, as well as how 33 | to do discovery of what protocols are available (and where they are mounted). 34 | 35 | ## `.well-known/libp2p/protocols` 36 | 37 | Any libp2p application sub-protocols exposed behind `/http/1.1` protocol can be 38 | discovered by the well-known resource (:cite[rfc8615]) at `.well-known/libp2p/protocols`. 39 | 40 | ### Protocol Identifier 41 | 42 | In order for a pure HTTP Gateway protocol like the :cite[trustless-gateway] to 43 | coexist with libp2p in this environment it requires a protocol identifier to act as a key in 44 | the `.well-known/libp2p/protocols` mapping file. 45 | 46 | The `/http/1.1` sub-protocol identifier for the IPFS Gateway when used over libp2p is: 47 | 48 | ``` 49 | /ipfs/gateway 50 | ``` 51 | 52 | ### Protocol Mounting 53 | 54 | A reference `.well-known/libp2p/protocols` JSON body with mapping that assumes the gateway to be mounted at `/`: 55 | 56 | ```js 57 | { 58 | "protocols": { 59 | "/ipfs/gateway": {"path": "/"}, 60 | } 61 | } 62 | ``` 63 | 64 | # Peer ID Authentication 65 | 66 | [Peer ID Authentication over HTTP](https://github.com/libp2p/specs/blob/master/http/peer-id-auth.md) is optional and SHOULD NOT be required by [Trustless Gateway](https://specs.ipfs.tech/http-gateways/trustless-gateway/) HTTP endpoint defined for `/ipfs/gateway` handler. 67 | 68 | Clients following the Trustless Gateway specification MUST verify each CID individually, without being concerned with peer identity. 69 | PeerID authentication is not required for trustless retrieval and HTTP-only clients SHOULD work without it. 70 | 71 | # Gateway Type Detection 72 | 73 | The `/ipfs/gateway` protocol identifier is shared among all Gateway specifications. 74 | 75 | An HTTP server mounted behind the `/ipfs/gateway` identifier MUST expose the most basic [block (application/vnd.ipld.raw)](https://specs.ipfs.tech/http-gateways/trustless-gateway/#block-responses-application-vnd-ipld-raw) 76 | responses from :cite[trustless-gateway], but MAY also support other gateway types and features. 
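As a non-normative illustration, the following Go sketch shows a client fetching a single verifiable raw block from such an endpoint. The base URL and CID are placeholders, and the sketch assumes the gateway is mounted at `/` as in the manifest example above and is reached over plain HTTPS; the same request can equally be issued over a negotiated libp2p `/http/1.1` stream.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder values for illustration only.
	base := "https://gateway.example.net" // host exposing /ipfs/gateway mounted at "/"
	cid := "<cid-of-wanted-block>"        // CID of the block to retrieve

	req, err := http.NewRequest(http.MethodGet, base+"/ipfs/"+cid, nil)
	if err != nil {
		panic(err)
	}
	// Ask for the raw block response type that every /ipfs/gateway
	// endpoint is required to support.
	req.Header.Set("Accept", "application/vnd.ipld.raw")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	block, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status=%s, received %d bytes\n", resp.Status, len(block))
	// A trustless client must now verify that these bytes hash to `cid`
	// before using them.
}
```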
77 | 78 | Client implementations SHOULD [perform feature detection](https://specs.ipfs.tech/http-gateways/trustless-gateway/#dedicated-probe-paths) on their own, 79 | or assume only the most basic [block (application/vnd.ipld.raw)](https://specs.ipfs.tech/http-gateways/trustless-gateway/#block-responses-application-vnd-ipld-raw) 80 | response type from :cite[trustless-gateway] is available. 81 | -------------------------------------------------------------------------------- /src/http-gateways/web-redirects-file.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Web _redirects File Specification 3 | description: > 4 | Defines how URL redirects and rewrites can be implemented by adding rules to 5 | a plain text file stored underneath the root CID of a website. 6 | date: 2025-03-19 7 | maturity: reliable 8 | editors: 9 | - name: Justin Johnson 10 | github: justincjohnson 11 | affiliation: 12 | name: Fission 13 | url: https://fission.codes/ 14 | - name: Marcin Rataj 15 | github: lidel 16 | affiliation: 17 | name: Shipyard 18 | url: https://ipshipyard.com 19 | tags: ['httpGateways', 'webHttpGateways'] 20 | order: 5 21 | --- 22 | 23 | The Web Redirects File specification is an extension of the Subdomain Gateway and DNSLink Gateway specifications. 24 | 25 | Developers can enable URL redirects or rewrites by adding redirect rules to a file named `_redirects` stored underneath the root CID of their website. 26 | 27 | This can be used, for example, to enable URL rewriting for hosting a single-page application, to redirect invalid URLs to a pretty 404 page, or to avoid [link rot](https://en.wikipedia.org/wiki/Link_rot) when moving to IPFS-based website hosting. 28 | 29 | # File Name and Location 30 | 31 | The Redirects File MUST be named `_redirects` and stored underneath the root CID of the website. 32 | 33 | # File Format 34 | 35 | The Redirects File MUST be a text file containing one or more lines with the following format (brackets indication optionality). 36 | 37 | ``` 38 | from to [status] 39 | ``` 40 | 41 | ## From 42 | 43 | The path to redirect from. 44 | 45 | ## To 46 | 47 | The URL or path to redirect to. 48 | 49 | ## Status 50 | 51 | An optional integer specifying the HTTP status code to return from the request. Supported values are: 52 | 53 | - `200` - OK 54 | - Redirect will be treated as a rewrite, returning OK without changing the URL in the browser. 55 | - `301` - Permanent Redirect (default) 56 | - `302` - Found (commonly used for Temporary Redirect) 57 | - `303` - See Other (replacing PUT and POST with GET) 58 | - `307` - Temporary Redirect (explicitly preserving body and HTTP method of original request) 59 | - `308` - Permanent Redirect (explicitly preserving body and HTTP method of original request) 60 | - `404` - Not Found 61 | - Useful for redirecting invalid URLs to a pretty 404 page. 62 | - `410` - Gone 63 | - `451` - Unavailable For Legal Reasons 64 | 65 | ## Placeholders 66 | 67 | Placeholders are named variables that can be used to match path segments in the `from` path and inject them into the `to` path. 68 | 69 | For example: 70 | 71 | ``` 72 | /posts/:month/:day/:year/:slug /articles/:year/:month/:day/:slug 73 | ``` 74 | 75 | This rule will redirect a URL like `/posts/06/15/2022/hello-world` to `/articles/2022/06/15/hello-world`. 76 | 77 | Implementation MUST error when the same placeholder name is used more than once in `from`. 
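A small, non-normative Go sketch of the placeholder matching and substitution described above is shown below; the function and variable names are illustrative only, and splats, status codes, and query parameters are intentionally left out because they are covered elsewhere in this specification.

```go
package main

import (
	"fmt"
	"strings"
)

// matchPlaceholders is an illustrative helper (not part of this spec): it
// matches a request path against a `from` pattern with :placeholders and,
// on success, substitutes the captured segments into the `to` pattern.
func matchPlaceholders(from, to, requestPath string) (string, bool) {
	fromSegs := strings.Split(strings.Trim(from, "/"), "/")
	pathSegs := strings.Split(strings.Trim(requestPath, "/"), "/")
	if len(fromSegs) != len(pathSegs) {
		return "", false
	}

	captured := map[string]string{}
	for i, seg := range fromSegs {
		if strings.HasPrefix(seg, ":") {
			captured[seg] = pathSegs[i] // e.g. ":year" -> "2022"
			continue
		}
		if seg != pathSegs[i] {
			return "", false
		}
	}

	toSegs := strings.Split(strings.Trim(to, "/"), "/")
	for i, seg := range toSegs {
		if v, ok := captured[seg]; ok {
			toSegs[i] = v
		}
	}
	return "/" + strings.Join(toSegs, "/"), true
}

func main() {
	target, ok := matchPlaceholders(
		"/posts/:month/:day/:year/:slug",
		"/articles/:year/:month/:day/:slug",
		"/posts/06/15/2022/hello-world",
	)
	fmt.Println(ok, target) // true /articles/2022/06/15/hello-world
}
```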
78 | 79 | Implementation MUST allow the same placeholder name to be used more than once in `to`. 80 | 81 | ### Catch-All Splat 82 | 83 | If a `from` path ends with an asterisk (i.e. `*`), the remainder of the `from` path is slurped up into the special `:splat` placeholder, which can then be injected into the `to` path. 84 | 85 | For example: 86 | 87 | ``` 88 | /posts/* /articles/:splat 89 | ``` 90 | 91 | This rule will redirect a URL like `/posts/2022/06/15/hello-world` to `/articles/2022/06/15/hello-world`. 92 | 93 | Splat logic MUST only apply to a single trailing asterisk, as this is a greedy match, consuming the remainder of the path. 94 | 95 | ### Comments 96 | 97 | Any line beginning with `#` MUST be treated as a comment and ignored at evaluation time. 98 | 99 | For example: 100 | 101 | ``` 102 | # Redirect home to index.html 103 | /home /index.html 301 104 | ``` 105 | 106 | is functionally equivalent to 107 | 108 | ``` 109 | /home /index.html 301 110 | ``` 111 | 112 | ### Line Termination 113 | 114 | Lines MUST be separated from each other by either `\n` or `\r\n`. 115 | 116 | Termination of the last line in the file is optional. 117 | 118 | ### Whitespace Characters 119 | 120 | Blank lines, leading and trailing whitespace characters like `\x20` (space) or 121 | `\t` (tab) MUST be ignored, aside from the line termination mentioned above. 122 | 123 | ### Max File Size 124 | 125 | The file size MUST NOT exceed 64 KiB. 126 | 127 | # Evaluation 128 | 129 | ## Same-Origin Requirement 130 | 131 | Rules MUST only be evaluated in contexts where 132 | [Same-Origin](https://en.wikipedia.org/wiki/Same-origin_policy) isolation per 133 | root CID is possible. 134 | 135 | This requirement is fulfilled on a Subdomain or DNSLink HTTP Gateway, 136 | and also applies to a web browser with native `ipfs://` and `ipns://` scheme handler. 137 | 138 | ## Order 139 | 140 | Rules MUST be evaluated in order, redirecting or rewriting using the first matching rule. 141 | 142 | The non-existent paths that are being requested should be intercepted and redirected to the destination path and the specified HTTP status code returned. The rules are evaluated in the order they appear in the file. 143 | 144 | Any request for an existing file should be returned as is, and not intercepted by the last catch all rule. 145 | 146 | ## No Forced Redirects 147 | 148 | All redirect logic MUST only be evaluated if the requested path is not present in the DAG. This means that any performance impact associated with checking for the existence of a `_redirects` file or evaluating redirect rules will only be incurred for non-existent paths. 149 | 150 | ## Error Handling 151 | 152 | If the `_redirects` file exists but there is an error reading or parsing it, the errors MUST be returned to the user with a 500 HTTP status code. 153 | 154 | ## Query Parameters 155 | 156 | Implementations SHOULD retain any dynamic query parameters supplied by the user and pass them along in the `Location` header of the HTTP redirect response. 157 | 158 | When merging these user-provided parameters with any static ones defined in the [`To`](#to) field, the user’s dynamic values take precedence, overriding static ones in case of a conflict. 159 | 160 | # Security 161 | 162 | This functionality will only be evaluated for Subdomain or DNSLink Gateways, to ensure that redirect paths are relative to the root CID hosted at the specified domain name. 
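One non-normative way to approach parsing defensively, within the format and size limits described above, is sketched in Go below; the type and function names are illustrative, and the sketch covers only rule tokenization, comments, blank lines, the default `301` status, and the 64 KiB cap, not placeholder validation or status-code checking.

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// Rule and parseRedirects are illustrative only, not part of this spec.
type Rule struct {
	From   string
	To     string
	Status int
}

const maxRedirectsSize = 64 * 1024 // 64 KiB cap from the Max File Size section

func parseRedirects(content string) ([]Rule, error) {
	if len(content) > maxRedirectsSize {
		return nil, fmt.Errorf("_redirects file is %d bytes, above the 64 KiB limit", len(content))
	}
	var rules []Rule
	scanner := bufio.NewScanner(strings.NewReader(content))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // blank lines and comments are ignored
		}
		fields := strings.Fields(line)
		if len(fields) < 2 || len(fields) > 3 {
			return nil, fmt.Errorf("malformed rule: %q", line)
		}
		rule := Rule{From: fields[0], To: fields[1], Status: 301} // 301 is the default status
		if len(fields) == 3 {
			status, err := strconv.Atoi(fields[2])
			if err != nil {
				return nil, fmt.Errorf("invalid status in rule %q: %w", line, err)
			}
			rule.Status = status
		}
		rules = append(rules, rule)
	}
	return rules, scanner.Err()
}

func main() {
	rules, err := parseRedirects("# comment\n/home /index.html 301\n/posts/* /articles/:splat\n")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", rules)
}
```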
163 | 164 | Parsing of the `_redirects` file should be done safely to prevent any sort of injection vector or daemon crash. 165 | 166 | The [max file size](#max-file-size) helps to prevent an additional [denial of service attack](https://en.wikipedia.org/wiki/Denial-of-service_attack) vector. 167 | 168 | # Appendix: notes for implementers 169 | 170 | ## Test fixtures 171 | 172 | Sample files for various test cases can be found in `QmQyqMY5vUBSbSxyitJqthgwZunCQjDVtNd8ggVCxzuPQ4`. 173 | Implementations SHOULD use it for internal testing. 174 | 175 | ``` 176 | $ ipfs ls QmQyqMY5vUBSbSxyitJqthgwZunCQjDVtNd8ggVCxzuPQ4 177 | QmcBcFnKKqgpCVMxxGsriw9ByTVF6uDdKDMuEBq3m6f1bm - bad-codes/ 178 | QmYBhLYDwVFvxos9h8CGU2ibaY66QNgv8hpfewxaQrPiZj - examples/ 179 | QmU7ysGXwAtiV7aBarZASJsxKoKyKmd9Xrz2FFamSCbg8S - forced/ 180 | QmWHn2TunA1g7gQ7q9rwAoWuot2hMpojZ6cZ9ERsNKm5gE - good-codes/ 181 | QmRgpzYQESidTtTojN8zRWjiNs9Cy6o7KHRxh7kDpJm3KH - invalid/ 182 | QmYzMrtPyBv7LKiEAGLLRPtvqm3SjQYLWxwWQ2vnpxQwRd - newlines/ 183 | QmQTfvjGmvTfxFpUcZNLdTLuKV227KJkGiN6xooHVeVZAS - too-large/ 184 | ``` 185 | 186 | For example, the "examples" site can be found in `QmYBhLYDwVFvxos9h8CGU2ibaY66QNgv8hpfewxaQrPiZj`. 187 | 188 | ``` 189 | $ ipfs ls /ipfs/QmYBhLYDwVFvxos9h8CGU2ibaY66QNgv8hpfewxaQrPiZj 190 | Qmd9GD7Bauh6N2ZLfNnYS3b7QVAijbud83b8GE8LPMNBBP 7 404.html 191 | QmSmR9NShZ89VEBrn9SBy7Xxvjw8Qe6XArD5GqtHvbtBM3 7 410.html 192 | QmVQqj9oZig9tH3ENHo4bxV5pNgssUwFCXUjAJAVcZVbJG 7 451.html 193 | QmZU3kboiyi9jV59D8Mw8wzuvsr3HmvskqhYRRhdFA8wRq 317 _redirects 194 | QmaWDLb4gnJcJbT1Df5X3j91ysiwkkyxw6329NLiC1KMDR - articles/ 195 | QmS6ZNKE9s8fsHoEnArsZXnzMWijKddhXXDsAev8LdTT5z 9 index.html 196 | QmNwEgMrExwSsE8DCjZjahYfHUfkSWRhtqSkQUh4Fk3udD 7 one.html 197 | QmVe2GcTbEPZkMbjVoQ9YieVGKCHmuHMcJ2kbSCzuBKh2s - redirected-splat/ 198 | QmUGVnZaofnd5nEDvT2bxcFck7rHyJRbpXkh9znjrJNV92 7 two.html 199 | ``` 200 | 201 | The `_redirects` file is as follows. 202 | 203 | ``` 204 | $ ipfs cat /ipfs/QmYBhLYDwVFvxos9h8CGU2ibaY66QNgv8hpfewxaQrPiZj/_redirects 205 | /redirect-one /one.html 206 | /301-redirect-one /one.html 301 207 | /302-redirect-two /two.html 302 208 | /200-index /index.html 200 209 | /posts/:year/:month/:day/:title /articles/:year/:month/:day/:title 301 210 | /splat/* /redirected-splat/:splat 301 211 | /not-found/* /404.html 404 212 | /gone/* /410.html 410 213 | /unavail/* /451.html 451 214 | /* /index.html 200 215 | ``` 216 | 217 | A dedicated test vector with URL query parameter behavior can be found in `bafybeiee3ltldvmfgsxiqazbatrkbvkl34eanbourajwnavhupb64nnbxa`. 218 | Implementations SHOULD use it for internal testing when [query parameter support](#query-parameters) is desired. 
219 | 220 | ``` 221 | $ ipfs cat bafybeiee3ltldvmfgsxiqazbatrkbvkl34eanbourajwnavhupb64nnbxa/_redirects 222 | # redirect to URL with some static query parameters 223 | /source1/* /target-file?static-query1=static-val1&static-query2=static-val2 301 224 | 225 | # redirect to URL where path segments are converted to query parameters 226 | /source2/:code/:name /target-file?code=:code&name=:name 301 227 | 228 | # catch-all redirect (test should make request with query parameters, and confirm response preserved them in returned Location header) 229 | /source3/* https://example.net/target3/:splat 301 230 | ``` 231 | -------------------------------------------------------------------------------- /src/img/ipns-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs/specs/c5fa7922b415550f92cf0883b92f9d9c4802bf71/src/img/ipns-overview.png -------------------------------------------------------------------------------- /src/img/watermark-proposal.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Proposal 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /src/img/watermark-ratified.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Ratified 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /src/index.html: -------------------------------------------------------------------------------- 1 | --- 2 | title: Home 3 | --- 4 | 5 | {% include 'head.html' %} 6 | 7 |
8 |
9 | 10 |

IPFS Standards

11 |

12 | The purpose of IPFS Standards is to foster interoperability between independent implementations of 13 | the IPFS stack by producing Internet-grade specifications and test suites. 14 |

15 |
16 |
17 | 18 |
19 |

Specifying IPFS and the InterPlanetary stack.

20 |

21 | The technology that powers the content-addressable web is being standardized here. 22 |

23 | 59 |
60 |

Specifications

61 |

62 | The specifications are broken up into multiple areas that cover the stack. 63 |

64 |
65 |
66 |

Architecture

67 |

68 | These documents define the architectural principles that IPFS is built upon, and can be used as tools to evaluate 69 | implementations and applications of IPFS. 70 |

71 | {% include 'list.html', posts: collections.architecture %} 72 |
73 |
74 |

Meta

75 |

76 | Meta contains all the non-technical documents that conspire to make the standards 77 | project work: what the core values are, what the governance model is, how to produce documents, 78 | etc. 79 |

80 | {% include 'list.html', posts: collections.meta %} 81 |
82 |
83 |

Routing

84 |

85 | Routing is the way to determine where to find a given piece of content, peer, or IPNS record. 86 |

87 | {% include 'list.html', posts: collections.routing %} 88 |
89 |
90 |

Exchange

91 |

92 | Exchange is the way to send and receive content-addressed blocks of data. 93 |

94 | {% include 'list.html', posts: collections.exchange %} 95 |
96 |
97 |

HTTP Gateways

98 |

99 | An IPFS Gateway acts as a bridge between traditional HTTP clients and IPFS. Through the gateway, users can download files, 100 | directories and other IPLD data stored in IPFS as if they were stored on a traditional web server. 101 |

102 |

103 | Low-level HTTP semantics: 104 |

105 | {% include 'list.html', posts: collections.lowLevelHttpGateways %} 106 |

107 | Web semantics (for website hosting and web browsers): 108 |

109 | {% include 'list.html', posts: collections.webHttpGateways %} 110 |
111 |
112 |

IPNS

113 |

114 | The InterPlanetary Naming System (IPNS) is responsible for creating, reading, and updating mutable pointers to data. 115 |

116 | {% include 'list.html', posts: collections.ipns %} 117 |
118 |
119 |

Content Filtering

120 |

121 | How IPFS service operators can control the content hosted on their nodes. 122 |

123 | {% include 'list.html', posts: collections.filtering %} 124 |
125 |
126 |

InterPlanetary Improvement Proposals

127 |

128 | InterPlanetary Improvement Proposals (IPIP) are an orderly mechanism to consider 129 | changes to the IPFS specification. They are not changes to the specification itself, 130 | but their approval leads to a change in the specification. 131 |

132 | {% include 'ipips-list.html' %} 133 |
134 |
135 |
136 |
137 | 138 | {% include 'footer.html' %} 139 | -------------------------------------------------------------------------------- /src/ipips/index.html: -------------------------------------------------------------------------------- 1 | --- 2 | title: InterPlanetary Improvement Proposals 3 | description: | 4 | An InterPlanetary Improvement Proposal (IPIP) provides an orderly mechanism for 5 | considering proposed changes to IPFS specifications. An IPIP is not to be the spec itself; 6 | the approval of an IPIP leads to an update to a specification. 7 | --- 8 | 9 | {% include 'header.html' %} 10 | 11 |
12 | {% include 'ipips-list.html' %} 13 |
14 | 15 | {% include 'footer.html' %} 16 | -------------------------------------------------------------------------------- /src/ipips/ipip-0001.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0001: Lightweight Improvement Process for IPFS Specifications" 3 | date: 2022-06-10 4 | ipip: ratified 5 | editors: 6 | - name: Marcin Rataj 7 | github: lidel 8 | url: https://lidel.org/ 9 | - name: wilkyr31d 10 | github: wilkyr31d 11 | relatedIssues: 12 | - https://github.com/ipfs/specs/issues/286 13 | order: 1 14 | tags: ['ipips'] 15 | --- 16 | 17 | This _InterPlanetary Improvement Proposal_ (IPIP) introduces a lightweight 18 | "request for comments/change" process for the IPFS specifications 19 | [repository][1]. 20 | 21 | [1]: https://github.com/ipfs/specs/ 22 | 23 | ## Motivation 24 | 25 | Today, protocol design discussions often take place in a repository of an IPFS 26 | implementation. These conversations are unintentionally obscured from the useful input of [Specs Stewards], other 27 | implementations, service operators and the wider IPFS Community. 28 | 29 | The IPFS Project needs a mechanism for proposing and evaluating specification 30 | improvements that are not tied to a specific programming language 31 | or implementation of IPFS. 32 | 33 | ## Detailed design 34 | 35 | Adopt an informal IPIP process for the [ipfs/specs][1] repository, providing a 36 | minimal structure for opening, reviewing, and merging specification changes. 37 | 38 | The purpose of IPIP documents is to **document motivation** behind the change 39 | applied to the spec. **IPIP is not to be the spec itself**. 40 | 41 | To illustrate: 42 | - In order to understand how (hypothetical) WebDAV Gateway works, one would 43 | read contents of specs in `ipfs/specs/WEBDAV_GATEWAY.md`. 44 | - IPIP in `ipfs/specs/IPIP/000N-webdav-gateway.md` would only include 45 | **Motivation** and explainer why certain design decisions were made at a 46 | certain point in time. Initial `IPIP/000N-webdav-gateway.md` would explain 47 | why we added WebDAV spec in the first place. 48 | - If we realize the spec has a bug, we will evaluate the impact: adding more 49 | details, test vectors, and editorials/cosmetics can be fixed without IPIP. 50 | - Things that could cause an interop issues require a PR with fix and IPIP in 51 | `ipfs/specs/IPIP/000M-webdav-fix-for-foo.md` explaining why we make the 52 | breaking spec change, compatibility/migration considerations etc. 53 | 54 | ### IPIP Lifecycle 55 | 56 | Up-to-date process and IPIP lifecycle will be published in :cite[ipip-process]. 57 | 58 |
59 | Click to expand the initial (historical) flow 60 | 61 | ### Opening an improvement proposal (IPIP) 62 | 63 | Changes to IPFS specifications can be proposed by opening a Git pull-request 64 | (PR) against the `ipfs/specs` repository. 65 | 66 | In addition to specification changes, such PR must include a short **IPIP 67 | document** based on the template in [`ipfs/specs/ipip-template.md`](https://github.com/ipfs/specs/blob/main/ipip-template.md). 68 | 69 | When a new specification file is added to the repo, it should be based on 70 | the template at [`ipfs/specs/template.md`](https://github.com/ipfs/specs/blob/main/template.md). 71 | 72 | ### Reviewing IPIPs 73 | 74 | [Specs Stewards] will review new IPIP PRs during periodical (best-effort) triage. 75 | 76 | IPFS Community is encouraged to participate in the review process. 77 | 78 | IPIP can be either: 79 | - merged, 80 | - rejected (PR close without merging), 81 | - deferred (converting PR back to a draft). 82 | 83 | The final decision belongs to [Specs Stewards]. 84 | 85 | ### Merging IPIPs 86 | 87 | PR with a IPIP can be merged only after two [Specs Stewards] approve it and 88 | there are no objections from other Stewards. 89 | 90 | IPIP number is assigned before the PR merge. 91 | 92 | IPIP author and two approving [Specs Stewards] are added to `CODEOWNERS` file 93 | to be automatically asked to review any future changes to files added or 94 | modified by the IPIP. 95 | 96 |
97 | 98 | 99 | ### Long-term plan 100 | 101 | [Specs Stewards] will adjust the process based on usage patterns. 102 | 103 | ## Design rationale 104 | 105 | We want to empower IPFS community members and implementers with the ability to propose 106 | changes in a well-known way, without introducing much overhead. 107 | 108 | Guiding principles: 109 | - No new tooling 110 | - Reuse Markdown, Git, and existing PR review process 111 | - Convention over Byzantine process 112 | - Proposing a new IPIP should have low cognitive overhead, allowing us to 113 | focus on specs 114 | - Reuse existing GitHub developer accounts and reputation attached to them 115 | - One should be able to create a valid IPIP without reading a long explainer 116 | like this one. Looking at past IPIPs, and then copying a template and 117 | opening a PR with the proposal should be more than enough. 118 | 119 | ### User benefit 120 | 121 | End users will indirectly benefit from a healthy IPIP process being in place: 122 | 123 | - IPFS community members will be able to use IPIP drafts for evaluating ideas 124 | before investing time into building new things. 125 | - The bar for creating a brand new IPFS implementation will be lowered, and 126 | existing implementations will be able to propose improvements for others to 127 | adopt. This removes the soft vendor lock-in present when the oldest 128 | implementation is considered as the reference standard and source of truth. 129 | - IPFS implementers will have a better understanding of why certain design 130 | decisions were made, and have both historical context and language-agnostic 131 | specifications with test fixtures ready for use in their project, ensuring 132 | a high level of interoperability. 133 | - More eyes looking at specifications will improve overall quality over time. 134 | 135 | As a result, IPFS will become easier to implement, useful in more contexts, 136 | and benefit more people. 137 | 138 | ### Compatibility 139 | 140 | Existing contents of [ipfs/specs][1] repository act as the initial state 141 | against which IPIP PRs can be opened. 142 | 143 | ### Security 144 | 145 | Existing Git-based review infrastructure, user accounts and reputation 146 | system will be reused. 147 | 148 | Merging IPIP will require approval from two [Specs Stewards]. 149 | 150 | ### Alternatives 151 | 152 | - Maintaining the status quo (no IPIP process) is not acceptable, as we want to 153 | move specification discussions away from repositories of specific 154 | implementations. We need a mechanism for discussing improvements that is not 155 | tied to specific implementation or language. 156 | - Creating more elaborate IPIP process. This comes with increased overhead and 157 | risk. Introducing a complex process requires deeper understanding of 158 | community needs and pitfalls of preexisting processes, and since we don't 159 | have any process in place, starting light, limits the risk. 160 | 161 | ### Copyright 162 | 163 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 
164 | 165 | [Specs Stewards]: https://github.com/orgs/ipfs/teams/specs-stewards/members 166 | -------------------------------------------------------------------------------- /src/ipips/ipip-0002.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0002: _redirects File Support on Web Gateways" 3 | date: 2022-06-15 4 | ipip: ratified 5 | editors: 6 | - name: Justin Johnson 7 | github: justincjohnson 8 | - name: Marcin Rataj 9 | github: lidel 10 | url: https://lidel.org/ 11 | - name: Henrique Dias 12 | github: hacdias 13 | url: https://hacdias.com/ 14 | relatedIssues: 15 | - https://github.com/ipfs/specs/issues/257 16 | - https://github.com/ipfs/kubo/pull/8890 17 | - https://github.com/ipfs/ipfs-docs/pull/1275 18 | order: 2 19 | tags: ['ipips'] 20 | --- 21 | 22 | Provide support for URL redirects and rewrites for web sites hosted on Subdomain or DNSLink Gateways, thus enabling support for [single-page applications (SPAs)](https://en.wikipedia.org/wiki/Single-page_application), and avoiding [link rot](https://en.wikipedia.org/wiki/Link_rot) when moving to IPFS-backed hosting. 23 | 24 | ## Motivation 25 | 26 | Web sites often need to redirect from one URL to another, for example, to change the appearance of a URL, to change where content is located without breaking existing links (see [Cool URIs don't change](https://www.w3.org/Provider/Style/URI), [link rot](https://en.wikipedia.org/wiki/Link_rot)), to redirect invalid URLs to a pretty 404 page, or to enable URL rewriting. 27 | URL rewriting in particular is a critical feature for hosting SPAs, allowing routing logic to be handled by front-end code. SPA support is the primary impetus for this RFC. 28 | 29 | Currently, the only way to handle URL redirects or rewrites is with additional software such as NGINX sitting in front of the Gateway. This software introduces operational complexity and decreases the uniformity of experience when navigating to content hosted on a Gateway, thus decreasing the value proposition of hosting web sites in IPFS. 30 | 31 | This IPIP proposes the introduction of redirect support for content hosted on Subdomain or DNSLink Gateways, configured via a `_redirects` file residing underneath the root CID of the web site. 32 | 33 | ## Detailed design 34 | 35 | Allow developers to configure redirect support by adding redirect rules to a file named `_redirects` stored underneath the root CID of their web site. 36 | The format for this file is similar to those of [Netlify](https://docs.netlify.com/routing/redirects/#syntax-for-the-redirects-file) and [Cloudflare Pages](https://developers.cloudflare.com/pages/platform/redirects), but supports only a subset of their functionality. 37 | 38 | The format for the file is `from to [status]`. 39 | 40 | - `from` - specifies the path to intercept (can include placeholders and a trailing splat) 41 | - `to` - specifies the path or URL to redirect to (can include placeholders or splat matched in `from`) 42 | - `status` - optional [HTTP status code](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status) (301 if not specified) 43 | 44 | Rules in the file are evaluated top to bottom. 45 | 46 | For performance reasons, this proposal does not include forced redirect support (i.e. redirect rules that are evaluated even if the `from` path exists). In other words, redirect logic will be evaluated if and only if the requested path does not exist. If the requested path exists, we won't even check for the existence of the `_redirects` file.
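To make the `from to [status]` syntax concrete, a minimal `_redirects` file might look like the sketch below (paths and rules are illustrative, not part of this IPIP):

```
/home            /index.html   301
/blog/old-title  /blog/new-title
/news/*          /blog/:splat  301
/*               /index.html   200
```

The second rule omits the status and therefore defaults to `301`; the final catch-all rewrite is what enables SPA routing, serving `/index.html` with a `200` status for any path that does not exist in the DAG.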
47 | 48 | If a `_redirects` file exists but is unable to be processed, perhaps not even parsing correctly, errors will be returned to the user viewing the site via the Gateway. 49 | 50 | The detailed specification is added in :cite[web-redirects-file]. 51 | 52 | ### Test fixtures 53 | 54 | `QmQyqMY5vUBSbSxyitJqthgwZunCQjDVtNd8ggVCxzuPQ4` 55 | 56 | See spec for testing details. 57 | 58 | ## Design rationale 59 | 60 | Popular services today such as [Netlify](https://docs.netlify.com/routing/redirects/#syntax-for-the-redirects-file) and [Cloudflare Pages](https://developers.cloudflare.com/pages/platform/redirects) allow developers to configure redirect support 61 | using a `_redirects` file hosted at the top level of the web site. While we do not intend to provide all of the same functionality, it seems desirable to use a similar approach to provide a meaningful subset of the functionality offered by these services. 62 | 63 | - The format is simple and low on syntax 64 | - Many developers are already familiar with this file name and format 65 | - Using a text file for configuration enables developers to make changes without using other IPFS tools 66 | - The configuration can be easily versioned in both version control systems and IPFS by virtue of the resulting change to the root CID for the content 67 | 68 | ### User benefit 69 | 70 | Provides general URL redirect and rewrite support, which enables three important features: 71 | 1. Developers will be able to host single-page applications in IPFS. 72 | 2. Same configuration file used for setting up pretty 404 pages. 73 | 3. The cost of switching hosting of an existing website to IPFS is lowered by making it possible to keep all legacy URLs working. 74 | 75 | ### Compatibility 76 | 77 | If by some chance developers are already hosting sites that contain a `_redirects` file that does something else, they may need to update the contents of the file to match the new functionality. Errors returned to the user due to parsing errors will guide them regarding the required updates. 78 | 79 | ### Alternatives 80 | 81 | - There was some discussion early on about a [manifest file](https://github.com/ipfs/specs/issues/257) that could be used to configure redirect support in addition to many other things. While the idea of a manifest file has merit, manifest files are much larger in scope and it became challenging to reach agreement on functionality to include. 82 | There is already a large need for redirect support for SPAs, and this proposal allows us to provide that critical functionality without being hampered by further design discussion around manifest files. 83 | In addition, similar to how Netlify allows redirect support to be configured in either a `_redirects` file or a more general [configuration file](https://docs.netlify.com/configure-builds/file-based-configuration/#redirects), there is nothing precluding IPFS from allowing developers to configure redirect support in an app manifest later on. 84 | - There was some discussion with the [n0](https://github.com/n0-computer/) team about potential ways to improve the performance of retrieving metadata such as redirect rules, possibly including it as metadata with the root CID such that it would be included with the request for the CID to begin with. 85 | I believe the performance concerns are alleviated by not providing forced redirect support, and looking for `_redirects` only if the DAG is missing a requested path. 
Nevertheless, if a more generic metadata facility were to be introduced in the future, it may make sense to reconsider how redirect rules are specified. 86 | 87 | ### Copyright 88 | 89 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 90 | -------------------------------------------------------------------------------- /src/ipips/ipip-0288.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0288: TAR Response Format on HTTP Gateways" 3 | date: 2022-06-10 4 | ipip: ratified 5 | editors: 6 | - name: Henrique Dias 7 | github: hacdias 8 | url: https://hacdias.com/ 9 | - name: Marcin Rataj 10 | github: lidel 11 | url: https://lidel.org/ 12 | relatedIssues: 13 | - https://github.com/ipfs/specs/pull/288 14 | - https://github.com/ipfs/go-ipfs/pull/9029 15 | - https://github.com/ipfs/go-ipfs/pull/9034 16 | order: 288 17 | tags: ['ipips'] 18 | --- 19 | 20 | ## Summary 21 | 22 | Add TAR response format to the :cite[path-gateway]. 23 | 24 | ## Motivation 25 | 26 | Currently, the HTTP Gateway only allows for UnixFS deserialization of a single 27 | UnixFS file. Directories have to be downloaded one file at a time, using 28 | multiple requests, or as a CAR, which requires deserialization in userland, 29 | via additional tools like [ipfs-car](https://www.npmjs.com/package/ipfs-car). 30 | 31 | This illustrates a functional gap: the user is currently unable 32 | to leverage a trusted HTTP gateway for deserializing a UnixFS directory tree. We 33 | would like to remove the need for dealing with CARs when a gateway is trusted 34 | (e.g., a localhost gateway). 35 | 36 | An example use case is for the IPFS Web UI, which currently allows users to 37 | download directories using a workaround. This workaround works via a proprietary 38 | Kubo RPC API that only supports `POST` requests, and the Web UI has to store the entire 39 | directory in memory before the user can download it. 40 | 41 | By introducing TAR responses on the HTTP Gateway, we provide a vendor-agnostic way 42 | of downloading entire directories in deserialized form, which increases the utility 43 | and interop provided by HTTP gateways. 44 | 45 | ## Detailed design 46 | 47 | The solution is to allow the Gateway to support producing TAR archives 48 | by requesting them using either the `Accept` HTTP header or the `format` 49 | URL query. 50 | 51 | ## Test fixtures 52 | 53 | Existing `curl` and `tar` tools can be used by implementers for testing. 54 | 55 | Providing static test vectors has little value here, as different TAR libraries 56 | may produce files that are not byte-for-byte identical due to unspecified ordering of files and 57 | directories inside. 58 | 59 | However, there are certain behaviors, detailed in the [security section](#security), 60 | that should be handled. To test such behaviors, the following fixtures can be used: 61 | 62 | - [`bafybeibfevfxlvxp5vxobr5oapczpf7resxnleb7tkqmdorc4gl5cdva3y`][inside-dag] 63 | is a UnixFS DAG that contains a file with a name that looks like a relative 64 | path that points inside the root directory. Downloading it as a TAR must 65 | work. 66 | 67 | - [`bafkreict7qp5aqs52445bk4o7iuymf3davw67tpqqiscglujx3w6r7hwoq`][inside-dag-tar] 68 | is an example TAR file that corresponds to the aforementioned UnixFS DAG. Its 69 | structure can be inspected in order to check if new implementations conform 70 | to the specification.
71 | 72 | - [`bafybeicaj7kvxpcv4neaqzwhrqqmdstu4dhrwfpknrgebq6nzcecfucvyu`][outside-dag] 73 | is a UnixFS DAG that contains a file with a name that looks like a relative 74 | path that points outside the root directory. Downloading it as a TAR must 75 | error. 76 | 77 | ## Design rationale 78 | 79 | The current gateway already supports different response formats via the 80 | `Accept` HTTP header and the `format` URL query. This IPIP proposes adding 81 | one more supported format to that list. 82 | 83 | ### User benefit 84 | 85 | Users will be able to directly download deserialized UnixFS directories from 86 | the gateway. Having a single TAR stream is saving resources on both client and 87 | HTTP server, and removes complexity related to redundant buffering or CAR 88 | deserialization when gateway is trusted. 89 | 90 | In the Web UI, for example, we will be able to create a direct link to download 91 | a directory, instead of using the API to put the whole file in memory before 92 | downloading it. 93 | 94 | CLI users will be able to download a directory with existing tools like `curl` and `tar` without 95 | having to talk to implementation-specific RPC APIs like `/api/v0/get` from Kubo. 96 | 97 | Fetching a directory from a local gateway will be as simple as: 98 | 99 | ```console 100 | $ export DIR_CID=bafybeigccimv3zqm5g4jt363faybagywkvqbrismoquogimy7kvz2sj7sq 101 | $ curl "http://127.0.0.1:8080/ipfs/$DIR_CID?format=tar" | tar xv 102 | bafybeigccimv3zqm5g4jt363faybagywkvqbrismoquogimy7kvz2sj7sq 103 | bafybeigccimv3zqm5g4jt363faybagywkvqbrismoquogimy7kvz2sj7sq/1 - Barrel - Part 1 - alt.txt 104 | bafybeigccimv3zqm5g4jt363faybagywkvqbrismoquogimy7kvz2sj7sq/1 - Barrel - Part 1 - transcript.txt 105 | bafybeigccimv3zqm5g4jt363faybagywkvqbrismoquogimy7kvz2sj7sq/1 - Barrel - Part 1.png 106 | ``` 107 | 108 | ### Compatibility 109 | 110 | This IPIP is backwards compatible: adds a new opt-in response type, does not 111 | modify preexisting behaviors. 112 | 113 | Existing content type `application/x-tar` is used when request is made with an `Accept` header. 114 | 115 | ### Security 116 | 117 | Third-party UnixFS file names may include unexpected values, such as `../`. 118 | 119 | Manually created UnixFS DAGs can be turned into malicious TAR files. For example, 120 | if a UnixFS directory contains a file that points at a relative path outside 121 | its root, the unpacking of the TAR file may overwrite local files outside the expected 122 | destination. 123 | 124 | In order to prevent this, the specification requires implementations to do 125 | basic sanitization of paths returned inside a TAR response. 126 | 127 | If the UnixFS directory contains a file whose path 128 | points outside the root, the TAR file download **should** fail by force-closing 129 | the HTTP connection, leading to a network error. 130 | 131 | To test this, we provide some [test fixtures](#test-fixtures). The user should be 132 | suggested to use a CAR file if they want to download the raw files. 133 | 134 | ### Alternatives 135 | 136 | One discussed alternative would be to support uncompressed ZIP files. However, 137 | TAR and TAR-related libraries are already supported by some IPFS 138 | implementations, and are easier to work with in CLI. TAR provides simpler 139 | abstraction, and layering compression on top of TAR stream allows for greater 140 | flexibility than alternative options that come with own, opinionated approaches 141 | to compression. 
142 | 143 | In addition, we considered supporting [Gzipped TAR](https://github.com/ipfs/go-ipfs/pull/9034) out of the box, 144 | but decided against it as gzip or alternative compression may be introduced on the HTTP transport layer. 145 | 146 | ### Copyright 147 | 148 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 149 | 150 | [inside-dag]: https://dweb.link/ipfs/bafybeibfevfxlvxp5vxobr5oapczpf7resxnleb7tkqmdorc4gl5cdva3y?format=car 151 | [inside-dag-tar]: https://dweb.link/ipfs/bafkreict7qp5aqs52445bk4o7iuymf3davw67tpqqiscglujx3w6r7hwoq?format=car 152 | [outside-dag]: https://dweb.link/ipfs/bafybeicaj7kvxpcv4neaqzwhrqqmdstu4dhrwfpknrgebq6nzcecfucvyu?format=car 153 | -------------------------------------------------------------------------------- /src/ipips/ipip-0328.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0328: JSON and CBOR Response Formats on HTTP Gateways" 3 | date: 2022-10-07 4 | ipip: ratified 5 | editors: 6 | - name: Henrique Dias 7 | github: hacdias 8 | url: https://hacdias.com/ 9 | - name: Marcin Rataj 10 | github: lidel 11 | url: https://lidel.org/ 12 | - name: Gus Eggert 13 | github: guseggert 14 | relatedIssues: 15 | - https://github.com/ipfs/in-web-browsers/issues/182 16 | - https://github.com/ipfs/specs/pull/328 17 | - https://github.com/ipfs/kubo/issues/8823 18 | - https://github.com/ipfs/kubo/pull/9335 19 | - https://github.com/ipfs/kubo/issues/7552 20 | order: 328 21 | tags: ['ipips'] 22 | --- 23 | 24 | ## Summary 25 | 26 | Add support for the [DAG-JSON], [DAG-CBOR], JSON and CBOR response formats in 27 | the :cite[path-gateway]. 28 | 29 | ## Motivation 30 | 31 | Currently, the gateway supports requesting data in the [DAG-PB], RAW, [CAR] and 32 | TAR formats. In addition, it allows for traversing of links encoded through CBOR 33 | Tag 42, as long as they are intermediate links, and not the final document. 34 | It works on both DAG-CBOR, and its JSON representation, DAG-JSON. However, it 35 | should be possible to download deserialized versions of the final JSON/CBOR document 36 | in raw format (not wrapped in UnixFS). 37 | 38 | The main functional gap in the IPFS ecosystem is the lack of support for 39 | non-UnixFS DAGs on HTTP gateways. Users are able to create custom DAGs based on 40 | traversable DAG-CBOR thanks to [CBOR tag 42 being reserved for CIDs][cbor-42] 41 | and DAG-JSON documents, but they are unable to load deserialized documents from 42 | a local gateway, which is severely decreasing the utility of non-UnixFS DAGs. 43 | 44 | Adding JSON and CBOR response types will also benefit UnixFS. DAG-PB has a 45 | [logical format][dag-pb-format] which makes it possible to represent a DAG-PB 46 | directory as a [DAG-JSON] document. This means that, if we support DAG-JSON in 47 | the gateway, then we would support 48 | [JSON responses for directory listings][ipfs/go-ipfs/issues/7552], which has been 49 | requested by our users in the past. 50 | 51 | In addition, this functionality is already present on the current Kubo CLI. By 52 | bringing it to the gateways, we provide users with more power when it comes 53 | to storing and fetching CBOR and JSON in IPFS. 54 | 55 | ## Detailed design 56 | 57 | The solution is to allow the Gateway to support serializing data as [DAG-JSON], 58 | [DAG-CBOR], JSON and CBOR by requesting them using either the `Accept` HTTP header 59 | or the `format` URL query. 
In addition, if the resolved CID is of one of the 60 | aforementioned types, the gateway should be able to resolve them instead of 61 | failing with `node type unknown`. 62 | 63 | ## Test fixtures 64 | 65 | - [`bafybeiegxwlgmoh2cny7qlolykdf7aq7g6dlommarldrbm7c4hbckhfcke`][f-dag-pb] is a 66 | DAG-PB directory. 67 | - [`bafkreidmwhhm6myajxlpu7kofe3aqwf4ezxxn46cp5fko7mb6x74g4k5nm`][f-dag-pb-json] 68 | is the aforementioned DAG-PB directory's [Logical DAG-JSON representation][dag-pb-format] that 69 | is expected to be returned when using `?format=dag-json`. 70 | 71 | ## Design rationale 72 | 73 | The current gateway already supports different response formats via the 74 | `Accept` HTTP header and the `format` URL query. This IPIP proposes adding 75 | JSON and CBOR formats to that list. 76 | 77 | In addition, the current gateway already supports traversing through DAG-CBOR 78 | and DAG-JSON links if they are intermediary documents. With this IPIP, we aim 79 | to be able to download the DAG-CBOR, DAG-JSON, JSON and CBOR documents 80 | themselves, with correct `Content-Type` headers. 81 | 82 | ### User benefit 83 | 84 | The user benefits from this change as they will now be able to retrieve 85 | content encoded in the traversable DAG-JSON and DAG-CBOR formats. This is 86 | something that has been [requested before][ipfs/go-ipfs/issues/7552]. 87 | 88 | In addition, both UX and DX are significantly improved, since every UnixFS directory can 89 | now be inspected in a regular web browser via `?format=json`. This can remove the 90 | need for parsing HTML with directory listing. 91 | 92 | ### Compatibility 93 | 94 | This IPIP adds new response types and does not modify existing ones, 95 | making it a backwards-compatible change. 96 | 97 | ### Security 98 | 99 | Serializers and deserializers for the JSON and CBOR must follow the security 100 | considerations of the original specifications, found in: 101 | 102 | - [RFC 8259 (JSON), Section 12][rfc8259-sec12] 103 | - [RFC 8949 (CBOR), Section 10][rfc8949-sec10] 104 | 105 | DAG-JSON and DAG-CBOR follow the same security considerations as JSON and CBOR. 106 | Note that DAG-JSON and DAG-CBOR are stricter subsets of JSON and CBOR, respectively. 107 | Therefore they must follow their specification and error if the payload is not 108 | strict enough: 109 | 110 | - [DAG-JSON Spec][dag-json-spec] 111 | - [DAG-CBOR Spec][dag-cbor-spec] 112 | 113 | ### Alternatives 114 | 115 | #### Why four content types? 116 | 117 | If we do not introduce DAG-JSON, DAG-CBOR, JSON and CBOR response formats in 118 | the gateway, the usage of IPFS is constricted to files and directories represented 119 | by UnixFS (DAG-PB) codec. Therefore, if a user wants to store JSON and/or CBOR 120 | in IPFS, they have to wrap it as a UnixFS file in order to be able to fetch it 121 | through the gateway. That adds size and processing overhead. 122 | 123 | In addition, we could introduce only DAG-JSON and DAG-CBOR. However, not 124 | supporting the generic variants, JSON and CBOR, would lead to poor UX. The 125 | ability to retrieve DAG-JSON as `application/json` is an important step 126 | for the interoperability of the HTTP Gateway with web browsers and other tools 127 | that expect specific Content Types. Namely, `Content-Type: application/json` with 128 | `Content-Disposition: inline` allows for JSON preview to be rendered in a web browser 129 | and webdev tools. 130 | 131 | #### Why JSON/CBOR pathing is limited to full blocks? 
132 | 133 | Finally, we considered supporting pathing within both DAG and non-DAG variants 134 | of the JSON and CBOR codecs. Pathing within these documents could lead to responses 135 | with extracts from the document. For example, if we have the document: 136 | 137 | ```json 138 | { 139 |   "link": { 140 |     "to": { 141 |       "some": { 142 |         "cid2": { "/": "..." } 143 |       } 144 |     } 145 |   } 146 | } 147 | ``` 148 | 149 | With CID `bafy`, navigating to `/ipfs/bafy/link/to` would allow us to 150 | retrieve an extract from the document: 151 | 152 | ```json 153 | { 154 |   "some": { 155 |     "cid2": { "/": "..." } 156 |   } 157 | } 158 | ``` 159 | 160 | However, supporting this raises questions whose answers are not clearly defined 161 | or agreed upon yet. Right now, pathing is only supported over CID-based Links, 162 | such as Tag 42 in CBOR. In addition, some HTTP headers regarding caching are based 163 | on the CID, and it is not clear how they would apply to such extracted responses. Giving users the 164 | possibility to retrieve JSON, CBOR, DAG-JSON and DAG-CBOR documents through the 165 | gateway is, in itself, progress, and will open the door to new tools and explorations. 166 | 167 | ### Copyright 168 | 169 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 170 | 171 | [cbor-42]: https://github.com/core-wg/yang-cbor/issues/13#issuecomment-524378859 172 | [DAG-PB]: https://ipld.io/docs/codecs/known/dag-pb/ 173 | [dag-pb-format]: https://ipld.io/specs/codecs/dag-pb/spec/#logical-format 174 | [DAG-JSON]: https://ipld.io/docs/codecs/known/dag-json/ 175 | [DAG-CBOR]: https://ipld.io/docs/codecs/known/dag-cbor/ 176 | [CAR]: https://ipld.io/specs/transport/car/ 177 | [ipfs/in-web-browsers/issues/182]: https://github.com/ipfs/in-web-browsers/issues/182 178 | [ipfs/specs/pull/328]: https://github.com/ipfs/specs/pull/328 179 | [ipfs/kubo/issues/8823]: https://github.com/ipfs/kubo/issues/8823 180 | [ipfs/kubo/pull/9335]: https://github.com/ipfs/kubo/pull/9335 181 | [ipfs/go-ipfs/issues/7552]: https://github.com/ipfs/go-ipfs/issues/7552 182 | [f-dag-pb]: https://dweb.link/ipfs/bafybeiegxwlgmoh2cny7qlolykdf7aq7g6dlommarldrbm7c4hbckhfcke 183 | [f-dag-pb-json]: https://dweb.link/ipfs/bafkreidmwhhm6myajxlpu7kofe3aqwf4ezxxn46cp5fko7mb6x74g4k5nm 184 | [rfc8259-sec12]: https://datatracker.ietf.org/doc/html/rfc8259#section-12 185 | [rfc8949-sec10]: https://datatracker.ietf.org/doc/html/rfc8949#section-10 186 | [dag-json-spec]: https://ipld.io/specs/codecs/dag-json/spec/ 187 | [dag-cbor-spec]: https://ipld.io/specs/codecs/dag-cbor/spec/ 188 | -------------------------------------------------------------------------------- /src/ipips/ipip-0337.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0337: Delegated Content Routing HTTP API" 3 | date: 2022-10-18 4 | ipip: ratified 5 | editors: 6 | - name: Gus Eggert 7 | github: guseggert 8 | - name: Marcin Rataj 9 | github: lidel 10 | url: https://lidel.org/ 11 | relatedIssues: 12 | - https://github.com/ipfs/specs/pull/337 13 | order: 337 14 | tags: ['ipips'] 15 | --- 16 | 17 | ## Summary 18 | 19 | This IPIP specifies an HTTP API for delegated content routing. 20 | 21 | ## Motivation 22 | 23 | Idiomatic and first-class HTTP support for delegated routing is an important requirement for large content routing providers, 24 | and supporting large content providers is a key strategy for driving down IPFS content routing latency.
25 | These providers must handle high volumes of traffic and support many users, so leveraging industry-standard tools and services 26 | such as HTTP load balancers, CDNs, reverse proxies, etc. is a requirement. 27 | To maximize compatibility with standard tools, IPFS needs an HTTP API specification that uses standard HTTP idioms and payload encoding. 28 | The Reframe spec for delegated content routing is an experimental attempt at this, 29 | but it has resulted in a very unidiomatic HTTP API which is difficult to implement and is incompatible with many existing tools. 30 | The cost of a proper redesign, implementation, and maintenance of Reframe and its implementation is too high relative to the urgency of having a delegated content routing HTTP API. 31 | 32 | Note that this does not supplant nor deprecate Reframe. Ideally in the future, Reframe and its implementation would receive the resources needed to map the IDL to idiomatic HTTP, 33 | and implementations of this spec could then be rewritten in the IDL, maintaining backwards compatibility. 34 | 35 | We expect this API to be extended beyond "content routing" in the future, so additional IPIPs may rename this to something more general such as "Delegated Routing HTTP API". 36 | 37 | ## Detailed design 38 | 39 | See the Delegated Content Routing HTTP API spec (:cite[http-routing-v1]) included with this IPIP. 40 | 41 | ## Design rationale 42 | 43 | To understand the design rationale, it is important to consider the concrete Reframe limitations that we know about: 44 | 45 | - Reframe method types using the HTTP transport are encoded inside IPLD-encoded messages 46 | - This prevents URL-based pattern matching on methods, which makes it hard and expensive to do basic HTTP scaling and optimizations: 47 | - Configuring different caching strategies for different methods 48 | - Configuring reverse proxies on a per-method basis 49 | - Routing methods to specific backends 50 | - Method-specific reverse proxy config such as timeouts 51 | - Developer UX is poor as a result, e.g. 
for CDN caching you must encode the entire request message and pass it as a query parameter 52 | - This was initially done by URL-escaping the raw bytes 53 | - Not possible to consume correctly using standard JavaScript (see [edelweiss#61](https://github.com/ipld/edelweiss/issues/61)) 54 | - Shipped in Kubo 0.16 55 | - Packing a CID into a struct, encoding it with DAG-CBOR, multibase-encoding that, percent-encoding that, and then passing it in a URL, rather than merely passing the CID in the URL, is needlessly complex from a user's perspective, and has already made it difficult to manually construct requests or interpret logs 56 | - Added complexity of "Cacheable" methods supporting both POSTs and GETs 57 | - The required streaming support and message groups add a lot of implementation complexity, but streaming does not currently work for cacheable methods sent over HTTP 58 | - Ex for FindProviders, the response is buffered anyway for ETag calculation 59 | - There are no limits on response sizes nor ways to impose limits and paginate 60 | - This is useful for routers that have highly variable resolution time, to send results as soon as possible, but this is not a use case we are focusing on right now and we can add it later 61 | - The Identify method is not implemented because it is not currently useful 62 | - This is because Reframe's ambition is to be a generic catch-all bag of methods across protocols, while delegated routing use case only requires a subset of its methods. 63 | - Client and server implementations are difficult to write correctly, because of the non-standard wire formats and conventions 64 | - Example: [bug reported by implementer](https://github.com/ipld/edelweiss/issues/62), and [another one](https://github.com/ipld/edelweiss/issues/61) 65 | - The Go implementation is [complex](https://github.com/ipfs/go-delegated-routing/blob/main/gen/proto/proto_edelweiss.go) and [brittle](https://github.com/ipfs/go-delegated-routing/blame/main/client/provide.go#L51-L100), and is currently maintained by IPFS Stewards who are already over-committed with other priorities 66 | - Only the HTTP transport has been designed and implemented, so it's unclear if the existing design will work for other transports, and what their use cases and requirements are 67 | - This means Reframe can't be trusted to be transport-agnostic until there is at least a second transport implemented (e.g. as a reframe-over-libp2p protocol) 68 | - There's naming confusion around "Reframe, the protocol" and "Reframe, the set of methods" 69 | 70 | So this API proposal makes the following changes: 71 | 72 | - The Delegated Content Routing API is defined using HTTP semantics, and can be implemented without introducing Reframe concepts nor IPLD 73 | - There is a clear distinction between the [Kubo RPC](https://docs.ipfs.tech/reference/kubo/rpc/) and the vendor-agnostic Routing V1 HTTP API. 74 | - "Method names" and cache-relevant parameters are pushed into the URL path 75 | - Streaming support is removed, and default response size limits are added. 76 | - We will add streaming support in a subsequent IPIP, but we are trying to minimize the scope of this IPIP to what is immediately useful 77 | - Bodies are encoded using idiomatic JSON, instead of using IPLD codecs, and are compatible with OpenAPI specifications 78 | - The JSON uses human-readable string encodings of common data types 79 | - CIDs are encoded as CIDv1 strings with a multibase prefix (e.g. 
base32), for consistency with CLIs, browsers, and [gateway URLs](https://docs.ipfs.io/how-to/address-ipfs-on-web/) 80 | - Multiaddrs use the [human-readable format](https://github.com/multiformats/multiaddr#specification) that is used in existing tools and Kubo CLI commands such as `ipfs id` or `ipfs swarm peers` 81 | - Byte array values, such as signatures, are multibase-encoded strings (with an `m` prefix indicating Base64) 82 | - The "Identify" method and "message groups" are not included 83 | - The "GetIPNS" and "PutIPNS" methods are not included. Instead, a separate Delegated IPNS HTTP API aims to firstly facilitate naming system delegation, and secondly, pave the way for future iterations of IPNS with less interdependency with content routing. For more information, see Delegated IPNS HTTP API (:cite[ipip-0379]). 84 | 85 | ### User benefit 86 | 87 | The cost of building and operating content routing services will be much lower, as developers will be able to maximally reuse existing industry-standard tooling. 88 | Users will not need to learn a new RPC protocol and tooling to consume or expose the API. 89 | This will result in more content routing providers, each providing a better experience for users, driving down content routing latency across the IPFS network 90 | and increasing data availability. 91 | 92 | ### Compatibility 93 | 94 | #### Backwards Compatibility 95 | 96 | IPFS Stewards will implement this API in [go-delegated-routing](https://github.com/ipfs/go-delegated-routing), using breaking changes in a new minor version. 97 | Because the existing Reframe spec can't be safely used in JavaScript and we won't be investing time and resources into changing the wire format implemented in edelweiss to fix it, 98 | the experimental support for Reframe in Kubo will be deprecated in the next release and delegated content routing will subsequently use this HTTP API. 99 | We may decide to re-add Reframe support in the future once these issues have been resolved.- 100 | 101 | #### Forwards Compatibility 102 | 103 | Standard HTTP mechanisms for forward compatibility are used: 104 | 105 | - The API is versioned using a version number prefix in the path 106 | - The `Accept` and `Content-Type` headers are used for content type negotiation, allowing for backwards-compatible additions of new MIME types, hypothetically such as: 107 | - `application/cbor` for binary-encoded responses 108 | - `application/x-ndjson` for streamed responses 109 | - `application/octet-stream` if the content router can provide the content/block directly 110 | - New paths+methods can be introduced in a backwards-compatible way 111 | - Parameters can be added using either new query parameters or new fields in the request/response body. 112 | - Provider records are both opaque and versioned to allow evolution of schemas and semantics for the same transfer protocol 113 | 114 | As a proof-of-concept, the tests for the initial implementation of this HTTP API were successfully tested with a libp2p transport using [libp2p/go-libp2p-http](https://github.com/libp2p/go-libp2p-http), demonstrating viability for also using this API over libp2p. 115 | 116 | ### Security 117 | 118 | - All CID requests are sent to a central HTTPS endpoint as plain text, with TLS being the only protection against third-party observation. 119 | - While privacy is not a concern in the current version, plans are underway to add a separate endpoint that prioritizes lookup privacy. 
Follow the progress in related pre-work in [IPIP-272 (double hashed DHT)](https://github.com/ipfs/specs/pull/373/) and [ipni#5 (reader privacy in indexers)](https://github.com/ipni/specs/pull/5). 120 | - The usual JSON parsing rules apply. To prevent potential Denial of Service (DoS) attacks, clients should ignore responses larger than 100 providers and introduce a byte size limit that is applicable to their use case. 121 | 122 | ### Alternatives 123 | 124 | - Reframe (general-purpose RPC) was evaluated; see the "Design rationale" section for why it was not selected. 125 | 126 | ### Copyright 127 | 128 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 129 | -------------------------------------------------------------------------------- /src/ipips/ipip-0351.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0351: IPNS Signed Records Response Format on HTTP Gateways" 3 | date: 2022-11-29 4 | ipip: ratified 5 | editors: 6 | - name: Henrique Dias 7 | github: hacdias 8 | url: https://hacdias.com/ 9 | - name: Marcin Rataj 10 | github: lidel 11 | url: https://lidel.org/ 12 | relatedIssues: 13 | - https://github.com/ipfs/specs/issues/320 14 | - https://github.com/ipfs/specs/pull/351 15 | - https://github.com/ipfs/kubo/pull/9399 16 | order: 351 17 | tags: ['ipips'] 18 | --- 19 | 20 | ## Summary 21 | 22 | Add IPNS Signed Records response format to the [HTTP Gateway](/http-gateways/). 23 | 24 | ## Motivation 25 | 26 | Currently, the gateway allows for trustless retrieval of data under the `/ipfs` 27 | namespace, by fetching the data as a CAR or a block and then verifying it locally. 28 | This is especially important for light IPFS clients, so that they can retrieve 29 | data from other gateways without delegating any of the trust to them. Unfortunately, 30 | this is not possible under the `/ipns` namespace. 31 | 32 | In contrast to DNSLink, IPNS provides cryptographically-verifiable records that 33 | can be verified by the client. This means that, if a gateway is able to provide 34 | the IPNS signed record to an HTTP client, trustless retrieval would also be available 35 | under the `/ipns` namespace. 36 | 37 | In this IPIP, we propose adding :cite[ipns-record] as a response 38 | format to the gateway under the `/ipns` namespace, allowing for trustless 39 | retrieval of IPNS records over HTTP as the [application/vnd.ipfs.ipns-record](https://www.iana.org/assignments/media-types/application/vnd.ipfs.ipns-record) content type (multicodec `0x0300`). 40 | 41 | ## Detailed design 42 | 43 | - :cite[trustless-gateway] can now provide a signed IPNS record upon request for the `/ipns/{ipns-name}` path. 44 | - To request the IPNS record, use one of the following methods: 45 | - Include the `Accept: application/vnd.ipfs.ipns-record` HTTP header in the request. 46 | - Include the `format=ipns-record` query parameter in the request URL. 47 | - The HTTP response containing the verifiable IPNS record will have the following format: 48 | - Header: `Content-Type: application/vnd.ipfs.ipns-record` 49 | - Body: :cite[ipns-record] serialized as the `IpnsEntry` protobuf. 50 | 51 | ## Test fixtures 52 | 53 | This IPIP was ratified before 54 | [gateway-conformance](https://github.com/ipfs/gateway-conformance) existed.
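In the meantime, the new response format can be exercised manually with generic HTTP tooling; a sketch with `curl` against a local gateway (the address and the IPNS name are placeholders):

```console
$ curl -s -H "Accept: application/vnd.ipfs.ipns-record" \
    "http://127.0.0.1:8080/ipns/{ipns-name}" > record.bin
$ # or, equivalently, using the query parameter:
$ curl -s "http://127.0.0.1:8080/ipns/{ipns-name}?format=ipns-record" > record.bin
```

The downloaded `record.bin` can then be validated locally with any tool that understands the `IpnsEntry` protobuf, for example `ipfs name inspect --verify` in recent Kubo versions.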
55 | 56 | 57 | The [reference implementation in Kubo 0.19](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.19.md#signed-ipns-record-response-format) 58 | provides reusable [assertions](https://github.com/ipfs/kubo/blob/v0.19.2/test/sharness/t0124-gateway-ipns-record.sh). 59 | 60 | :::issue 61 | Until vendor-agnostic fixtures are added to the conformance test suite ([tracking issue](https://github.com/ipfs/gateway-conformance/issues/3)), 62 | IPNS records for testing can be generated in Kubo by creating an IPNS record: 63 | 64 | ```bash 65 | $ ipfs key gen <key-name> 66 | k51Key 67 | 68 | $ ipfs name publish /ipfs/bafyHash --key=<key-name> --ttl=<duration> 69 | Published to k51Key: /ipfs/bafyHash 70 | 71 | $ ipfs routing get /ipns/k51Key > key.pb 72 | ``` 73 | ::: 74 | 75 | ## Design rationale 76 | 77 | The current gateway already supports different response formats via the 78 | `Accept` HTTP header and the `format` URL query. This IPIP proposes adding 79 | one more supported format to that list. 80 | 81 | ### User benefit 82 | 83 | By providing IPNS records through the gateway, IPFS light clients are able 84 | to race multiple gateways in search of an IPNS record for a certain IPNS key. 85 | This way, IPFS light clients do not necessarily need to implement the required 86 | machinery to fetch IPNS records from other IPFS nodes through the DHT or PubSub. 87 | 88 | In addition, the retrieval of IPNS records is trustless in the sense that they can 89 | be verified by the client since the IPNS record includes a cryptographic signature 90 | provided by its creator. 91 | 92 | ### Compatibility 93 | 94 | This IPIP proposes a new format to be added to the gateway, but does not change 95 | any prior format. Therefore, this IPIP is backwards compatible. Please note 96 | that IPNS records are also added to the :cite[trustless-gateway] specification. 97 | 98 | ### Copyright 99 | 100 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 101 | -------------------------------------------------------------------------------- /src/ipips/ipip-0379.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0379: Delegated IPNS HTTP API" 3 | date: 2023-02-13 4 | ipip: ratified 5 | editors: 6 | - name: Masih H. Derkani 7 | github: masih 8 | - name: Marcin Rataj 9 | github: lidel 10 | url: https://lidel.org/ 11 | relatedIssues: 12 | - https://github.com/ipfs/specs/issues/343 13 | - https://github.com/ipfs/specs/pull/337 14 | - https://github.com/ipfs/specs/pull/377 15 | xref: 16 | - ipns-record 17 | order: 379 18 | tags: ['ipips'] 19 | --- 20 | 21 | ## Summary 22 | 23 | This IPIP specifies the `/routing/v1/ipns` HTTP API to offload the naming system onto another process or server. 24 | 25 | ## Motivation 26 | 27 | One of the motivations of this document is to introduce simple-to-use HTTP APIs and ultimately reduce the barrier for interaction across alternative systems. 28 | 29 | Expanding on the motivations of :cite[ipip-0337], the work here concentrates on delegation of IPNS over an HTTP API. Naming is part of the core IPFS DHT functionality. 30 | The performance of the naming system over the IPFS DHT can suffer from long delays due to churn of records and quorum requirements. 31 | 32 | ## Detailed design 33 | 34 | Add `/routing/v1/ipns` to the existing :cite[http-routing-v1] specification. 35 | 36 | ## Design rationale 37 | 38 | The rationale for delegated IPNS over HTTP APIs closely follows the reasoning listed in :cite[ipip-0337].
39 | 40 | The document proposes the following: 41 | - HTTP `GET` and `PUT` semantics for publication and resolution of IPNS records. 42 | - Use of existing :ref[IPNS Record] serialization format as HTTP `Content-Type` [`application/vnd.ipfs.ipns-record`](https://www.iana.org/assignments/media-types/application/vnd.ipfs.ipns-record). 43 | 44 | ### User benefit 45 | 46 | The ability to offload naming to another process or server using a simple HTTP 47 | API brings several benefits: 48 | 49 | - It reduces the resource drain on light clients such as JavaScript running on 50 | a web page, mobile devices, IoT devices, and gateway HTTP services. 51 | - It enables scaling of IPNS resolution separately from retrieval and 52 | deserialization/verification services. 53 | - Expands the utility of IPNS beyond IPFS nodes, contributes to its broader 54 | adoption and impact by reducing integration costs. 55 | 56 | ### Compatibility 57 | 58 | See the "Compatibility" section of :cite[ipip-0337]. 59 | 60 | #### Serialization Format 61 | 62 | Standard IPNS record serialization format is used, making it fully compatible with the existing IPNS ecosystem. 63 | 64 | :cite[ipns-record] uses [`application/vnd.ipfs.ipns-record`](https://www.iana.org/assignments/media-types/application/vnd.ipfs.ipns-record) protobuf serialization format. 65 | This format is widely in use in IPNS over PubSub and DHT routing systems. 66 | Further, interoperability across the existing and HTTP APIs is also desirable in order to reduce the barrier for adoption of the delegated HTTP APIs. 67 | 68 | To maximize interoperability with existing ecosystem, the canonical IPNS record serialization format :cite[ipns-record] (`0x0300`) can be requested with content type `application/vnd.ipfs.ipns-record`. 69 | 70 | ### Security 71 | 72 | All interaction over the APIs should use TLS to protect against third-party observation and tampering. 73 | Additionally, the IPNS records must be validated according to the rules stated in :cite[ipns-record] before further processing. 74 | 75 | To avoid Denial of Service attack, maximum IPNS record size defined in :cite[ipns-record] applies. 76 | 77 | Privacy in delegated IPNS is out of scope for this IPIP. 78 | 79 | ### Alternatives 80 | 81 | See: 82 | - IPNS over (libp2p over HTTP). While it may be possible in the future, it has more narrow utility and way bigger implementation complexity than libp2p-agnostic HTTP API with protobuf GET/PUT. 83 | - Reframe; it was deprecated and now removed. Historical reasons can be found in :cite[ipip-0337]. 84 | - JSON IPNS Record representation was descoped due to open question how signing should work and unclear use case ([discussion](https://github.com/ipfs/specs/pull/379#discussion_r1107898543)). 85 | - API-specific max record size was descoped, as it would introduce interop problems harmful to the IPNS ecosystem. 86 | 87 | ### Copyright 88 | 89 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 
90 | -------------------------------------------------------------------------------- /src/ipips/ipip-0383.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0383: Compact Denylist Format" 3 | date: 2023-03-09 4 | ipip: proposal 5 | editors: 6 | - name: Hector Sanjuan 7 | github: hsanjuan 8 | affiliation: 9 | name: Protocol Labs 10 | url: https://protocol.ai/ 11 | relatedIssues: 12 | - https://github.com/ipfs/specs/issues/298 13 | - https://github.com/ipfs/specs/pull/299 14 | - https://github.com/ipfs/specs/pull/340 15 | order: 383 16 | tags: ['ipips'] 17 | --- 18 | 19 | ## Summary 20 | 21 | This IPIP introduces a line-based denylist format for content blocking on IPFS 22 | focused on simplicity, scalability and ease-of-use. 23 | 24 | A reference Go implementation of a denylist parser and Blocker component for 25 | the Kubo (go-ipfs) stack exists at https://github.com/ipfs-shipyard/nopfs. 26 | 27 | ## Motivation 28 | 29 | IPFS implementations should support content moderation, particularly when it 30 | comes to deployments of publicly-accessible infrastructure like gateways. 31 | 32 | The first step in a larger strategy to enable decentralized content moderation 33 | in IPFS setups is to agree on a denylist format that different implementations 34 | can rely on and share. 35 | 36 | ## Detailed design 37 | 38 | See :cite[compact-denylist-format]. 39 | 40 | ## Design rationale 41 | 42 | This proposal introduces a new denylist format which aims to fulfil the 43 | following requirements, which are a must for such a system: 44 | 45 | * Efficient parsing at scale. Compact. 46 | * Simplicity and extensibility for extra features, both in future versions of 47 | the spec and in custom systems. 48 | * Easy to read and to understand. 49 | * Integration-ready: Avoid the requirement of custom tooling or implementation 50 | support to manage denylists. Text-file operations as the interface for 51 | list-editing. 52 | * Support the necessary types of blocks (by CID, by path, double-hash, etc.) 53 | needed by users and operators. 54 | * IPFS and DAGification friendly. 55 | 56 | The proposed design is part of a holistic approach to content moderation for IPFS, for which we have the following detailed wishlist of items ultimately related to the denylist format: 57 | 58 | - Regarding the type of blocking: 59 | - Ability to block content from being retrieved, stored or served by multihash 60 | - Ability to block content that is referenced with an IPFS-path from a blocked multihash or traversing a blocked multihash. 61 | - Ability to block by regexp-matching an IPFS path 62 | - Ability to block based on content-type (i.e. only store/serve plain text and pictures) 63 | - Ability to block based on CID codec (only allow Codec X) 64 | - Ability to block based on multihash function ("no identity multihashes") 65 | - Ability to block IPNS names 66 | 67 | - Regarding the lists: 68 | - Compact format, compression friendly 69 | - Line-based so that updates can be watched 70 | - Lists support CIDs 71 | - Lists support CIDs+path (explicit) 72 | - Lists support CIDs+path (implicit - everything referenced from CID) 73 | - Lists support double-hashed multi-hashes 74 | - Lists support double-hashed cid+path (current badbits format) 75 | - Lists can be edited by hand on a text editor 76 | - Lists are ipfs-replication-friendly (adding a new entry does not require downloading more than 1 IPFS block to sync the list).
77 | - Lists support comments 78 | - Lists support gateway HTTP error hints (i.e. type of block) 79 | - `echo "/ipfs/cid" >> ~/.config/ipfs/denylists/custom` should work 80 | - Lists have a header section with information about the list. 81 | 82 | - Regarding the implementation: 83 | - Multiple denylists should be supported 84 | - Hot-reloading of lists (no restart of IPFS required) 85 | - List removal does not require restart 86 | - Minimal introduction of latency 87 | - Minimal memory footprint (i.e. only read the minimum amount of data into memory) 88 | - Clean denylist module entrypoints (easy integration in current ipfs stack layers) 89 | - Portable architecture (to other IPFS implementations), i.e. good interfaces to switch from an embedded implementation to something that could run separately, or be embedded in other languages (i.e. even servicing multiple ipfs daemons). 90 | - Text-based API. `ipfs deny <cid>` and the like are nice-to-have but not a must to work with denylists. 91 | - Security in mind: do not enable amplification attacks through lists (i.e. someone requesting a recursively blocked CID repeatedly over the gateway endpoint causes traversal of the whole CID-DAG). 92 | 93 | - Regarding list distribution: 94 | - Ability to subscribe to multiple lists, and fetch any updates as they happen 95 | - Ability to publish own lists so that others can subscribe to them 96 | - A list-subscription configuration or file details the remote lists that the user is subscribed to. Editable by hand. 97 | - Ability to subscribe to list subscriptions. 98 | - List subscriptions can carry context (i.e. publisher, email, type of blocking). 99 | 100 | ### User benefit 101 | 102 | Users and developers will benefit from a list format that is easy to work with because: 103 | 104 | * It can be understood by just looking at it. 105 | * It can be edited by hand. 106 | * Implementations can choose to support different aspects (i.e. blocking but no optional hints). 107 | * Denylist parsers are easy and stupid. 108 | 109 | ### Compatibility 110 | 111 | The old JSON-based Protocol Labs denylist format 112 | [https://badbits.dwebops.pub/denylist.json](https://web.archive.org/web/20230610082307/https://badbits.dwebops.pub/denylist.json) can be easily converted into the 113 | proposed compact format. This is shown at . 114 | 115 | ### Alternatives 116 | 117 | This proposal is a follow-up to a [previous proposal](https://github.com/ipfs/specs/pull/340), which has several shortcomings that make it not very practical when working at scale. Both list formats can co-exist, though ultimately it will be a matter of implementation support, and it would be better to settle on one thing. 118 | 119 | It is also a follow-up to the "badbits" denylist format, which has similar issues and is not flexible enough. 120 | 121 | ### Copyright 122 | 123 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
124 | -------------------------------------------------------------------------------- /src/ipips/ipip-0386.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0386: Subdomain Gateway Interop with _redirects" 3 | date: 2023-03-18 4 | ipip: ratified 5 | editors: 6 | - name: suicide 7 | github: suicide 8 | relatedIssues: 9 | - https://github.com/ipfs/boxo/pull/326 10 | - https://github.com/ipfs/boxo/pull/412 11 | order: 386 12 | tags: ['ipips'] 13 | --- 14 | 15 | ## Summary 16 | 17 | This IPIP provides a detailed clarification on the interoperability between the 18 | Path and Subdomain Gateway. Specifically, it ensures that features such as 19 | :cite[web-redirects-file] are executed only AFTER a redirect to a subdomain has 20 | taken place. 21 | 22 | ## Motivation 23 | 24 | When hosting a modern Single Page Application on IPFS, one wants to use native 25 | URLs to share the app with other users, e.g. `ipns://example.org/cool/feature`. 26 | On traditional hosting, deep links are redirected or rewritten to a single 27 | entry point, e.g. `/index.html`, and the router on the client side evaluates 28 | the path segments to render the correct content. 29 | 30 | The `_redirects` file, defined in :cite[web-redirects-file], 31 | supports such applications when a Subdomain Gateway is used directly. However, 32 | the current resolution of native URLs uses a Path Gateway link scheme and 33 | subsequently fails to resolve the URL correctly. 34 | 35 | For example: 36 | 37 | - `ipns://example.org/some/path` is resolved as 38 | - `http://{gateway}/ipns/example.org/some/path` 39 | - this request fails with 404 as the resource `/some/path` does not exist. 40 | 41 | NOTE: The `kubo` (<0.20) gateway returns a 404 including a new `Location` header 42 | already pointing to the correct subdomain URL. But browsers do not follow the 43 | header as the status is 404, and the response contains a `text/plain` body 44 | string. 45 | 46 | When using a Subdomain Gateway on the proper host, the path can be resolved 47 | using the `_redirects` file: 48 | 49 | - `http://example-org.ipns.{gateway}/some/path` is redirected (301) to 50 | - `http://example-org.ipns.{gateway}/correct/path` as defined in 51 | `_redirects`file 52 | 53 | 54 | ## Detailed design 55 | 56 | This IPIP suggests the following resolution steps for DNSLink names, CIDs 57 | should be resolved similarly: 58 | 59 | - `ipns://example.org/some/path` is resolved as 60 | - `http://{gateway}/ipns/example.org/some/path` is redirected (301) to the 61 | Subdomain Gateway 62 | - `http://example-org.ipns.{gateway}/some/path` is redirected (301) to 63 | - `http://example-org.ipns.{gateway}/correct/path` as defined in `_redirects`. 64 | 65 | A Subdomain Gateway that provides interoperability with Path-Gateway-style URLs 66 | should redirect to a Subdomain-Gateway-style URL first and then try to resolve 67 | a given path. This allows the usage of a potential `_redirects` file in the 68 | root. 69 | 70 | This change subsequently fixes the resolution of native URLs in browsers using 71 | the companion extension and browsers like Brave. 72 | 73 | A paragraph is added to the :cite[subdomain-gateway] 74 | spec that describes the preservation of path segments and query parameters 75 | during the interop redirect. Furthermore, it specifies that gateway 76 | implementations should redirect to subdomain URLs if a resource is initially 77 | not found in the directory identified by the CID or DNSLink name. 
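Expressed as raw HTTP exchanges, the resolution chain described above would look roughly as follows (gateway host, DNSLink name, and paths are illustrative):

```
GET /ipns/example.org/some/path HTTP/1.1
Host: {gateway}

HTTP/1.1 301 Moved Permanently
Location: http://example-org.ipns.{gateway}/some/path

GET /some/path HTTP/1.1
Host: example-org.ipns.{gateway}

HTTP/1.1 301 Moved Permanently
Location: http://example-org.ipns.{gateway}/correct/path
```

The first hop preserves the original path and query parameters while moving to the subdomain origin; only then is the `_redirects` file in the site root consulted, producing the second redirect.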
78 | 79 | ## Test fixtures 80 | 81 | The example from the :cite[web-redirects-file] can be re-used, 82 | `QmYBhLYDwVFvxos9h8CGU2ibaY66QNgv8hpfewxaQrPiZj`. 83 | 84 | ``` 85 | $ ipfs ls /ipfs/QmYBhLYDwVFvxos9h8CGU2ibaY66QNgv8hpfewxaQrPiZj 86 | Qmd9GD7Bauh6N2ZLfNnYS3b7QVAijbud83b8GE8LPMNBBP 7 404.html 87 | QmSmR9NShZ89VEBrn9SBy7Xxvjw8Qe6XArD5GqtHvbtBM3 7 410.html 88 | QmVQqj9oZig9tH3ENHo4bxV5pNgssUwFCXUjAJAVcZVbJG 7 451.html 89 | QmZU3kboiyi9jV59D8Mw8wzuvsr3HmvskqhYRRhdFA8wRq 317 _redirects 90 | QmaWDLb4gnJcJbT1Df5X3j91ysiwkkyxw6329NLiC1KMDR - articles/ 91 | QmS6ZNKE9s8fsHoEnArsZXnzMWijKddhXXDsAev8LdTT5z 9 index.html 92 | QmNwEgMrExwSsE8DCjZjahYfHUfkSWRhtqSkQUh4Fk3udD 7 one.html 93 | QmVe2GcTbEPZkMbjVoQ9YieVGKCHmuHMcJ2kbSCzuBKh2s - redirected-splat/ 94 | QmUGVnZaofnd5nEDvT2bxcFck7rHyJRbpXkh9znjrJNV92 7 two.html 95 | ``` 96 | 97 | The `_redirects` file is as follows. 98 | 99 | ``` 100 | $ ipfs cat /ipfs/QmYBhLYDwVFvxos9h8CGU2ibaY66QNgv8hpfewxaQrPiZj/_redirects 101 | /redirect-one /one.html 102 | /301-redirect-one /one.html 301 103 | /302-redirect-two /two.html 302 104 | /200-index /index.html 200 105 | /posts/:year/:month/:day/:title /articles/:year/:month/:day/:title 301 106 | /splat/* /redirected-splat/:splat 301 107 | /not-found/* /404.html 404 108 | /gone/* /410.html 410 109 | /unavail/* /451.html 451 110 | /* /index.html 200 111 | ``` 112 | 113 | Following redirects should occur: 114 | 115 | ``` 116 | $ curl -v http://{gateway}/ipfs/QmYBhLYDwVFvxos9h8CGU2ibaY66QNgv8hpfewxaQrPiZj/redirect-one 117 | ... 118 | < HTTP/1.1 301 Moved Permanently 119 | < Location: http://bafybeiesjgoros75o5meijhfvnxmy7kzkynhqijlzmypw3nry6nvsjqkzy.ipfs.{gateway}/redirect-one 120 | ... 121 | ``` 122 | 123 | Subsequent requests should comply with :cite[ipip-0002]. 124 | 125 | ## Design rationale 126 | 127 | Gateways like `kubo` (before v0.20) already support the `_redirect` and 128 | subdomain redirect, but block the redirect chain by returning a 404 when a 129 | resource is not found on the Path Gateway. By moving the 404 to occur at the 130 | subdomain, users get another chance to find the resources they are looking for. 131 | 132 | ### User benefit 133 | 134 | Currently, users are presented with an error message when they request a 135 | resource on a Path Gateway intended for a Subdomain Gateway. Since a given 136 | gateway would redirect on a valid resource anyway, redirecting to the subdomain 137 | URL on a potentially invalid resource would improve usability and 138 | compatibility. 139 | 140 | ### Compatibility 141 | 142 | This proposal fixes a bug in handling of `_redirects` files. 143 | 144 | The current behavior is defined in :cite[path-gateway]. The 145 | 404 return code indicates that a resource does not exist. Changing this to a 146 | 301 redirect that is subsequently answered with a 404 from the subdomain URL 147 | is a breaking change, but the old behavior should be considered as a bug. 148 | 149 | Requesting an existing resource on the Path Gateway URL in `kubo` already 150 | returns a 301 redirect: client expectation is for the behavior to be the same 151 | for invalid paths. 152 | 153 | 154 | ### Security 155 | 156 | Security should not be compromised as the resource is not delivered from the 157 | Path Gateway URL but from the subsequent subdomain URL that offers improved 158 | security due to host separation. 159 | 160 | ### Alternatives 161 | 162 | Gateways could continue to return a 404 response for the non-existing resource, 163 | but also include an HTML body containing a redirect link. 
This would help users 164 | to find the requested site, but comes with worse UX than the fix proposed in 165 | this IPIP. 166 | 167 | ### Copyright 168 | 169 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 170 | -------------------------------------------------------------------------------- /src/ipips/ipip-0410.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0410: Streaming NDJSON in Routing HTTP API" 3 | date: 2023-05-12 4 | ipip: ratified 5 | editors: 6 | - name: Henrique Dias 7 | github: hacdias 8 | url: https://hacdias.com/ 9 | relatedIssues: 10 | - https://github.com/ipfs/specs/issues/344 11 | - https://github.com/ipfs/boxo/pull/18 12 | - https://github.com/ipfs/kubo/pull/9868 13 | - https://github.com/ipfs/kubo/pull/9874 14 | order: 410 15 | tags: ['ipips'] 16 | --- 17 | 18 | ## Summary 19 | 20 | Introduce backwards-compatible streaming support to the Routing V1 HTTP API. 21 | For this, we use the `Accept` HTTP header (:cite[rfc9110]) for content type negotiation, as well 22 | as the Newline Delimited JSON ([NDJSON]) format. 23 | 24 | ## Motivation 25 | 26 | The main motivation for this change is to allow servers to respond faster to the 27 | client with provider records, as soon as they are available. In the current state, 28 | the client requests a list of providers for a CID from the server. Then, the client 29 | has to wait for the server to collect their final list of providers. After that, 30 | the server can respond with the full list of providers. 31 | 32 | This is a big source of latency when `/routing/v1` is used for delegating DHT lookups, 33 | where the client is forced to wait for the server to finish DHT walk. 34 | 35 | With streaming support, the server is able to respond with provider records as soon 36 | as they are available. This reduces latency and allows for faster content discovery. 37 | 38 | In addition, streaming responses may produce an unlimited amount of results, which 39 | is not the case for non-streamed responses. 40 | 41 | ## Detailed Design 42 | 43 | In summary, streaming is supported by using the `Accept` HTTP header, which is used 44 | for content type negotiation as described in :cite[rfc9110]. The client sends an 45 | `Accept` HTTP header starting with `application/x-ndjson`, which is the content 46 | type for [NDJSON]. The following happens: 47 | 48 | - The client adds the `Accept` HTTP header in the request starting with `application/x-ndjson`. 49 | - The server checks the `Accept` HTTP header from the request and, if it contains 50 | `application/x-ndjson`, they reply with NDJSON. If they don't support NDJSON, they 51 | can reply with JSON. 52 | - The server response MUST contain a `Content-Type` HTTP header indicating the 53 | response type, which may be either `application/json` for non-streaming responses, 54 | and `application/x-ndjson` for streamed responses. 55 | 56 | For more details regarding the design, check :cite[http-routing-v1]. 57 | 58 | ## Design Rationale 59 | 60 | This feature is designed such that it does not break compatibility with existing 61 | clients and servers. The `Accept` HTTP header is OPTIONAL. By default, the server 62 | MUST respond with `application/json` unless the client explicitly asked for 63 | `application/x-ndjson`. If the server does not support NDJSON, it is allowed 64 | to still respond with non-streamed JSON. 
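As an illustration of the negotiation described above, a client opts into streaming by sending the NDJSON content type in the `Accept` header. The example below is a sketch: `{server}` and `{cid}` are placeholders, the record fields are illustrative, and the authoritative response schemas are defined in :cite[http-routing-v1].

```
# Streaming: each result is written as a single JSON line as soon as it is found
$ curl -s -H "Accept: application/x-ndjson" "https://{server}/routing/v1/providers/{cid}"
{"Schema":"peer","ID":"12D3KooW...","Addrs":["/ip4/203.0.113.7/tcp/4001"],"Protocols":["transport-bitswap"]}
{"Schema":"peer","ID":"12D3KooW...","Addrs":["/ip4/198.51.100.2/udp/4001/quic-v1"],"Protocols":["transport-ipfs-gateway-http"]}

# Without the header, the same endpoint returns a single application/json document
$ curl -s "https://{server}/routing/v1/providers/{cid}"
{"Providers":[ ...full list, returned only once the lookup finishes... ]}
```

Because each NDJSON line is self-contained, the client can start dialing the first provider before the server has finished its lookup, and may close the connection as soon as it has enough results.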
65 | 66 | ### User Benefit 67 | 68 | Users (clients) will benefit from this change as the servers will now be able 69 | to respond more promptly to provider record requests. Instead of waiting for the whole 70 | list to be constructed, servers can now return each provider record one by one, 71 | in a streaming fashion. 72 | 73 | The client will be able to close connection at any time, reducing load on both ends. 74 | 75 | The main use cases for this IPIP are light clients and services which want to 76 | delegate DHT lookups to external service. With streaming, clients will be able 77 | to receive results as soon the delegated service learns about new record, which 78 | directly impacts the content load speeds perceived by the end user. 79 | 80 | ### Compatibility 81 | 82 | The introduced changes are backwards-compatible. The introduced header is completely 83 | optional, and a server that does not support streaming is able to respond with a non-streaming 84 | response to the client. Equally, non-streaming responses are the default. Therefore, a 85 | client that does not support streaming will not receive a streamed response. 86 | 87 | ### Security 88 | 89 | Security considerations are equivalent as the ones in :cite[ipip-0337]. 90 | 91 | ### Copyright 92 | 93 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 94 | 95 | [NDJSON]: http://ndjson.org/ 96 | -------------------------------------------------------------------------------- /src/ipips/ipip-0412.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0412: Signaling Block Order in CARs on HTTP Gateways" 3 | date: 2023-05-15 4 | ipip: ratified 5 | editors: 6 | - name: Marcin Rataj 7 | github: lidel 8 | url: https://lidel.org/ 9 | affiliation: 10 | name: Protocol Labs 11 | url: https://protocol.ai/ 12 | - name: Jorropo 13 | github: Jorropo 14 | affiliation: 15 | name: Protocol Labs 16 | url: https://protocol.ai/ 17 | relatedIssues: 18 | - https://github.com/ipfs/specs/issues/348 19 | - https://github.com/ipfs/specs/pull/330 20 | - https://github.com/ipfs/specs/pull/402 21 | - https://github.com/ipfs/specs/pull/412 22 | order: 412 23 | tags: ['ipips'] 24 | --- 25 | 26 | ## Summary 27 | 28 | Adds support for additional, optional content type options that allow the 29 | client and server to signal or negotiate a specific block order in the returned 30 | CAR. 31 | 32 | ## Motivation 33 | 34 | We want to make it easier to build light-clients for IPFS. We want them to have 35 | low memory footprints on arbitrary sized files. The main pain point preventing 36 | this is the fact that CAR ordering isn't specified. 37 | 38 | This requires keeping some kind of reference either on disk, or in memory to 39 | previously seen blocks for two reasons. 40 | 41 | 1. Blocks can arrive out of order, meaning when a block is consumed (data is 42 | read and returned to the consumer) and when it's received might not match. 43 | 44 | 1. Blocks can be reused multiple times, this is handy for cases when you plan 45 | to cache on disk but not at all when you want to process a stream with use & 46 | forget policy. 47 | 48 | What we really want is for the gateway to help us a bit, and give us blocks in 49 | a useful order. 50 | 51 | The existing Trustless Gateway specification does not provide a mechanism for 52 | negotiating the order of blocks in CAR responses. 53 | 54 | This IPIP aims to improve the status quo. 
55 | 56 | ## Detailed design 57 | 58 | CAR content type 59 | ([`application/vnd.ipld.car`](https://www.iana.org/assignments/media-types/application/vnd.ipld.car)) 60 | already supports `version` parameter, which allows gateway to indicate which 61 | CAR flavor is returned with the response. 62 | 63 | The proposed solution introduces two new parameters for the content type headers 64 | in HTTP requests and responses: `order` and `dups`. 65 | 66 | The `order` parameter allows the client to indicate its preference for a 67 | specific block order in the CAR response, and the `dups` parameter specifies 68 | whether duplicate blocks are allowed in the response. 69 | 70 | A Client SHOULD send `Accept` HTTP header to leverage content type negotiation 71 | based on section 12.5.1 of :cite[rfc9110] to get the preferred response type. 72 | 73 | More details in Section 5. (CAR Responses) of :cite[trustless-gateway]. 74 | 75 | ## Design rationale 76 | 77 | The proposed specification change aims to address the limitations of the 78 | existing Trustless Gateway specification by introducing a mechanism for 79 | negotiating the block order in CAR responses. 80 | 81 | By allowing clients to indicate their preferred block order, Trustless Gateways 82 | can cache CAR responses for popular content, resulting in improved performance 83 | and reduced network load. Clients benefit from more efficient data handling by 84 | deserializing blocks as they arrive, 85 | 86 | We reuse exiting HTTP content type negotiation, and the CAR content type, which 87 | already had the optional `version` parameter. 88 | 89 | ### User benefit 90 | 91 | The proposed specification change brings several benefits to end users: 92 | 93 | 1. Improved Performance: Gateways can decide on their implicit default ordering 94 | and cache CAR responses for popular content. In turn, clients can benefit 95 | from strong `Etag` in ordered (deterministic) responses. This reduces the 96 | response time for subsequent requests, resulting in faster content retrieval 97 | for users. 98 | 99 | 2. Reduced Memory Usage: Clients no longer need to buffer the entire CAR 100 | response in memory until the deserialization of the requested entity is 101 | finished. With the ability to deserialize blocks as they arrive, users can 102 | conserve memory resources, especially when dealing with large CAR responses. 103 | 104 | 3. Efficient Data Handling: By discarding blocks as soon as the CID is 105 | validated and data is deserialized, clients can efficiently process the data 106 | in real-time. This is particularly useful for light clients, IoT devices, 107 | mobile web browsers, and other streaming applications where immediate access 108 | to the data is required. 109 | 110 | 4. Customizable Ordering: Clients can indicate their preferred block order in the 111 | `Accept` header, allowing them to prioritize specific ordering strategies that 112 | align with their use cases. This flexibility enhances the user experience 113 | and empowers users to optimize content retrieval according to their needs. 114 | 115 | ### Compatibility 116 | 117 | The proposed specification change is backward compatible with existing client 118 | and server implementations. 119 | 120 | Trustless Gateways that do not support the negotiation of block order in CAR 121 | responses will continue to function as before, providing their existing default 122 | behavior, and the clients will be able to detect it by inspecting the 123 | `Content-Type` header present in HTTP response. 
124 |  125 | Clients that do not send the `Accept` header or do not recognize the `order`
126 | and `dups` parameters in the `Content-Type` header will receive and process CAR
127 | responses as they did before: buffering/caching all blocks until done with the
128 | final deserialization. 129 |  130 | Existing implementations can choose to adopt the new specification and
131 | implement support for the negotiation of block order incrementally. This allows
132 | for a smooth transition and ensures compatibility with both new and old
133 | clients. 134 |  135 | ### Security 136 |  137 | The proposed specification change does not introduce any negative security
138 | implications beyond those already present in the existing Trustless Gateway
139 | specification. It focuses on enhancing performance and data handling without
140 | affecting the underlying security model of IPFS. 141 |  142 | Light clients with support for the `order` and `dups` CAR content type parameters
143 | will be able to detect a malicious response faster, reducing the risk of
144 | memory-based DoS attacks from malicious gateways. 145 |  146 | ### Alternatives 147 |  148 | Several alternative approaches were considered before arriving at the proposed solution: 149 |  150 | 1. Implicit Server-Side Configuration: Instead of negotiating the block order
151 |    in the CAR response, the Trustless Gateway could have a server-side
152 |    configuration that specifies the default order. However, this approach would
153 |    limit the flexibility for clients, requiring them to have prior knowledge
154 |    of the order supported by each gateway. 155 |  156 | 2. Fixed Block Order: Another option was to enforce a fixed block order in the
157 |    CAR responses. However, this approach would not cater to the varying needs
158 |    and preferences of different clients and use cases, and is not backward
159 |    compatible with the existing Trustless Gateways, which return CAR responses
160 |    with a Weak `Etag` and unspecified block order. 161 |  162 | 3. Separate `X-` HTTP Header: Introduction of a separate HTTP header was
163 |    rejected because we try to use HTTP semantics where possible; gateways
164 |    already use HTTP content type negotiation for the CAR `version` parameter, and reusing it
165 |    saves a few bytes in each round-trip. Also, :cite[rfc6648] advises against the
166 |    use of `X-` and similar constructs in new protocols. 167 |  168 | 4. The decision not to implement a single preset `pack` parameter with predefined behavior,
169 |    instead of separate parameters for order and duplicates (`dups`), was driven
170 |    by considerations of ambiguity and potential future problems when adding
171 |    more determinism to responses. For instance, if we were to include a new
172 |    behavior like `foo=y|n` alongside an existing preset like `pack=orderdfs+dupsy`,
173 |    it would either necessitate the addition of a separate parameter or impose
174 |    the adoption of a new version of every preset (e.g., `orderdfs-dupsy+fooy` and
175 |    `orderdfs+dupsy+foon`). Maintaining and deploying such changes across a
176 |    decentralized ecosystem, where gateways may operate on different software,
177 |    becomes more complex. In contrast, utilizing separate parameters for each
178 |    behavior enables easier maintenance and deployment in a decentralized
179 |    ecosystem with varying gateway software. 180 |  181 | The proposed solution of negotiating the block order through headers is
182 | future-proof and allows for flexibility, interoperability, and customization while
183 | maintaining compatibility with existing implementations.
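To illustrate the negotiation settled on above, a client asks for a deterministic CAR by adding the optional parameters to the `Accept` header. This is a sketch: `{gateway}` and `{cid}` are placeholders, and real responses carry additional headers that are omitted here.

```
# Request a DFS-ordered CAR and allow duplicate blocks
$ curl -s -o /dev/null -D - \
    -H "Accept: application/vnd.ipld.car; version=1; order=dfs; dups=y" \
    "http://{gateway}/ipfs/{cid}" | grep -iE "^HTTP|^Content-Type"
HTTP/1.1 200 OK
Content-Type: application/vnd.ipld.car; version=1; order=dfs; dups=y
```

A gateway that honors the request echoes the effective `order` and `dups` back in `Content-Type`, making the response deterministic enough for a strong `Etag`; a gateway that does not recognize the parameters simply omits them, and the client falls back to buffering blocks as before.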
184 | 185 | ## Test fixtures 186 | 187 | Implementation compliance can be determined by testing the negotiation process 188 | between clients and Trustless Gateways using various combinations of `order` and 189 | `dups` parameters. 190 | 191 | Relevant tests were added to 192 | [gateway-conformance](https://github.com/ipfs/gateway-conformance) test suite 193 | in [#87](https://github.com/ipfs/gateway-conformance/pull/87), and include the below fixture. 194 | 195 | - `bafybeihchr7vmgjaasntayyatmp5sv6xza57iy2h4xj7g46bpjij6yhrmy` 196 | ([CAR](https://github.com/ipfs/gateway-conformance/raw/v0.3.0/fixtures/trustless_gateway_car/dir-with-duplicate-files.car)) 197 | - An UnixFS directory with two files that are the same (same CID). 198 | - If `dups=n`, then there should be no duplicate blocks in the returned CAR. 199 | - If `dups=y`, then the blocks of the file are sent twice. 200 | - The same fixture can be used for testing `order=dfs` and checking if blocks that belong to files arrive in the DFS order. 201 | - It is encouraged to also test DFS order with HAMT fixture such as `bafybeidbclfqleg2uojchspzd4bob56dqetqjsj27gy2cq3klkkgxtpn4i` 202 | ([CAR](https://github.com/ipfs/gateway-conformance/raw/v0.3.0/fixtures/trustless_gateway_car/single-layer-hamt-with-multi-block-files.car)) 203 | 204 | ### Copyright 205 | 206 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 207 | -------------------------------------------------------------------------------- /src/ipips/ipip-0417.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0417: Delegated Peer Routing HTTP API" 3 | date: 2023-05-29 4 | ipip: ratified 5 | editors: 6 | - name: Henrique Dias 7 | github: hacdias 8 | url: https://hacdias.com/ 9 | affiliation: 10 | name: Protocol Labs 11 | url: https://protocol.ai/ 12 | relatedIssues: 13 | - https://github.com/ipfs/specs/pull/410 14 | - https://github.com/ipfs/kubo/pull/9877 15 | order: 417 16 | tags: ['ipips'] 17 | --- 18 | 19 | ## Summary 20 | 21 | This IPIP specifies `/routing/v1/peers/{peer-id}` HTTP API to offload peer routing onto another server. 22 | 23 | ## Motivation 24 | 25 | The motivation of this IPIP extends the one of :cite[ipip-0337] and :cite[ipip-0379], 26 | which introduced delegated content routing and delegated naming, respectively. Now, 27 | we expand upon those basis to introduce peer routing, reducing the barrier for interaction 28 | across different systems. 29 | 30 | ## Detailed design 31 | 32 | Add `/routing/v1/peers/{peer-id}` to the existing :cite[http-routing-v1] specification, 33 | as well as the new Peer schema, that replaces the existing "known transfer protocols". 34 | 35 | ## Design rationale 36 | 37 | In line with the remaining Routing V1 API, this IPIP introduces a new HTTP GET 38 | endpoint that is used to retrieve peer records for a certain peer. 39 | 40 | ### User benefit 41 | 42 | The user benefit brought by this PR is similar to the one in :cite[ipip-0379], 43 | but instead of offloading the naming process, we offload the peer discovery and 44 | routing. 45 | 46 | ### Compatibility 47 | 48 | The section "Known Transfer Protocols" has been removed and replaced by a "Known Schemas" 49 | section. Before, we used to have protocol specific schemas, such as `bitswap` and `graphsync-filecoinv1`. 50 | 51 | The usage of these schemas is no longer encouraged. 
Instead, clients and 52 | servers SHOULD be updated to use the new, more generic, `peer` schema, which 53 | avoids returning the same peer multiple times, making results more efficient 54 | when a peer supports more than one protocol. 55 | 56 | See more in the "Compatibility" section of :cite[ipip-0337]. 57 | 58 | ### Security 59 | 60 | See the "Security" section of :cite[ipip-0337]. 61 | 62 | ### Alternatives 63 | 64 | See the "Alternatives" section of :cite[ipip-0337]. 65 | 66 | ### Copyright 67 | 68 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 69 | -------------------------------------------------------------------------------- /src/ipips/ipip-0428.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP-0428: Allowing V2-Only Records in IPNS" 3 | date: 2023-07-24 4 | ipip: ratified 5 | editors: 6 | - name: Marcin Rataj 7 | github: lidel 8 | url: https://lidel.org/ 9 | affiliation: 10 | name: Protocol Labs 11 | url: https://protocol.ai/ 12 | - name: Henrique Dias 13 | github: hacdias 14 | url: https://hacdias.com/ 15 | affiliation: 16 | name: Protocol Labs 17 | url: https://protocol.ai/ 18 | relatedIssues: 19 | - https://github.com/ipfs/specs/issues/376 20 | - https://github.com/ipfs/boxo/pull/339 21 | - https://github.com/ipfs/kubo/pull/9932 22 | - https://github.com/ipfs/js-ipns/pull/234 23 | order: 428 24 | tags: ['ipips'] 25 | --- 26 | 27 | ## Summary 28 | 29 | Introduce support for creation and validation of compact, V2-only IPNS Records. 30 | 31 | ## Motivation 32 | 33 | IPNS record creation and validation is overly complex due to the legacy of 34 | decisions made in 2021. 35 | 36 | The "V1+V2" record creation and validation was reverse-engineered and documented 37 | the current state in [ipfs/specs#319](https://github.com/ipfs/specs/pull/319), 38 | which created a base for specifications to improve upon. 39 | 40 | A quick refresher on how IPNS Record lifecycle works today (2023 Q2): 41 | 42 | - _Record Creation_ produces both V1 and V2 signatures, and the record has 43 | duplicated values in both top level protobuf AND `data` CBOR field. 44 | 45 | - _Record Validation_ only cares about V2 signature, but still requires fields 46 | related to V1 to be always present in a record and match values from CBOR in 47 | `data` field, for the record to be considered valid. 48 | 49 | We've been producing and expecting these hybrid V1+V2 records [since 2021](https://github.com/ipfs/js-ipns/pull/121). 50 | 51 | An unfortunate result is that all mandatory values MUST be duplicated, even 52 | when both ends use a modern client that only cares about `signatureV2` that 53 | guards CBOR field, otherwise the record will not be valid. 54 | 55 | What this IPIP aims to improve is allow implementations to produce lean, 56 | V2-only IPNS records and ensure clients will interpret them as valid IPNS. 57 | 58 | ## Detailed design 59 | 60 | Finish V2 story by making V2-Only records possible, namely: 61 | 62 | - Record Creation: document and default to lean V2-Only records, keep V1+V2 as legacy 63 | backward-compatible variant. 64 | 65 | - Record Validation: adjust the algorithm to stop requiring V1 fields when there is no 66 | `signatureV1`. 67 | 68 | For details, see the updated :cite[ipns-record] specification. 
69 |  70 | ## Design rationale 71 |  72 | For modern IPNS, the outer `IpnsEntry` protobuf should effectively only have
73 | two required fields: `data` and its `signatureV2`, and such a record, as long
74 | as the signature is valid, should be accepted as valid. 75 |  76 | At the same time, we don't want to break existing software, especially software
77 | and hardware devices which use IPNS for pulling updates. 78 |  79 | We can get to that future in two steps: 80 |  81 | 1. Reference implementations (boxo/ipns, js-ipns) will keep producing V1+V2
82 |    records as the backward-compatible default, but we adjust the validation algorithm
83 |    to allow V2-only records, and support creation of such records as an opt-in in
84 |    modern implementations of IPFS+IPNS, like Kubo (GO) and Helia (JS).
85 |    - Namely, only check/require fields to be duplicated in the top-level protobuf IF
86 |      `signatureV1` is present in the `IpnsEntry` protobuf.
87 |    - IF there is no `signatureV1`, the V1 record would be invalid anyway.
88 |    - IF there is no `signatureV1` but the `signatureV2` and `data` fields
89 |      are present and valid, the V2-only record should be considered valid.
90 |    - This will allow people to build V2-only systems that produce records that
91 |      are considered valid. 92 |  93 | 2. At some point in the future, e.g. when we see that the majority of the public
94 |    swarm supports V2-only records, libraries like boxo/ipns and js-ipns, and
95 |    implementations like Kubo, will stop producing V1+V2 and switch to publishing
96 |    V2-only records that are protobuf with only two fields: the Data
97 |    CBOR and `signatureV2`. 98 |  99 | ### User benefit 100 |  101 | - End users: the main benefit for end users is the smaller size of IPNS Records and
102 |   less complexity during creation/validation of modern V2-only records. 103 |  104 | - Developers interested in IPNS: IPNS Record creation becomes as simple as
105 |   "create DAG-CBOR with these fields, and sign it", and validation as simple as
106 |   "signatureV2 should match the DAG-CBOR value and key". This removes surface
107 |   for bugs, making it easier to reason about for use in greenfield projects. 108 |  109 | - IPFS ecosystem: lowering the complexity related to IPNS record creation and
110 |   validation makes it more likely for third-party interoperable IPNS
111 |   implementations to happen. 112 |  113 | ### Compatibility 114 |  115 | - This is backward-compatible: we adjust validation logic to allow V2-only
116 |   records, but all V1+V2 records that are being used in the wild today are
117 |   still valid. 118 |  119 | - V2-only rollout is not part of this IPIP.
120 |   - Our suggestion is to default to creating V1+V2 records for now, keeping
121 |     backward-compatibility with the old IPNS clients. 122 |  123 | - Creation of V2-only records should be introduced as an explicit opt-in. It
124 |   is up to implementations to decide when it is feasible to default to
125 |   creating V2-only records on IPNS publish. 126 |  127 | ### Security 128 |  129 | - `IpnsEntry.signatureV1` (protobuf field) is parsed only by legacy clients; modern ones ignore this value. 130 |  131 | It is highly advised to implement validation conformance tests using the fixtures
132 | included at the end of this IPIP. 133 |  134 | ### Alternatives 135 |  136 |  137 |  138 | 1. Just switch to V2-only as the new default!
139 |    - No, this would be a breaking change.
We have to do this in two steps, 140 | because we've rushed the way V2 was introduced in 2021, and STILL require 141 | field copying, even when `signatureV1` is missing. So technically there 142 | was never "V2", it was more like "V1.5". Only with this IPIP, we finally 143 | adjust validation to only care about CBOR values when there is no 144 | `signatureV1`. 145 | 146 | 2. Why keeping the outer protobuf envelope? Could we make IPNS DAG-CBOR-only? 147 | - Due to how long it takes for decentralized network nodes to upgrade, we prefer evolution rather than revolution. 148 | - Protobuf is a useful envelope for two reasons: 149 | 1. Ensures the opaque V2-only record can be passed and stored in existing infrastructure. 150 | 2. Allows us to evolve IPNS record ("V3") in the future without impacting existing infrastructure. 151 | 152 | ## Test fixtures 153 | 154 | To make testing easier below are test vectors in form of IPNS records along 155 | with the expected verification results. These test records are valid for 100 156 | years, making them safe for use in CI tests. 157 | 158 | 1. [V1-only](https://dweb.link/ipfs/bafybeifkipmlz2fehxda6y7x752uolfed7bdd46jzdammpfga5zrnkq33u/k51qzi5uqu5dm4tm0wt8srkg9h9suud4wuiwjimndrkydqm81cqtlb5ak6p7ku_v1.ipns-record) → record invalid 159 | 2. [V1+V2](https://dweb.link/ipfs/bafybeifkipmlz2fehxda6y7x752uolfed7bdd46jzdammpfga5zrnkq33u/k51qzi5uqu5dlkw8pxuw9qmqayfdeh4kfebhmreauqdc6a7c3y7d5i9fi8mk9w_v1-v2.ipns-record) (both signatures valid) → record valid, value points at `/ipfs/bafkqaddwgevxmmraojswg33smq` 160 | 3. [V1+V2](https://dweb.link/ipfs/bafybeifkipmlz2fehxda6y7x752uolfed7bdd46jzdammpfga5zrnkq33u/k51qzi5uqu5dlmit2tuwdvnx4sbnyqgmvbxftl0eo3f33wwtb9gr7yozae9kpw_v1-v2-broken-v1-value.ipns-record) (both signatures valid, but 'value' is different in V1 pb vs V2 CBOR) → record invalid 161 | 4. [V1+V2](https://dweb.link/ipfs/bafybeifkipmlz2fehxda6y7x752uolfed7bdd46jzdammpfga5zrnkq33u/k51qzi5uqu5diamp7qnnvs1p1gzmku3eijkeijs3418j23j077zrkok63xdm8c_v1-v2-broken-signature-v2.ipns-record) (only signatureV1 valid) → record invalid 162 | 5. [V1+V2](https://dweb.link/ipfs/bafybeifkipmlz2fehxda6y7x752uolfed7bdd46jzdammpfga5zrnkq33u/k51qzi5uqu5dilgf7gorsh9vcqqq4myo6jd4zmqkuy9pxyxi5fua3uf7axph4y_v1-v2-broken-signature-v1.ipns-record) (only signatureV2 valid) → record valid, value points at `/ipfs/bafkqahtwgevxmmrao5uxi2bamjzg623fnyqhg2lhnzqxi5lsmuqhmmi` 163 | 6. [V2-only](https://dweb.link/ipfs/bafybeifkipmlz2fehxda6y7x752uolfed7bdd46jzdammpfga5zrnkq33u/k51qzi5uqu5dit2ku9mutlfgwyz8u730on38kd10m97m36bjt66my99hb6103f_v2.ipns-record) (no V1 fields) → record valid 164 | 165 | :::note 166 | 167 | Implementers can either write own tests against the above test vectors, or run 168 | [gateway-conformance](https://github.com/ipfs/gateway-conformance/) test suite, 169 | which includes tests for these vectors since 170 | [gateway-conformance/pull/157](https://github.com/ipfs/gateway-conformance/pull/157). 171 | 172 | ::: 173 | 174 | ### Copyright 175 | 176 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 
177 | -------------------------------------------------------------------------------- /src/ipips/ipip-0484.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'IPIP-0484: Opt-in Filtering in Routing V1 HTTP API' 3 | date: 2024-10-29 4 | ipip: ratified 5 | editors: 6 | - name: Daniel Norman 7 | github: 2color 8 | affiliation: 9 | name: Shipyard 10 | url: https://ipshipyard.com 11 | relatedIssues: 12 | - https://github.com/ipfs/ipfs-check/issues/70 13 | - https://github.com/ipfs/boxo/pull/678 14 | order: 484 15 | tags: ['ipips'] 16 | --- 17 | 18 | ## Summary 19 | 20 | Add opt-in support for filtering specific network transports and/or transfer protocols to the Delegated Routing v1 HTTP endpoints via HTTP GET parameters. 21 | 22 | ## Motivation 23 | 24 | IPFS aims to allow ubiquitous data exchange across different runtimes and platforms. One of the most challenging aspects of this goal is the diversity of network conditions and capabilities across different environments. Web browsers have a very limited network stack, and most web browsers do not support the full range of network transport protocols that are commonly used in other IPFS implementations. 25 | 26 | The Delegated Routing v1 API empowers resource constrained clients like web browsers by significantly reducing the number of network connections necessary to fetch content addressed data directly from provider peers. 27 | 28 | However, there are many cases where most of the results from the Delegated Routing v1 API are not actionable by clients, because the client does not support either the **network transport protocol** or the **transfer protocol** of the provider. 29 | 30 | For instance, web browsers are limited to a specific set of network transport protocols, namely HTTPS, Secure WebSockets, WebTransport (emerging), and WebRTC. Consequently, providing information about providers that exclusively support TCP and/or UDP is not beneficial for browser-based clients, as they are unable to utilize such connections. 31 | 32 | Moreover, [Helia](https://github.com/ipfs/helia/), the most actively maintained browser IPFS implementation, supports block retrieval by CID with Bitswap and Trustless Gateways, but does not support Graphsync. 33 | 34 | This means that returning providers that only support raw TCP, raw UDP/QUIC, or Graphsync from the Delegated Routing API is not useful for browser clients, and results in unnecessary network traffic for browser clients. 35 | 36 | ## Note on terminology 37 | 38 | The term **"transport"** is overloaded in the IPFS ecosystem. 39 | 40 | In the context of this IPIP, we refer to the network layer transport protocol, e.g. TCP, QUIC, WebTransport, as **"network transport protocol"** to avoid ambiguity. 41 | 42 | **"Transfer protocol"** refers to data transfer protocols, i.e. content-addressed block retrieval protocols, e.g. Bitswap, Graphsync, HTTP. 43 | 44 | ## Detailed design 45 | 46 | ### Network Address Filtering 47 | 48 | The proposed change is to add a `?filter-addrs` parameter to the `GET /routing/v1/providers/{cid}` and `GET /routing/v1/peers/{peer-id}` endpoints of :cite[http-routing-v1]: 49 | 50 | - Add a `?filter-addrs=` optional parameter to `GET /routing/v1/providers/{CID}` that indicates which network transports to return by filtering the multiaddrs in the `Addrs` field of the [Peer schema]. 
51 | - The value of the `filter-addrs` parameter is a comma-separated list of network transport protocol _name strings_ as defined in the [multiaddr protocol registry](https://github.com/multiformats/multiaddr/blob/master/protocols.csv), e.g. `?filter-addrs=webtransport`.
52 | - `unknown` can be passed to include providers whose multiaddrs are unknown, e.g. `?filter-addrs=unknown`. This allows filtering providers whose multiaddrs are unknown at the time of filtering.
53 | - Multiaddrs are filtered by checking if the protocol name appears in any of the multiaddrs (logical OR).
54 | - Negative filtering is done by prefixing the protocol name with `!`, e.g. to skip IPv6 and QUIC addrs: `?filter-addrs=!ip6,!quic-v1`. Note that negative filtering is done by checking if the protocol name does not appear in any of the multiaddrs (logical AND).
55 | - If no parameter is passed, the default behavior is to return the original list of addresses unchanged.
56 | - If only negative filters are provided, addresses not passing any of the negative filters are included.
57 | - If positive filters are provided, only addresses passing at least one positive filter (and no negative filters) are included.
58 | - If both positive and negative filters are provided, the address must pass all negative filters and at least one positive filter to be included.
59 | - If there are no multiaddrs that match the passed transports, the provider is omitted from the response.
60 | - Filtering is case-insensitive.
61 |  62 | ### IPFS Protocol Filtering 63 |  64 | The proposed change is to add a `?filter-protocols` parameter to the `GET /routing/v1/providers/{cid}` and `GET /routing/v1/peers/{peer-id}` endpoints of :cite[http-routing-v1]: 65 | 
66 | - Add a `?filter-protocols=` optional parameter to `GET /routing/v1/providers/{CID}` to filter providers based on the `Protocols` field of the [Peer schema].
67 | - The `filter-protocols` parameter is a comma-separated list of transfer protocol names, e.g. `?filter-protocols=transport-bitswap`.
68 | - Transfer protocol names should be treated as opaque strings and have a max length of 63 characters. A non-exhaustive list of transfer protocols is defined per convention in the [multicodec registry](https://github.com/multiformats/multicodec/blob/3b7b52deb31481790bc4bae984d8675bda4e0c82/table.csv#L149-L151).
69 | - Implementations MUST preserve all transfer protocol names when returning a positive result that matches one or more of them.
70 | - A special `unknown` name can be passed to include providers whose transfer protocol list is empty (unknown), e.g. `?filter-protocols=unknown`. This allows for including providers returned from the DHT that do not contain explicit transfer protocol information.
71 | - Providers are filtered by checking if the transfer protocol name appears in the `Protocols` array (logical OR).
72 | - If the provider doesn't match any of the passed transfer protocols, the provider is omitted from the response.
73 | - If a provider passes the filter, it is returned unchanged, i.e. the full set of protocols is returned, including protocols that are not included in the filter. (Note that this is different from `filter-addrs`, where only the multiaddrs that pass the filter are returned.)
74 | - Filtering is case-insensitive.
75 | - If no parameter is passed, the default behavior is to not filter by transfer protocol.
76 |  77 | :::note 78 | Even though some of the existing IPFS transfer protocol names start with `transport`, e.g.
`transport-bitswap`, `transport-graphsync-filecoinv1`, and `transport-ipfs-gateway-http`, they should not be confused with the network transport protocols used in peer addresses, which are filtered using the `filter-addrs` parameter. 79 | ::: 80 |  81 | ## Design rationale 82 | 
83 | - Using these query parameters improves cache efficiency, as the response will be smaller and more specific to the client's needs.
84 | - Backward compatibility is maintained by not changing the default behavior of the API.
85 | - Use of protocol names rather than codes makes human debugging easier.
86 | - DHT providers currently do not contain any transfer protocol information. `unknown` can be passed to `filter-protocols` to include such providers.
87 | - Since provider records are independent of peer records, and it's pretty common to have provider records without up-to-date multiaddrs for that peer, `unknown` can be passed to `filter-addrs` to include such providers.
88 | - Combining transfer protocol and transport protocol filters is done by ANDing the results of the filters, e.g. `?filter-addrs=webtransport&filter-protocols=transport-bitswap` will return providers that support bitswap and have a webtransport multiaddr.
89 |  90 | ### User benefit 91 |  92 | By filtering out providers that do not support the desired network transport protocol and/or transfer protocol, the client can reduce the traffic necessary in order to fetch the data. 93 |  94 | Moreover, it makes it much easier to determine whether there are any browser-usable providers for a given CID, which is a common use case for clients. 95 |  96 | ### Compatibility 97 |  98 | This should not affect existing clients or servers. 99 |  100 | The default behavior when `?filter-addrs` and `?filter-protocols` are not passed is left unspecified; this IPIP is limited to opt-in behavior. 101 |  102 | ### Copyright 103 |  104 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 105 |  106 | [Peer schema]: https://specs.ipfs.tech/routing/http-routing-v1/#peer-schema 107 | -------------------------------------------------------------------------------- /src/ipns/index.html: -------------------------------------------------------------------------------- 1 | --- 2 | title: InterPlanetary Naming System 3 | description: | 4 | The InterPlanetary Naming System (IPNS) is a naming system responsible for creating, reading and updating mutable pointers to data. 5 | --- 6 |  7 | {% include 'header.html' %} 8 |  9 |
10 | {% include 'list.html', posts: collections.ipns %} 11 |
12 | 13 | {% include 'footer.html' %} 14 | -------------------------------------------------------------------------------- /src/ipns/ipns-pubsub-router.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: IPNS PubSub Router 3 | description: Specifies how to publish and retrieve IPNS records using libp2p PubSub router. 4 | date: 2022-11-09 5 | maturity: reliable 6 | editors: 7 | - name: Adin Schmahmann 8 | github: aschmahmann 9 | - name: Marcin Rataj 10 | github: lidel 11 | url: https://lidel.org/ 12 | xref: 13 | - ipns-record 14 | tags: ['ipns'] 15 | order: 1 16 | --- 17 | 18 | :ref[InterPlanetary Naming System (IPNS)] is a naming system responsible for the creating, reading and updating of mutable pointers to data. 19 | IPNS consists of a public/private asymmetric cryptographic key pair, a record type and a protocol. 20 | Part of the protocol involves a routing layer that is used for the distribution and discovery of new or updated IPNS records. 21 | 22 | The IPNS PubSub router uses [libp2p PubSub](https://github.com/libp2p/specs/tree/master/pubsub) as a base, and adds persistence on top of it to ensure IPNS updates are always available to a connected network. 23 | An inherent property of the IPNS PubSub Router is that IPNS records are republishable by peers other than the peer that originated the record. 24 | This implies that as long as a peer on the network has an IPNS record it can be made available to other peers (although the records may be ignored if they are received after the IPNS record's End-of-Life/EOL). 25 | 26 | # Introduction 27 | 28 | Each time a node publishes an updated IPNS record for a particular key it is propagated by the router into the network where network nodes can choose to accept or reject the new record. 29 | When a node attempts to retrieve an IPNS record from the network it uses the router to query for the IPNS record(s) associated with the IPNS key; the node then validates the received records. 30 | 31 | In this spec we address building a router based on a PubSub system, particularly focusing on libp2p PubSub. 32 | 33 | # PubSub Protocol Overview 34 | 35 | The protocol has four components: 36 | 37 | - IPNS Records and Validation (:cite[ipns-record]) 38 | - [libp2p PubSub](https://github.com/libp2p/specs/tree/master/pubsub) 39 | - Translating an IPNS record name to/from a PubSub topic 40 | - Layering persistence onto libp2p PubSub 41 | 42 | # Translating an IPNS record name to/from a PubSub topic 43 | 44 | For a given IPNS local record key described in the IPNS Specification the PubSub topic is: 45 | 46 | **Topic format:** `/record/base64url-unpadded(key)` 47 | 48 | where base64url-unpadded is an unpadded base64url as specified in :cite[rfc4648]. 49 | 50 | # Layering persistence onto libp2p PubSub 51 | 52 | libp2p PubSub does not have any notion of persistent data built into it. However, we can layer persistence on top of PubSub by utilizing [libp2p Fetch](https://github.com/libp2p/specs/tree/master/fetch). 53 | 54 | The protocol has the following steps: 55 | 56 | 1. Start State: Node `A` subscribes to the PubSub topic `t` corresponding to the local IPNS record key `k` 57 | 2. `A` notices that a node `B` has connected to it and subscribed to `t` 58 | 3. Some time passes (might be 0 seconds, or could use a more complex system to determine the duration) 59 | 4. `A` sends `B` a Fetch request for `k` 60 | 5. 
If Fetch returns a record that supersedes `A`'s current record then `A` updates its record and Publishes it to the network 61 | 62 | Note: PubSub does not guarantee that a message sent by a peer `A` will be received by a peer `B` and it's possible 63 | (e.g. in systems like [gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub)) 64 | that this is true even if `A` and `B` are already connected. Therefore, whenever `A` notices **any** node that has 65 | connected to it and subscribed to `t` it should run the Fetch protocol as described above. However, developers may have routers 66 | with properties that allow the amount of time in step 3 to increase arbitrarily large (including infinite) amounts. 67 | 68 | # Protocol 69 | 70 | A node `A` putting and getting updates to an IPNS key `k`, with computed PubSub topic `t` 71 | 72 | 1. PubSub subscribe to `t` 73 | 2. Run the persistence protocol, both to fetch data and return data to those that request it 74 | 3. When updating a record do a PubSub Publish and keep the record locally 75 | 4. When receiving a record if it's better than the current record keep it and republish the message 76 | 5. (Optional) Periodically republish the best record available 77 | 78 | Note: 5 is optional because it is not necessary. However, receiving duplicate records are already handled efficiently 79 | by the above logic and properly running the persistence protocol can be difficult (as in the example below). Periodic 80 | republishing can then act as a fall-back plan in the event of errors in the persistence protocol. 81 | 82 | Persistence Error Example: 83 | 84 | 1. `B` connects to `A` 85 | 2. `A` gets the latest record (`R1`) from `B` 86 | 3. `B` then disconnects from `A` 87 | 4. `B` publishes `R2` 88 | 5. `B` reconnects to `A` 89 | 90 | If `A`'s checking of when `B` reconnects has problems it could miss `R2` (e.g. if it polled subscribed peers 91 | every 10 seconds) 92 | 93 | # Implementations 94 | 95 | - Kubo 96 | - 97 | - 98 | - 99 | - 100 | -------------------------------------------------------------------------------- /src/meta/code-of-conduct.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Code of Conduct 3 | description: The code of conduct that all community participants are held to. 4 | date: 2015-03-19 5 | maturity: stable 6 | editors: 7 | - name: Juan Benet 8 | github: jbenet 9 | - name: Harlan T Wood 10 | github: harlantwood 11 | - name: Richard Littauer 12 | github: RichardLitt 13 | - name: Michelle Hertzfeld 14 | github: meiqimichelle 15 | - name: Matt Zumwalt 16 | github: flyingzumwalt 17 | - name: Hector Sanjuan 18 | github: hsanjuan 19 | - name: Jim Garrison 20 | github: garrison 21 | tags: ['meta'] 22 | order: 0 23 | --- 24 | 25 | We believe that our mission is best served in an environment that is friendly, safe, and accepting, and free from 26 | intimidation or harassment. Towards this end, certain behaviors and practices will not be tolerated. 27 | 28 | :::note 29 | 30 | This document was [copied from GitHub](https://github.com/ipfs/community/commits/master/code-of-conduct.md), the 31 | editiors listed are the contributors to that file in chronological order. 32 | 33 | ::: 34 | 35 | ## tl;dr 36 | 37 | - Be respectful. 38 | - We're here to help: abuse@ipfs.io 39 | - Abusive behavior is never tolerated. 40 | - Violations of this code may result in swift and permanent expulsion from the IPFS community. 
41 | - "Too long, didn't read" is not a valid excuse for not knowing what is in this document. 42 | 43 | ## Scope 44 | 45 | We expect all members of the IPFS community to abide by this Code of Conduct at all times in all IPFS community venues, online and in person, and in one-on-one communications pertaining to IPFS affairs. 46 | 47 | This policy covers the usage of IPFS public infrastructure, including the IPFS.io HTTP gateways, as well as other IPFS websites, IPFS related events, and any other services offered by or on behalf of the IPFS community. It also 48 | applies to behavior in the context of the IPFS Open Source project communities, including but not limited to public GitHub repositories, IRC channels, social media, mailing lists, and public events. 49 | 50 | This Code of Conduct is in addition to, and does not in any way nullify or invalidate, any other terms or conditions related to use of IPFS services. 51 | 52 | The definitions of various subjective terms such as "discriminatory", "hateful", or "confusing" will be decided at the sole discretion of the [IPFS abuse team](#contact-info). 53 | 54 | ## Friendly Harassment-Free Space 55 | 56 | We are committed to providing a friendly, safe and welcoming environment for all, regardless of gender identity, sexual orientation, disability, ethnicity, religion, age, physical appearance, body size, race, or similar personal characteristics. 57 | 58 | We ask that you please respect that people have differences of opinion regarding technical choices, and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a single right answer. A difference of technology preferences is not a license to be rude. 59 | 60 | Any spamming, trolling, flaming, baiting, or other attention-stealing behavior is not welcome, and will not be tolerated. 61 | 62 | Harassing other users is never tolerated, whether via public or private media. 63 | 64 | Avoid using offensive or harassing nicknames, or other identifiers that might detract from a friendly, safe, and welcoming environment for all. 65 | 66 | Harassment includes, but is not limited to: any behavior that threatens or demeans another person or group, or produces an unsafe environment; harmful or prejudicial verbal or written comments related to gender, gender expression, gender identity, sexual orientation, disability, ethnicity, religion, age, physical appearance, body size, race, or similar personal characteristics; inappropriate use of nudity, sexual images, and/or sexually explicit language in public spaces; threats of physical or non-physical harm; deliberate intimidation, stalking or following; harassing photography or recording; sustained disruption of talks or other events; inappropriate physical contact; and unwelcome sexual attention. 67 | 68 | Media shared through public infrastructure run by the IPFS team must not contain illegal or infringing content. You should only publish content via IPFS public infrastructure if you have the right to do so. This includes complying with all software license agreements or other intellectual property restrictions. You will be solely responsible for any violation of laws or 69 | others’ intellectual property rights. 70 | 71 | ## Reporting Violations of this Code of Conduct 72 | 73 | If you believe someone is harassing you or has otherwise violated this Code of Conduct, please [contact us](#contact-info) to send us an abuse report. If this is the initial report of a problem, please include as much detail as possible. 
It is easiest for us to address issues when we have more context. 74 | 75 | If you are at an event organized by the IPFS community, contact a _duty officer_ or event staff. To learn more about our procedures handling incidents and reports at events, read the [procedures for in-person events](code-of-conduct-procedures-for-events.md) 76 | 77 | ## Copyright Violations 78 | 79 | We respect the intellectual property of others and ask that you do too. If you believe any content or other materials available through public IPFS infrastructure violates a copyright held by you and you would like to submit a notice pursuant to the Digital Millennium Copyright Act or other similar international law, you can submit a notice to the [abuse team](#contact-info) for service. 80 | 81 | (We will add a physical mailing address here when we acquire one). 82 | 83 | Please make sure your notice meets the Digital Millennium Copyright Act requirements. 84 | 85 | ## Consequences 86 | 87 | All content published to public IPFS infrastructure is hosted at the sole discretion of the IPFS team. 88 | 89 | Unacceptable behavior from any community member will not be tolerated. 90 | 91 | Anyone asked to stop unacceptable behavior is expected to comply immediately. 92 | 93 | If a community member engages in unacceptable behavior, the IPFS team may take any action they deem appropriate, up to and including a temporary ban or permanent expulsion from the community without warning (and without refund in the case of a paid event or service). 94 | 95 | ## Addressing Grievances 96 | 97 | If you feel you have been falsely or unfairly accused of violating this Code of Conduct, you should notify the IPFS team. We will do our best to ensure that your grievance is handled appropriately. 98 | 99 | In general, we will choose the course of action that we judge as being most in the interest of fostering a safe and friendly community. 100 | 101 | On IRC, let one of the ops know if you think that someone has transgressed against the Code of Conduct. If you would like to be an op and promise to help maintain and abide by the code, please let us know. 102 | 103 | ## Contact Info 104 | 105 | Please contact abuse@ipfs.io if you need to report a problem or address a grievance related to an abuse report. 106 | 107 | We will keep all matters confidential and they will only be shared within the team, with the exception of legal counsel when deemed necessary, or unless given explicit permission by the reporter. 108 | 109 | If you prefer, you can contact any of the abuse team members separately. The abuse team is formed by: 110 | 111 | - molly@ipfs.io 112 | - dietrich@ipfs.io 113 | - michael@ipfs.io 114 | - ebony@ipfs.io 115 | - hector@ipfs.io 116 | 117 | When contacting an individual directly, they will ask for explicit permission to share the details with the rest of the team. When not granted, they will keep details of the matters as confidential as possible (particularly private or identity information) but they may still share broad strokes as necessary to resolve the issue. 118 | 119 | You are also encouraged to contact us if you are curious about something that might be "on the line" between appropriate and inappropriate content. We are happy to provide guidance to help you be a successful part of our community. 120 | 121 | ## Events 122 | 123 | Please see our [Code of Conduct Events Addendum](https://github.com/ipfs/community/commits/master/code-of-conduct-for-events.md) in the original repo. 
124 | 125 | ## Changes 126 | 127 | This is a living document and may be updated from time to time. Please refer to the git history for this document to view the changes. 128 | 129 | ## Credit and License 130 | 131 | This Code of Conduct is based on the [npm Code of Conduct](https://www.npmjs.com/policies/conduct) and the [CodeOfConduct4Lib](https://github.com/code4lib/code-of-conduct/blob/master/code_of_conduct.md), which itself is based on the [example policy](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment) from the [Geek Feminism wiki](http://geekfeminism.wikia.com/), created by the Ada Initiative and other volunteers. 132 | 133 | This document may be reused under a [Creative Commons Attribution-ShareAlike License](http://creativecommons.org/licenses/by-sa/4.0/). 134 | -------------------------------------------------------------------------------- /src/meta/index.html: -------------------------------------------------------------------------------- 1 | --- 2 | title: Meta 3 | description: | 4 | Meta contains all the non-technical documents that conspire to make the standards 5 | project work: what the core values are, what the governance model is, how to produce documents, 6 | etc. 7 | --- 8 | 9 | {% include 'header.html' %} 10 | 11 |
12 | {% include 'list.html', posts: collections.meta %} 13 |
14 | 15 | {% include 'footer.html' %} 16 | -------------------------------------------------------------------------------- /src/meta/ipip-process.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "IPIP: Improvement Process for IPFS Specifications" 3 | description: > 4 | The specification documenting the process through which a new IPIP should be proposed. 5 | date: 2023-02-23 6 | editors: 7 | - name: Marcin Rataj 8 | github: lidel 9 | url: https://lidel.org/ 10 | - name: Guillaume Michel 11 | github: guillaumemichel 12 | - name: Henrique Dias 13 | github: hacdias 14 | url: https://hacdias.com/ 15 | order: 1 16 | --- 17 | 18 | ## Introduction 19 | 20 | IPIP aims to focus protocol design discussions into an orderly process that: 21 | 22 | 1. Provides good visibility into to the full set of proposals 23 | 2. Keeps the full discussion for a proposal in one place, providing historical context 24 | 3. Ensures stakeholders in the project can be aware of proposed changes and participate 25 | in the decision making process 26 | 4. Provides a mechanism to ensure proposals are given consideration and decisions get made 27 | 28 | ## Process design 29 | 30 | We adopted a formal change management process for the [ipfs/specs][1] repository, providing a 31 | minimal structure for opening, reviewing, and merging specification changes. 32 | 33 | [1]: https://github.com/ipfs/specs/ 34 | 35 | ### What is an IPIP? 36 | 37 | IPIP Provides an orderly mechanism for considering proposed changes to IPFS specifications. 38 | **An IPIP proposal is not to be the spec itself; the approval of an IPIP leads to an update to 39 | a specification.** 40 | 41 | To illustrate: 42 | 43 | - In order to understand how (hypothetical) WebDAV Gateway works, one would 44 | read contents of specs in `ipfs/specs/src/webdav-gateway.md`. 45 | - IPIP in `ipfs/specs/src/ipips/ipip-000N.md` would only include 46 | **Motivation** and explainer why certain design decisions were made at a 47 | certain point in time. Initial `ipip-000N.md` would explain 48 | why we added WebDAV spec in the first place. 49 | 50 | ### What changes need the IPIP process? 51 | 52 | - **Does Not Need IPIP**: The spec has a bug - something that is plainly a mistake 53 | - **Does Not need IPIP**: Adding more details, test vectors, and editorials/cosmetic changes 54 | - **Needs IPIP**: An addition to the protocol 55 | - **Needs IPIP**:Things that could cause an interop issues require a PR with fix and IPIP in 56 | `ipfs/specs/src/ipips/ipip-000M.md` explaining why we make the 57 | breaking spec change, compatibility/migration considerations etc. 58 | 59 | ## Improvement lifecycle 60 | 61 | ### Opening an improvement proposal (IPIP) 62 | 63 | Changes to IPFS specifications can be proposed by opening a Git pull-request 64 | (PR) against the `ipfs/specs` repository. 65 | 66 | In addition to specification changes, such PR must include a short **IPIP 67 | document** based on the template in [`ipfs/specs/ipip-template.md`](https://github.com/ipfs/specs/blob/main/ipip-template.md). 68 | 69 | When a new specification file is added to the repo, it should be based on 70 | the template at [`ipfs/specs/template.md`](https://github.com/ipfs/specs/blob/main/template.md). 71 | 72 | When naming a new proposal, don't try to introduce an IPIP number; we will do that only for 73 | IPIPs that are approved before accepting into `main` branch. 
74 | 75 | Proposals are officially submitted when a pull request into `main` is opened 76 | 77 | Proposals that were reviewed as useful, but rejected for now, will be moved into `IPIP/deferred` folder and then merged into `main` 78 | 79 | ### Reviewing IPIPs 80 | 81 | 1. [Specs Stewards] will do an initial triage of newly opened PRs roughly monthly. They'll try to filter out 82 | noise, so that community consideration is given only to reasonable proposals; others they'll reject. 83 | 2. Specs Stewards will post to the forums linking to the proposal; directing feedback/discussion to 84 | take place in GitHub on the PR 85 | 3. After a month of discussion, Specs Stewards will review again. If there are no substantive disagreements 86 | with the proposal, including within Spec Stewards, the proposal will be approved. 87 | 4. If discussion or modification is still underway and appears to be leading to a resolution, it can be held 88 | open for another month 89 | 5. Proposals that are generating ongoing discussion and seem contentious or stuck will be brought in for 90 | consideration at a monthly sync, to be announced at least a week ahead of time on the forum. 91 | 6. After discussion, Spec Stewards will make call on whether to approve or reject the proposal. 92 | 7. At this point approved proposals get assigned a number (encoded in the filename), 93 | and merged into the IPIP folder on `main` branch. Potentially useful (but rejected for now) 94 | proposals should be also merged to `main`, but in a subfolder called `/IPIP/deferred`. Proposals rejected in initial 95 | triage will simply have the PR declined. 96 | 8. IPIP author and two approving [Specs Stewards] are added to `CODEOWNERS` file to be 97 | automatically asked to review any future changes to files added or modified by the IPIP. 98 | 99 | ### Things not covered by this document 100 | 101 | [Specs Stewards] will adjust the process based on usage patterns. 102 | 103 | [Specs Stewards]: https://github.com/orgs/ipfs/teams/specs-stewards/members 104 | 105 | ## Copyright 106 | 107 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 108 | -------------------------------------------------------------------------------- /src/routing/index.html: -------------------------------------------------------------------------------- 1 | --- 2 | title: Routing 3 | description: | 4 | Routing is the way to determine where to find a given content, peer, or IPNS record. 5 | --- 6 | 7 | {% include 'header.html' %} 8 | 9 |
10 | {% include 'list.html', posts: collections.routing %} 11 |
12 | 13 | {% include 'footer.html' %} 14 | -------------------------------------------------------------------------------- /template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 17 | 18 | ${body} 19 |
20 |  21 |  22 | -------------------------------------------------------------------------------- /template.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Specification Title 3 | description: > 4 | Specification short description. 5 | date: YYYY-MM-DD 6 | maturity: wip 7 | editors: 8 | - name: Your Name 9 | github: yourGitHubHandle 10 | --- 11 |  12 | TODO: One paragraph explanation of the specification 13 |  14 | ## Introduction 15 |  16 | TODO: Clearly explain why the specification exists and what problem it solves. 17 |  18 | ## Specification 19 |  20 | TODO: Explain things in depth. 21 |  22 | The resulting specification should be detailed enough to allow competing, 23 | interoperable implementations. 24 |  25 | ### Test fixtures 26 |  27 | TODO: List relevant CIDs. Describe how implementations can use them to determine 28 | specification compliance. 29 |  30 | ### Security 31 |  32 | TODO: Explain the security implications/considerations relevant to the spec. 33 |  34 | ### Privacy and User Control 35 |  36 | TODO: Note if there are any privacy or user control considerations that should be 37 | taken into account by the implementers. 38 |  39 | ## Copyright 40 |  41 | Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). 42 | --------------------------------------------------------------------------------