├── .gitignore
├── .travis.yml
├── CHANGELOG.md
├── LICENSE
├── README.md
├── package.json
├── src
├── add.js
├── health
│ ├── graph.js
│ ├── index.js
│ └── metrics.js
├── id.js
├── index.js
├── peers
│ ├── index.js
│ ├── ls.js
│ └── rm.js
├── pin
│ ├── add.js
│ ├── index.js
│ ├── ls.js
│ └── rm.js
├── recover.js
├── status.js
├── sync.js
├── utils
│ ├── default-config.js
│ ├── file-result-stream-converter.js
│ ├── load-commands.js
│ ├── module-config.js
│ ├── multipart.js
│ ├── prepare-file.js
│ ├── request-api.js
│ ├── request.js
│ ├── send-files-stream.js
│ ├── stream-to-json-value.js
│ └── stream-to-value.js
└── version.js
└── test
├── add.spec.js
├── health.graph.spec.js
├── health.metrics.spec.js
├── helpers
└── index.js
├── id.spec.js
├── init.spec.js
├── peers.ls.spec.js
├── peers.rm.spec.js
├── pin.add.spec.js
├── pin.ls.spec.js
├── pin.rm.spec.js
├── recover.spec.js
├── status.spec.js
├── sync.spec.js
└── version.spec.js
/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | package-lock.json
3 | gulpfile.js
4 | assets
5 |
6 | **/node_modules
7 | **/*.log
8 | **/*.swp
9 |
10 | .nyc_output
11 | coverage
12 | *.lcov
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 |
3 | node_js:
4 | - 'stable'
5 | - 10
6 | - 9
7 | - 8
8 |
9 | os:
10 | - linux
11 | - osx
12 |
13 | install:
14 | - npm i
15 | - npm i -g codecov
16 |
17 | script:
18 | - npm run test:coverage
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## [0.0.9](https://github.com/cluster-labs/ipfs-cluster-api/compare/v0.0.6...v0.0.9) (2019-05-25)
2 |
3 | ### Bug Fixes
4 |
5 | - remove `peers.add` endpoint ([#8](https://github.com/cluster-labs/ipfs-cluster-api/issues/8))([ea660f5](https://github.com/cluster-labs/ipfs-cluster-api/commit/ea660f5b77389c78e2b07ea080035c4530827dce))
6 |
7 | ### Other Improvements
8 |
9 | - improved code coverage
10 |
11 | - added docs on adding [Basic Auth headers](https://github.com/cluster-labs/ipfs-cluster-api#custom-headers)
12 |
13 | ## [0.0.6](https://github.com/cluster-labs/ipfs-cluster-api/compare/v0.0.5...v0.0.6) (2019-05-20)
14 |
15 | ### Bug Fixes
16 |
17 | - importing IpfsCluster in multiple ways ([#4](https://github.com/cluster-labs/ipfs-cluster-api/issues/4))([
18 | d7da873](https://github.com/cluster-labs/ipfs-cluster-api/commit/d7da873c9615cf60a0a829bd02b45965c1455e89))
19 |
20 | - Not able to ADD peer to a cluster using HTTP endpoint ([#5](https://github.com/cluster-labs/ipfs-cluster-api/issues/5))([
21 | 44659ce](https://github.com/cluster-labs/ipfs-cluster-api/commit/44659cedca17dc97bde7e4508e64dd3470d004ac))
22 |
23 | ### Other Improvements
24 |
25 | - Added extensive documentation for the available `option`(s) for various commands
26 |
27 | ## [0.0.5](https://github.com/cluster-labs/ipfs-cluster-api/compare/v0.0.3...v0.0.5) (2019-05-11)
28 |
29 | ### Bug Fixes
30 |
31 | - 'add' command doesn't return the CID for the added data. ([#3](https://github.com/cluster-labs/ipfs-cluster-api/issues/3))([6a7a1b](https://github.com/cluster-labs/ipfs-cluster-api/commit/6a7a1b5899f2c3e37ccd0bd0766f51aec0e721a4))
32 |
33 | ### New features
34 |
35 | - Browser Support
36 | - using browserify
37 | - using webpack
38 | - using CDN links
39 |
40 | ## [0.0.3](https://github.com/cluster-labs/ipfs-cluster-api/compare/v0.0.2...v0.0.3) (2019-05-07)
41 |
42 | ### New features
43 |
44 | - support for passing `options` in the commands: `add`, `pin`, `peers`, `health`, `status`, `sync`, `recover`
45 |
46 | ### Other improvements
47 |
48 | - Extensive README
49 |
50 | - Reached feature parity with Go [`ipfs-cluster-ctl`](https://cluster.ipfs.io/documentation/ipfs-cluster-ctl/)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2017 Tom O'Donnell, 2019 Protocol Labs, Inc, 2019 ClusterLabs (TowardsBlockchain)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | A JavaScript client library for the IPFS Cluster HTTP API.
6 |
7 | [](https://clusterlabs.io) [](http://github.com/cluster-labs/ipfscloud-web) [](https://badge.fury.io/js/ipfs-cluster-api)
8 |
9 | [](https://gitter.im/ipfs-cluster-api/community)
10 | [](https://travis-ci.org/cluster-labs/ipfs-cluster-api)
11 | [](https://codecov.io/gh/cluster-labs/ipfs-cluster-api)
12 | ---
13 |
14 | **UNOFFICIAL AND ALPHA**
15 |
16 | This is a port of `ipfs/js-ipfs-api` adapted for the API exposed by `ipfs/ipfs-cluster`.
17 |
18 | ## Maintainer
19 |
20 | [**Vaibhav Saini**](https://github.com/vasa-develop)
21 |
22 | ## Table of Contents
23 |
24 | - [Install](#install)
25 | - [Running the daemon with the right port](#running-the-daemon-with-the-right-port)
26 | - [Importing the module and usage](#importing-the-module-and-usage)
27 | - [In a web browser through Browserify](#through-browserify)
28 | - [In a web browser through Webpack](#through-webpack)
29 | - [In a web browser through CDN](#from-cdn)
30 | - [Custom Headers](#custom-headers)
31 | - [Usage](#usage)
32 | - [API Docs](#api)
33 | - [Callbacks and promises](#callbacks-and-promises)
34 | - [Development](#development)
35 | - [Contribute](#contribute)
36 | - [Historical Context](#historical)
37 | - [License](#license)
38 |
39 |
40 | ## Install
41 |
42 | This module uses Node.js and can be installed through npm:
43 |
44 | ```
45 | npm install --save ipfs-cluster-api
46 | ```
47 |
48 | We support both the Current and Active LTS versions of Node.js. Please see [nodejs.org](https://nodejs.org/) for what these currently are.
49 |
50 | ### Running the daemon with the right port
51 |
52 | **ipfs daemon**
53 |
54 | To make `ipfs-cluster-service` work, you need to have a local `ipfs` daemon running. It needs to be listening on the right port. `5001` is the default, and is used in the examples below, but it can be set to whatever you need.
55 |
56 | ```
57 | # Show the ipfs config API port to check it is correct
58 | > ipfs config Addresses.API
59 | /ip4/127.0.0.1/tcp/5001
60 | # Set it if it does not match the above output
61 | > ipfs config Addresses.API /ip4/127.0.0.1/tcp/5001
62 | # Restart the daemon after changing the config
63 |
64 | # Run the daemon
65 | > ipfs daemon
66 | ```
67 |
68 | **ipfs-cluster-service daemon**
69 |
70 | To interact with the API, you need to have an `ipfs-cluster-service` daemon running. It needs to be listening on the right port. `9094` is the default, and is used in the examples below, but it can be set to whatever you need. You can set up `ipfs-cluster-service` by following [**this installation guide**](https://github.com/ipfs/ipfs-cluster#install).
71 |
72 | After installing, run the daemon:
73 |
74 | ```
75 | # Run the daemon
76 | > ipfs-cluster-service daemon
77 | ```
78 |
79 | ### Importing the module and usage
80 |
81 | ```javascript
82 | const ipfsCluster = require('ipfs-cluster-api')
83 |
84 | // connect to ipfs daemon API server
85 | const cluster = ipfsCluster('localhost', '9094', { protocol: 'http' }) // leaving out the arguments will default to these values
86 |
87 | // or connect with multiaddr
88 | const cluster = ipfsCluster('/ip4/127.0.0.1/tcp/9094')
89 |
90 | // or using options
91 | const cluster = ipfsCluster({ host: 'localhost', port: '9094', protocol: 'http' })
92 |
93 | // or specifying a specific API path
94 | const cluster = ipfsCluster({ host: '1.1.1.1', port: '80', 'api-path': '/some/api/path' })
95 | ```
96 |
97 | ### In a web browser
98 |
99 | #### **through Browserify**
100 | Same as in Node.js; you just have to use [browserify](http://browserify.org/) to bundle the code before serving it.
101 | > Note: The code uses `es6`, so you have to use [babel](https://babeljs.io/) to convert the code into `es5` before using `browserify`.
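
A minimal bundling sketch, assuming `browserify` and `babelify` are installed as dev dependencies and `src/app.js` is a hypothetical entry file that `require`s `ipfs-cluster-api`:

```javascript
// build.js — hedged sketch: bundle an app that uses ipfs-cluster-api for the browser
const browserify = require('browserify')
const fs = require('fs')

browserify('src/app.js')                                       // hypothetical entry point
  .transform('babelify', { presets: ['@babel/preset-env'] })   // transpile es6 down to es5
  .bundle()                                                    // produce a single browser bundle
  .pipe(fs.createWriteStream('bundle.js'))
```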
102 |
103 | #### **through webpack**
104 | Same as in Node.js; you just have to use [webpack](https://webpack.js.org/) to bundle the code before serving it.
105 | > Note: The code uses `es6`, so you have to use [babel](https://babeljs.io/) to convert the code into `es5` before using `webpack`.
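
A minimal `webpack.config.js` sketch, assuming `babel-loader` and `@babel/preset-env` are installed as dev dependencies and `./src/app.js` is a hypothetical entry file:

```javascript
// webpack.config.js — hedged sketch for bundling an app that uses ipfs-cluster-api
const path = require('path')

module.exports = {
  entry: './src/app.js',                       // hypothetical entry point
  output: {
    path: path.resolve(__dirname, 'dist'),
    filename: 'bundle.js'
  },
  module: {
    rules: [
      {
        test: /\.js$/,                         // transpile es6 sources down to es5
        exclude: /node_modules/,
        use: {
          loader: 'babel-loader',
          options: { presets: ['@babel/preset-env'] }
        }
      }
    ]
  }
}
```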
106 |
107 | #### **from CDN**
108 |
109 | Instead of a local installation (and browserification) you may request a remote copy of the IPFS Cluster API client from the unpkg CDN.
110 |
111 | To always request the latest version, use the following:
112 | ```html
113 |
114 |
115 |
116 |
117 | ```
118 |
119 | CDN-based IPFS Cluster API provides the `IpfsClusterAPI` constructor as a method of the global `window` object. Example:
120 |
121 | ```javascript
122 | // connect to ipfs daemon API server
123 | const cluster = IpfsClusterAPI('localhost', '9094', { protocol: 'http' }) // leaving out the arguments will default to these values
124 |
125 | // or connect with multiaddr
126 | const cluster = IpfsClusterAPI('/ip4/127.0.0.1/tcp/9094')
127 |
128 | // or using options
129 | const cluster = IpfsClusterAPI({ host: 'localhost', port: '9094', protocol: 'http' })
130 |
131 | // or specifying a specific API path
132 | const cluster = IpfsClusterAPI({ host: '1.1.1.1', port: '80', 'api-path': '/some/api/path' })
133 | ```
134 |
135 | If you omit the host and port, the client will parse `window.location.host` and use that information. This also works, and can be useful if you want to write apps that can be run from multiple different gateways:
136 |
137 | ```javascript
138 | const cluster = window.IpfsClusterAPI()
139 | ```
140 |
141 | ### **Custom Headers**
142 |
143 | If you wish to send custom headers with each request made by this library (for example, an Authorization header), you can use the config to do so:
144 |
145 | ```javascript
146 | const cluster = ipfsCluster({
147 | host: 'localhost',
148 | port: 9094,
149 | protocol: 'http',
150 | headers: {
151 | authorization: 'Basic ' + TOKEN
152 | }
153 | })
154 | ```
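
The `TOKEN` above is whatever your cluster's API expects; for HTTP Basic Auth it is typically the base64 encoding of `user:password`. A hedged sketch:

```javascript
// Hypothetical credentials — replace with the user/password configured for the cluster's REST API
const user = 'clusteruser'
const password = 'clusterpass'

// Basic Auth token: base64("user:password")
const TOKEN = Buffer.from(`${user}:${password}`).toString('base64')

const cluster = ipfsCluster({
  host: 'localhost',
  port: 9094,
  protocol: 'http',
  headers: {
    authorization: 'Basic ' + TOKEN
  }
})
```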
155 |
156 |
157 | ## Usage
158 |
159 | ### API
160 |
161 | The API is currently a work-in-progress. The exposed methods are designed
162 | to be similar to `ipfs-cluster-ctl` provided in [`ipfs/ipfs-cluster`](https://github.com/ipfs/ipfs-cluster).
163 |
164 | - [`add`](#adding-&-pinning-data-to-cluster)
165 | - [`cluster.add(data, [options], [callback])`](#add)
166 | - [`peers`](#peer-management)
167 | - [`cluster.peers.ls([callback])`](#peersls)
168 | - [`cluster.peers.rm(peerid, [callback])`](#peersremove)
169 | - [`pin`](#pins-management)
170 | - [`cluster.pin.ls([options], [callback])`](#pinls)
171 | - [`cluster.pin.add(cid, [options], [callback])`](#pinadd)
172 | - [`cluster.pin.rm(cid, [callback])`](#pinremove)
173 | - [`health`](#health)
174 | - [`cluster.health.graph([callback])`](#graph)
175 | - [`cluster.health.metrics(type, [callback])`](#metrics)
176 | - [`miscellaneous`](#node-management)
177 | - [`cluster.id([callback])`](#id)
178 | - [`cluster.version([callback])`](#version)
179 | - [`cluster.status([cid], [options], [callback])`](#status)
180 | - [`cluster.sync([cid], [options], [callback])`](#sync)
181 | - [`cluster.recover([cid], [options], [callback])`](#recover)
182 |
183 |
184 | ### Adding & pinning data to cluster
185 | #### **`add`**
186 | > Add and pin data to the cluster
187 |
188 | Add lets you add and replicate content to several ipfs daemons, performing a Cluster Pin operation on success. It takes elements from local paths as well as from web URLs (accessed with a GET request).
189 |
190 | Cluster Add is equivalent to "ipfs add" in terms of DAG building, and supports the same options for adjusting the chunker, the DAG layout etc. However, it will allocate the content and send it directly to the allocated peers (which may not necessarily include the local ipfs daemon).
191 |
192 | Once the adding process is finished, the content is fully added to all allocations and pinned in them. This makes cluster add slower than a local ipfs add, but the result is a fully replicated CID on completion. If you prefer faster adding, add directly to the local IPFS and trigger a cluster "pin add".
193 |
194 |
195 | **`cluster.add(data, [options], [callback])`**
196 |
197 | Where `data` may be:
198 |
199 | - a [`Buffer instance`](https://www.npmjs.com/package/buffer)
200 | - a [`Readable Stream`](https://www.npmjs.com/package/readable-stream)
201 | - a [`Pull Stream`](https://www.npmjs.com/package/pull-stream)
202 | - an array of objects, each of the form:
203 | ```javascript
204 | {
205 | path: '/tmp/myfile.txt', // The file path
206 | content: // A Buffer, Readable Stream or Pull Stream with the contents of the file
207 | }
208 | ```
209 | If no `content` is passed, then the path is treated as an empty directory.
210 |
211 |
212 | `options` is an optional object argument that might include the following keys:
213 | - `replication-min` (int, default: 0): Sets the minimum replication factor for pinning this file
214 | - `replication-max` (int, default: 0): Sets the maximum replication factor for pinning this file
215 | - `name` (string, default: ""): Sets a name for this pin
216 | - `shard` (bool, default: false)
217 | - `shard-size` (int, default: 0)
218 | - `recursive` (bool, default: false): Add directory paths recursively
219 | - `layout` (string, default: ""): DAG layout to use for DAG generation: `balanced` or `trickle`
220 | - `chunker` (string, default: "size-262144"): 'size-[size]' or 'rabin-[min]-[avg]-[max]'
221 | - `raw-leaves` (bool, default: false): Use raw blocks for leaves
222 | - `hidden` (bool, default: false): Include files that are hidden. Only takes effect on recursive add
223 | - `wrap-with-directory` (bool, default: false): Wrap the added content with a directory object
224 | - `progress` (bool, default: false)
225 | - `cid-version` (int, default: 0)
226 | - `hash` (string, default: "sha2-256"): Hash function to use. Implies cid-version=1
227 | - `stream-channels` (bool, default: true)
228 | - `nocopy` (bool, default: false): Add the URL using filestore. Implies raw-leaves
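
A hedged sketch passing a few of the options listed above alongside a file object:

```javascript
// Add a single file object with explicit replication settings and a pin name
cluster.add(
  { path: 'example.txt', content: Buffer.from('hello cluster') },      // hypothetical file
  { 'replication-min': 2, 'replication-max': 3, name: 'example-pin' }, // options from the list above
  (err, result) => {
    err ? console.error(err) : console.log(result)
  }
)
```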
229 |
230 |
231 | `callback` must follow `function (err, res) {}` signature, where `err` is an error if the operation was not successful. If successful, `res` will be an object of the following form:
232 |
233 | ```javascript
234 | {
235 | path: '/path/to/file/foo.txt',
236 | hash: 'QmRG3FXAW76xD7ZrjCWk8FKVaTRPYdMtwzJHZ9gArzHK5f',
237 | size: 2417
238 | }
239 | ```
240 |
241 | If no `callback` is passed, a promise is returned.
242 |
243 | ### Example
244 | ```javascript
245 | cluster.add(Buffer.from("vasa"), (err, result) => {
246 | err ? console.error(err) : console.log(result)
247 | })
248 | ```
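
Since a promise is returned when the callback is omitted, the same call can also be written with `async`/`await` (a minimal sketch):

```javascript
const addData = async () => {
  try {
    const result = await cluster.add(Buffer.from("vasa"))
    console.log(result)
  } catch (err) {
    console.error(err)
  }
}

addData()
```
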
249 | ### Peer management
250 | > Lists & removes peers from the cluster
251 |
252 | #### **`peers`**
253 |
254 | #### **`peers.ls`**
255 | > Lists the peers in the cluster
256 |
257 | This command lists the peers that are currently part of the cluster, along with their ID information.
258 |
259 | For each peer, the response includes its addresses, the cluster peers it knows about, and details about its associated IPFS daemon.
260 |
261 | **`cluster.peers.ls([callback])`**
262 |
263 | `callback` must follow `function (err, res) {}` signature, where `err` is an error if the operation was not successful. If successful, `res` returns information about the connected peers in the following form:
264 | ```json
265 | [ { "id": "QmPq34QAMCFLNTXWtM3pc7qeQ2kneuCgLZjSVywWoEumRn",
266 | "addresses":
267 | [ "/p2p-circuit/ipfs/QmPq34QAMCFLNTXWtM3pc7qeQ2kneuCgLZjSVywWoEumRn",
268 | "/ip4/127.0.0.1/tcp/9096/ipfs/QmPq34QAMCFLNTXWtM3pc7qeQ2kneuCgLZjSVywWoEumRn",
269 | "/ip4/10.184.9.134/tcp/9096/ipfs/QmPq34QAMCFLNTXWtM3pc7qeQ2kneuCgLZjSVywWoEumRn",
270 | "/ip4/172.17.0.1/tcp/9096/ipfs/QmPq34QAMCFLNTXWtM3pc7qeQ2kneuCgLZjSVywWoEumRn",
271 | "/ip4/172.18.0.1/tcp/9096/ipfs/QmPq34QAMCFLNTXWtM3pc7qeQ2kneuCgLZjSVywWoEumRn" ],
272 | "cluster_peers": [ "QmPq34QAMCFLNTXWtM3pc7qeQ2kneuCgLZjSVywWoEumRn" ],
273 | "cluster_peers_addresses": null,
274 | "version": "0.10.1",
275 | "commit": "",
276 | "rpc_protocol_version": "/ipfscluster/0.10/rpc",
277 | "error": "",
278 | "ipfs":
279 | { "id": "QmdKAFhAAnc6U3ik6XfEDVKEsok7TnQ1yeyXmnnvGFmBhx",
280 | "addresses": [/*Array*/],
281 | "error": "" },
282 | "peername": "jarvis" } ]
283 | ```
284 |
285 | ### Example
286 | ```javascript
287 | cluster.peers.ls((err, peers) => {
288 | err ? console.error(err) : console.log(peers)
289 | })
290 | ```
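
The response shape shown above can be used directly; for example, to collect just the peer IDs (a hedged sketch):

```javascript
cluster.peers.ls((err, peers) => {
  if (err) return console.error(err)
  const ids = peers.map((peer) => peer.id) // e.g. [ 'QmPq34QAMCFLNTXWtM3pc7qeQ2kneuCgLZjSVywWoEumRn' ]
  console.log(ids)
})
```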
291 |
292 | #### **`peers.remove`**
293 | > Removes peer from the cluster
294 |
295 | This command removes a peer from the cluster. If the peer is online, it will automatically shut down. All other cluster peers should be online for the operation to succeed, otherwise some nodes may be left with an outdated list of cluster peers.
296 |
297 | **`cluster.peers.rm(peerid, [callback])`**
298 |
299 | Where `peerid` is the `id` of the peer to be removed.
300 |
301 | `callback` must follow `function (err, res) {}` signature, where `err` is an error if the operation was not successful.
302 |
303 | If no `callback` is passed, a promise is returned.
304 |
305 | ### Example
306 | ```javascript
307 | cluster.peers.rm("QmdKAFhAAnc6U3ik6XfEDVKEsok7TnQ1yeyXmnnvGFmBhx", (err) => {
308 | err ? console.error(err) : console.log("peer removed")
309 | })
310 | ```
311 |
312 | ### Pins management
313 | > Lists, adds & removes pins from the pinlist of the cluster
314 |
315 | #### **`pin`**
316 |
317 | #### **`pin.ls`**
318 | > Lists the pins in the pinlist
319 |
320 | This command will list the CIDs which are tracked by IPFS Cluster and to which peers they are currently allocated. This list does not include any monitoring information about the IPFS status of the CIDs; it merely represents the list of pins which are part of the shared state of the cluster. For IPFS-status information about the pins, use "status".
321 |
322 | **`cluster.pin.ls([options], [callback])`**
323 |
324 | `options` is an optional object argument that might include the following keys:
325 | * `filter`: (default: `pin`) The filter only takes effect when listing all pins. The possible values are:
326 |
327 | - all
328 | - pin
329 | - meta-pin
330 | - clusterdag-pin
331 | - shard-pin
332 |
333 | `callback` must follow `function (err, pins) {}` signature, where `err` is an error if the operation was not successful. If successful, `pins` returns the list of pins.
334 |
335 | If no `callback` is passed, a promise is returned.
336 |
337 | ### Example
338 |
339 | ```javascript
340 | cluster.pin.ls({filter: 'all'}, (err, pins) => {
341 | err ? console.error(err) : console.log(pins)
342 | })
343 | ```
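
With no options, the default `filter` of `pin` is applied (a minimal sketch):

```javascript
cluster.pin.ls((err, pins) => {
  err ? console.error(err) : console.log(pins)
})
```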
344 |
345 |
346 |
347 |
348 |
349 | #### **`pin.add`**
350 | > Adds a pin to the cluster
351 |
352 | This command tells IPFS Cluster to start managing a CID. Depending on the pinning strategy, this will trigger IPFS pin requests. The CID will become part of the Cluster's state and will be tracked from this point.
353 |
354 | When the request has succeeded, the command returns the status of the CID in the cluster, and the CID should be part of the list offered by "pin ls".
355 |
356 | An optional replication factor can be provided: -1 means "pin everywhere" and 0 means use the cluster's default setting. Positive values indicate how many peers should pin this content.
357 |
358 | An optional allocations argument can be provided; allocations should be a comma-separated list of peer IDs on which we want to pin. Peers in allocations are prioritized over automatically-determined ones, but replication factors would still be respected.
359 |
360 | **`cluster.pin.add(cid, [options], [callback])`**
361 |
362 | Where `cid` is the [CID](https://docs.ipfs.io/guides/concepts/cid/) of the data to be pinned.
363 |
364 | `options` is an optional object argument that might include the following keys:
365 | - `replication-min` (int, default: 0): Sets the minimum replication factor for this pin
366 | - `replication-max` (int, default: 0): Sets the maximum replication factor for this pin
367 | - `replication` (int, default: 0): Sets a custom replication factor (overrides `replication-min` and `replication-max`)
368 | - `name` (string, default: ""): Sets a name for this pin
369 | - `user-allocations` (string array): Optional comma-separated list of peer IDs where data will be pinned
370 | - `shard_size` (int, default: 0)
371 |
372 | `callback` must follow `function (err) {}` signature, where `err` is an error if the operation was not successful.
373 |
374 | If no `callback` is passed, a promise is returned.
375 |
376 | ### Example
377 | ```javascript
378 | cluster.pin.add(CID, (err) => {
379 | err ? console.error(err) : console.log('pin added')
380 | })
381 | ```
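
A hedged sketch that sets some of the options above while pinning:

```javascript
const CID = "QmU4xZd9Yj7EzRj5ntw6AJ1VkbWNe1jXRM56KoRLkTxKch"

cluster.pin.add(CID, {
  'replication-min': 2,
  'replication-max': 3,
  name: 'important-data' // hypothetical pin name
}, (err) => {
  err ? console.error(err) : console.log(`${CID} pinned with custom replication`)
})
```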
382 |
383 |
384 |
385 | #### **`pin.remove`**
386 | > Removes a pin from the pinlist
387 |
388 | This command tells IPFS Cluster to no longer manage a CID. This will trigger unpinning operations in all the IPFS nodes holding the content.
389 |
390 | When the request has succeeded, the command returns the status of the CID in the cluster. The CID should disappear from the list offered by "pin ls", although unpinning operations in the cluster may take longer or fail.
391 |
392 |
393 |
394 | **`cluster.pin.rm(cid, [callback])`**
395 |
396 | Where `cid` is the [CID](https://docs.ipfs.io/guides/concepts/cid/) of the data to be unpinned.
397 |
398 | `callback` must follow `function (err) {}` signature, where `err` is an error if the operation was not successful.
399 |
400 | If no `callback` is passed, a promise is returned.
401 |
402 | ### Example
403 | ```javascript
404 | const CID = "QmU4xZd9Yj7EzRj5ntw6AJ1VkbWNe1jXRM56KoRLkTxKch"
405 |
406 | cluster.pin.rm(CID, (err) => {
407 | err ? console.error(err) : console.log(`${CID} unpinned`)
408 | })
409 | ```
410 |
411 |
412 |
413 | #### Node management
414 | #### **`id`**
415 | > Gets the connected peer's name and address info
416 |
417 | This command displays information about the peer that the tool is contacting.
418 |
419 | **`cluster.id([callback])`**
420 |
421 | `callback` must follow `function (err, id) {}` signature, where `err` is an error if the operation was not successful. If successful, `id` returns the information about the peer that the tool is contacting.
422 |
423 | If no `callback` is passed, a promise is returned.
424 |
425 | ### Example
426 |
427 | ```javascript
428 | cluster.id((err, id) => {
429 | err ? console.error(err) : console.log(id)
430 | })
431 | ```
432 |
433 | #### **`version`**
434 | > Gets the current IPFS Cluster version
435 |
436 | This command retrieves the IPFS Cluster version and can be used
437 | to check that it matches the CLI version.
438 |
439 | **`cluster.version([callback])`**
440 |
441 | `callback` must follow `function (err, version) {}` signature, where `err` is an error if the operation was not successful. If successful, `version` will return the IPFS Cluster version.
442 |
443 | If no `callback` is passed, a promise is returned.
444 |
445 | ### Example
446 | ```javascript
447 | cluster.version((err, version) => {
448 | err ? console.error(err) : console.log(version)
449 | })
450 | ```
451 |
452 | #### **`health`**
453 |
454 | #### **`graph`**
455 | > Lists the health graph of the cluster
456 |
457 | This command queries all connected cluster peers and their ipfs peers to generate a graph of the connections. Output is a dot file encoding the cluster's connection state.
458 |
459 | * **`cluster.health.graph([callback])`**
460 |
461 | `callback` must follow `function (err, graph) {}` signature, where `err` is an error if the operation was not successful. If successful, `graph` returns the cluster's current state.
462 |
463 | If no `callback` is passed, a promise is returned.
464 |
465 | #### Example
466 |
467 | ```javascript
468 | cluster.health.graph((err, health) => {
469 | err ? console.error(err) : console.log(health)
470 | })
471 | ```
472 |
473 | #### **`metrics`**
474 | > Lists the health metrics of the cluster
475 |
476 | This command displays the latest valid metrics of the given type logged by this peer for all current cluster peers.
477 |
478 | * **`cluster.health.metrics(type, [callback])`**
479 |
480 | `type` is the type of metric desired (`freespace` or `ping`).
481 |
482 | `callback` must follow `function (err, metrics) {}` signature, where `err` is an error if the operation was not successful. If successful, `metrics` returns the desired metrics.
483 |
484 | If no `callback` is passed, a promise is returned.
485 |
486 | ### Example
487 |
488 | ```javascript
489 | cluster.health.metrics('freespace', (err, metrics) => {
490 | err ? console.error(err) : console.log(metrics)
491 | })
492 | ```
493 |
494 | #### **`status`**
495 | > Retrieves the status of the CIDs tracked by IPFS Cluster
496 |
497 | This command retrieves the status of the CIDs tracked by IPFS Cluster, including which member is pinning them and any errors. If a CID is provided, the status will only be fetched for that single
498 | item. Metadata CIDs are included in the status response.
499 |
500 | The status of a CID may not be accurate. A manual sync can be triggered with "sync".
501 |
502 | When the `local` option is set, it will only fetch the status from the contacted cluster peer. By default, status will be fetched from all peers.
503 |
504 | When the `filter` option is set, it will only fetch the peer information where the status of the pin matches at least one of the filter values.
505 |
506 | **`cluster.status([cid], [options], [callback])`**
507 |
508 | Where `cid` is the [CID](https://docs.ipfs.io/guides/concepts/cid/) of the data for which we need the status.
509 |
510 | `options` is an optional object argument that might include the following keys:
511 | * `filter` (string): filter by pin status; possible values are:
512 | - error
513 | - cluster_error
514 | - pin_error
515 | - pin_queued
516 | - pinned
517 | - pinning
518 | - queued
519 | - remote
520 | - unpin_error
521 | - unpin_queued
522 | - unpinned
523 | - unpinning
524 |
525 | * `local` (boolean): if set to `true`, runs the operation only on the contacted peer
526 |
527 | `callback` must follow `function (err, res) {}` signature, where `err` is an error if the operation was not successful. If successful, `res` returns the status of the passed `cid`.
528 |
529 | If no `callback` is passed, a promise is returned.
530 |
531 | ### Example
532 | ```javascript
533 | const CID = "QmU4xZd9Yj7EzRj5ntw6AJ1VkbWNe1jXRM56KoRLkTxKch"
534 |
535 | cluster.status(CID, { filter: 'pinned', local: true }, (err, res) => {
536 | err ? console.error(err) : console.log(res)
537 | })
538 | ```
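
When no CID is passed, the status of every tracked CID is returned; options can still be used to narrow the result (a hedged sketch):

```javascript
// List the cluster-wide status of all tracked CIDs that are in a pin error state
cluster.status({ filter: 'pin_error' }, (err, res) => {
  err ? console.error(err) : console.log(res)
})
```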
539 |
540 | #### **`sync`**
541 | > Syncs the pinset/CID across all the peers in the cluster
542 |
543 | This command asks Cluster peers to verify that the current status of tracked CIDs is accurate by triggering queries to the IPFS daemons that pin them. If a CID is provided, the sync and recover operations will be limited to that single item.
544 |
545 | Unless providing a specific CID, the command will output only items which have changed status because of the sync or are in an error state on some node; therefore, the output should be empty if no operations were performed.
546 |
547 | CIDs in error state may be manually recovered with "recover".
548 |
549 | When the `local` option is passed, it will only trigger sync operations on the contacted peer. By default, all peers will sync.
550 |
551 | **`cluster.sync([cid], [options], [callback])`**
552 |
553 | Where `cid` is the [CID](https://docs.ipfs.io/guides/concepts/cid/) of the data to be synced.
554 |
555 | `options` is an optional object argument that might include the following keys:
556 | * `local` (boolean): if set to `true`, runs the operation only on the contacted peer
557 |
558 | `callback` must follow `function (err) {}` signature, where `err` is an error if the operation was not successful.
559 |
560 | If no `callback` is passed, a promise is returned.
561 |
562 | ### Example
563 |
564 | ```javascript
565 | const CID = "QmU4xZd9Yj7EzRj5ntw6AJ1VkbWNe1jXRM56KoRLkTxKch"
566 |
567 | cluster.sync(CID, { local: true }, (err) => {
568 | err ? console.error(err) : console.log(`${CID} synced`)
569 | })
570 | ```
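
Omitting the CID asks for a sync of the full pinset; combined with `local`, only the contacted peer is synced (a hedged sketch):

```javascript
cluster.sync({ local: true }, (err) => {
  err ? console.error(err) : console.log('pinset synced on the contacted peer')
})
```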
571 |
572 | #### **`recover`**
573 | > Re-tracks or re-forgets CIDs in error state
574 |
575 | This command asks Cluster peers to re-track or re-forget CIDs in error state, usually because the IPFS pin or unpin operation has failed.
576 |
577 | The command will wait for any operations to succeed and will return the status of the item upon completion. Note that, when running on the full set of tracked CIDs (without an argument), it may take a considerable amount of time.
578 |
579 | When the `local` option is set, it will only trigger recover operations on the contacted peer (as opposed to on every peer).
580 |
581 | For now, ONLY requests with the parameter `local=true` are supported.
582 |
583 |
584 | **`cluster.recover([cid], [options], [callback])`**
585 |
586 | Where `cid` is the [CID](https://docs.ipfs.io/guides/concepts/cid/) of the data to be recovered.
587 |
588 | `options` is an optional object argument that might include the following keys:
589 | * `local`(boolean, default: true): if set `true` it will only trigger recover
590 | operations on the contacted peer
591 |
592 | `callback` must follow `function (err) {}` signature, where `err` is an error if the operation was not successful.
593 |
594 | If no `callback` is passed, a promise is returned.
595 |
596 | ### Example
597 | ```javascript
598 | const CID = "QmU4xZd9Yj7EzRj5ntw6AJ1VkbWNe1jXRM56KoRLkTxKch"
599 |
600 | cluster.recover(CID, { local: true }, (err) => {
601 | err ? console.error(err) : console.log(`${CID} recovered`)
602 | })
603 | ```
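
Omitting the CID triggers recover operations for every CID in error state; as noted above, only `local=true` is supported in that case, and the client defaults to it when no options are passed (a minimal sketch):

```javascript
cluster.recover((err) => {
  err ? console.error(err) : console.log('recover operations triggered')
})
```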
604 |
605 | ## Development
606 |
607 | ### Testing
608 | Run the tests by executing `npm test` in a terminal window. This runs the Node.js tests.
609 |
610 | ## Contribute
611 |
612 | The `ipfs-cluster-api` is a work in progress. As such, there are a few things you can do right now to help out:
613 |
614 | - **[Check out the existing issues](https://github.com/cluster-labs/ipfs-cluster-api/issues)**!
615 | - **Perform code reviews**. More eyes will help
616 | - speed the project along
617 | - ensure quality and
618 | - reduce possible future bugs.
619 | - **Add tests**. There can never be enough tests.
620 |
621 | You can also check out our **[other projects](https://github.com/cluster-labs)**.
622 |
623 | It's recommended to follow the [Contribution Guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_JS.md).
624 |
625 | ## Historical Context
626 |
627 | This module started as a direct mapping from the Go `ipfs-cluster-ctl` to a JavaScript implementation.
628 |
629 | ## License
630 |
631 | [MIT](LICENSE)
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ipfs-cluster-api",
3 | "version": "0.0.9",
4 | "description": "A JS client library for the IPFS Cluster HTTP API.",
5 | "leadMaintainer": "Vaibhav Saini ",
6 | "main": "src/index.js",
7 | "scripts": {
8 | "test": "mocha",
9 | "test:coverage": "codecov"
10 | },
11 | "license": "MIT",
12 | "dependencies": {
13 | "async": "^3.0.0",
14 | "concat-stream": "^2.0.0",
15 | "detect-node": "^2.0.4",
16 | "flatmap": "0.0.3",
17 | "is-pull-stream": "0.0.0",
18 | "iso-stream-http": "^0.1.2",
19 | "multiaddr": "^6.0.6",
20 | "ndjson": "^1.5.0",
21 | "once": "^1.4.0",
22 | "promisify-es6": "^1.0.3",
23 | "pull-to-stream": "^0.1.1",
24 | "pump": "^3.0.0",
25 | "qs": "^6.5.1"
26 | },
27 | "devDependencies": {
28 | "@babel/core": "^7.4.4",
29 | "@babel/preset-env": "^7.4.4",
30 | "babel-core": "^6.26.3",
31 | "babel-preset-env": "^1.7.0",
32 | "babelify": "^10.0.0",
33 | "browserify": "^16.2.3",
34 | "chai": "^4.2.0",
35 | "gulp": "^4.0.2",
36 | "gulp-babel": "^8.0.0",
37 | "gulp-rename": "^1.4.0",
38 | "gulp-sourcemaps": "^2.6.5",
39 | "gulp-uglify": "^3.0.2",
40 | "mocha": "^6.1.4",
41 | "vinyl-buffer": "^1.0.1",
42 | "vinyl-source-stream": "^2.0.0"
43 | },
44 | "babel": {
45 | "presets": [
46 | "@babel/preset-env"
47 | ]
48 | },
49 | "keywords": [
50 | "ipfs",
51 | "ipfs-cluster",
52 | "ipfscloud",
53 | "ipfs-cluster-api",
54 | "js-ipfs-cluster",
55 | "js-ipfs-http-client",
56 | "clusterlabs"
57 | ],
58 | "contributors": [
59 | "Vaibhav Saini ",
60 | "Tom O'Donnell "
61 | ],
62 | "bugs": {
63 | "url": "https://github.com/cluster-labs/ipfs-cluster-api/issues"
64 | },
65 | "homepage": "https://github.com/cluster-labs/ipfs-cluster-api"
66 | }
67 |
--------------------------------------------------------------------------------
/src/add.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 | const ConcatStream = require('concat-stream')
5 | const once = require('once')
6 | const isStream = require('is-stream')
7 | const isSource = require('is-pull-stream').isSource
8 | const FileResultStreamConverter = require('./utils/file-result-stream-converter')
9 | const SendFilesStream = require('./utils/send-files-stream')
10 |
11 | module.exports = (send) => {
12 | const createAddStream = SendFilesStream(send, 'add')
13 |
14 | const add = promisify((_files, options, _callback) => {
15 | if (typeof options === 'function') {
16 | _callback = options
17 | options = null
18 | }
19 |
20 | const callback = once(_callback)
21 |
22 | if (!options) {
23 | options = {}
24 | }
25 | options.converter = FileResultStreamConverter
26 |
27 | // Buffer, pull stream or Node.js stream
28 | const isBufferOrStream = obj => Buffer.isBuffer(obj) || isStream.readable(obj) || isSource(obj)
29 | // An object like { content?, path? }, where content isBufferOrStream and path isString
30 | const isContentObject = obj => {
31 | if (typeof obj !== 'object') return false
32 | // path is optional if content is present
33 | if (obj.content) return isBufferOrStream(obj.content)
34 | // path must be a non-empty string if no content
35 | return Boolean(obj.path) && typeof obj.path === 'string'
36 | }
37 | // An input atom: a buffer, stream or content object
38 | const isInput = obj => isBufferOrStream(obj) || isContentObject(obj)
39 | // All is ok if data isInput or data is an array of isInput
40 | const ok = isInput(_files) || (Array.isArray(_files) && _files.every(isInput))
41 |
42 | if (!ok) {
43 | return callback(new Error('invalid input: expected buffer, readable stream, pull stream, object or array of objects'))
44 | }
45 |
46 | const files = [].concat(_files)
47 |
48 | const stream = createAddStream({ qs: options })
49 | const concat = ConcatStream((result) => callback(null, result))
50 | stream.once('error', callback)
51 | stream.pipe(concat)
52 |
53 | files.forEach((file) => stream.write(file))
54 | stream.end()
55 | })
56 |
57 | return function () {
58 | const args = Array.from(arguments)
59 |
60 |     // If we call add() with only a pull stream, then promisify thinks the pull stream is
61 | // a callback! Add an empty options object in this case so that a promise
62 | // is returned.
63 | if (args.length === 1 && isSource(args[0])) {
64 | args.push({})
65 | }
66 |
67 | return add.apply(null, args)
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/src/health/graph.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 |
5 | module.exports = (send) => {
6 |
7 | return promisify((callback) => {
8 | send({
9 | path: 'health/graph',
10 | }, callback)
11 | })
12 | }
13 |
--------------------------------------------------------------------------------
/src/health/index.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const moduleConfig = require('../utils/module-config')
4 |
5 | module.exports = (arg) => {
6 | const send = moduleConfig(arg)
7 |
8 | return {
9 | graph: require('./graph')(send),
10 | metrics: require('./metrics')(send)
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/src/health/metrics.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 |
5 | module.exports = (send) => {
6 |
7 | return promisify((arg, callback) => {
8 | var monitorPath = 'monitor/metrics';
9 | if (arg) {
10 | monitorPath += '/' + arg
11 | }
12 | send({
13 | path: monitorPath,
14 | }, callback)
15 | })
16 | }
17 |
--------------------------------------------------------------------------------
/src/id.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 | const moduleConfig = require('./utils/module-config')
5 |
6 | module.exports = (arg) => {
7 | const send = moduleConfig(arg)
8 |
9 | return promisify((callback) => {
10 | send({
11 | path: 'id',
12 | }, callback)
13 | })
14 | }
15 |
--------------------------------------------------------------------------------
/src/index.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const getConfig = require('./utils/default-config')
4 | const getRequestAPI = require('./utils/request-api')
5 | const loadCommands = require('./utils/load-commands')
6 | const multiaddr = require('multiaddr')
7 |
8 | function IpfsClusterAPI(hostOrMultiaddr, port, opts) {
9 | // convert all three params to objects that we can merge.
10 | let hostAndPort = {}
11 |
12 | if (!hostOrMultiaddr) {
13 | // autoconfigure host and port in browser
14 | if (typeof self !== 'undefined') {
15 | const split = self.location.host.split(':')
16 | hostAndPort.host = split[0]
17 | hostAndPort.port = split[1]
18 | }
19 | } else if (multiaddr.isMultiaddr(hostOrMultiaddr)) {
20 | hostAndPort = toHostAndPort(hostOrMultiaddr)
21 | } else if (typeof hostOrMultiaddr === 'object') {
22 | hostAndPort = hostOrMultiaddr
23 | } else if (typeof hostOrMultiaddr === 'string') {
24 | if (hostOrMultiaddr[0] === '/') {
25 | // throws if multiaddr is malformed or can't be converted to a nodeAddress
26 | hostAndPort = toHostAndPort(multiaddr(hostOrMultiaddr))
27 | } else {
28 | // hostOrMultiaddr is domain or ip address as a string
29 | hostAndPort.host = hostOrMultiaddr
30 | }
31 | }
32 |
33 | if (port && typeof port !== 'object') {
34 | port = { port: port }
35 | }
36 | const config = Object.assign(getConfig(), hostAndPort, port, opts)
37 | const requestAPI = getRequestAPI(config)
38 | const cmds = loadCommands(requestAPI)
39 | cmds.send = requestAPI
40 | cmds.Buffer = Buffer
41 |
42 | return cmds
43 | }
44 |
45 | // throws if multiaddr can't be converted to a nodeAddress
46 | function toHostAndPort (multiaddr) {
47 | const nodeAddr = multiaddr.nodeAddress()
48 | return {
49 | host: nodeAddr.address,
50 | port: nodeAddr.port
51 | }
52 | }
53 |
54 | exports = module.exports = IpfsClusterAPI
55 |
--------------------------------------------------------------------------------
/src/peers/index.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const moduleConfig = require('../utils/module-config')
4 |
5 | module.exports = (arg) => {
6 | const send = moduleConfig(arg)
7 |
8 | return {
9 | rm: require('./rm')(send),
10 | ls: require('./ls')(send)
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/src/peers/ls.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 |
5 | module.exports = (send) => {
6 | return promisify((callback) => {
7 | send({
8 | path: 'peers',
9 | }, callback)
10 | })
11 | }
12 |
--------------------------------------------------------------------------------
/src/peers/rm.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 |
5 | module.exports = (send) => {
6 | return promisify((arg, callback) => {
7 | var rmPath = `peers/${arg}`
8 | send({
9 | path: rmPath,
10 | method: 'DELETE',
11 | }, callback)
12 | })
13 | }
14 |
--------------------------------------------------------------------------------
/src/pin/add.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 |
5 | module.exports = (send) => {
6 | return promisify((arg, opts, callback) => {
7 | if (typeof opts == 'function') {
8 | callback = opts
9 | opts = undefined
10 | }
11 |
12 | var addPath = `pins/${arg}`
13 |
14 | send({
15 | method: 'POST',
16 | path: addPath,
17 | qs: opts
18 | }, callback)
19 | })
20 | }
21 |
--------------------------------------------------------------------------------
/src/pin/index.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const moduleConfig = require('../utils/module-config')
4 |
5 | module.exports = (arg) => {
6 | const send = moduleConfig(arg)
7 |
8 | return {
9 | add: require('./add')(send),
10 | rm: require('./rm')(send),
11 | ls: require('./ls')(send)
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/src/pin/ls.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 |
5 | module.exports = (send) => {
6 | return promisify((opts, callback) => {
7 |
8 | if(typeof opts === 'function') {
9 | callback = opts
10 | opts = { filter : 'pin' }
11 | }
12 |
13 | send({
14 | path: 'allocations',
15 | qs: opts
16 | }, callback)
17 | })
18 | }
19 |
--------------------------------------------------------------------------------
/src/pin/rm.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 |
5 | module.exports = (send) => {
6 | return promisify((arg, callback) => {
7 | var rmPath = `pins/${arg}`
8 | send({
9 | path: rmPath,
10 | method: 'DELETE',
11 | }, callback)
12 | })
13 | }
14 |
--------------------------------------------------------------------------------
/src/recover.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 | const moduleConfig = require('./utils/module-config')
5 |
6 | module.exports = (arg) => {
7 | const send = moduleConfig(arg)
8 |
9 | return promisify((args, opts, callback) => {
10 | if(typeof opts == 'function') {
11 | callback = opts
12 | if(typeof args !== 'string') {
13 | opts = args
14 | args = undefined
15 | }
16 | else {
17 | opts = undefined
18 | }
19 | }
20 | if(typeof args === 'function') {
21 | callback = args
22 | args = undefined
23 | }
24 | var recoverPath = 'pins/recover'
25 |
26 | if(args) {
27 | recoverPath = `pins/${args}/recover`
28 | }
29 |
30 | send({
31 | method: 'POST',
32 | path: recoverPath,
33 | qs: opts || { local: true }
34 | }, callback)
35 | })
36 | }
--------------------------------------------------------------------------------
/src/status.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 | const moduleConfig = require('./utils/module-config')
5 |
6 | module.exports = (arg) => {
7 | const send = moduleConfig(arg)
8 |
9 | return promisify((cid, opts, callback) => {
10 | if (typeof cid === 'function') {
11 | callback = cid
12 | cid = undefined
13 | opts = undefined
14 | }
15 | if (typeof opts === 'function') {
16 | callback = opts
17 | if(typeof cid === 'string') {
18 | opts = undefined
19 | }
20 | else {
21 | opts = cid
22 | cid = undefined
23 | }
24 | }
25 |
26 | var statusPath = 'pins';
27 | if (cid) {
28 | statusPath += '/' + cid;
29 | }
30 |
31 | send({
32 | path: statusPath,
33 | qs: opts
34 | }, callback)
35 | })
36 | }
37 |
--------------------------------------------------------------------------------
/src/sync.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 | const moduleConfig = require('./utils/module-config')
5 |
6 | module.exports = (arg) => {
7 | const send = moduleConfig(arg)
8 |
9 | return promisify((cid, opts, callback) => {
10 | if (typeof cid == 'function') {
11 | callback = cid
12 | cid = undefined
13 | opts = undefined
14 | }
15 | if(typeof opts == 'function') {
16 | callback = opts
17 | if(typeof cid == 'string') {
18 | opts = undefined
19 | }
20 | else{
21 | opts = cid
22 | cid = undefined
23 | }
24 | }
25 |
26 | var syncPath = 'pins/sync'
27 | if (cid) {
28 | syncPath = `pins/${cid}/sync`
29 | }
30 |
31 | send({
32 | method: 'POST',
33 | path: syncPath,
34 | qs: opts
35 | }, callback)
36 | })
37 | }
38 |
--------------------------------------------------------------------------------
/src/utils/default-config.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const pkg = require('../../package.json')
4 |
5 | exports = module.exports = () => {
6 | return {
7 | 'api-path': '/',
8 | 'user-agent': `/node-${pkg.name}/${pkg.version}/`,
9 | host: 'localhost',
10 | port: '9094',
11 | protocol: 'http'
12 | }
13 | }
--------------------------------------------------------------------------------
/src/utils/file-result-stream-converter.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const TransformStream = require('readable-stream').Transform
4 |
5 | /*
6 | Transforms a stream of {Name, Hash} objects to include size
7 | of the DAG object.
8 |
9 | Usage: inputStream.pipe(new FileResultStreamConverter())
10 |
11 | Input object format:
12 | {
13 | name: '/path/to/file/foo.txt',
14 | cid: { '/': 'QmRG3FXAW76xD7ZrjCWk8FKVaTRPYdMtwzJHZ9gArzHK5f' },
15 | size: 2417
16 | }
17 |
18 | Output object format:
19 | {
20 | path: '/path/to/file/foo.txt',
21 | hash: 'QmRG3FXAW76xD7ZrjCWk8FKVaTRPYdMtwzJHZ9gArzHK5f',
22 | size: 2417
23 | }
24 | */
25 | class FileResultStreamConverter extends TransformStream {
26 | constructor (options) {
27 | const opts = Object.assign({}, options || {}, { objectMode: true })
28 | super(opts)
29 | }
30 |
31 | _transform (obj, enc, callback) {
32 | if (!obj.name) {
33 | return callback()
34 | }
35 |
36 | callback(null, {
37 | path: obj.name,
38 | hash: obj.cid['/'],
39 | size: parseInt(obj.size, 10)
40 | })
41 | }
42 | }
43 |
44 | module.exports = FileResultStreamConverter
45 |
--------------------------------------------------------------------------------
/src/utils/load-commands.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | function getCommands() {
4 | const cmds = {
5 | add: require('../add'),
6 | id: require('../id'),
7 | peers: require('../peers'),
8 | pin: require('../pin'),
9 | status: require('../status'),
10 | sync: require('../sync'),
11 | recover: require('../recover'),
12 | version: require('../version'),
13 | health: require('../health')
14 | }
15 |
16 | return cmds
17 | }
18 |
19 | function loadCommands(send) {
20 | const files = getCommands()
21 | const cmds = {}
22 |
23 | Object.keys(files).forEach((file) => {
24 | cmds[file] = files[file](send)
25 | })
26 |
27 | return cmds
28 | }
29 |
30 | module.exports = loadCommands
31 |
--------------------------------------------------------------------------------
/src/utils/module-config.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const requestAPI = require('./request-api')
4 |
5 | module.exports = (arg) => {
6 |
7 | if (typeof arg === 'function') {
8 | return arg
9 | } else if (typeof arg === 'object') {
10 | return requestAPI(arg)
11 | } else {
12 | throw new Error('Argument must be a send function or a config object.')
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/src/utils/multipart.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const Transform = require('readable-stream').Transform
4 | const isNode = require('detect-node')
5 | const isSource = require('is-pull-stream').isSource
6 | const toStream = require('pull-to-stream')
7 |
8 | const PADDING = '--'
9 | const NEW_LINE = '\r\n'
10 | const NEW_LINE_BUFFER = Buffer.from(NEW_LINE)
11 |
12 | class Multipart extends Transform {
13 | constructor (options) {
14 | super(Object.assign({}, options, { objectMode: true, highWaterMark: 1 }))
15 |
16 | this._boundary = this._generateBoundary()
17 | this._files = []
18 | this._draining = false
19 | }
20 |
21 | _flush () {
22 | this.push(Buffer.from(PADDING + this._boundary + PADDING + NEW_LINE))
23 | this.push(null)
24 | }
25 |
26 | _generateBoundary () {
27 | var boundary = '--------------------------'
28 | for (var i = 0; i < 24; i++) {
29 | boundary += Math.floor(Math.random() * 10).toString(16)
30 | }
31 |
32 | return boundary
33 | }
34 |
35 | _transform (file, encoding, callback) {
36 | if (Buffer.isBuffer(file)) {
37 | this.push(file)
38 | return callback() // early
39 | }
40 | // not a buffer, must be a file
41 | this._files.push(file)
42 | this._maybeDrain(callback)
43 | }
44 |
45 | _maybeDrain (callback) {
46 | if (!this._draining) {
47 | if (this._files.length) {
48 | this._draining = true
49 | const file = this._files.shift()
50 | this._pushFile(file, (err) => {
51 | this._draining = false
52 | if (err) {
53 | this.emit('error', err)
54 | } else {
55 | this._maybeDrain(callback)
56 | }
57 | })
58 | } else {
59 | this.emit('drained all files')
60 | callback()
61 | }
62 | } else {
63 | this.once('drained all files', callback)
64 | }
65 | }
66 |
67 | _pushFile (file, callback) {
68 | const leading = this._leading(file.headers || {})
69 |
70 | this.push(leading)
71 |
72 | let content = file.content || Buffer.alloc(0)
73 |
74 | if (Buffer.isBuffer(content)) {
75 | this.push(content)
76 | this.push(NEW_LINE_BUFFER)
77 | return callback() // early
78 | }
79 |
80 | if (isSource(content)) {
81 | content = toStream.readable(content)
82 | }
83 |
84 | // From now on we assume content is a stream
85 |
86 | content.once('error', this.emit.bind(this, 'error'))
87 |
88 | content.once('end', () => {
89 | this.push(NEW_LINE_BUFFER)
90 | callback()
91 |
92 | // TODO: backpressure!!! wait once self is drained so we can proceed
93 | // This does not work
94 | // this.once('drain', () => {
95 | // callback()
96 | // })
97 | })
98 |
99 | content.on('data', (data) => {
100 | const drained = this.push(data)
101 | // Only do the drain dance on Node.js.
102 | // In browserland, the underlying stream
103 | // does NOT drain because the request is only sent
104 | // once this stream ends.
105 | if (!drained && isNode) {
106 | content.pause()
107 | this.once('drain', () => content.resume())
108 | }
109 | })
110 | }
111 |
112 | _leading (headers) {
113 | var leading = [PADDING + this._boundary]
114 |
115 | Object.keys(headers).forEach((header) => {
116 | leading.push(header + ': ' + headers[header])
117 | })
118 |
119 | leading.push('')
120 | leading.push('')
121 |
122 | const leadingStr = leading.join(NEW_LINE)
123 |
124 | return Buffer.from(leadingStr)
125 | }
126 | }
127 |
128 | module.exports = Multipart
129 |
--------------------------------------------------------------------------------
/src/utils/prepare-file.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const isNode = require('detect-node')
4 | const flatmap = require('flatmap')
5 |
6 | function loadPaths (opts, file) {
7 | const path = require('path')
8 | const fs = require('fs')
9 | const glob = require('glob')
10 |
11 | const followSymlinks = opts.followSymlinks != null ? opts.followSymlinks : true
12 |
13 | file = path.resolve(file)
14 | const stats = fs.statSync(file)
15 |
16 | if (stats.isDirectory() && !opts.recursive) {
17 | throw new Error('Can only add directories using --recursive')
18 | }
19 |
20 | if (stats.isDirectory() && opts.recursive) {
21 | // glob requires a POSIX filename
22 | file = file.split(path.sep).join('/')
23 | const fullDir = file + (file.endsWith('/') ? '' : '/')
24 | let dirName = fullDir.split('/')
25 | dirName = dirName[dirName.length - 2] + '/'
26 | const mg = new glob.sync.GlobSync('**/*', {
27 | cwd: file,
28 | follow: followSymlinks,
29 | dot: opts.hidden,
30 | ignore: opts.ignore
31 | })
32 |
33 | return mg.found
34 | .map((name) => {
35 | const fqn = fullDir + name
36 | // symlinks
37 | if (mg.symlinks[fqn] === true) {
38 | return {
39 | path: dirName + name,
40 | symlink: true,
41 | dir: false,
42 | content: fs.readlinkSync(fqn)
43 | }
44 | }
45 |
46 | // files
47 | if (mg.cache[fqn] === 'FILE') {
48 | return {
49 | path: dirName + name,
50 | symlink: false,
51 | dir: false,
52 | content: fs.createReadStream(fqn)
53 | }
54 | }
55 |
56 | // directories
57 | if (mg.cache[fqn] === 'DIR' || mg.cache[fqn] instanceof Array) {
58 | return {
59 | path: dirName + name,
60 | symlink: false,
61 | dir: true
62 | }
63 | }
64 | // files inside symlinks and others
65 | })
66 | // filter out null files
67 | .filter(Boolean)
68 | }
69 |
70 | return {
71 | path: path.basename(file),
72 | content: fs.createReadStream(file)
73 | }
74 | }
75 |
76 | function prepareFile (file, opts) {
77 | let files = [].concat(file)
78 |
79 | return flatmap(files, (file) => {
80 | if (typeof file === 'string') {
81 | if (!isNode) {
82 | throw new Error('Can only add file paths in node')
83 | }
84 |
85 | return loadPaths(opts, file)
86 | }
87 |
88 | if (file.path && !file.content) {
89 | file.dir = true
90 | return file
91 | }
92 |
93 | if (file.content || file.dir) {
94 | return file
95 | }
96 |
97 | return {
98 | path: '',
99 | symlink: false,
100 | dir: false,
101 | content: file
102 | }
103 | })
104 | }
105 |
106 | exports = module.exports = prepareFile
107 |
--------------------------------------------------------------------------------
/src/utils/request-api.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const Qs = require('qs')
4 | const once = require('once')
5 | const request = require('./request')
6 | const streamToValue = require('./stream-to-value')
7 | const streamToJsonValue = require('./stream-to-json-value')
8 | const qsDefaultEncoder = require('qs/lib/utils').encode
9 | const isNode = require('detect-node')
10 | const ndjson = require('ndjson')
11 | const pump = require('pump')
12 | const log = require('debug')('ipfs-cluster-api:request')
13 |
14 | function parseError (res, cb) {
15 | const error = new Error(`Server responded with ${res.statusCode}`)
16 |
17 | streamToJsonValue(res, (err, payload) => {
18 | if (err) {
19 | return cb(err)
20 | }
21 |
22 | if (payload) {
23 | error.code = payload.code || payload.Code
24 |       error.message = payload.message || payload.Message || payload.toString()
25 | }
26 | cb(error)
27 | })
28 | }
29 |
30 | function onRes (buffer, cb) {
31 | return (res) => {
32 | const stream = Boolean(res.headers['x-stream-output'])
33 | const chunkedObjects = Boolean(res.headers['x-chunked-output'])
34 | const isJson = res.headers['content-type'] &&
35 | res.headers['content-type'].indexOf('application/json') === 0
36 |
37 | if (res.req) {
38 | log(res.req.method, `${res.req.getHeaders().host}${res.req.path}`, res.statusCode, res.statusMessage)
39 | } else {
40 | log(res.url, res.statusCode, res.statusMessage)
41 | }
42 |
43 | if (res.statusCode >= 400 || !res.statusCode) {
44 | return parseError(res, cb)
45 | }
46 |
47 | // Return the response stream directly
48 | if (stream && !buffer) {
49 | return cb(null, res)
50 | }
51 |
52 | // Return a stream of JSON objects
53 | if (chunkedObjects && isJson) {
54 | const outputStream = ndjson.parse()
55 | pump(res, outputStream)
56 | res.on('end', () => {
57 | let err = res.trailers['x-stream-error']
58 | if (err) {
59 | // Not all errors are JSON
60 | try {
61 | err = JSON.parse(err)
62 | } catch (e) {
63 | err = { Message: err }
64 | }
65 | outputStream.emit('error', new Error(err.Message))
66 | }
67 | })
68 | return cb(null, outputStream)
69 | }
70 |
71 | // Return a JSON object
72 | if (isJson) {
73 | return streamToJsonValue(res, cb)
74 | }
75 |
76 | // Return a value
77 | return streamToValue(res, cb)
78 | }
79 | }
80 |
81 | function requestAPI (config, options, callback) {
82 | callback = once(callback)
83 | options.qs = options.qs || {}
84 |
85 | if (Array.isArray(options.path)) {
86 | options.path = options.path.join('/')
87 | }
88 | if (options.args && !Array.isArray(options.args)) {
89 | options.args = [options.args]
90 | }
91 | if (options.args) {
92 | options.qs.arg = options.args
93 | }
94 | if (options.progress) {
95 | options.qs.progress = true
96 | }
97 |
98 | if (options.qs.r) {
99 | options.qs.recursive = options.qs.r
100 | // From IPFS 0.4.0, it throws an error when both r and recursive are passed
101 | delete options.qs.r
102 | }
103 |
104 | options.qs['stream-channels'] = true
105 |
106 | if (options.stream) {
107 | options.buffer = false
108 | }
109 |
110 | // this option is only used internally, not passed to daemon
111 | delete options.qs.followSymlinks
112 |
113 | const method = options.method || 'GET'
114 |   let headers = options.header || {}
115 | headers = Object.assign(headers, config.headers)
116 |
117 |
118 | if (isNode) {
119 | // Browsers do not allow you to modify the user agent
120 | headers['User-Agent'] = config['user-agent']
121 | }
122 |
123 | if (options.multipart) {
124 | if (!options.multipartBoundary) {
125 | return callback(new Error('No multipartBoundary'))
126 | }
127 |
128 | headers['Content-Type'] = `multipart/form-data; boundary=${options.multipartBoundary}`
129 | }
130 |
131 | const qs = Qs.stringify(options.qs, {
132 | arrayFormat: 'repeat',
133 | encoder: data => {
134 | // TODO: future releases of qs will provide the default
135 | // encoder as a 2nd argument to this function; it will
136 | // no longer be necessary to import qsDefaultEncoder
137 | if (Buffer.isBuffer(data)) {
138 | let uriEncoded = ''
139 | for (const byte of data) {
140 | // https://tools.ietf.org/html/rfc3986#page-14
141 | // ALPHA (%41-%5A and %61-%7A), DIGIT (%30-%39), hyphen (%2D), period (%2E), underscore (%5F), or tilde (%7E)
142 | if (
143 | (byte >= 0x41 && byte <= 0x5A) ||
144 | (byte >= 0x61 && byte <= 0x7A) ||
145 | (byte >= 0x30 && byte <= 0x39) ||
146 | (byte === 0x2D) ||
147 | (byte === 0x2E) ||
148 | (byte === 0x5F) ||
149 | (byte === 0x7E)
150 | ) {
151 | uriEncoded += String.fromCharCode(byte)
152 | } else {
153 | const hex = byte.toString(16)
154 | // String.prototype.padStart() not widely supported yet
155 | const padded = hex.length === 1 ? `0${hex}` : hex
156 | uriEncoded += `%${padded}`
157 | }
158 | }
159 | return uriEncoded
160 | }
161 | return qsDefaultEncoder(data)
162 | }
163 | })
164 | const req = request(config.protocol)({
165 | hostname: config.host,
166 | path: `${config['api-path']}${options.path}?${qs}`,
167 | port: config.port,
168 | method: method,
169 | headers: headers,
170 | protocol: `${config.protocol}:`
171 | }, onRes(options.buffer, callback))
172 |
173 | req.on('error', (err) => {
174 | callback(err)
175 | })
176 |
177 | if (!options.stream) {
178 | req.end()
179 | }
180 |
181 | return req
182 | }
183 |
184 | exports = module.exports = (config) => {
185 | const send = (options, callback) => {
186 | if (typeof options !== 'object') {
187 | return callback(new Error('no options were passed'))
188 | }
189 |
190 | return requestAPI(config, options, callback)
191 | }
192 |
193 | send.andTransform = (options, transform, callback) => {
194 | return send(options, (err, res) => {
195 | if (err) {
196 | return callback(err)
197 | }
198 | transform(res, callback)
199 | })
200 | }
201 |
202 | return send
203 | }
204 |
--------------------------------------------------------------------------------
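
A hypothetical usage sketch (not part of the repo) for the factory exported above. The config field names mirror how requestAPI reads its config argument; the 'id' endpoint, its JSON response containing a version field, and the running cluster at 127.0.0.1:9094 are assumptions rather than guarantees.

const requestApi = require('./src/utils/request-api')

const send = requestApi({
  host: '127.0.0.1',
  port: '9094',
  protocol: 'http',
  'api-path': '/',
  'user-agent': 'ipfs-cluster-api-example',
  headers: {}
})

// Plain request: issues GET /id?stream-channels=true and parses the JSON body
send({ path: 'id' }, (err, id) => {
  if (err) throw err
  console.log(id)
})

// Same request, with a post-processing step applied to the parsed response
send.andTransform({ path: 'id' }, (res, cb) => cb(null, res.version), (err, version) => {
  if (err) throw err
  console.log(version)
})
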
/src/utils/request.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const httpRequest = require('http').request
4 | const httpsRequest = require('https').request
5 |
6 | module.exports = (protocol) => {
7 | if (protocol.indexOf('https') === 0) {
8 | return httpsRequest
9 | }
10 |
11 | return httpRequest
12 | }
13 |
--------------------------------------------------------------------------------
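
A hypothetical illustration (not part of the repo): the module above simply maps a protocol string onto Node's built-in request function.

const pickRequest = require('./src/utils/request')

console.log(pickRequest('https') === require('https').request) // true
console.log(pickRequest('http') === require('http').request)   // true
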
/src/utils/send-files-stream.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const { Duplex } = require('readable-stream')
4 | const eachSeries = require('async/eachSeries')
5 | const isStream = require('is-stream')
6 | const once = require('once')
7 | const prepareFile = require('./prepare-file')
8 | const Multipart = require('./multipart')
9 |
10 | function headers (file) {
11 | const name = file.path
12 | ? encodeURIComponent(file.path)
13 | : ''
14 |
15 | const header = { 'Content-Disposition': `file; filename="${name}"` }
16 |
17 | if (!file.content) {
18 | header['Content-Type'] = 'application/x-directory'
19 | } else if (file.symlink) {
20 | header['Content-Type'] = 'application/symlink'
21 | } else {
22 | header['Content-Type'] = 'application/octet-stream'
23 | }
24 |
25 | return header
26 | }
27 |
28 | module.exports = (send, path) => {
29 | return (options) => {
30 | let request
31 | let ended = false
32 | let writing = false
33 |
34 | options = options ? Object.assign({}, options, options.qs) : {}
35 |
36 | const multipart = new Multipart()
37 |
38 | const retStream = new Duplex({ objectMode: true })
39 |
40 | retStream._read = (n) => {}
41 |
42 | retStream._write = (file, enc, _next) => {
43 | const next = once(_next)
44 | try {
45 | const files = prepareFile(file, options)
46 | .map((file) => Object.assign({ headers: headers(file) }, file))
47 |
48 | writing = true
49 | eachSeries(
50 | files,
51 | (file, cb) => multipart.write(file, enc, cb),
52 | (err) => {
53 | writing = false
54 | if (err) {
55 | return next(err)
56 | }
57 | if (ended) {
58 | multipart.end()
59 | }
60 | next()
61 | })
62 | } catch (err) {
63 | next(err)
64 | }
65 | }
66 |
67 | retStream.once('finish', () => {
68 | if (!ended) {
69 | ended = true
70 | if (!writing) {
71 | multipart.end()
72 | }
73 | }
74 | })
75 |
76 | const qs = options.qs || {}
77 |
78 | qs['cid-version'] = propOrProp(options, 'cid-version', 'cidVersion')
79 | qs['raw-leaves'] = propOrProp(options, 'raw-leaves', 'rawLeaves')
80 | qs['only-hash'] = propOrProp(options, 'only-hash', 'onlyHash')
81 | qs['wrap-with-directory'] = propOrProp(options, 'wrap-with-directory', 'wrapWithDirectory')
82 | qs.hash = propOrProp(options, 'hash', 'hashAlg')
83 |
84 | const args = {
85 | path: path,
86 | method: 'POST',
87 | qs: qs,
88 | args: options.args,
89 | multipart: true,
90 | multipartBoundary: multipart._boundary,
91 | stream: true,
92 | recursive: true,
93 | progress: options.progress
94 | }
95 |
96 | multipart.on('error', (err) => {
97 | retStream.emit('error', err)
98 | })
99 |
100 | request = send(args, (err, response) => {
101 | if (err) {
102 | return retStream.emit('error', err)
103 | }
104 |
105 | if (!response) {
106 | // no response, which means everything is ok, so we end the retStream
107 | return retStream.push(null) // early
108 | }
109 |
110 | if (!isStream(response)) {
111 | retStream.push(response)
112 | retStream.push(null)
113 | return
114 | }
115 |
116 | response.on('error', (err) => retStream.emit('error', err))
117 |
118 | if (options.converter) {
119 | response.on('data', (d) => {
120 | if (d.Bytes && options.progress) {
121 | options.progress(d.Bytes)
122 | }
123 | })
124 |
125 | const Converter = options.converter
126 | const convertedResponse = new Converter()
127 | convertedResponse.once('end', () => retStream.push(null))
128 | convertedResponse.on('data', (d) => retStream.push(d))
129 | response.pipe(convertedResponse)
130 | } else {
131 | response.on('data', (d) => {
132 | if (d.Bytes && options.progress) {
133 | options.progress(d.Bytes)
134 | }
135 | retStream.push(d)
136 | })
137 | response.once('end', () => retStream.push(null))
138 | }
139 | })
140 |
141 | // signal the multipart that the underlying stream has drained and that
142 | // it can continue producing data..
143 | request.on('drain', () => multipart.emit('drain'))
144 |
145 | multipart.pipe(request)
146 |
147 | return retStream
148 | }
149 | }
150 |
151 | function propOrProp (source, prop1, prop2) {
152 | if (prop1 in source) {
153 | return source[prop1]
154 | } else if (prop2 in source) {
155 | return source[prop2]
156 | }
157 | }
158 |
--------------------------------------------------------------------------------
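
A hypothetical wiring sketch (not part of the repo), assuming a cluster API is reachable at 127.0.0.1:9094 and that 'add' is the endpoint path used by src/add.js: the factory above takes the send function from request-api plus an endpoint path and returns an object-mode duplex stream that accepts { path, content } entries.

const requestApi = require('./src/utils/request-api')
const sendFilesStream = require('./src/utils/send-files-stream')

const send = requestApi({
  host: '127.0.0.1',
  port: '9094',
  protocol: 'http',
  'api-path': '/',
  'user-agent': 'ipfs-cluster-api-example',
  headers: {}
})

const createAddStream = sendFilesStream(send, 'add')

const stream = createAddStream({ qs: {} })
stream.on('data', (result) => console.log('added', result))
stream.on('error', (err) => console.error(err))
stream.write({ path: 'hello.txt', content: Buffer.from('hello world') })
stream.end()
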
/src/utils/stream-to-json-value.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const streamToValue = require('./stream-to-value')
4 |
5 | function streamToJsonValue (res, callback) {
6 | streamToValue(res, (err, data) => {
7 | if (err) {
8 | return callback(err)
9 | }
10 |
11 |     if (!data || data.length === 0) {
12 | return callback()
13 | }
14 |
15 | if (Buffer.isBuffer(data)) {
16 | data = data.toString()
17 | }
18 |
19 |     let parsed
20 |     try {
21 |       parsed = JSON.parse(data)
22 |     } catch (err) {
23 |       return callback(err)
24 |     }
25 |
26 |     callback(null, parsed)
27 | })
28 | }
29 |
30 | module.exports = streamToJsonValue
31 |
--------------------------------------------------------------------------------
/src/utils/stream-to-value.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const pump = require('pump')
4 | const concat = require('concat-stream')
5 |
6 | function streamToValue (response, callback) {
7 | pump(
8 | response,
9 | concat((data) => callback(null, data)),
10 | (err) => {
11 |       if (err) callback(err)
12 | }
13 | )
14 | }
15 |
16 | module.exports = streamToValue
17 |
--------------------------------------------------------------------------------
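
A hypothetical sketch (not part of the repo) of both helpers in isolation: streamToValue collapses a readable stream into a single Buffer, and streamToJsonValue additionally parses that Buffer as JSON.

const { Readable } = require('stream')
const streamToValue = require('./src/utils/stream-to-value')
const streamToJsonValue = require('./src/utils/stream-to-json-value')

// Fake a response body carrying a JSON document
const makeBody = () => {
  const body = new Readable({ read () {} })
  body.push('{"version":"0.10.1"}')
  body.push(null)
  return body
}

streamToValue(makeBody(), (err, data) => {
  if (err) throw err
  console.log(Buffer.isBuffer(data)) // true
})

streamToJsonValue(makeBody(), (err, value) => {
  if (err) throw err
  console.log(value.version) // '0.10.1'
})
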
/src/version.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const promisify = require('promisify-es6')
4 | const moduleConfig = require('./utils/module-config')
5 |
6 | module.exports = (arg) => {
7 | const send = moduleConfig(arg)
8 |
9 | return promisify((callback) => {
10 | send({
11 |       path: 'version'
12 | }, callback)
13 | })
14 | }
15 |
--------------------------------------------------------------------------------
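
A hypothetical usage sketch (not part of the repo), assuming the package has been installed as ipfs-cluster-api: because version, like the other commands, is wrapped with promisify-es6, it can be consumed with either a callback or a promise once a client has been constructed as in the tests below.

const ipfsCluster = require('ipfs-cluster-api')
const cluster = ipfsCluster('127.0.0.1', '9094', { protocol: 'http' })

// callback style
cluster.version((err, version) => {
  if (err) throw err
  console.log(version)
})

// promise style, courtesy of promisify-es6
cluster.version().then((version) => console.log(version))
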
/test/add.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 | const fs = require('fs')
4 |
5 | const obj = {
6 | path: 'src/add.js',
7 | content: Buffer.from(fs.readFileSync('src/add.js'))
8 | }
9 |
10 | describe('add', () => {
11 |
12 |   it('adds a file object to ipfs and pins it in the cluster', (done) => {
13 |     cluster.add(obj, (err, result) => {
14 |       assert.notExists(err, 'throws error while adding a file object to ipfs')
15 | done()
16 | })
17 | })
18 |
19 |   it('adds a file Buffer to ipfs and pins it in the cluster', (done) => {
20 |     cluster.add(Buffer.from('vasa'), (err, result) => {
21 |       assert.notExists(err, 'throws error while adding a file Buffer to ipfs')
22 | done()
23 | })
24 | })
25 |
26 | it('throws error while adding a string as arg param', (done) => {
27 | cluster.add('vasa', (err, result) => {
28 | assert.exists(err, 'does not throw error while adding a string as arg param')
29 | done()
30 | })
31 | })
32 | })
--------------------------------------------------------------------------------
/test/health.graph.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | describe('health.graph', () => {
5 | it('creates a graph displaying connectivity of cluster peers (without options)', (done) => {
6 | cluster.health.graph((err, health) => {
7 | assert.notExists(err, 'throws error while creating a graph displaying connectivity of cluster peers (without options)')
8 | done()
9 | })
10 | })
11 | })
12 |
--------------------------------------------------------------------------------
/test/health.metrics.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | describe('health.metrics', () => {
5 | it('logs \'freespace\' metrics for a peer', (done) => {
6 | cluster.health.metrics('freespace', (err, metrics) => {
7 | assert.notExists(err, 'throws error while logging metrics for a peer')
8 | done()
9 | })
10 | })
11 |
12 | it('logs \'ping\' metrics for a peer', (done) => {
13 | cluster.health.metrics('ping', (err, metrics) => {
14 | assert.notExists(err, 'throws error while logging metrics for a peer')
15 | done()
16 | })
17 | })
18 | })
--------------------------------------------------------------------------------
/test/helpers/index.js:
--------------------------------------------------------------------------------
1 | const ipfsCluster = require('../../')
2 |
3 | // IPFS Cluster node (ipfs-cluster-service) must be running at port 9094 on the same machine
4 | const cluster = ipfsCluster(process.env.CLUSTER_HOST || '127.0.0.1', '9094', {protocol: 'http'})
5 |
6 | module.exports = cluster
--------------------------------------------------------------------------------
/test/id.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | describe('id', () => {
5 | it('shows cluster peer and ipfs daemon information', (done) => {
6 | cluster.id((err, id) => {
7 | assert.notExists(err, 'throws error while fetching cluster peer and ipfs daemon information')
8 | done()
9 | })
10 | })
11 | })
12 |
--------------------------------------------------------------------------------
/test/init.spec.js:
--------------------------------------------------------------------------------
1 | const assert = require('chai').assert
2 | const ipfsCluster = require('../src')
3 |
4 | // connect to the IPFS Cluster HTTP API server
5 | const cluster1 = ipfsCluster('localhost', '9094', { protocol: 'http' }) // leaving out the arguments will default to these values
6 |
7 | // or connect with multiaddr
8 | const cluster2 = ipfsCluster('/ip4/127.0.0.1/tcp/9094')
9 |
10 | // or using options
11 | const cluster3 = ipfsCluster({ host: 'localhost', port: '9094', protocol: 'http' })
12 |
13 | // or specifying a specific API path
14 | const cluster4 = ipfsCluster({ host: 'localhost', port: '9094', 'api-path': '/' })
15 |
16 | describe('init', () => {
17 | it('connects to a cluster node using (host, port, opts)', (done) => {
18 | cluster1.id((err, id) => {
19 |       assert.equal(err, null, 'throws error while connecting to cluster node')
20 | assert.containsAllKeys(
21 | id,
22 | ['id', 'addresses', 'cluster_peers', 'version', 'rpc_protocol_version', 'ipfs', 'peername'],
23 | 'missing params in id of the connected node')
24 |       assert.equal(id.error, '', 'throws error while connecting to cluster node')
25 | done()
26 | })
27 | })
28 |
29 |   it('connects to a cluster node using a multiaddr', (done) => {
30 | cluster2.id((err, id) => {
31 |       assert.equal(err, null, 'throws error while connecting to cluster node')
32 | assert.containsAllKeys(
33 | id,
34 | ['id', 'addresses', 'cluster_peers', 'version', 'rpc_protocol_version', 'ipfs', 'peername'],
35 | 'missing params in id of the connected node')
36 |       assert.equal(id.error, '', 'throws error while connecting to cluster node')
37 | done()
38 | })
39 | })
40 |
41 |   it('connects to a cluster node using an options object', (done) => {
42 | cluster3.id((err, id) => {
43 |       assert.equal(err, null, 'throws error while connecting to cluster node')
44 | assert.containsAllKeys(
45 | id,
46 | ['id', 'addresses', 'cluster_peers', 'version', 'rpc_protocol_version', 'ipfs', 'peername'],
47 | 'missing params in id of the connected node')
48 |       assert.equal(id.error, '', 'throws error while connecting to cluster node')
49 | done()
50 | })
51 | })
52 |
53 |   it('connects to a cluster node using an options object with a custom \'api-path\'', (done) => {
54 | cluster4.id((err, id) => {
55 |       assert.equal(err, null, 'throws error while connecting to cluster node')
56 | assert.containsAllKeys(
57 | id,
58 | ['id', 'addresses', 'cluster_peers', 'version', 'rpc_protocol_version', 'ipfs', 'peername'],
59 | 'missing params in id of the connected node')
60 |       assert.equal(id.error, '', 'throws error while connecting to cluster node')
61 | done()
62 | })
63 | })
64 |
65 | })
66 |
--------------------------------------------------------------------------------
/test/peers.ls.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | describe('peers.ls', () => {
5 | it('lists cluster peers', (done) => {
6 | cluster.peers.ls((err, peers) => {
7 | assert.notExists(err, 'throws error while fetching the list of cluster peers')
8 | done()
9 | })
10 | })
11 | })
12 |
--------------------------------------------------------------------------------
/test/peers.rm.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | describe('peers.rm', () => {
5 |
6 | it('throws error while removing peer with invalid id', (done) => {
7 |     cluster.peers.rm('invalidID', (err) => {
8 |       assert.notDeepEqual(err, null, 'removes peer with invalid id')
9 | done()
10 | })
11 | })
12 |
13 |   it('removes a cluster peer by id (when a second peer exists)', (done) => {
14 |     cluster.peers.ls((err, peers) => {
15 |       assert.equal(err, null, 'throws error while fetching the list of cluster peers')
16 |       if (!peers || peers.length < 2) {
17 |         // nothing to remove in a single-peer cluster
18 |         return done()
19 |       }
20 |       cluster.peers.rm(peers[1].id, {}, (err) => {
21 |         assert.equal(err, null, 'throws error while removing a cluster peer by id')
22 |         done()
23 |       })
24 |     })
25 |   })
26 | })
27 |
--------------------------------------------------------------------------------
/test/pin.add.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | const CID = "QmYfGFTxovH4pU4EQymD875ArcmizJon8n1iBVXpSZbwvG"
5 |
6 | describe('pin.add', () => {
7 | it('pins a CID in the cluster (without options)', (done) => {
8 | cluster.pin.add(CID, (err) => {
9 | assert.notExists(err, 'throws error while pinning a CID in the cluster (without options)')
10 | done()
11 | })
12 | })
13 |
14 |   it('pins a CID in the cluster (with \'name\', \'allocations\' and replication-factor options)', (done) => {
15 |     cluster.pin.add("Qme3dHNjq2Uz34jt3P2Dj72ZfYxPix8NkQjwXDrcZpogpK", { allocations: ["QmSPWhEHZGB6URg1EGNCMDHFHTqv73pM9T4AfG6AvpW2Gm"], name: "named_CID", replication_factor_min: 0, replication_factor_max: 7 }, (err) => {
16 |       assert.notExists(err, 'throws error while pinning a CID in the cluster (with options)')
17 | done()
18 | })
19 | })
20 | })
21 |
--------------------------------------------------------------------------------
/test/pin.ls.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | const CID = "QmRAQB6YaCyidP37UdDnjFY5vQuiBrcqdyoW1CuDgwxkD4"
5 |
6 | describe('pin.ls', () => {
7 |
8 | it('lists details for CIDs with no \'filter\' option', (done) => {
9 | cluster.pin.ls((err, details) => {
10 | assert.notExists(err, 'throws error while listing details for CIDs with no \'filter\' option')
11 | done()
12 | })
13 | })
14 |
15 | it('lists details for CIDs with \'all\' filter', (done) => {
16 | cluster.pin.ls({ filter: 'all' }, (err, details) => {
17 | assert.notExists(err, 'throws error while listing details for CIDs with \'all\' filter')
18 | done()
19 | })
20 | })
21 |
22 | it('lists details for CIDs with \'pin\' filter', (done) => {
23 | cluster.pin.ls({ filter: 'pin' }, (err, details) => {
24 | assert.notExists(err, 'throws error while listing details for CIDs with \'pin\' filter')
25 | done()
26 | })
27 | })
28 |
29 | it('lists details for CIDs with \'meta-pin\' filter', (done) => {
30 | cluster.pin.ls({ filter: 'meta-pin' }, (err, details) => {
31 | assert.notExists(err, 'throws error while listing details for CIDs with \'meta-pin\' filter')
32 | done()
33 | })
34 | })
35 |
36 | it('lists details for CIDs with \'clusterdag-pin\' filter', (done) => {
37 | cluster.pin.ls({ filter: 'clusterdag-pin' }, (err, details) => {
38 | assert.notExists(err, 'throws error while listing details for CIDs with \'clusterdag-pin\' filter')
39 | done()
40 | })
41 | })
42 |
43 | it('lists details for CIDs with \'shard-pin\' filter', (done) => {
44 | cluster.pin.ls({ filter: 'shard-pin' }, (err, details) => {
45 | assert.notExists(err, 'throws error while listing details for CIDs with \'shard-pin\' filter')
46 | done()
47 | })
48 | })
49 |
50 | })
--------------------------------------------------------------------------------
/test/pin.rm.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | const CID = "QmYfGFTxovH4pU4EQymD875ArcmizJon8n1iBVXpSZbwvG"
5 |
6 | describe('pin.rm', () => {
7 | it('unpins a CID from the cluster (without options)', (done) => {
8 | cluster.pin.rm(CID, (err) => {
9 | assert.notExists(err, 'throws error while unpinning a CID from the cluster (without options)')
10 | done()
11 | })
12 | })
13 | })
14 |
--------------------------------------------------------------------------------
/test/recover.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | const CID = "QmRAQB6YaCyidP37UdDnjFY5vQuiBrcqdyoW1CuDgwxkD4"
5 |
6 | describe('recover', () => {
7 |
8 | it('attempts to re-pin/unpin a CID in error state (local=true)', (done) => {
9 | cluster.recover(CID, { local: true }, (err) => {
10 | assert.notExists(err, 'throws error while attempting to re-pin/unpin a CID in error state (local)')
11 | done()
12 | })
13 | })
14 |
15 | it('attempts to re-pin/unpin all CIDs in error state (local=true)', (done) => {
16 | cluster.recover({ local: true }, (err) => {
17 | assert.notExists(err, 'throws error while attempting to re-pin/unpin all CIDs in error state (local)')
18 | done()
19 | })
20 | })
21 |
22 |   it('attempts to re-pin/unpin all CIDs in error state (cluster-wide)', (done) => {
23 | cluster.recover((err) => {
24 | assert.notExists(err, 'throws error while attempting to re-pin/unpin all CIDs in error state (cluster)')
25 | done()
26 | })
27 | })
28 |
29 |   it('attempts to re-pin/unpin a CID in error state (cluster-wide)', (done) => {
30 | cluster.recover(CID, (err) => {
31 | assert.notExists(err, 'throws error while attempting to re-pin/unpin a CID in error state (cluster)')
32 | done()
33 | })
34 | })
35 | })
36 |
--------------------------------------------------------------------------------
/test/status.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | const CID = "QmRAQB6YaCyidP37UdDnjFY5vQuiBrcqdyoW1CuDgwxkD4"
5 |
6 | describe('status', () => {
7 |
8 | it('lists current status of tracked CIDs with \'cluster_error\' status (local state)', (done) => {
9 | cluster.status(CID, { filter: 'cluster_error', local: true }, (err, details) => {
10 | assert.notExists(err, 'throws error while listing current status of tracked CIDs \'cluster_error\' status (local state)')
11 | done()
12 | })
13 | })
14 |
15 | it('lists current status of tracked CIDs with \'pin_error\' status (local state)', (done) => {
16 |     cluster.status({ filter: 'pin_error', local: true }, (err, details) => {
17 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'pin_error\' status (local state)')
18 | done()
19 | })
20 | })
21 |
22 | it('lists current status of tracked CIDs with \'unpin_error\' status (local state)', (done) => {
23 | cluster.status({ filter: 'unpin_error', local: true }, (err, details) => {
24 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'unpin_error\' status (local state)')
25 | done()
26 | })
27 | })
28 |
29 | it('lists current status of tracked CIDs with \'error\' status (local state)', (done) => {
30 | cluster.status({ filter: 'error', local: true }, (err, details) => {
31 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'error\' status (local state)')
32 | done()
33 | })
34 | })
35 |
36 | it('lists current status of tracked CIDs with \'pinning\' status (local state)', (done) => {
37 | cluster.status({ filter: 'pinning', local: true }, (err, details) => {
38 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'pinning\' status (local state)')
39 | done()
40 | })
41 | })
42 |
43 | it('lists current status of tracked CIDs with \'pinned\' status (local state)', (done) => {
44 | cluster.status({ filter: 'pinned', local: true }, (err, details) => {
45 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'pinned\' status (local state)')
46 | done()
47 | })
48 | })
49 |
50 | it('lists current status of tracked CIDs with \'unpinning\' status (local state)', (done) => {
51 | cluster.status({ filter: 'unpinning', local: true }, (err, details) => {
52 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'unpinning\' status (local state)')
53 | done()
54 | })
55 | })
56 |
57 | it('lists current status of tracked CIDs with \'unpinned\' status (local state)', (done) => {
58 | cluster.status({ filter: 'unpinned', local: true }, (err, details) => {
59 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'unpinned\' status (local state)')
60 | done()
61 | })
62 | })
63 |
64 | it('lists current status of tracked CIDs with \'remote\' status (local state)', (done) => {
65 | cluster.status({ filter: 'remote', local: true }, (err, details) => {
66 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'remote\' status (local state)')
67 | done()
68 | })
69 | })
70 |
71 | it('lists current status of tracked CIDs with \'pin_queued\' status (local state)', (done) => {
72 | cluster.status({ filter: 'pin_queued', local: true }, (err, details) => {
73 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'pin_queued\' status (local state)')
74 | done()
75 | })
76 | })
77 |
78 | it('lists current status of tracked CIDs with \'unpin_queued\' status (local state)', (done) => {
79 | cluster.status({ filter: 'unpin_queued', local: true }, (err, details) => {
80 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'unpin_queued\' status (local state)')
81 | done()
82 | })
83 | })
84 |
85 | it('lists current status of tracked CIDs with \'queued\' status (local state)', (done) => {
86 | cluster.status({ filter: 'queued', local: true }, (err, details) => {
87 | assert.notExists(err, 'throws error while listing current status of tracked CIDs with \'queued\' status (local state)')
88 | done()
89 | })
90 | })
91 |
92 | it('lists current status of a tracked CID', (done) => {
93 | cluster.status(CID, (err, details) => {
94 | assert.notExists(err, 'throws error while listing current status of a tracked CID')
95 | done()
96 | })
97 | })
98 |
99 | it('lists current status', (done) => {
100 | cluster.status((err, details) => {
101 | assert.notExists(err, 'throws error while listing current status')
102 | done()
103 | })
104 | })
105 | })
106 |
--------------------------------------------------------------------------------
/test/sync.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | const CID = "QmRAQB6YaCyidP37UdDnjFY5vQuiBrcqdyoW1CuDgwxkD4"
5 |
6 | describe('sync', () => {
7 |
8 |   it('syncs the status of all tracked CIDs on the local cluster node against the IPFS daemon', (done) => {
9 |     cluster.sync({ local: true }, (err) => {
10 |       assert.notExists(err, 'throws error while syncing the status of all tracked CIDs on the local cluster node against the IPFS daemon')
11 | done()
12 | })
13 | })
14 |
15 | it('checks status of a CID on the local cluster node', (done) => {
16 | cluster.sync(CID, { local: true }, (err) => {
17 | assert.notExists(err, 'throws error while checking status of a CID on the local cluster node')
18 | done()
19 | })
20 | })
21 |
22 | it('checks status of pinset from all the cluster peers', (done) => {
23 | cluster.sync((err) => {
24 | assert.notExists(err, 'throws error while checking status of pinset from all the cluster peers')
25 | done()
26 | })
27 | })
28 |
29 | it('checks status of a CID against status reported by the IPFS daemon', (done) => {
30 | cluster.sync(CID, (err) => {
31 | assert.notExists(err, 'throws error while checking status of a CID against status reported by the IPFS daemon')
32 | done()
33 | })
34 | })
35 | })
36 |
--------------------------------------------------------------------------------
/test/version.spec.js:
--------------------------------------------------------------------------------
1 | const cluster = require('./helpers')
2 | const assert = require('chai').assert
3 |
4 | describe('version', () => {
5 | it('shows version', (done) => {
6 | cluster.version((err, version) => {
7 |       assert.notExists(err, 'throws error while fetching the cluster version')
8 | done()
9 | })
10 | })
11 | })
12 |
--------------------------------------------------------------------------------