├── .gitignore ├── LICENSE ├── README.md ├── bin.js ├── example.js ├── help.txt ├── index.js ├── logo.png └── package.json /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | cauf 3 | hyperfs 4 | mnt 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Mathias Buus 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hyperfs 2 | 3 | A content-addressable union file system that replicates using [hyperlog](https://github.com/mafintosh/hyperlog) and is built on top of fuse, leveldb, and node 4 | 5 | ![logo.png](logo.png) 6 | 7 | ## Usage 8 | 9 | First install hyperfs from npm 10 | 11 | ``` sh 12 | npm install -g hyperfs 13 | ``` 14 | 15 | hyperfs requires fuse. If your installation fails, make sure you have the [fuse requirements](https://github.com/mafintosh/fuse-bindings#requirements) installed for your platform. 16 | 17 | ``` sh 18 | hyperfs create test # create a new fs volume 19 | hyperfs mount test ./mnt # mount test on ./mnt 20 | ``` 21 | 22 | Now open the folder ./mnt in your favorite file explorer and start making some changes. 23 | 24 | Using the terminal: 25 | 26 | ``` sh 27 | mkdir mnt/test 28 | echo hello world > mnt/test/hello.txt 29 | ``` 30 | 31 | Now quit your file explorer and go back to the terminal where you ran `hyperfs mount ...`. 32 | Hit CTRL-C to unmount the volume. 33 | 34 | Now let's snapshot that volume so we can replicate it. Snapshotting just makes a read-only layer 35 | of the changes you've made. You can use a snapshot as a base fs for a new volume. 36 | 37 | ``` sh 38 | hyperfs snapshot test 39 | ``` 40 | 41 | This will print the snapshot hash when it succeeds. 42 | Now let's replicate the file system to another machine. 43 | 44 | ``` sh 45 | # assuming hyperfs is installed on example.com 46 | hyperfs replicate ssh://user@example.com 47 | ``` 48 | 49 | Now ssh into example.com and run 50 | 51 | ``` sh 52 | hyperfs create test --node=<snapshot-hash> 53 | hyperfs mount test mnt 54 | ``` 55 | 56 | Now if you enter ./mnt you'll see that it's the volume from your local machine. 
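You can also drive hyperfs from node instead of the CLI. Here is a minimal sketch based on the API exposed by index.js and used in example.js (`create`, `mount`, `unmount`); error handling is kept to a minimum:

``` js
// sketch: programmatic usage of hyperfs (the same calls bin.js makes)
var mkdirp = require('mkdirp')

mkdirp.sync('./hyperfs') // the store folder for volume data and the db

var hyperfs = require('hyperfs')('./hyperfs')

hyperfs.create('test', {}, function (err) {
  if (err) throw err // e.g. the volume already exists
  var mount = hyperfs.mount('test', './mnt')

  mount.on('ready', function () {
    console.log('Mounted ' + mount.id + ' on ./mnt')
  })

  process.on('SIGINT', function () {
    hyperfs.unmount('./mnt', function () {
      process.exit()
    })
  })
})
```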
57 | 58 | ## A container file system 59 | 60 | You can use hyperfs to build your own docker-like container platform 61 | 62 | ``` sh 63 | npm i mini-container -g # a minimal container runtime 64 | apt-get install debootstrap # for installing base distros into a folder 65 | # create an ubuntu volume 66 | hyperfs create ubuntu 67 | # execute debootstrap on this volume. installs base ubuntu trusty 68 | # note: this will take a while (> 20 mins) 69 | hyperfs exec ubuntu 'debootstrap --variant=buildd --arch amd64 trusty . http://archive.ubuntu.com/ubuntu/' 70 | # snapshot ubuntu so we can use it later for other containers 71 | hyperfs snapshot ubuntu -m 'ubuntu trusty core installation' 72 | # create a container volume that inherits from our ubuntu base (your hash might be different) 73 | hyperfs create my-container --node=674a896ec3477d921429dd900da0bab9e32b23aa7f8509c82f1d8b39f42678fe 74 | # install git and curl in our container volume using mini-container 75 | hyperfs exec my-container 'mini-container "apt-get update"' 76 | hyperfs exec my-container 'mini-container "apt-get -y install git curl"' 77 | # snapshot the new container so we can share it 78 | hyperfs snapshot my-container 79 | ``` 80 | 81 | Now, to run a bash session inside our container locally, we just do 82 | 83 | ``` sh 84 | # type exit to leave the session :) 85 | hyperfs exec my-container 'mini-container "/bin/bash"' 86 | ``` 87 | 88 | Or to replicate our containers, we just do 89 | 90 | ``` sh 91 | hyperfs replicate ssh://user@remote.com 92 | ssh user@remote.com 93 | hyperfs create my-container --node=<snapshot-hash> 94 | hyperfs exec my-container 'mini-container "/bin/bash"' 95 | ``` 96 | 97 | The above example only works on Linux, but since hyperfs is just a file system it 98 | would work on OS X too, assuming you swapped debootstrap and the other commands for their OS X equivalents. 99 | 100 | ## Content addressed 101 | 102 | hyperfs is content addressed at the file level. This means that if you install ubuntu 103 | twice on two different volumes, most of the data will only be stored once. It also means 104 | that if you install ubuntu and replicate to another peer that installed ubuntu independently, 105 | replication will probably be really fast, since almost all of the data is already shared. 
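Under the hood, every file blob is stored once under a path derived from the sha256 of its contents. The sketch below mirrors `readablePath` from index.js; the hash is sharded into subdirectories so no single folder grows too large:

``` js
// sketch: how hyperfs maps file contents to a blob path (see index.js)
var crypto = require('crypto')
var path = require('path')

var readablePath = function (hash) {
  return path.join('readable', hash.slice(0, 2), hash.slice(2, 4), hash.slice(4))
}

var hash = crypto.createHash('sha256').update('hello world\n').digest('hex')
console.log(readablePath(hash))
// two volumes containing this exact file content share this single blob on disk
```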
106 | 107 | ## Help 108 | 109 | ``` 110 | hyperfs # prints help 111 | ``` 112 | 113 | ## License 114 | 115 | MIT 116 | -------------------------------------------------------------------------------- /bin.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | var fs = require('fs') 4 | var minimist = require('minimist') 5 | var mkdirp = require('mkdirp') 6 | var path = require('path') 7 | var transports = require('transport-stream')({command: 'hyperfs replicate -'}) 8 | var execspawn = require('execspawn') 9 | var pretty = require('pretty-bytes') 10 | var log = require('single-line-log').stderr 11 | var argv = minimist(process.argv.slice(2), {alias: {message: 'm', quiet: 'q', debug: 'D', store: 's', node: 'n'}}) 12 | var s = argv.store || 'hyperfs' 13 | 14 | mkdirp.sync(s) 15 | 16 | var hyperfs = require('./')(s) 17 | 18 | var cmd = argv._[0] 19 | 20 | if (cmd === 'list' || cmd === 'ls') { 21 | hyperfs.list().on('data', console.log) 22 | return 23 | } 24 | 25 | if (cmd === 'show') { 26 | hyperfs.show(argv._[1], function (err, val) { 27 | if (err) throw err 28 | hyperfs.readSnapshot(val.snapshot, function (err, rs) { 29 | if (err) throw err 30 | rs.on('data', function (data) { 31 | console.log(JSON.stringify(data)) 32 | }) 33 | }) 34 | }) 35 | return 36 | } 37 | 38 | if (cmd === 'version') { 39 | console.log(require('./package.json').version) 40 | return 41 | } 42 | 43 | if (cmd === 'nodes') { 44 | hyperfs.nodes().on('data', function (data) { 45 | console.log(data.key + ': ' + (data.value.message || '(no message)')) 46 | }) 47 | return 48 | } 49 | 50 | if (cmd === 'remove' || cmd === 'rm') { 51 | hyperfs.remove(argv._[1], function () { 52 | console.log(argv._[1] + ' removed') 53 | }) 54 | return 55 | } 56 | 57 | if (cmd === 'info') { 58 | hyperfs.info(argv._[1], function (err, info) { if (err) throw err; console.log(info) }) 59 | return 60 | } 61 | 62 | if (cmd === 'create') { 63 | if (!argv._[1]) throw new Error('volume required') 64 | hyperfs.create(argv._[1], argv, function (err) { 65 | if (err) throw err 66 | }) 67 | return 68 | } 69 | 70 | if (cmd === 'replicate') { 71 | var stream = transports(argv._[1]) 72 | var rs = hyperfs.replicate(argv) 73 | var lastUpdate = 0 74 | 75 | stream.on('warn', console.error) 76 | 77 | if (argv._[1] !== '-' && !argv.quiet && process.stdin.isTTY) { 78 | var read = 0 79 | var written = 0 80 | var msg = [] 81 | 82 | var print = function () { 83 | var str = 'Downloaded ' + pretty(read) + ' and uploaded ' + pretty(written) + '\n' 84 | for (var i = 0; i < Math.min(10, msg.length); i++) { 85 | str += msg[i] + '\n' 86 | } 87 | if (msg.length > 10) msg.shift() 88 | log(str) 89 | } 90 | 91 | var printMaybe = function () { 92 | var time = Date.now() 93 | if (time - lastUpdate > 200) { 94 | lastUpdate = time 95 | print() 96 | } 97 | } 98 | 99 | rs.on('read', function (len) { 100 | read += len 101 | printMaybe() 102 | }) 103 | 104 | rs.on('write', function (len) { 105 | written += len 106 | printMaybe() 107 | }) 108 | 109 | rs.on('receive-data', function (data) { 110 | msg.push('- receive-data: ' + data) 111 | print() 112 | }) 113 | 114 | rs.on('receive-snapshot', function (hash) { 115 | msg.push('- receive-snapshot: ' + hash) 116 | print() 117 | }) 118 | 119 | rs.on('send-data', function (data) { 120 | msg.push('- send-data: ' + data) 121 | print() 122 | }) 123 | 124 | rs.on('send-snapshot', function (hash) { 125 | msg.push('- send-snapshot: ' + hash) 126 | print() 127 | }) 128 | } 129 | 130 | stream.pipe(rs).pipe(stream) 131 | return 132 | } 133 | 134 | 
if (cmd === 'exec') { 135 | if (!argv._[1]) throw new Error('volume required') 136 | var folder = argv.mnt || path.join(s, 'mnt', argv._[1]) 137 | 138 | mkdirp(folder, function (err) { 139 | if (err) throw err 140 | var mnt = hyperfs.mount(argv._[1], folder, argv) 141 | 142 | mnt.on('ready', function () { 143 | var proc = execspawn(argv._[2], { 144 | cwd: folder, 145 | stdio: 'inherit' 146 | }) 147 | 148 | proc.on('exit', function (code) { 149 | hyperfs.unmount(folder, function () { 150 | process.exit(code) 151 | }) 152 | }) 153 | }) 154 | 155 | process.on('SIGINT', function () { 156 | hyperfs.unmount(folder, function () { 157 | process.exit() 158 | }) 159 | }) 160 | }) 161 | return 162 | } 163 | 164 | if (cmd === 'mount') { 165 | if (!argv._[1]) throw new Error('volume required') 166 | var mnt = hyperfs.mount(argv._[1], argv._[2] || 'mnt', argv) 167 | mnt.on('ready', function () { 168 | console.log(mnt.id, 'mounted') 169 | ;[].concat(mnt.nodes).reverse().forEach(function (l) { 170 | console.log('<-- ' + l) 171 | }) 172 | }) 173 | process.on('SIGINT', function () { 174 | hyperfs.unmount(argv._[2] || 'mnt', function () { 175 | process.exit() 176 | }) 177 | }) 178 | return 179 | } 180 | 181 | if (cmd === 'snapshot') { 182 | hyperfs.snapshot(argv._[1], argv, function (err, key) { 183 | if (err) throw err 184 | console.error(key) 185 | }) 186 | return 187 | } 188 | 189 | console.log(fs.readFileSync(__dirname + '/help.txt', 'utf-8')) -------------------------------------------------------------------------------- /example.js: -------------------------------------------------------------------------------- 1 | require('mkdirp').sync('./cauf') 2 | require('mkdirp').sync('./cauf2') 3 | 4 | var cauf = require('./')('./cauf') 5 | var cauf2 = require('./')('./cauf2') 6 | 7 | var s = cauf.replicate() 8 | 9 | s.pipe(cauf2.replicate()).pipe(s) 10 | 11 | // // return 12 | // cauf.create('test5', {ancestor: '0f535d6400e9fb1ba94fbe035f0d0c402e4cbcdf716d77488205945585deb8af'}, function () { 13 | // var mount = cauf.mount('test5', 'mnt') 14 | 15 | // mount.on('ready', function () { 16 | // // console.log('ready') 17 | // // cauf.snapshot(mount.id).on('finish', function () { 18 | // // console.log('snapshot finished', this.key) 19 | // // }) 20 | // console.log('Mounted ' + mount.id + ' on ./mnt') 21 | // }) 22 | 23 | // process.on('SIGINT', function () { 24 | // cauf.unmount('mnt', function () { 25 | // process.exit() 26 | // }) 27 | // }) 28 | // }) 29 | -------------------------------------------------------------------------------- /help.txt: -------------------------------------------------------------------------------- 1 | hyperfs <command> [options] 2 | 3 | create <name> 4 | create a new volume. 5 | use --node=<hash> to create it from a snapshot node 6 | the volume data will be stored in ./hyperfs 7 | 8 | mount <name> <mountpoint> 9 | mount a new volume on <mountpoint>. 10 | use ctrl-c to unmount 11 | 12 | remove <name> 13 | remove a volume 14 | 15 | list 16 | list all volumes 17 | 18 | exec <name> <command> 19 | execute a command inside a mounted volume. 20 | forwards stdio and unmounts when the command ends 21 | the volume will be mounted on ./hyperfs/mnt/<name> 22 | while the command is running 23 | 24 | replicate <target> 25 | replicate all snapshots to a remote hyperfs. 26 | - hyperfs replicate ./other-folder 27 | - hyperfs replicate ssh://user@example.com:other-folder 28 | - dupsh 'hyperfs replicate -' 'some-stream-to-remote' 29 | 30 | snapshot <name> --message 'friendly message' 
will print the node hash when it succeeds 32 | use this hash as the --node argument in create to create another 33 | volume that looks like this one 34 | 35 | nodes 36 | print all snapshot nodes 37 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | var path = require('path') 2 | var level = require('level') 3 | var crypto = require('crypto') 4 | var mkdirp = require('mkdirp') 5 | var pump = require('pump') 6 | var fs = require('fs') 7 | var fuse = require('fuse-bindings') 8 | var lexint = require('lexicographic-integer') 9 | var union = require('sorted-union-stream') 10 | var events = require('events') 11 | var mknod = require('mknod') 12 | var through = require('through2') 13 | var concurrent = require('through2-concurrent') 14 | var subleveldown = require('subleveldown') 15 | var enumerate = require('level-enumerate') 16 | var from = require('from2') 17 | var hyperlog = require('hyperlog') 18 | var multiplex = require('multiplex') 19 | var os = require('os') 20 | 21 | var noop = function () {} 22 | var ENOENT = new Error('ENOENT') 23 | ENOENT.code = 'ENOENT' 24 | 25 | module.exports = function (home) { 26 | var hyperfs = {} 27 | var db = level(path.join(home, 'db')) 28 | 29 | var metadata = subleveldown(db, 'metadata', {valueEncoding: 'json'}) 30 | var inodes = subleveldown(db, 'inodes', {valueEncoding: 'json'}) 31 | var snapshots = subleveldown(db, 'snapshots') 32 | var ancestors = subleveldown(db, 'ancestors') 33 | var volumes = subleveldown(db, 'volumes', {valueEncoding: 'json'}) 34 | var log = hyperlog(subleveldown(db, 'log')) 35 | 36 | var writeablePath = function () { 37 | var name = crypto.randomBytes(32).toString('hex') 38 | return path.join('writeable', name.slice(0, 2), name.slice(2, 4), name.slice(4)) 39 | } 40 | 41 | var readablePath = function (hash) { 42 | return path.join('readable', hash.slice(0, 2), hash.slice(2, 4), hash.slice(4)) 43 | } 44 | 45 | var toIndexKey = function(name) { 46 | var depth = name.split('/').length - 1 47 | return lexint.pack(depth, 'hex') + '!' + name 48 | } 49 | 50 | hyperfs.show = function (key, cb) { 51 | log.get(key, function (err, node) { 52 | if (err) return cb(err) 53 | cb(null, JSON.parse(node.value.toString())) 54 | }) 55 | } 56 | 57 | hyperfs.put = function (id, name, data, cb) { 58 | var key = id + '!' + toIndexKey(name) 59 | if (!data.ctime) data.ctime = Date.now() 60 | if (!data.mtime) data.mtime = Date.now() 61 | metadata.put(key, data, cb) 62 | } 63 | 64 | hyperfs.del = function (id, name, cb) { 65 | var key = id + '!' + toIndexKey(name) 66 | metadata.del(key, cb) 67 | } 68 | 69 | hyperfs.get = function (id, name, cb) { 70 | var key = id + '!' + toIndexKey(name) 71 | metadata.get(key, cb) 72 | } 73 | 74 | hyperfs.unmount = function (mnt, cb) { 75 | fuse.unmount(mnt, cb) 76 | } 77 | 78 | var dirStream = function (layer, key) { 79 | return metadata.createReadStream({ 80 | gt: layer + '!' + key, 81 | lt: layer + '!' + key + '\xff' 82 | }) 83 | } 84 | 85 | var getInode = function (layer, ino, cb) { 86 | inodes.get(layer + '!' + lexint.pack(ino, 'hex'), cb) 87 | } 88 | 89 | var putInode = function (layer, ino, data, cb) { 90 | inodes.put(layer + '!' + lexint.pack(ino, 'hex'), data, cb) 91 | } 92 | 93 | var delInode = function (layer, ino, cb) { 94 | inodes.del(layer + '!' 
+ lexint.pack(ino, 'hex'), cb) 95 | } 96 | 97 | var countInodes = function (layer, cb) { 98 | var rs = inodes.createKeyStream({ 99 | gt: layer + '!', 100 | lt: layer + '!\xff', 101 | limit: 1, 102 | reverse: true 103 | }) 104 | 105 | var cnt = 0 106 | 107 | rs.on('data', function (data) { 108 | cnt = lexint.unpack(data.split('!')[1], 'hex') 109 | }) 110 | 111 | rs.on('error', function (err) { 112 | cb(err) 113 | }) 114 | 115 | rs.on('end', function () { 116 | cb(null, cnt) 117 | }) 118 | } 119 | 120 | var toCompareKey = function (data) { 121 | return data.key.slice(data.key.indexOf('!') + 1) 122 | } 123 | 124 | hyperfs.hasBlob = function (hash, cb) { 125 | fs.stat(path.join(home, readablePath(hash)), function (err) { 126 | cb(null, !err) 127 | }) 128 | } 129 | 130 | hyperfs.createBlobReadStream = function (key) { 131 | return fs.createReadStream(path.join(home, readablePath(key))) 132 | } 133 | 134 | hyperfs.createBlobWriteStream = function (cb) { 135 | var filename = path.join(os.tmpdir(), 'hyperfs-tmp-' + crypto.randomBytes(32).toString('hex')) 136 | var hash = crypto.createHash('sha256') 137 | 138 | var write = function (data, enc, cb) { 139 | hash.update(data) 140 | cb(null, data) 141 | } 142 | 143 | var ws = fs.createWriteStream(filename) 144 | var hasher = through(write) 145 | 146 | pump(hasher, ws, function (err) { 147 | var key = hash.digest('hex') 148 | var newFilename = path.join(home, readablePath(key)) 149 | 150 | mkdirp(path.join(newFilename, '..'), function (err) { 151 | if (err) return cb(err) 152 | fs.rename(filename, newFilename, function (err) { 153 | if (err) return cb(err) 154 | cb(null, key) 155 | }) 156 | }) 157 | }) 158 | 159 | return hasher 160 | } 161 | 162 | hyperfs.readSnapshot = function (key, cb) { 163 | snapshots.get(key, function (err, space) { 164 | if (err) return cb(err) 165 | 166 | var rs = snapshots.createValueStream({ 167 | gt: space + '!', 168 | lt: space + '!\xff', 169 | valueEncoding: 'json' 170 | }) 171 | 172 | cb(null, rs) 173 | }) 174 | } 175 | 176 | hyperfs.snapshot = function (id, opts, cb) { // don't mutate the layer while running this for now 177 | if (typeof opts === 'function') return hyperfs.snapshot(id, null, opts) 178 | if (!opts) opts = {} 179 | if (!cb) cb = noop 180 | 181 | var message = opts.message 182 | 183 | var onindex = function (v) { 184 | var key = v.snapshot 185 | 186 | if (!v.snapshot) return cb() 187 | 188 | volumes.put(id, v, function (err) { 189 | if (err) return cb(err) 190 | 191 | var write = function (data, enc, cb) { 192 | hyperfs.put(key, data.name, {special: data.special, deleted: data.deleted, mode: data.mode, uid: data.uid, gid: data.gid, ino: data.ino, rdev: data.rdev}, function (err) { 193 | if (err) return cb(err) 194 | hyperfs.del(id, data.name, function () { 195 | getInode(id, data.ino || 0, function (err, inode) { 196 | if (err && err.notFound) return cb() // we already processed this one 197 | if (err) return cb(err) 198 | 199 | if (opts.debug) console.error('Snapshotting', data.name) 200 | 201 | if (!data.data || data.special) { 202 | putInode(key, data.ino, inode, function (err) { 203 | if (err) return cb(err) 204 | delInode(id, data.ino, cb) 205 | }) 206 | return 207 | } 208 | 209 | var filename = readablePath(data.data) 210 | mkdirp(path.join(home, filename, '..'), function (err) { 211 | if (err) return cb(err) 212 | fs.rename(path.join(home, inode.data), path.join(home, filename), function () { // ignore errors for now to be resumeable 213 | inode.data = filename 214 | putInode(key, data.ino, inode, 
function (err) { 215 | if (err) return cb(err) 216 | delInode(id, data.ino, cb) 217 | }) 218 | }) 219 | }) 220 | }) 221 | }) 222 | }) 223 | } 224 | 225 | hyperfs.readSnapshot(key, function (err, rs) { 226 | if (err) return cb(err) 227 | 228 | pump(rs, through.obj(write), function () { 229 | var node = { 230 | snapshot: key, 231 | message: message || '' 232 | } 233 | 234 | log.add(v.node ? [v.node] : [], JSON.stringify(node), function (err, node) { 235 | if (err) return cb(err) 236 | 237 | v.node = node.key 238 | v.snapshot = null 239 | 240 | volumes.put(id, v, function (err) { 241 | if (err) return cb(err) 242 | cb(null, node.key) 243 | }) 244 | }) 245 | }) 246 | }) 247 | }) 248 | } 249 | 250 | volumes.get(id, function (err, v) { 251 | if (err) return cb(new Error('Volume does not exist')) 252 | if (v.snapshot) return onindex(v) 253 | 254 | var space = crypto.randomBytes(32).toString('hex') 255 | var snapshotHash = crypto.createHash('sha256') 256 | var i = 0 257 | 258 | pump( 259 | metadata.createReadStream({ 260 | gt: id + '!', 261 | lt: id + '!\xff' 262 | }), 263 | through.obj(function (file, enc, cb) { 264 | var name = file.key.slice(file.key.lastIndexOf('!') + 1) 265 | 266 | if (opts.debug) console.error('Indexing', name) 267 | 268 | getInode(id, file.value.ino || 0, function (err, data) { 269 | if (err && !err.notFound) return cb(err) 270 | 271 | var ondone = function () { 272 | var val = JSON.stringify({ 273 | name: name, 274 | deleted: file.value.deleted, 275 | special: file.value.special, 276 | data: file.hash, 277 | mode: file.value.mode, 278 | rdev: file.value.rdev, 279 | uid: file.value.uid, 280 | gid: file.value.gid, 281 | ino: file.value.ino 282 | }) 283 | 284 | snapshotHash.update(val) 285 | snapshots.put(space + '!' + lexint.pack(i++, 'hex'), val, cb) 286 | } 287 | 288 | if (!data || !data.data || file.value.special) return ondone() 289 | 290 | var hash = crypto.createHash('sha256') 291 | var rs = fs.createReadStream(path.join(home, data.data)) 292 | 293 | rs.on('data', function (data) { 294 | hash.update(data) 295 | }) 296 | rs.on('error', cb) 297 | rs.on('end', function () { 298 | file.hash = hash.digest('hex') 299 | ondone() 300 | }) 301 | }) 302 | }), 303 | function (err) { 304 | if (err) return cb(err) 305 | var key = snapshotHash.digest('hex') 306 | snapshots.put(key, space, function (err) { 307 | if (err) return cb(err) 308 | v.snapshot = key 309 | onindex(v) 310 | }) 311 | } 312 | ) 313 | }) 314 | } 315 | 316 | hyperfs.nodes = function () { 317 | var write = function (node, enc, cb) { 318 | node.value = JSON.parse(node.value) 319 | cb(null, node) 320 | } 321 | 322 | return pump(log.createReadStream(), through.obj(write)) 323 | } 324 | 325 | hyperfs.ancestors = function (key, cb) { 326 | var list = [] 327 | 328 | var loop = function (key) { 329 | log.get(key, function (err, node) { 330 | if (err) return cb(err) 331 | list.unshift({node: node.key, snapshot: JSON.parse(node.value).snapshot}) 332 | if (!node.links.length) return cb(null, list) 333 | loop(node.links[0]) 334 | }) 335 | } 336 | 337 | loop(key) 338 | } 339 | 340 | hyperfs.list = function () { 341 | return volumes.createKeyStream() 342 | } 343 | 344 | hyperfs.info = function (key, cb) { 345 | return volumes.get(key, cb) 346 | } 347 | 348 | hyperfs.remove = function (key, cb) { 349 | if (!cb) cb = noop 350 | 351 | var write = function (data, enc, cb) { 352 | var done = function (err) { 353 | if (err) return cb(err) 354 | metadata.del(data.key, cb) 355 | } 356 | 357 | if (!data.value.ino) return done() 358 | 
delInode(key, data.value.ino, done) 359 | } 360 | 361 | pump(metadata.createReadStream({gt: key + '!', lt: key + '!\xff'}), through.obj(write), function (err) { 362 | if (err) return cb(err) 363 | volumes.del(key, cb) 364 | }) 365 | } 366 | 367 | hyperfs.create = function (key, opts, cb) { 368 | if (typeof opts === 'function') return hyperfs.create(key, null, opts) 369 | if (!cb) cb = noop 370 | if (!opts) opts = {} 371 | volumes.get(key, function (_, v) { 372 | if (v) return cb(new Error('volume already exists')) 373 | volumes.put(key, {id: key, node: opts.node}, cb) 374 | }) 375 | } 376 | 377 | hyperfs.replicate = function (opts) { 378 | if (!opts) opts = {} 379 | 380 | var drains = [] 381 | var blobs = 0 382 | 383 | var onblobwrite = function (data, enc, cb) { 384 | plex.emit('write', data.length) 385 | cb(null, data) 386 | } 387 | 388 | var onblobread = function (data, enc, cb) { 389 | plex.emit('read', data.length) 390 | cb(null, data) 391 | } 392 | 393 | var plex = multiplex(function (stream, id) { 394 | var parts = id.split('/') 395 | 396 | if (parts[0] === 's') { 397 | var encode = function (data, enc, cb) { 398 | if (blobs) return drains.push(encode.bind(null, data, enc, cb)) 399 | cb(null, JSON.stringify(data)) 400 | } 401 | 402 | plex.emit('send-snapshot', parts[1]) 403 | hyperfs.readSnapshot(parts[1], function (err, rs) { 404 | if (err) return stream.destroy(err) 405 | pump(rs, through.obj(encode), stream) 406 | }) 407 | return 408 | } 409 | 410 | if (parts[0] === 'd') { 411 | plex.emit('send-data', parts[1]) 412 | blobs++ 413 | graph.cork() 414 | pump(hyperfs.createBlobReadStream(parts[1]), through(onblobwrite), stream, function () { 415 | blobs-- 416 | if (!blobs) { 417 | while (drains.length) drains.shift()() 418 | } 419 | graph.uncork() 420 | }) 421 | return 422 | } 423 | }) 424 | 425 | var logOutgoing = plex.createStream('hyperlog') 426 | var logIncoming = plex.receiveStream('hyperlog') 427 | 428 | var onnode = function (node, enc, cb) { 429 | var value = JSON.parse(node.value.toString()) 430 | var s = plex.createStream('s/' + value.snapshot) 431 | var hash = crypto.createHash('sha256') 432 | var space = crypto.randomBytes(32).toString('hex') 433 | var ptr = 0 434 | 435 | plex.emit('receive-snapshot', value.snapshot) 436 | 437 | var write = function (data, enc, cb) { 438 | var val = data.obj 439 | 440 | var done = function () { 441 | var meta = {special: val.special, deleted: val.deleted, mode: val.mode, uid: val.uid, gid: val.gid, ino: val.ino, rdev: val.rdev} 442 | hyperfs.put(value.snapshot, val.name, meta, function (err) { 443 | if (err) return cb(err) 444 | if (!val.ino) return cb(null, data) 445 | getInode(value.snapshot, val.ino, function (_, inode) { 446 | inode = inode || {refs: [], data: val.data && readablePath(val.data)} 447 | if (inode.refs.indexOf(val.name) === -1) inode.refs.push(val.name) 448 | putInode(value.snapshot, val.ino, inode, function (err) { 449 | if (err) return cb(err) 450 | cb(null, data) 451 | }) 452 | }) 453 | }) 454 | } 455 | 456 | if (!val.data) return done() 457 | 458 | hyperfs.hasBlob(val.data, function (err, exists) { 459 | if (err) return cb(err) 460 | if (exists) return done() 461 | plex.emit('receive-data', val.data) 462 | pump(plex.createStream('d/' + val.data, {chunked: true}), through(onblobread), hyperfs.createBlobWriteStream(function (err, key) { 463 | if (err) return cb(err) 464 | done() 465 | })) 466 | }) 467 | } 468 | 469 | var onhash = function (data, enc, cb) { 470 | snapshots.put(space + '!' 
+ lexint.pack(data.i, 'hex'), data.raw, {valueEncoding: 'utf-8'}, cb) 471 | } 472 | 473 | var updateHash = function (val, enc, cb) { 474 | var raw = val.toString() 475 | hash.update(raw) 476 | cb(null, {i: ptr++, raw: raw, obj: JSON.parse(raw)}) 477 | } 478 | 479 | // hwm should be to set to a really high number as we handle that in the protocol 480 | // TODO: make module that "buffers" in leveldb 481 | pump(s, through.obj({highWaterMark: 1000000}), through.obj(updateHash), concurrent.obj({maxConcurrency: 64}, write), through.obj(onhash), function (err) { 482 | if (err) return cb(err) 483 | 484 | if (hash.digest('hex') !== value.snapshot) return cb(new Error('checksum mismatch')) 485 | plex.emit('node', node) 486 | 487 | snapshots.put(value.snapshot, space, function (err) { 488 | if (err) return cb(err) 489 | cb(null, node) 490 | }) 491 | }) 492 | } 493 | 494 | var graph = log.replicate({live: opts.live, process: through.obj({highWaterMark: 100}, onnode)}) 495 | 496 | graph.on('error', function (err) { 497 | plex.destroy(err) 498 | }) 499 | 500 | pump(logIncoming, graph, logOutgoing, function () { 501 | plex.end() 502 | }) 503 | 504 | return plex 505 | } 506 | 507 | hyperfs.mount = function (key, mnt, opts) { 508 | if (!opts) opts = {} 509 | 510 | var mount = new events.EventEmitter() 511 | 512 | mount.id = null 513 | mount.layers = null 514 | mount.node = null 515 | mount.mountpoint = mnt 516 | mount.inodes = 0 517 | mount.unmount = hyperfs.unmount.bind(hyperfs, mnt) 518 | 519 | var wrap = function (cb) { 520 | return function (err) { 521 | if (err) return cb(fuse.errno(err.code)) 522 | cb(0) 523 | } 524 | } 525 | 526 | var get = function (name, cb) { 527 | var loop = function (i) { 528 | if (i < 0) return cb(ENOENT) 529 | hyperfs.get(mount.layers[i], name, function (err, file) { 530 | if (err) return loop(i - 1) 531 | if (file.deleted) return cb(ENOENT) 532 | cb(null, file, mount.layers[i]) 533 | }) 534 | } 535 | 536 | loop(mount.layers.length - 1) 537 | } 538 | 539 | var del = function (name, ino, cb) { 540 | if (opts.debug) console.log('delete:', name) 541 | var oninode = function (err) { 542 | if (err) return cb(err) 543 | getInode(mount.id, ino, function (err, data) { 544 | if (err) return cb() 545 | var i = data.refs.indexOf(name) 546 | if (i < 0) throw new Error('BAD INODE: ' + name) 547 | data.refs.splice(i, 1) 548 | if (data.refs.length) return putInode(mount.id, ino, data, cb) 549 | delInode(mount.id, ino, function (err) { 550 | if (err) return cb(err) 551 | if (!data.data) return cb() 552 | fs.unlink(path.join(home, data.data), cb) 553 | }) 554 | }) 555 | } 556 | 557 | var loop = function (i) { 558 | if (i === mount.layers.length - 1) return hyperfs.del(mount.id, name, oninode) 559 | hyperfs.get(mount.layers[i], name, function (err, file) { 560 | if (err) return loop(i + 1) 561 | hyperfs.put(mount.id, name, {deleted: true}, oninode) 562 | }) 563 | } 564 | 565 | loop(0) 566 | } 567 | 568 | var cow = function (name, cb) { // TODO: batch for me for speed/consistency 569 | get(name, function (err, file, layer) { 570 | if (err && name === '/') return cb(null, {mode: root.mode}) 571 | if (err) return cb(err) 572 | if (layer === mount.id) return cb(null, file) 573 | 574 | if (opts.debug) console.log('copy-on-write:', name) 575 | 576 | var store = function (data) { 577 | if (data.refs.length === 1) { 578 | hyperfs.put(mount.id, name, file, function (err) { 579 | if (err) return cb(err) 580 | cb(null, file) 581 | }) 582 | return 583 | } 584 | 585 | var i = 0 586 | var loop = function 
(err) { 587 | if (err) return cb(err) 588 | if (i === data.refs.length) return cb(null, file) 589 | var r = data.refs[i++] 590 | get(r, function (err, file) { 591 | if (err) return cb(err) 592 | hyperfs.put(mount.id, r, file, loop) 593 | }) 594 | } 595 | 596 | loop(0) 597 | } 598 | 599 | var copy = function (from, to, cb) { 600 | mkdirp(path.join(home, to, '..'), function (err) { 601 | if (err) return cb(err) 602 | if (file.special) return mknod(path.join(home, to), file.mode, file.rdev, cb) 603 | pump(fs.createReadStream(path.join(home, from)), fs.createWriteStream(path.join(home, to)), cb) 604 | }) 605 | } 606 | 607 | getInode(mount.id, file.ino, function (err) { 608 | if (!err) return cb(null, file) // already copied 609 | getInode(layer, file.ino, function (err, data) { 610 | if (err) return cb(err) 611 | 612 | if (!data.data && !file.special) { 613 | putInode(mount.id, file.ino, data, function (err) { 614 | if (err) return cb(err) 615 | store(data) 616 | }) 617 | return 618 | } 619 | 620 | var newPath = writeablePath() 621 | copy(data.data, newPath, function (err) { 622 | if (err) return cb(err) 623 | putInode(mount.id, file.ino, {refs: data.refs, data: newPath}, function (err) { 624 | if (err) return cb(err) 625 | store(data) 626 | }) 627 | }) 628 | }) 629 | }) 630 | }) 631 | } 632 | 633 | var ready = function (root) { 634 | var ops = {} 635 | 636 | ops.force = true 637 | ops.options = ['suid', 'dev'] 638 | ops.displayFolder = true 639 | 640 | ops.statfs = function (pathname, cb) { // TODO: return actual correct data here instead 641 | cb(0, { 642 | bsize: 1000000, 643 | frsize: 1000000, 644 | blocks: 1000000, 645 | bfree: 1000000, 646 | bavail: 1000000, 647 | files: 1000000, 648 | ffree: 1000000, 649 | favail: 1000000, 650 | fsid: 1000000, 651 | flag: 1000000, 652 | namemax: 1000000 653 | }) 654 | } 655 | 656 | ops.link = function (name, dest, cb) { 657 | if (opts.debug) console.log('link:', name, dest) 658 | 659 | cow(name, function (err, file) { 660 | if (err) return cb(fuse.errno(err.code)) 661 | hyperfs.put(mount.id, dest, file, function (err) { 662 | if (err) return cb(fuse.errno(err.code)) 663 | getInode(mount.id, file.ino, function (err, data) { 664 | if (err) return cb(fuse.errno(err.code)) 665 | data.refs.push(dest) 666 | putInode(mount.id, file.ino, data, wrap(cb)) 667 | }) 668 | }) 669 | }) 670 | } 671 | 672 | ops.fgetattr = function (name, fd, cb) { 673 | if (name === '/') return cb(0, root) 674 | 675 | var onfile = function (err, file, layer) { 676 | if (err) return cb(fuse.errno(err.code)) 677 | 678 | if (file.special && layer !== mount.id) { 679 | cow(name, function (err, file) { 680 | onfile(err, file, mount.id) 681 | }) 682 | return 683 | } 684 | 685 | var nlink = 1 686 | var onstat = function (err, stat) { 687 | if (err) return cb(fuse.errno(err.code)) 688 | cb(0, { 689 | mode: file.mode, 690 | size: file.size || stat.size, 691 | blksize: 4096, 692 | blocks: stat.blocks, 693 | dev: stat.dev, 694 | rdev: file.rdev || stat.rdev, 695 | nlink: nlink, 696 | ino: file.ino || stat.ino, 697 | uid: file.uid || process.getuid(), 698 | gid: file.gid || process.getgid(), 699 | mtime: new Date(file.mtime || 0), 700 | ctime: new Date(file.ctime || 0), 701 | atime: new Date(file.mtime || 0) 702 | }) 703 | } 704 | 705 | if (file.mode & 040000) return onstat(null, root) 706 | getInode(layer, file.ino, function (err, inode) { 707 | if (err && fd > -1) return fs.fstat(fd, onstat) 708 | if (err) return cb(fuse.errno(err.code)) // no inode found: fail the getattr instead of crashing the process 709 | 710
| 711 | nlink = inode.refs.length 712 | if (fd < 0) fs.lstat(path.join(home, inode.data), onstat) 713 | else fs.fstat(fd, onstat) 714 | }) 715 | } 716 | 717 | get(name, onfile) 718 | } 719 | 720 | ops.getattr = function (name, cb) { 721 | ops.fgetattr(name, -1, cb) 722 | } 723 | 724 | ops.readdir = function (name, cb) { 725 | if (!/\/$/.test(name)) name += '/' 726 | 727 | var key = toIndexKey(name) 728 | var result = [] 729 | 730 | var stream = dirStream(mount.layers[mount.layers.length - 1], key) 731 | for (var i = mount.layers.length - 2; i >= 0; i--) { 732 | stream = union(stream, dirStream(mount.layers[i], key), toCompareKey) 733 | } 734 | 735 | stream.on('error', wrap(cb)) 736 | 737 | stream.on('data', function (data) { 738 | if (data.value.deleted) return 739 | result.push(data.key.slice(data.key.lastIndexOf('/') + 1)) // haxx 740 | }) 741 | 742 | stream.on('end', function () { 743 | cb(0, result) 744 | }) 745 | } 746 | 747 | ops.truncate = function (name, size, cb) { 748 | if (opts.debug) console.log('truncate:', name, size) 749 | cow(name, function (err, file) { 750 | if (err) return cb(fuse.errno(err.code)) 751 | getInode(mount.id, file.ino, function (err, data) { 752 | if (err) return cb(fuse.errno(err.code)) 753 | fs.truncate(path.join(home, data.data), size, wrap(cb)) 754 | }) 755 | }) 756 | } 757 | 758 | ops.ftruncate = function (name, fd, size, cb) { 759 | if (opts.debug) console.log('ftruncate:', name, fd, size) 760 | fs.ftruncate(fd, size, wrap(cb)) 761 | } 762 | 763 | ops.fsync = function (name, fd, datasync, cb) { 764 | fs.fsync(fd, wrap(cb)) 765 | } 766 | 767 | ops.rename = function (name, dest, cb) { 768 | if (opts.debug) console.log('rename:', name, dest) 769 | ops.link(name, dest, function (errno) { 770 | if (errno) return cb(errno) 771 | ops.unlink(name, cb) 772 | }) 773 | } 774 | 775 | ops.mknod = function (name, mode, dev, cb) { 776 | if (opts.debug) console.log('mknod:', name, mode, dev) 777 | var inode = ++mount.inodes 778 | var filename = writeablePath() 779 | 780 | putInode(mount.id, inode, {data: filename, refs: [name]}, function (err) { 781 | if (err) return cb(fuse.errno(err.code)) 782 | mkdirp(path.join(home, filename, '..'), function (err) { 783 | if (err) return cb(fuse.errno(err.code)) 784 | mknod(path.join(home, filename), mode, dev, function (err) { 785 | if (err) return cb(fuse.errno(err.code)) 786 | hyperfs.put(mount.id, name, {special: true, rdev: dev, mode: mode, ino: inode}, wrap(cb)) 787 | }) 788 | }) 789 | }) 790 | } 791 | 792 | ops.open = function (name, flags, cb) { 793 | var open = function (layer, ino) { 794 | getInode(layer, ino, function (err, data) { 795 | if (err) return cb(fuse.errno(err.code)) 796 | fs.open(path.join(home, data.data), flags, function (err, fd) { 797 | if (err) return cb(fuse.errno(err.code)) 798 | cb(0, fd) 799 | }) 800 | }) 801 | } 802 | 803 | var readonly = function () { 804 | get(name, function (err, file, layer) { 805 | if (err) return cb(fuse.errno(err.code)) 806 | if (file.special) return writeMaybe() // special file - always cow 807 | open(layer, file.ino) 808 | }) 809 | } 810 | 811 | var writeMaybe = function () { 812 | cow(name, function (err, file) { 813 | if (err) return cb(fuse.errno(err.code)) 814 | open(mount.id, file.ino) 815 | }) 816 | } 817 | 818 | if (flags === 0) readonly() // readonly 819 | else writeMaybe() // cow 820 | } 821 | 822 | ops.create = function (name, mode, cb) { 823 | if (opts.debug) console.log('create:', name, mode) 824 | var inode = ++mount.inodes 825 | var filename = writeablePath() 
826 | 827 | putInode(mount.id, inode, {data: filename, refs: [name]}, function (err) { 828 | if (err) return cb(fuse.errno(err.code)) 829 | mkdirp(path.join(home, filename, '..'), function (err) { 830 | if (err) return cb(fuse.errno(err.code)) 831 | fs.open(path.join(home, filename), 'w+', mode, function (err, fd) { 832 | if (err) return cb(fuse.errno(err.code)) 833 | hyperfs.put(mount.id, name, {mode: mode, ino: inode}, function (err) { 834 | if (err) return cb(fuse.errno(err.code)) 835 | cb(0, fd) 836 | }) 837 | }) 838 | }) 839 | }) 840 | } 841 | 842 | ops.unlink = function (name, cb) { 843 | if (opts.debug) console.log('unlink:', name) 844 | cow(name, function (err, file) { // TODO: don't copy file if refs === 1 and deleting 845 | if (err) return cb(fuse.errno(err.code)) 846 | del(name, file.ino, wrap(cb)) 847 | }) 848 | } 849 | 850 | ops.mkdir = function (name, mode, cb) { 851 | if (opts.debug) console.log('mkdir:', name, mode) 852 | var inode = ++mount.inodes 853 | putInode(mount.id, inode, {refs: [name]}, function (err) { 854 | if (err) return cb(fuse.errno(err.code)) 855 | hyperfs.put(mount.id, name, {mode: mode | 040000, ino: inode}, wrap(cb)) 856 | }) 857 | } 858 | 859 | ops.rmdir = function (name, cb) { 860 | if (opts.debug) console.log('rmdir:', name) 861 | cow(name, function (err, file) { 862 | if (err) return cb(fuse.errno(err.code)) 863 | del(name, file.ino, wrap(cb)) 864 | }) 865 | } 866 | 867 | ops.write = function (name, fd, buf, len, offset, cb) { 868 | fs.write(fd, buf, 0, len, offset, function (err, bytes) { 869 | if (err) return cb(fuse.errno(err.code)) 870 | cb(bytes) 871 | }) 872 | } 873 | 874 | ops.read = function (name, fd, buf, len, offset, cb) { 875 | fs.read(fd, buf, 0, len, offset, function (err, bytes) { 876 | if (err) return cb(fuse.errno(err.code)) 877 | cb(bytes) 878 | }) 879 | } 880 | 881 | ops.release = function (name, fd, cb) { 882 | fs.close(fd, wrap(cb)) 883 | } 884 | 885 | ops.symlink = function (name, dest, cb) { 886 | if (opts.debug) console.log('symlink:', name, dest) 887 | ops.create(dest, 41453, function (errno, fd) { 888 | if (errno) return cb(errno) 889 | 890 | var buf = new Buffer(name) 891 | var pos = 0 892 | var loop = function () { 893 | fs.write(fd, buf, 0, buf.length, pos, function (err, bytes) { 894 | if (err) return cb(fuse.errno(err.code)) 895 | if (bytes === buf.length) return fs.close(fd, wrap(cb)) 896 | pos += bytes 897 | buf = buf.slice(bytes) 898 | loop() 899 | }) 900 | } 901 | 902 | loop() 903 | }) 904 | } 905 | 906 | ops.readlink = function (name, cb) { 907 | get(name, function (err, file, layer) { 908 | if (err) return cb(fuse.errno(err.code)) 909 | getInode(layer, file.ino, function (err, data) { 910 | if (err) return cb(fuse.errno(err.code)) 911 | fs.readFile(path.join(home, data.data), 'utf-8', function (err, res) { 912 | if (err) return cb(fuse.errno(err.code)) 913 | cb(0, res) 914 | }) 915 | }) 916 | }) 917 | } 918 | 919 | ops.chmod = function (name, mode, cb) { 920 | if (opts.debug) console.log('chmod:', name, mode) 921 | cow(name, function (err, file) { 922 | if (err) return cb(fuse.errno(err.code)) 923 | file.mode = mode 924 | hyperfs.put(mount.id, name, file, wrap(cb)) 925 | }) 926 | } 927 | 928 | ops.chown = function (name, uid, gid, cb) { 929 | if (opts.debug) console.log('chown:', name, uid, gid) 930 | cow(name, function (err, file) { 931 | if (err) return cb(fuse.errno(err.code)) 932 | if (uid > -1) file.uid = uid 933 | if (gid > -1) file.gid = gid 934 | hyperfs.put(mount.id, name, file, wrap(cb)) 935 | }) 936 | } 
937 | 938 | ops.utimens = function (name, ctime, mtime, cb) { 939 | if (opts.time === false) return cb(0) 940 | cow(name, function (err, file) { 941 | if (err) return cb(fuse.errno(err.code)) 942 | file.ctime = ctime.getTime() 943 | file.mtime = mtime.getTime() 944 | hyperfs.put(mount.id, name, file, wrap(cb)) 945 | }) 946 | } 947 | 948 | fuse.mount(mnt, ops, function (err) { 949 | if (err) return mount.emit('error', err) 950 | mount.emit('ready') 951 | }) 952 | } 953 | 954 | var onlayers = function (err, layers) { 955 | if (err) return mount.emit('error', err) 956 | 957 | var toSnapshot = function (val) { 958 | return val.snapshot 959 | } 960 | 961 | var toNode = function (val) { 962 | return val.node 963 | } 964 | 965 | mount.layers = layers.map(toSnapshot).concat(mount.id) // push writable layer 966 | mount.nodes = layers.map(toNode) 967 | 968 | var done = function () { 969 | mkdirp(mnt, function (err) { 970 | if (err) return mount.emit('error', err) 971 | fs.stat(mnt, function (err, st) { 972 | if (err) return mount.emit('error', err) 973 | ready(st) 974 | }) 975 | }) 976 | } 977 | 978 | mount.inodes = 1024 979 | var loop = function (i) { 980 | if (i < 0) return done() 981 | countInodes(mount.layers[i], function (_, cnt) { 982 | if (cnt) mount.inodes = Math.max(cnt, mount.inodes) 983 | loop(i - 1) 984 | }) 985 | } 986 | 987 | loop(mount.layers.length - 1) 988 | } 989 | 990 | volumes.get(key, function (err, v) { 991 | if (err) return mount.emit('error', new Error('Volume does not exist')) 992 | 993 | mount.id = key 994 | mount.mountpoint = mnt 995 | mount.node = v.node 996 | if (!v.node) return onlayers(null, []) 997 | hyperfs.ancestors(v.node, onlayers) 998 | }) 999 | 1000 | return mount 1001 | } 1002 | 1003 | return hyperfs 1004 | } 1005 | -------------------------------------------------------------------------------- /logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mafintosh/hyperfs/86f9a45a2e39bc534bf952dd73c1f02ba1a38ca8/logo.png -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hyperfs", 3 | "version": "2.2.3", 4 | "description": "A content-addressable union file system that replicates and is built on top of fuse, leveldb and node", 5 | "main": "index.js", 6 | "bin": { 7 | "hyperfs": "bin.js" 8 | }, 9 | "dependencies": { 10 | "cuid": "^1.2.5", 11 | "execspawn": "^0.2.1", 12 | "from2": "^1.3.0", 13 | "fuse-bindings": "^2.4.3", 14 | "hyperlog": "^3.7.0", 15 | "level": "^1.0.0", 16 | "level-enumerate": "^1.0.1", 17 | "level-temp": "^2.1.0", 18 | "lexicographic-integer": "^1.1.0", 19 | "minimist": "^1.1.1", 20 | "mkdirp": "^0.5.1", 21 | "mknod": "^1.0.3", 22 | "multiplex": "^6.0.4", 23 | "pretty-bytes": "^2.0.1", 24 | "pump": "^1.0.0", 25 | "single-line-log": "^0.4.1", 26 | "sorted-union-stream": "^2.1.1", 27 | "subleveldown": "^2.0.0", 28 | "through2": "^0.6.5", 29 | "through2-concurrent": "^1.0.0", 30 | "transport-stream": "^1.1.0" 31 | }, 32 | "devDependencies": {}, 33 | "repository": { 34 | "type": "git", 35 | "url": "https://github.com/mafintosh/hyperfs.git" 36 | }, 37 | "author": "Mathias Buus (@mafintosh)", 38 | "license": "MIT", 39 | "bugs": { 40 | "url": "https://github.com/mafintosh/hyperfs/issues" 41 | }, 42 | "homepage": "https://github.com/mafintosh/hyperfs" 43 | } 44 | --------------------------------------------------------------------------------