├── test ├── test.gif ├── protocol_test.js ├── helpers_test.js └── fdfs_test.js ├── index.js ├── Makefile ├── lib ├── logger.js ├── helpers.js ├── tracker.js ├── fdfs.js ├── protocol.js └── storage.js ├── package.json ├── LICENSE-MIT ├── .gitignore ├── .npmignore └── README.md /test/test.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chenboxiang/fdfs-client/HEAD/test/test.gif -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Author: chenboxiang 3 | * Date: 14-6-13 4 | * Time: 下午6:48 5 | */ 6 | 'use strict'; 7 | 8 | module.exports = require('./lib/fdfs'); -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TESTS = test/**/*_test.js 2 | REPORTER = spec 3 | TIMEOUT = 10000 4 | MOCHA_OPTS = 5 | 6 | install: 7 | @npm install --registry=http://r.cnpmjs.org 8 | 9 | test: install 10 | @NODE_ENV=test ./node_modules/mocha/bin/mocha \ 11 | --reporter $(REPORTER) \ 12 | --timeout $(TIMEOUT) \ 13 | --harmony \ 14 | $(MOCHA_OPTS) \ 15 | $(TESTS) 16 | 17 | test-cov: 18 | @rm -f coverage.html 19 | @$(MAKE) test MOCHA_OPTS='--require blanket' REPORTER=html-cov > coverage.html 20 | @ls -lh coverage.html 21 | 22 | test-all: test test-cov 23 | 24 | .PHONY: test -------------------------------------------------------------------------------- /lib/logger.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Author: chenboxiang 3 | * Date: 14-6-15 4 | * Time: 下午1:59 5 | */ 6 | 'use strict'; 7 | 8 | var _logger = console 9 | 10 | var logger = { 11 | setLogger: function(newLogger) { 12 | _logger = newLogger 13 | } 14 | } 15 | 16 | var methods = 'log trace debug info warn error fatal'.split(' ') 17 | 
methods.forEach(function(method) { 18 | logger[method] = function() { 19 | var oriLog = _logger[method] || _logger['log'] 20 | oriLog.apply(_logger, arguments) 21 | } 22 | }) 23 | 24 | module.exports = logger 25 | 26 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "fdfs-client", 3 | "version": "0.5.3", 4 | "description": "Client for FastDFS", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "keywords": [ 10 | "fdfs", "fastdfs" 11 | ], 12 | "author": "chenboxiang ", 13 | "license": "MIT", 14 | "dependencies": { 15 | "bignumber.js": "1.4.1", 16 | "is-type-of": "0.3.0", 17 | "lodash": "2.4.1" 18 | }, 19 | "devDependencies": { 20 | "blanket": "1.1.6", 21 | "co": "3.0.6", 22 | "debug": "1.0.2", 23 | "expect.js": "0.3.1", 24 | "mocha": "1.20.1", 25 | "thunkify-wrap": "0.1.2" 26 | }, 27 | "config": { 28 | "blanket": { 29 | "pattern": "//fdfs-client/lib/[^/.]+\\.js$/i", 30 | "onlyCwd": true 31 | } 32 | }, 33 | "repository": { 34 | "type": "git", 35 | "url": "https://github.com/chenboxiang/fdfs-client" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014 boxiang chen 2 | 3 | Permission is hereby granted, free of charge, to any person 4 | obtaining a copy of this software and associated documentation 5 | files (the "Software"), to deal in the Software without 6 | restriction, including without limitation the rights to use, 7 | copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the 9 | Software is furnished to do so, subject to the following 10 | conditions: 11 | 12 | The above copyright notice and this permission notice shall be 13 | 
included in all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 17 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 19 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 20 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm 2 | 3 | ## Directory-based project format 4 | .idea/ 5 | # if you remove the above rule, at least ignore user-specific stuff: 6 | # .idea/workspace.xml 7 | # .idea/tasks.xml 8 | # and these sensitive or high-churn files: 9 | # .idea/dataSources.ids 10 | # .idea/dataSources.xml 11 | # .idea/sqlDataSources.xml 12 | # .idea/dynamic.xml 13 | 14 | ## File-based project format 15 | *.ipr 16 | *.iws 17 | *.iml 18 | 19 | ## Additional for IntelliJ 20 | out/ 21 | 22 | # generated by mpeltonen/sbt-idea plugin 23 | .idea_modules/ 24 | 25 | # generated by JIRA plugin 26 | atlassian-ide-plugin.xml 27 | 28 | # generated by Crashlytics plugin (for Android Studio and Intellij) 29 | com_crashlytics_export_strings.xml 30 | 31 | 32 | 33 | # node ignore 34 | 35 | # Logs 36 | logs/ 37 | *.log 38 | 39 | # Runtime data 40 | pids 41 | *.pid 42 | *.seed 43 | 44 | # Directory for instrumented libs generated by jscoverage/JSCover 45 | lib-cov/ 46 | 47 | # Coverage directory used by tools like istanbul 48 | coverage/ 49 | 50 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 51 | .grunt 52 | 53 | # Compiled binary addons 
(http://nodejs.org/api/addons.html) 54 | build/Release/ 55 | 56 | # Dependency directory 57 | # Deployed apps should consider commenting this line out: 58 | # see https://npmjs.org/doc/faq.html#Should-I-check-my-node_modules-folder-into-git 59 | node_modules/ 60 | npm-debug.log -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm 2 | 3 | ## Directory-based project format 4 | .idea/ 5 | # if you remove the above rule, at least ignore user-specific stuff: 6 | # .idea/workspace.xml 7 | # .idea/tasks.xml 8 | # and these sensitive or high-churn files: 9 | # .idea/dataSources.ids 10 | # .idea/dataSources.xml 11 | # .idea/sqlDataSources.xml 12 | # .idea/dynamic.xml 13 | 14 | ## File-based project format 15 | *.ipr 16 | *.iws 17 | *.iml 18 | 19 | ## Additional for IntelliJ 20 | out/ 21 | 22 | # generated by mpeltonen/sbt-idea plugin 23 | .idea_modules/ 24 | 25 | # generated by JIRA plugin 26 | atlassian-ide-plugin.xml 27 | 28 | # generated by Crashlytics plugin (for Android Studio and Intellij) 29 | com_crashlytics_export_strings.xml 30 | 31 | 32 | 33 | # node ignore 34 | 35 | # Logs 36 | logs/ 37 | *.log 38 | 39 | # Runtime data 40 | pids 41 | *.pid 42 | *.seed 43 | 44 | # Directory for instrumented libs generated by jscoverage/JSCover 45 | lib-cov/ 46 | 47 | # Coverage directory used by tools like istanbul 48 | coverage/ 49 | 50 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 51 | .grunt 52 | 53 | # Compiled binary addons (http://nodejs.org/api/addons.html) 54 | build/Release/ 55 | 56 | # Dependency directory 57 | # Deployed apps should consider commenting this line out: 58 | # see https://npmjs.org/doc/faq.html#Should-I-check-my-node_modules-folder-into-git 59 | node_modules/ 60 | npm-debug.log 61 | test/ 62 | examples/ 
-------------------------------------------------------------------------------- /test/protocol_test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Author: chenboxiang 3 | * Date: 14-6-15 4 | * Time: 下午6:43 5 | */ 6 | 'use strict' 7 | 8 | var expect = require('expect.js') 9 | var protocol = require('../lib/protocol') 10 | 11 | describe('protocol', function() { 12 | describe('#packHeader(command, bodyLength, status)', function() { 13 | it('should pack a correct protocol header', function() { 14 | var buffer = protocol.packHeader(1, 0x11223344, 0) 15 | expect(buffer.readUInt8(0)).to.be(0x00) 16 | expect(buffer.readUInt8(1)).to.be(0x00) 17 | expect(buffer.readUInt8(2)).to.be(0x00) 18 | expect(buffer.readUInt8(3)).to.be(0x00) 19 | expect(buffer.readUInt8(4)).to.be(0x11) 20 | expect(buffer.readUInt8(5)).to.be(0x22) 21 | expect(buffer.readUInt8(6)).to.be(0x33) 22 | expect(buffer.readUInt8(7)).to.be(0x44) 23 | expect(buffer.readUInt8(8)).to.be(0x01) 24 | expect(buffer.readUInt8(9)).to.be(0x00) 25 | }) 26 | }) 27 | 28 | describe('#packMetaData(metaData)', function() { 29 | it('should serialize structure meta to correct string which will be send to server', function() { 30 | var raw = protocol.packMetaData({meta1: 'test1', meta2: 'test2'}) 31 | expect(raw).to.be('meta1\u0002test1\u0001meta2\u0002test2') 32 | }) 33 | }) 34 | 35 | describe('#parseMetaData(raw)', function() { 36 | it('should deserialize raw meta to correct structure meta', function() { 37 | var parsed = protocol.parseMetaData('meta1\u0002test1\u0001meta2\u0002test2') 38 | expect(parsed).to.eql({meta1: 'test1', meta2: 'test2'}) 39 | }) 40 | }) 41 | }) -------------------------------------------------------------------------------- /test/helpers_test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Author: chenboxiang 3 | * Date: 14-6-14 4 | * Time: 下午10:53 5 | */ 6 | 'use strict' 7 | 8 | var helpers = 
require('../lib/helpers') 9 | var expect = require('expect.js') 10 | 11 | describe('helpers', function() { 12 | describe('#number2Buffer(number, bytes)', function() { 13 | it('number should convert to correct buffer', function() { 14 | var buffer = helpers.number2Buffer(0x11223344, 4) 15 | expect(buffer.length).to.be(4) 16 | expect(buffer.readUInt8(0)).to.be(0x11) 17 | expect(buffer.readUInt8(1)).to.be(0x22) 18 | expect(buffer.readUInt8(2)).to.be(0x33) 19 | expect(buffer.readUInt8(3)).to.be(0x44) 20 | }) 21 | }) 22 | 23 | describe('#buildConstProp(value)', function() { 24 | it('should return a only read property object which pass to Object.defineProperty', function() { 25 | var prop = helpers.buildConstProp(1) 26 | expect(prop).to.eql({ 27 | configurable: false, 28 | writable: false, 29 | value: 1 30 | }) 31 | }) 32 | }) 33 | 34 | describe('#trim(str)', function() { 35 | it('should be trim and remove all \\u0000 char which in left and right', function() { 36 | var ori = '\u0000\u0000char\u0000' 37 | expect(helpers.trim(ori)).to.be('char') 38 | }) 39 | }) 40 | 41 | describe('#id2gf(fileId)', function() { 42 | it('should return a object which contains correct group and filename', function() { 43 | var gf = helpers.id2gf('group1/M00/00/00/wKgAalHctyGAIpSuAAAFxYHYCdQ59-part3.conf') 44 | expect(gf).to.eql({ 45 | group: 'group1', 46 | filename: 'M00/00/00/wKgAalHctyGAIpSuAAAFxYHYCdQ59-part3.conf' 47 | }) 48 | }) 49 | }) 50 | }) -------------------------------------------------------------------------------- /lib/helpers.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Author: chenboxiang 3 | * Date: 14-6-13 4 | * Time: 下午8:50 5 | */ 6 | 'use strict' 7 | 8 | var BigNumber = require('bignumber.js') 9 | var Buffer = require('buffer').Buffer 10 | var trimReg = /^\u0000+|\u0000+$/g 11 | 12 | module.exports = { 13 | /** 14 | * 将unsigned number转换为Big-Endian buffer 15 | * @param number 16 | * @param bytes buffer的字节数 17 | */ 18 | 
number2Buffer: function(number, bytes) { 19 | if (number < 0) { 20 | throw new Error('"number" must greater than or equal to zero.') 21 | } 22 | bytes = bytes || 8 23 | var bn = new BigNumber(String(number)) 24 | // 转换为16进制字符串 25 | var hex = bn.toString(16) 26 | 27 | // ------- 将hex length补充成bytes x 2,不足则高位补0,超过则去掉高位 28 | var length = hex.length 29 | var targetLength = bytes * 2 30 | if (length < targetLength) { 31 | var i = targetLength - length 32 | while (i > 0) { 33 | hex = '0' + hex 34 | i-- 35 | } 36 | 37 | } else if (length > targetLength) { 38 | hex = hex.substring(length - targetLength) 39 | } 40 | 41 | // ------ 填充到buffer里,高位在前 42 | var buffer = new Buffer(bytes) 43 | var offset = 0 44 | while (offset < bytes) { 45 | var bn = Number("0x" + hex.substring(offset * 2, (offset * 2) + 2)) 46 | buffer.writeUInt8(bn, offset) 47 | offset++ 48 | } 49 | 50 | return buffer 51 | }, 52 | 53 | /** 54 | * 构造Object.defineProperty中指定的property属性,并达到const声明的效果,只读,不可写 55 | * @param value 56 | * @returns {{configurable: boolean, writable: boolean, value: *}} 57 | */ 58 | buildConstProp: function(value) { 59 | return { 60 | configurable: false, 61 | writable: false, 62 | value: value 63 | } 64 | }, 65 | 66 | /** 67 | * 在String.prototype.trim的基础上再去掉\u0000 68 | * 默认trim的处理见下: 69 | * https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/String/trim 70 | * http://blog.stevenlevithan.com/archives/faster-trim-javascript 71 | * http://jsperf.com/mega-trim-test 72 | * @param str 73 | * @returns {string} 74 | */ 75 | trim: function(str) { 76 | return str.trim().replace(trimReg, '') 77 | }, 78 | 79 | /** 80 | * file id conver to group and filename 81 | * @param fileId 82 | * @returns {{group: string, filename: string}} 83 | */ 84 | id2gf: function(fileId) { 85 | var pos = fileId.indexOf('/') 86 | return { 87 | group: fileId.substring(0, pos), 88 | filename: fileId.substring(pos + 1) 89 | } 90 | } 91 | } 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > 很抱歉,此项目已停止维护! 2 | 3 | # Nodejs Client for FastDFS 4 | 5 | [FastDFS](http://bbs.chinaunix.net/forum-240-1.html) 是国人开发的分布式的小文件存储系统。这个项目是FastDFS的nodejs客户端,用来与FastDFS Server进行交互,进行文件的相关操作。我测试过的server版本是4.0.6。 6 | 配合[co](https://github.com/visionmedia/co)使用,可以用[co-fdfs-client](https://github.com/chenboxiang/co-fdfs-client)。 7 | 8 | # 安装 9 | ```shell 10 | npm install fdfs-client 11 | ``` 12 | 13 | # 使用 14 | ```javascript 15 | var fdfs = new FdfsClient({ 16 | // tracker servers 17 | trackers: [ 18 | { 19 | host: 'tracker.fastdfs.com', 20 | port: 22122 21 | } 22 | ], 23 | // 默认超时时间10s 24 | timeout: 10000, 25 | // 默认后缀 26 | // 当获取不到文件后缀时使用 27 | defaultExt: 'txt', 28 | // charset默认utf8 29 | charset: 'utf8' 30 | }) 31 | ``` 32 | 以上是一些基本配置,你还可以自定义你的日志输出工具,默认是使用console 33 | 例如你要使用[debug](https://github.com/visionmedia/debug)作为你的日志输出工具,你可以这么做: 34 | ```javascript 35 | var debug = require('debug')('fdfs') 36 | var fdfs = new FdfsClient({ 37 | // tracker servers 38 | trackers: [ 39 | { 40 | host: 'tracker.fastdfs.com', 41 | port: 22122 42 | } 43 | ], 44 | logger: { 45 | log: debug 46 | } 47 | }) 48 | ``` 49 | 50 | ### 上传文件 51 | 注:以下fileId为group + '/' + filename,以下的所有操作使用的fileId都是一样 52 | 53 | 通过本地文件名上传 54 | ```javascript 55 | fdfs.upload('test.gif', function(err, fileId) { 56 | // fileId 为 group + '/' + filename 57 | }) 58 | ``` 59 | 60 | 上传Buffer 61 | ```javascript 62 | var fs = require('fs') 63 | // 注意此处的buffer获取方式只为演示功能,实际不会这么去构建buffer 64 | var buffer = fs.readFileSync('test.gif') 65 | fdfs.upload(buffer, function(err, fileId) { 66 | 67 | }) 68 | ``` 69 | 70 | ReadableStream 71 | ```javascript 72 | var fs = require('fs') 73 | var rs = fs.createReadStream('test.gif') 74 | fdfs.upload(rs, function(err, fileId) { 75 | 76 | }) 77 | ``` 78 | 79 | 其他一些options,作为第2个参数传入 80 | ```js 81 | fdfs.upload('test.gif', 
{ 82 | // 指定文件存储的group,不指定则由tracker server分配 83 | group: 'group1', 84 | // file bytes, file参数为ReadableStream时必须指定 85 | size: 1024, 86 | // 上传文件的后缀,不指定则获取file参数的后缀,不含(.) 87 | ext: 'jpg' 88 | 89 | }, function(err, fileId) { 90 | 91 | }) 92 | ``` 93 | 94 | ### 下载文件 95 | 96 | 下载到本地 97 | ```js 98 | fdfs.download(fileId, 'test_download.gif', function(err) { 99 | 100 | }) 101 | ``` 102 | 103 | 下载到WritableStream 104 | ```js 105 | var fs = require('fs') 106 | var ws = fs.createWriteStream('test_download.gif') 107 | fdfs.download(fileId, ws, function(err) { 108 | 109 | }) 110 | 111 | ``` 112 | 113 | 下载文件片段 114 | ```js 115 | fdfs.download(fileId, { 116 | target: 'test_download.part', 117 | offset: 5, 118 | bytes: 5 119 | }, function(err) { 120 | 121 | }) 122 | ``` 123 | 124 | ### 删除文件 125 | 126 | ```js 127 | fdfs.del(fileId, function(err) { 128 | 129 | }) 130 | ``` 131 | 132 | ### 获取文件信息 133 | 134 | ```js 135 | fdfs.getFileInfo(fileId, function(err, fileInfo) { 136 | // fileInfo有4个属性 137 | // { 138 | // // 文件大小 139 | // size: 140 | // // 文件创建的时间戳,单位为秒 141 | // timestamp: 142 | // // 校验和 143 | // crc32: 144 | // // 最初上传到的storage server的ip 145 | // addr: 146 | // } 147 | console.log(fileInfo) 148 | }) 149 | ``` 150 | 151 | ### 文件的Meta Data 152 | 153 | 设置Meta Data, 我只贴出来文件签名信息吧,flag字段如果不传则默认是O 154 | ```js 155 | /** 156 | * @param fileId 157 | * @param metaData {key1: value1, key2: value2} 158 | * @param flag 'O' for overwrite all old metadata (default) 159 | 'M' for merge, insert when the meta item not exist, otherwise update it 160 | * @param callback 161 | */ 162 | fdfs.setMetaData(fileId, metaData, flag, callback) 163 | ``` 164 | 165 | 获取Meta Data 166 | ```js 167 | fdfs.getMetaData(fileId, function(err, metaData) { 168 | console.log(metaData) 169 | }) 170 | ``` 171 | 172 | 173 | ### 错误处理 174 | 175 | 当无tracker可用时会触发error事件 176 | ```javascript 177 | fdfs.on('error', function(err) { 178 | // 在这里处理错误 179 | }) 180 | ``` 181 | 182 | # 测试 183 | 
测试时需要用到co,所以需要node版本0.11+。测试前请确保配置好FastDFS的Server地址,为tracker.fastdfs.com:22122,或者修改test/fdfs_test.js中的client配置,然后执行如下命令: 184 | ```shell 185 | make test 186 | ``` 187 | 188 | # 帮助 189 | 有任何问题请提交到Github Issue里 190 | 191 | # 授权协议 192 | MIT -------------------------------------------------------------------------------- /test/fdfs_test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Author: chenboxiang 3 | * Date: 14-6-16 4 | * Time: 上午9:06 5 | */ 6 | 'use strict'; 7 | 8 | var FdfsClient = require('../lib/fdfs') 9 | var path = require('path') 10 | var co = require('co') 11 | var thunkify = require('thunkify-wrap') 12 | var expect = require('expect.js') 13 | var fs = require('fs') 14 | var debug = require('debug')('fdfs') 15 | 16 | /** 17 | * 比较两个Buffer对象是否相等 18 | * @param {Buffer} src 19 | * @param {Buffer} target 20 | * @return {Boolean} 21 | */ 22 | function isBufferEqual(src, target) { 23 | if (src.length !== target.length) { 24 | return false 25 | } 26 | for (var i = 0; i < src.length; i++) { 27 | if (src[i] !== target[i]) { 28 | return false 29 | } 30 | } 31 | return true 32 | } 33 | 34 | describe('fdfs', function() { 35 | var fdfs 36 | before(function() { 37 | fdfs = new FdfsClient({ 38 | trackers: [ 39 | { 40 | host: 'tracker.fastdfs.com', 41 | port: 22122 42 | } 43 | ], 44 | logger: { 45 | log: debug 46 | } 47 | }) 48 | thunkify(fdfs, fdfs, 'upload download del remove setMetaData getMetaData getFileInfo'.split(' ')) 49 | }) 50 | 51 | describe('#upload(file, options, callback), #download(fileId, options, callback), #del(fileId, callback)', function() { 52 | it('should upload, download and delete file successfully', function(done) { 53 | co(function *() { 54 | var src = path.join(__dirname, 'test.gif') 55 | var fileId = yield fdfs.upload(src) 56 | // 验证下载的文件 57 | var target = path.join(__dirname, 'test_download.gif') 58 | yield fdfs.download(fileId, target) 59 | expect(isBufferEqual(fs.readFileSync(src), 
fs.readFileSync(target))).to.be(true) 60 | fs.unlinkSync(target) 61 | 62 | // 删除文件 63 | yield fdfs.del(fileId) 64 | // 再下载下看看是否已删除 65 | var error = null 66 | try { 67 | yield fdfs.download(fileId, target) 68 | 69 | } catch (err) { 70 | error = err 71 | } 72 | fs.unlinkSync(target) 73 | expect(error).to.be.an(Error) 74 | 75 | })(done) 76 | }) 77 | 78 | it('should download by specified offset and bytes', function(done) { 79 | co(function *() { 80 | var src = path.join(__dirname, 'test.gif') 81 | var fileId = yield fdfs.upload(src) 82 | var target = path.join(__dirname, 'test_download.gif') 83 | var options = { 84 | target: target, 85 | offset: 5, 86 | bytes: 5 87 | } 88 | yield fdfs.download(fileId, options) 89 | expect(isBufferEqual(fs.readFileSync(src).slice(options.offset, options.offset + options.bytes), fs.readFileSync(target))).to.be(true) 90 | fs.unlinkSync(target) 91 | 92 | })(done) 93 | }) 94 | 95 | it('should option.ext be the uploaded file\'s ID extension', function(done) { 96 | co(function *() { 97 | var src = path.join(__dirname, 'test.gif') 98 | var fileId = yield fdfs.upload(src, { 99 | ext: 'jpg' 100 | }) 101 | expect(path.extname(fileId)).to.be('.jpg') 102 | yield fdfs.del(fileId) 103 | 104 | })(done) 105 | }) 106 | 107 | it('should option.group be the uploaded file\'s group', function(done) { 108 | co(function *() { 109 | var src = path.join(__dirname, 'test.gif') 110 | var fileId = yield fdfs.upload(src, { 111 | group: 'group1' 112 | }) 113 | expect(fileId.substring(0, fileId.indexOf('/'))).to.be('group1') 114 | yield fdfs.del(fileId) 115 | 116 | })(done) 117 | }) 118 | }) 119 | 120 | describe('#setMetaData(fileId, metaData, flag, callback), #getMetaData(fileId, callback)', function() { 121 | it('meta data by set should equal to the meta data by get', function(done) { 122 | co(function *() { 123 | var src = path.join(__dirname, 'test.gif') 124 | var fileId = yield fdfs.upload(src) 125 | var metaData = { 126 | meta1: 'value2', 127 | meta2: 'value2' 
128 | } 129 | yield fdfs.setMetaData(fileId, metaData) 130 | var metaDataFetched = yield fdfs.getMetaData(fileId) 131 | expect(metaDataFetched).to.be.eql(metaData) 132 | 133 | yield fdfs.del(fileId) 134 | 135 | })(done) 136 | }) 137 | 138 | it('meta data should be overridden', function(done) { 139 | co(function *() { 140 | var src = path.join(__dirname, 'test.gif') 141 | var fileId = yield fdfs.upload(src) 142 | var metaData = { 143 | meta1: 'value2', 144 | meta2: 'value2' 145 | } 146 | yield fdfs.setMetaData(fileId, metaData) 147 | yield fdfs.setMetaData(fileId, { 148 | meta3: 'value3' 149 | }, 'O') 150 | var metaDataFetched = yield fdfs.getMetaData(fileId) 151 | expect(metaDataFetched).to.be.eql({ 152 | meta3: 'value3' 153 | }) 154 | 155 | yield fdfs.del(fileId) 156 | 157 | })(done) 158 | }) 159 | 160 | it('meta data should be merged', function(done) { 161 | co(function *() { 162 | var src = path.join(__dirname, 'test.gif') 163 | var fileId = yield fdfs.upload(src) 164 | var metaData = { 165 | meta1: 'value2', 166 | meta2: 'value2' 167 | } 168 | yield fdfs.setMetaData(fileId, metaData) 169 | yield fdfs.setMetaData(fileId, { 170 | meta3: 'value3' 171 | }, 'M') 172 | var metaDataFetched = yield fdfs.getMetaData(fileId) 173 | expect(metaDataFetched).to.be.eql({ 174 | meta1: 'value2', 175 | meta2: 'value2', 176 | meta3: 'value3' 177 | }) 178 | 179 | yield fdfs.del(fileId) 180 | 181 | })(done) 182 | }) 183 | }) 184 | 185 | describe('#getFileInfo(fileId, callback)', function() { 186 | it('should return file info', function(done) { 187 | co(function *() { 188 | var src = path.join(__dirname, 'test.gif') 189 | var fileId = yield fdfs.upload(src) 190 | var fileInfo = yield fdfs.getFileInfo(fileId) 191 | expect(fileInfo.size).not.to.be(undefined) 192 | expect(fileInfo.timestamp).not.to.be(undefined) 193 | expect(fileInfo.crc32).not.to.be(undefined) 194 | expect(fileInfo.addr).not.to.be(undefined) 195 | yield fdfs.del(fileId) 196 | })(done) 197 | }) 198 | }) 199 | 200 | }) 
-------------------------------------------------------------------------------- /lib/tracker.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Author: chenboxiang 3 | * Date: 14-6-13 4 | * Time: 下午10:17 5 | */ 6 | 'use strict' 7 | 8 | var Buffer = require('buffer').Buffer 9 | var net = require('net') 10 | var EventEmitter = require('events').EventEmitter 11 | var util = require('util') 12 | var logger = require('./logger') 13 | var protocol = require('./protocol') 14 | var Storage = require('./storage') 15 | var helpers = require('./helpers') 16 | var is = require('is-type-of') 17 | 18 | 19 | function Tracker(config) { 20 | EventEmitter.call(this) 21 | 22 | this.config = config 23 | this._name = config.host + ':' + config.port 24 | } 25 | 26 | // extends from EventEmitter 27 | util.inherits(Tracker, EventEmitter) 28 | 29 | // ---------------- private methods 30 | 31 | Tracker.prototype._getConnection = function() { 32 | return this._newConnection() 33 | } 34 | 35 | Tracker.prototype._newConnection = function() { 36 | var self = this 37 | var socket = new net.Socket() 38 | logger.debug('connect to tracker server [%s]', this._name) 39 | socket.setTimeout(this.config.timeout) 40 | socket.connect(this.config.port, this.config.host) 41 | 42 | socket.on('error', function(err) { 43 | self.emit('error', err) 44 | }) 45 | 46 | socket.on('timeout', function() { 47 | socket.destroy() 48 | self.emit('error', new Error('connect to tracker server [' + self._name + '] timeout.')) 49 | }) 50 | 51 | socket.on('connect', function() { 52 | logger.debug('tracker server [%s] is connected', self._name) 53 | }) 54 | 55 | return socket 56 | } 57 | 58 | // ---------------- public methods 59 | 60 | /** 61 | * query storage server to upload file 62 | * 获取指定group的storage实例,如果不指定则tracker server会随机返回1个 63 | * @param group 64 | */ 65 | Tracker.prototype.getStoreStorage = function(group, callback) { 66 | // 验证group是否过长 67 | if (group && 
group.length > protocol.FDFS_GROUP_NAME_MAX_LEN) { 68 | throw new Error('group name [' + group + '] is too long') 69 | } 70 | logger.debug('get a upload storage server from tracker server: [%s]', this._name) 71 | 72 | var self = this 73 | var socket = this._getConnection() 74 | socket.on('connect', function() { 75 | // ----------- 获取1个可用storage server 信息 76 | // -------- 封装header并发送 77 | var command 78 | var bodyLength 79 | if (!group) { 80 | command = protocol.TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE 81 | bodyLength = 0 82 | 83 | } else { 84 | command = protocol.TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE 85 | bodyLength = protocol.FDFS_GROUP_NAME_MAX_LEN 86 | } 87 | var header = protocol.packHeader(command, bodyLength, 0) 88 | logger.debug('send header to tracker server [%s]', self._name) 89 | socket.write(header) 90 | 91 | // -------- 发送body 92 | if (group) { 93 | var body = new Buffer(bodyLength) 94 | // 默认都填充上0 95 | body.fill(0) 96 | var groupBL = Buffer.byteLength(group, self.config.charset) 97 | body.write(group, 0, groupBL, self.config.charset) 98 | logger.debug('send body to tracker server [%s]', self._name) 99 | socket.write(body) 100 | } 101 | }) 102 | 103 | protocol.recvPacket( 104 | socket, 105 | protocol.TRACKER_PROTO_CMD_RESP, 106 | protocol.TRACKER_QUERY_STORAGE_STORE_BODY_LEN, 107 | function(err, body) { 108 | if (null != err) { 109 | callback(err) 110 | return 111 | } 112 | 113 | var storageConfig = _parseStorage(body, self.config.charset, true) 114 | storageConfig.timeout = self.config.timeout 115 | storageConfig.charset = self.config.charset 116 | var storage = new Storage(storageConfig) 117 | logger.debug('get store storage server info: %j ', storage.config) 118 | callback(null, storage) 119 | } 120 | ) 121 | } 122 | 123 | /** 124 | * query which storage server to download the file 125 | * 若只传1个参数则认为是fileId 126 | * 127 | * * TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE 128 | # function: query which storage server to 
download the file 129 | # request body: 130 | @ FDFS_GROUP_NAME_MAX_LEN bytes: group name 131 | @ filename bytes: filename 132 | 133 | # response body: 134 | @ FDFS_GROUP_NAME_MAX_LEN bytes: group name 135 | @ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address 136 | @ FDFS_PROTO_PKG_LEN_SIZE bytes: storage server port 137 | * 138 | * @param group 139 | * @param filename 140 | */ 141 | Tracker.prototype.getFetchStorage = function(group, filename, callback) { 142 | logger.debug('get a fetch storage server from tracker server: [%s]', this._name) 143 | this.getFetchOrUpdateStorage(protocol.TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE, group, filename, callback) 144 | } 145 | 146 | Tracker.prototype.getUpdateStorage = function(group, filename, callback) { 147 | logger.debug('get a update storage server from tracker server: [%s]', this._name) 148 | this.getFetchOrUpdateStorage(protocol.TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE, group, filename, callback) 149 | } 150 | 151 | /** 152 | * # request body: 153 | @ FDFS_GROUP_NAME_MAX_LEN bytes: group name 154 | @ filename bytes: filename 155 | 156 | # response body: 157 | @ FDFS_GROUP_NAME_MAX_LEN bytes: group name 158 | @ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address 159 | @ FDFS_PROTO_PKG_LEN_SIZE bytes: storage server port 160 | * @param command TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE or TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE 161 | * @param group 162 | * @param filename 163 | * @param callback 164 | * @private 165 | */ 166 | Tracker.prototype.getFetchOrUpdateStorage = function(command, group, filename, callback) { 167 | if (is.function(filename)) { 168 | callback = filename 169 | var gf = helpers.id2gf(group) 170 | group = gf.group 171 | filename = gf.filename 172 | } 173 | logger.debug('group: %s, filename: %s', group, filename) 174 | var self = this 175 | var socket = this._getConnection() 176 | socket.on('connect', function() { 177 | var packet = protocol.packFileId(command, group, filename, self.config.charset) 
178 | socket.write(packet) 179 | }) 180 | 181 | protocol.recvPacket( 182 | socket, 183 | protocol.TRACKER_PROTO_CMD_RESP, 184 | protocol.FDFS_GROUP_NAME_MAX_LEN + protocol.FDFS_IPADDR_SIZE - 1 + protocol.FDFS_PROTO_PKG_LEN_SIZE, 185 | function(err, body) { 186 | if (null != err) { 187 | callback(err) 188 | return 189 | } 190 | var storageConfig = _parseStorage(body, self.config.charset) 191 | storageConfig.timeout = self.config.timeout 192 | storageConfig.charset = self.config.charset 193 | var storage = new Storage(storageConfig) 194 | logger.info('get storage server info: %j ', storage.config) 195 | callback(null, storage) 196 | } 197 | ) 198 | } 199 | 200 | // -------------- helper methods 201 | /** 202 | * @ FDFS_GROUP_NAME_MAX_LEN bytes: group name 203 | * @ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address 204 | * @ FDFS_PROTO_PKG_LEN_SIZE bytes: storage server port 205 | * @1 byte: store path index on the storage server {可以没有这个字节} 206 | * 207 | * @param {Buffer} body 208 | * @param {String} charset 209 | * @param {Boolean} hasPathIndex 210 | * @private {Object} 211 | */ 212 | function _parseStorage(body, charset, hasPathIndex) { 213 | var result = {} 214 | 215 | var group = helpers.trim(body.toString(charset, 0, protocol.FDFS_GROUP_NAME_MAX_LEN)) 216 | var ip = helpers.trim(body.toString(charset, protocol.FDFS_GROUP_NAME_MAX_LEN, protocol.FDFS_GROUP_NAME_MAX_LEN + protocol.FDFS_IPADDR_SIZE - 1)) 217 | var port = Number('0x' + body.toString('hex', 218 | protocol.FDFS_GROUP_NAME_MAX_LEN + protocol.FDFS_IPADDR_SIZE - 1, 219 | protocol.FDFS_GROUP_NAME_MAX_LEN + protocol.FDFS_IPADDR_SIZE - 1 + protocol.FDFS_PROTO_PKG_LEN_SIZE)) 220 | 221 | result.group = group 222 | result.host = ip 223 | result.port = port 224 | 225 | if (hasPathIndex && 226 | body.length > protocol.FDFS_GROUP_NAME_MAX_LEN + protocol.FDFS_IPADDR_SIZE - 1 + protocol.FDFS_PROTO_PKG_LEN_SIZE) { 227 | var storePathIndex = Number('0x' + body.toString('hex', 228 | protocol.FDFS_GROUP_NAME_MAX_LEN 
+ protocol.FDFS_IPADDR_SIZE - 1 + protocol.FDFS_PROTO_PKG_LEN_SIZE, 229 | protocol.FDFS_GROUP_NAME_MAX_LEN + protocol.FDFS_IPADDR_SIZE - 1 + protocol.FDFS_PROTO_PKG_LEN_SIZE + 1)) 230 | 231 | result.storePathIndex = storePathIndex 232 | } 233 | 234 | return result 235 | } 236 | 237 | module.exports = Tracker 238 | -------------------------------------------------------------------------------- /lib/fdfs.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Author: chenboxiang 3 | * Date: 14-6-13 4 | * Time: 下午8:42 5 | */ 6 | 'use strict' 7 | 8 | var _ = require('lodash') 9 | var Tracker = require('./tracker') 10 | var EventEmitter = require('events').EventEmitter 11 | var util = require('util') 12 | var logger = require('./logger') 13 | var fs = require('fs') 14 | var is = require('is-type-of') 15 | var path = require('path') 16 | var helpers = require('./helpers') 17 | var protocol = require('./protocol') 18 | 19 | var defaults = { 20 | charset: 'utf8', 21 | trackers: [], 22 | // 默认超时时间10s 23 | timeout: 10000, 24 | // 默认后缀 25 | // 当获取不到文件后缀时使用 26 | defaultExt: 'txt' 27 | } 28 | 29 | function FdfsClient(config) { 30 | EventEmitter.call(this) 31 | // config global logger 32 | if (config.logger) { 33 | logger.setLogger(config.logger) 34 | } 35 | this.config = _.extend({}, defaults, config) 36 | 37 | this._checkConfig() 38 | this._init() 39 | this._errorHandle() 40 | } 41 | 42 | // extends from EventEmitter 43 | util.inherits(FdfsClient, EventEmitter) 44 | 45 | // ------------- private methods 46 | /** 47 | * 确认配置是否合法 48 | * @private 49 | */ 50 | FdfsClient.prototype._checkConfig = function() { 51 | 52 | // ------------- 验证trackers是否合法 53 | if (!this.config.trackers) { 54 | throw new Error('you must specify "trackers" in config.') 55 | } 56 | 57 | if (!Array.isArray(this.config.trackers)) { 58 | this.config.trackers = [this.config.trackers] 59 | } 60 | 61 | if (this.config.trackers.length === 0) { 62 | throw new 
/**
 * Build one Tracker per configured tracker server and wire up error handling.
 * @private
 */
FdfsClient.prototype._init = function() {
    // --------- init trackers
    var client = this
    this._trackers = []

    this.config.trackers.forEach(function(trackerConfig) {
        // every tracker inherits the client-wide timeout and charset
        trackerConfig.timeout = client.config.timeout
        trackerConfig.charset = client.config.charset

        var tracker = new Tracker(trackerConfig)
        client._trackers.push(tracker)

        tracker.on('error', function(err) {
            logger.error(err)
            // evict the failing tracker from the pool
            client._trackers.splice(client._trackers.indexOf(tracker), 1)
            // nothing left to talk to: surface a client level error
            if (client._trackers.length === 0) {
                client.emit('error', new Error('There are no available trackers, please check your tracker config or your tracker server.'));
            }
        })
    })
}
/**
 * Pick the next available tracker in round-robin order.
 * @returns {Tracker} undefined when the tracker pool is empty
 * @private
 */
FdfsClient.prototype._getTracker = function() {
    // first call: start at the head of the list
    if (null == this._trackerIndex) {
        this._trackerIndex = 0
        return this._trackers[0]
    }

    // advance and wrap around when we run past the end
    var next = this._trackerIndex + 1
    if (next >= this._trackers.length) {
        next = 0
    }
    this._trackerIndex = next
    return this._trackers[next]
}
/**
 * Download a file.
 * @param fileId
 * @param options the target may also be passed directly in place of options
 *    options.target: where the downloaded stream is written, a local file name or a WritableStream;
 *                    when empty the callback is invoked for every chunk the server returns
 *    options.offset / options.bytes: set to download only a slice of the file
 * @param callback called once at the end when options.target is given,
 *                 otherwise called for every received chunk
 */
FdfsClient.prototype.download = function(fileId, options, callback) {
    if (!options || is.function(options)) {
        callback(new Error('options.target is not specified'))
        return
    }

    // the target was passed directly in place of an options object
    if (!options.target) {
        options = {target: options}
    }

    if (!(is.string(options.target) || is.writableStream(options.target))) {
        callback(new Error('options.target is invalid, it\'s type must be String or WritableStream'))
        // BUG FIX: previously fell through and continued with an invalid target
        return
    }

    if (is.string(options.target)) {
        options.target = fs.createWriteStream(options.target)
    }

    this._getTracker().getFetchStorage(fileId, function(err, storage) {
        // BUG FIX: err was ignored, leading to a TypeError on undefined storage
        if (null != err) {
            callback(err)
            return
        }
        storage.download(fileId, options, callback)
    })
}
// -------------- helpers
/**
 * Validate the upload "file" parameter and fill in derived options.
 * - String: the path must exist and point to a regular file; size and ext are derived.
 * - ReadableStream: options.size must be supplied by the caller.
 * - Buffer: size is taken from the buffer length.
 * @param file absolute file path, Buffer or ReadableStream
 * @param options mutated in place: size (and ext for file paths) are filled in
 * @param callback (err) invoked exactly once
 * @private
 */
function _normalizeUploadParams(file, options, callback) {
    if (!file) {
        callback(new Error('The "file" parameter is empty.'))
        return
    }

    if (!(is.string(file) || is.buffer(file) || is.readableStream(file))) {
        callback(new Error('The "file" parameter is invalid, it must be a String, Buffer, or ReadableStream'))
        return
    }

    if (is.string(file)) {
        fs.stat(file, function(err, stats) {
            if (err || !stats) {
                // BUG FIX: every stat failure used to be reported as "is not exists"
                callback(new Error('File [' + file + '] does not exist or cannot be accessed.'))
                return
            }
            // BUG FIX: directories previously passed this check and a bogus size was used
            if (!stats.isFile()) {
                callback(new Error('File [' + file + '] is not a regular file.'))
                return
            }

            options.size = stats.size
            if (!options.ext) {
                options.ext = path.extname(file)
                if (options.ext) {
                    // strip the leading dot
                    options.ext = options.ext.substring(1)
                }
            }
            callback(null)
        })
        return
    }

    if (is.readableStream(file) && !options.size) {
        callback(new Error('when the "file" parameter is a ReadableStream, options.size must be specified'))
        return
    }

    if (is.buffer(file)) {
        options.size = file.length
    }

    callback(null)
}
| TRACKER_PROTO_CMD_SERVER_DELETE_STORAGE: helpers.buildConstProp(93), 25 | 26 | TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE: helpers.buildConstProp(101), 27 | TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE: helpers.buildConstProp(102), 28 | TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE: helpers.buildConstProp(103), 29 | TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE: helpers.buildConstProp(104), 30 | TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL: helpers.buildConstProp(105), 31 | TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL: helpers.buildConstProp(106), 32 | TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL: helpers.buildConstProp(107), 33 | TRACKER_PROTO_CMD_RESP: helpers.buildConstProp(100), 34 | FDFS_PROTO_CMD_ACTIVE_TEST: helpers.buildConstProp(111), 35 | STORAGE_PROTO_CMD_UPLOAD_FILE: helpers.buildConstProp(11), 36 | STORAGE_PROTO_CMD_DELETE_FILE: helpers.buildConstProp(12), 37 | STORAGE_PROTO_CMD_SET_METADATA: helpers.buildConstProp(13), 38 | STORAGE_PROTO_CMD_DOWNLOAD_FILE: helpers.buildConstProp(14), 39 | STORAGE_PROTO_CMD_GET_METADATA: helpers.buildConstProp(15), 40 | STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE: helpers.buildConstProp(21), 41 | STORAGE_PROTO_CMD_QUERY_FILE_INFO: helpers.buildConstProp(22), 42 | STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE: helpers.buildConstProp(23), //create appender file 43 | STORAGE_PROTO_CMD_APPEND_FILE: helpers.buildConstProp(24), //append file 44 | STORAGE_PROTO_CMD_MODIFY_FILE: helpers.buildConstProp(34), //modify appender file 45 | STORAGE_PROTO_CMD_TRUNCATE_FILE: helpers.buildConstProp(36), //truncate appender file 46 | 47 | 48 | FDFS_STORAGE_STATUS_INIT: helpers.buildConstProp(0), 49 | FDFS_STORAGE_STATUS_WAIT_SYNC: helpers.buildConstProp(1), 50 | FDFS_STORAGE_STATUS_SYNCING: helpers.buildConstProp(2), 51 | FDFS_STORAGE_STATUS_IP_CHANGED: helpers.buildConstProp(3), 52 | FDFS_STORAGE_STATUS_DELETED: helpers.buildConstProp(4), 53 | FDFS_STORAGE_STATUS_OFFLINE: helpers.buildConstProp(5), 54 | 
FDFS_STORAGE_STATUS_ONLINE: helpers.buildConstProp(6), 55 | FDFS_STORAGE_STATUS_ACTIVE: helpers.buildConstProp(7), 56 | FDFS_STORAGE_STATUS_NONE: helpers.buildConstProp(99), 57 | 58 | /** 59 | * for overwrite all old metadata 60 | */ 61 | STORAGE_SET_METADATA_FLAG_OVERWRITE: helpers.buildConstProp('O'), 62 | 63 | /** 64 | * for replace, insert when the meta item not exist, otherwise update it 65 | */ 66 | STORAGE_SET_METADATA_FLAG_MERGE: helpers.buildConstProp('M'), 67 | 68 | FDFS_PROTO_PKG_LEN_SIZE: helpers.buildConstProp(8), 69 | FDFS_PROTO_CMD_SIZE: helpers.buildConstProp(1), 70 | FDFS_GROUP_NAME_MAX_LEN: helpers.buildConstProp(16), 71 | FDFS_IPADDR_SIZE: helpers.buildConstProp(16), 72 | FDFS_DOMAIN_NAME_MAX_SIZE: helpers.buildConstProp(128), 73 | FDFS_VERSION_SIZE: helpers.buildConstProp(6), 74 | FDFS_STORAGE_ID_MAX_SIZE: helpers.buildConstProp(16), 75 | 76 | FDFS_RECORD_SEPERATOR: helpers.buildConstProp('\u0001'), 77 | FDFS_FIELD_SEPERATOR: helpers.buildConstProp('\u0002'), 78 | 79 | FDFS_FILE_EXT_NAME_MAX_LEN: helpers.buildConstProp(6), 80 | FDFS_FILE_PREFIX_MAX_LEN: helpers.buildConstProp(16), 81 | FDFS_FILE_PATH_LEN: helpers.buildConstProp(10), 82 | FDFS_FILENAME_BASE64_LENGTH: helpers.buildConstProp(27), 83 | FDFS_TRUNK_FILE_INFO_LEN: helpers.buildConstProp(16), 84 | 85 | ERR_NO_ENOENT: helpers.buildConstProp(2), 86 | ERR_NO_EIO: helpers.buildConstProp(5), 87 | ERR_NO_EBUSY: helpers.buildConstProp(16), 88 | ERR_NO_EINVAL: helpers.buildConstProp(22), 89 | ERR_NO_ENOSPC: helpers.buildConstProp(28), 90 | ECONNREFUSED: helpers.buildConstProp(61), 91 | ERR_NO_EALREADY: helpers.buildConstProp(114), 92 | 93 | // 成功的STATUS 94 | HEADER_STATUS_SUCCESS: helpers.buildConstProp(0) 95 | }) 96 | 97 | Object.defineProperties(protocol, { 98 | STORAGE_PROTO_CMD_RESP: helpers.buildConstProp(protocol.TRACKER_PROTO_CMD_RESP), 99 | 100 | TRACKER_QUERY_STORAGE_FETCH_BODY_LEN: helpers.buildConstProp(protocol.FDFS_GROUP_NAME_MAX_LEN + protocol.FDFS_IPADDR_SIZE - 1 + 
protocol.FDFS_PROTO_PKG_LEN_SIZE), 101 | TRACKER_QUERY_STORAGE_STORE_BODY_LEN: helpers.buildConstProp(protocol.FDFS_GROUP_NAME_MAX_LEN + protocol.FDFS_IPADDR_SIZE + protocol.FDFS_PROTO_PKG_LEN_SIZE), 102 | 103 | PROTO_HEADER_CMD_INDEX: helpers.buildConstProp(protocol.FDFS_PROTO_PKG_LEN_SIZE), 104 | PROTO_HEADER_STATUS_INDEX: helpers.buildConstProp(protocol.FDFS_PROTO_PKG_LEN_SIZE + 1), 105 | 106 | HEADER_BYTE_LENGTH: helpers.buildConstProp(protocol.FDFS_PROTO_PKG_LEN_SIZE + 2) 107 | }) 108 | 109 | // 协议相关的封装方法 110 | _.extend(protocol, { 111 | /** 112 | * 封装协议头 113 | * @param command 114 | * @param bodyLength 115 | * @param status 116 | * @return {Buffer} 117 | */ 118 | packHeader: function(command, bodyLength, status) { 119 | if (null == bodyLength) { 120 | bodyLength = 0 121 | } 122 | 123 | if (null == status) { 124 | status = 0 125 | } 126 | 127 | // ----------- 存放1字节的command和1字节的status 128 | var buffer = new Buffer(2) 129 | buffer.writeUInt8(command, 0) 130 | buffer.writeUInt8(status, 1) 131 | 132 | // 生成8bytes存放body length的buffer 133 | var blBuffer = helpers.number2Buffer(bodyLength, 8) 134 | 135 | // 拼接 136 | return Buffer.concat([blBuffer, buffer]) 137 | }, 138 | 139 | /** 140 | * 解析返回的包 141 | * @param socket 142 | * @param expectedCommand 143 | * @param expectedBodyLength 144 | * @param callback 145 | * @param headerOnly 是否指parse header,下载文件时,解析完header后需将data传递给callback,而非将body接收完 146 | */ 147 | recvPacket: function(socket, expectedCommand, expectedBodyLength, callback, headerOnly) { 148 | var oriCallback = callback 149 | // 收包完毕则关闭掉连接 150 | callback = function() { 151 | cleanup() 152 | if (is.function(oriCallback)) { 153 | oriCallback.apply(null, arguments) 154 | } 155 | } 156 | var headerBufferLen = protocol.HEADER_BYTE_LENGTH 157 | var headerBuffer = new Buffer(headerBufferLen) 158 | // 已填充的length 159 | var headerBufferFilled = 0 160 | // 解析后的header信息 161 | // {status: , bodyLength: } 162 | var header 163 | var bodyBuffer 164 | // 
下1次要copy到bodyBuffer中的起始位置 165 | var bodyBufferStart = 0 166 | 167 | socket.on('data', listener) 168 | 169 | function listener(data) { 170 | // --------------- 收到服务器发来消息 171 | // -------- 解析header 172 | if (!header) { 173 | // header parsed 174 | if (headerBufferFilled + data.length >= headerBufferLen) { 175 | var len = headerBufferFilled + data.length 176 | // 只copy剩下的header部分 177 | data.copy(headerBuffer, headerBufferFilled, 0, headerBufferLen - headerBufferFilled) 178 | try { 179 | header = _parseHeader(headerBuffer, expectedCommand, expectedBodyLength) 180 | logger.debug('receive server packet header: %j', header) 181 | // 无body时直接返回 182 | if (header.bodyLength === 0) { 183 | callback(null, header) 184 | return 185 | } 186 | 187 | if (headerOnly) { 188 | oriCallback(null, header) 189 | if (len > headerBufferLen) { 190 | oriCallback(null, data.slice(headerBufferLen - headerBufferFilled)) 191 | } 192 | return 193 | } 194 | 195 | bodyBuffer = new Buffer(header.bodyLength) 196 | 197 | // 还有body的数据则填充到body buffer中 198 | if (len > headerBufferLen) { 199 | data.copy(bodyBuffer, 0, headerBufferLen - headerBufferFilled) 200 | bodyBufferStart = len - headerBufferLen 201 | // 读取完毕 202 | if (bodyBufferStart >= header.bodyLength) { 203 | callback(null, bodyBuffer) 204 | } 205 | } 206 | 207 | } catch (err) { 208 | callback(err) 209 | } 210 | 211 | } else { 212 | data.copy(headerBuffer, headerBufferFilled) 213 | headerBufferFilled += data.length 214 | } 215 | 216 | } else { 217 | // 交由外部处理 218 | if (headerOnly) { 219 | oriCallback(null, data) 220 | return 221 | } 222 | // ---------- 解析body 223 | data.copy(bodyBuffer, bodyBufferStart) 224 | bodyBufferStart += data.length 225 | // 读取完毕 226 | if (bodyBufferStart >= header.bodyLength) { 227 | callback(null, bodyBuffer) 228 | } 229 | } 230 | } 231 | 232 | function cleanup() { 233 | socket.removeListener('data', listener) 234 | // 关闭连接 235 | protocol.closeSocket(socket) 236 | } 237 | }, 238 | 239 | /** 240 | * 给服务器发送关闭指令,同时end 
/**
 * Parse and validate a response packet header.
 *
 * Header layout: FDFS_PROTO_PKG_LEN_SIZE bytes body length (big-endian hex),
 * then 1 byte command, then 1 byte status.
 *
 * @param {Buffer} headerBuffer
 * @param {Number} expectedCommand
 * @param {Number} expectedBodyLength pass null to skip the body length check
 * @returns {Object} {status, bodyLength}
 * @throws {Error} on unexpected size, command, status or body length
 */
function _parseHeader(headerBuffer, expectedCommand, expectedBodyLength) {
    // validate buffer length
    var expectedSize = protocol.FDFS_PROTO_PKG_LEN_SIZE + 2
    if (headerBuffer.length !== expectedSize) {
        // BUG FIX: the old message concatenated "... + SIZE + 2" into the string "82"
        throw new Error('receive packet size ' + headerBuffer.length + ' is not equal to the expected header size: ' + expectedSize)
    }

    // validate command
    var command = Number('0x' + headerBuffer.toString('hex', protocol.PROTO_HEADER_CMD_INDEX, protocol.PROTO_HEADER_CMD_INDEX + 1))
    if (expectedCommand !== command) {
        throw new Error('receive command: ' + command + ' is not equal to the expected command: ' + expectedCommand)
    }

    // a non-zero status is an application level error reported by the server
    var status = Number('0x' + headerBuffer.toString('hex', protocol.PROTO_HEADER_STATUS_INDEX, protocol.PROTO_HEADER_STATUS_INDEX + 1))
    if (status !== protocol.HEADER_STATUS_SUCCESS) {
        throw new Error('receive packet errno is: ' + status)
    }

    // validate body length (8 bytes may exceed 2^32, hence BigNumber)
    var bodyLength = new BigNumber(headerBuffer.toString('hex', 0, protocol.FDFS_PROTO_PKG_LEN_SIZE), 16).toNumber()
    if (null != expectedBodyLength && expectedBodyLength !== bodyLength) {
        throw new Error('receive packet body length: ' + bodyLength + ' is not equal to the expected: ' + expectedBodyLength)
    }

    return {
        status: protocol.HEADER_STATUS_SUCCESS,
        bodyLength: bodyLength
    }
}
/**
 * Open a fresh socket to this storage server.
 * Connection errors and timeouts are re-emitted as 'error' on the Storage instance.
 * @returns {net.Socket}
 * @private
 */
Storage.prototype._newConnection = function() {
    var storage = this
    logger.debug('connect to storage server [%s].', this._name)

    var socket = new net.Socket()
    socket.setTimeout(this.config.timeout)

    socket.on('connect', function() {
        logger.debug('storage server [%s] is connected', storage._name)
    })

    socket.on('timeout', function() {
        // a stalled connection is useless: tear it down and report
        socket.destroy()
        storage.emit('error', new Error('connect to storage server [' + storage._name + '] timeout.'))
    })

    socket.on('error', function(err) {
        storage.emit('error', err)
    })

    socket.connect(this.config.port, this.config.host)
    return socket
}
/**
 * Upload a file to this storage server.
 *
 * # request body:
 *  @ 1 byte: store path index on the storage server
 *  @ FDFS_PROTO_PKG_LEN_SIZE bytes: file size
 *  @ FDFS_FILE_EXT_NAME_MAX_LEN bytes: file ext name, do not include dot (.)
 *  @ file size bytes: file content
 *
 * # response body:
 *  @ FDFS_GROUP_NAME_MAX_LEN bytes: group name
 *  @ filename bytes: filename
 *
 * @param file absolute file path, Buffer or ReadableStream
 * @param options options.size and options.ext must already be normalized
 * @param callback (err, fileId)
 */
Storage.prototype.upload = function(file, options, callback) {
    var self = this
    var socket = this._getConnection()
    socket.on('connect', function() {
        logger.debug('start upload file to storage server [%s]', self._name)
        // ------------- pack and send the header
        var command = protocol.STORAGE_PROTO_CMD_UPLOAD_FILE
        var bodyLength = 1 + protocol.FDFS_PROTO_PKG_LEN_SIZE + protocol.FDFS_FILE_EXT_NAME_MAX_LEN + options.size
        var header = protocol.packHeader(command, bodyLength, 0)
        socket.write(header)

        // ------------- pack and send the body
        // ------ everything except the file content
        var buffer = new Buffer(1 + protocol.FDFS_PROTO_PKG_LEN_SIZE + protocol.FDFS_FILE_EXT_NAME_MAX_LEN)
        buffer.fill(0)
        // BUG FIX: storePathIndex is absent when the tracker response carried no
        // path index byte; writeUInt8(undefined) throws on recent Node versions
        buffer.writeUInt8(self.config.storePathIndex || 0, 0)
        helpers.number2Buffer(options.size, protocol.FDFS_PROTO_PKG_LEN_SIZE).copy(buffer, 1)
        var extBL = Buffer.byteLength(options.ext, self.config.charset)
        buffer.write(options.ext, 1 + protocol.FDFS_PROTO_PKG_LEN_SIZE, extBL, self.config.charset)

        socket.write(buffer)

        // ------ send the file content
        if (is.string(file)) {
            file = fs.createReadStream(file)
        }

        // buffer
        if (is.buffer(file)) {
            socket.write(file)

        // stream: keep the socket open, the protocol quit command is sent later
        } else {
            file.pipe(socket, {end: false})
        }
    })

    protocol.recvPacket(
        socket,
        protocol.STORAGE_PROTO_CMD_RESP,
        null,
        function(err, body) {
            if (null != err) {
                callback(err)
                return
            }

            // the response must at least contain the group name
            if (body.length <= protocol.FDFS_GROUP_NAME_MAX_LEN) {
                callback(new Error('response body length: ' + body.length + ' <= ' + protocol.FDFS_GROUP_NAME_MAX_LEN))
                return
            }

            var fileId = _parseFileId(body, self.config.charset)
            callback(null, fileId)
        })
}
/**
 * STORAGE_PROTO_CMD_GET_METADATA
 * # function: get meta data from storage server
 * # request body:
 *  @ FDFS_GROUP_NAME_MAX_LEN bytes: group name
 *  @ filename bytes: filename
 * # response body:
 *  @ meta data buff, each meta data seperated by \x01, name and value seperated by \x02
 * @param fileId
 * @param callback (err, metaData)
 */
Storage.prototype.getMetaData = function(fileId, callback) {
    var storage = this
    var parts = helpers.id2gf(fileId)
    var conn = this._getConnection()

    conn.on('connect', function() {
        conn.write(protocol.packFileId(
            protocol.STORAGE_PROTO_CMD_GET_METADATA,
            parts.group,
            parts.filename,
            storage.config.charset))
    })

    protocol.recvPacket(
        conn,
        protocol.STORAGE_PROTO_CMD_RESP,
        null,
        function(err, body) {
            if (err) {
                callback(err)
                return
            }

            var raw = body.toString(storage.config.charset)
            // an empty body means the file carries no metadata
            callback(null, raw ? protocol.parseMetaData(raw) : raw)
        })
}
/**
 * Delete the file identified by fileId.
 * STORAGE_PROTO_CMD_DELETE_FILE
 * # request body:
 *  @ FDFS_GROUP_NAME_MAX_LEN bytes: group name
 *  @ filename bytes: filename
 * # response body: none
 * @param fileId
 * @param callback (err)
 */
Storage.prototype.del = function(fileId, callback) {
    var storage = this
    var parts = helpers.id2gf(fileId)
    var conn = this._getConnection()

    conn.on('connect', function() {
        var packet = protocol.packFileId(
            protocol.STORAGE_PROTO_CMD_DELETE_FILE,
            parts.group,
            parts.filename,
            storage.config.charset)
        conn.write(packet)
    })

    // a zero length body is expected on success
    protocol.recvPacket(conn, protocol.STORAGE_PROTO_CMD_RESP, 0, callback)
}
/**
 * STORAGE_PROTO_CMD_QUERY_FILE_INFO
 * # function: query file info from storage server
 * # request body:
 *  @ FDFS_GROUP_NAME_MAX_LEN bytes: group name
 *  @ filename bytes: filename
 * # response body:
 *  @ FDFS_PROTO_PKG_LEN_SIZE bytes: file size
 *  @ FDFS_PROTO_PKG_LEN_SIZE bytes: file create timestamp
 *  @ FDFS_PROTO_PKG_LEN_SIZE bytes: file CRC32 signature
 *  @ FDFS_IPADDR_SIZE bytes: file source ip addr
 * @param fileId
 * @param callback (err, {size, timestamp, crc32, addr})
 */
Storage.prototype.getFileInfo = function(fileId, callback) {
    var storage = this
    var parts = helpers.id2gf(fileId)
    var conn = this._getConnection()
    var charset = this.config.charset
    var FIELD = protocol.FDFS_PROTO_PKG_LEN_SIZE

    conn.on('connect', function() {
        conn.write(protocol.packFileId(protocol.STORAGE_PROTO_CMD_QUERY_FILE_INFO, parts.group, parts.filename, charset))
    })

    protocol.recvPacket(
        conn,
        protocol.STORAGE_PROTO_CMD_RESP,
        FIELD * 3 + protocol.FDFS_IPADDR_SIZE,
        function(err, body) {
            if (err) {
                callback(err)
                return
            }

            callback(null, {
                // the size may exceed Number's safe range, keep it as a decimal string
                size: new BigNumber(body.toString('hex', 0, FIELD), 16).toString(10),
                timestamp: new BigNumber(body.toString('hex', FIELD, FIELD * 2), 16).toNumber(),
                crc32: new BigNumber(body.toString('hex', FIELD * 2, FIELD * 3), 16).toNumber(),
                addr: helpers.trim(body.toString(charset, FIELD * 3))
            })
        })
}