├── .travis.yml ├── index.js ├── .gitignore ├── package.json ├── tests └── testBosonnlp.js ├── lib └── bosonnlp.js └── README.md /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "node" 4 | - "0.10.32" -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | exports.BosonNLP = require('./lib/bosonnlp'); -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | 5 | # Runtime data 6 | pids 7 | *.pid 8 | *.seed 9 | 10 | # Directory for instrumented libs generated by jscoverage/JSCover 11 | lib-cov 12 | 13 | # Coverage directory used by tools like istanbul 14 | coverage 15 | 16 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 17 | .grunt 18 | 19 | # Compiled binary addons (http://nodejs.org/api/addons.html) 20 | build/Release 21 | 22 | # Dependency directory 23 | # Deployed apps should consider commenting this line out: 24 | # see https://npmjs.org/doc/faq.html#Should-I-check-my-node_modules-folder-into-git 25 | node_modules 26 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bosonnlp", 3 | "version": "0.1.0", 4 | "description": "bosonnlp node sdk.", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "./node_modules/nodeunit/bin/nodeunit tests/test*" 8 | }, 9 | "devDependencies": { 10 | "nodeunit": "*" 11 | }, 12 | "repository": { 13 | "type": "git", 14 | "url": "https://github.com/liwenzhu/bosonnlp.git" 15 | }, 16 | "keywords": [ 17 | "bosonnlp" 18 | ], 19 | "author": "VinceLee ", 20 | "license": "ISC", 21 | 
"bugs": { 22 | "url": "https://github.com/liwenzhu/bosonnlp/issues" 23 | }, 24 | "homepage": "https://github.com/liwenzhu/bosonnlp" 25 | } 26 | -------------------------------------------------------------------------------- /tests/testBosonnlp.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var bosonnlp = require('../index'); 4 | 5 | // This API_TOKEN is only for travis ci 6 | var boson = new bosonnlp.BosonNLP("59G4ZvQp.2193.0YmDde8uiv3e"); 7 | 8 | var ENTITY_START_POSITION_INDEX = 0; 9 | var ENTITY_END_POSITION_INDEX = 1; 10 | var ENTITY_TYPE = 2; 11 | 12 | function getEntityWord(word, entity) { 13 | var wordStartPosition = entity[ENTITY_START_POSITION_INDEX]; 14 | var wordEndPosition = entity[ENTITY_END_POSITION_INDEX]; 15 | return word.slice(wordStartPosition, wordEndPosition).join(''); 16 | } 17 | 18 | exports.testPunctuation = function (test) { 19 | boson.ner("[成都商报]记者 姚永忠", function (data) { 20 | data = JSON.parse(data)[0]; 21 | var entity = data.entity[0]; 22 | test.equal(data.word.slice(entity[0], entity[1]).join(''), "成都商报"); 23 | test.equal(entity[2], "product_name"); 24 | }); 25 | 26 | boson.ner("成都商报,记者 姚永忠", function (data) { 27 | data = JSON.parse(data)[0]; 28 | var entity = data.entity[0]; 29 | test.equal(data.word.slice(entity[0], entity[1]).join(''), "成都商报"); 30 | test.equal(entity[2], "product_name"); 31 | test.done(); 32 | }) 33 | } 34 | 35 | exports.testNerSingle = function (test) { 36 | var text = "成都商报记者 姚永忠"; 37 | boson.ner(text, function (data) { 38 | data = JSON.parse(data)[0]; 39 | var entity = data.entity[0]; 40 | test.equal(data.word.slice(entity[0], entity[1]).join(''), "成都商报"); 41 | test.equal(entity[2], "product_name"); 42 | test.done(); 43 | }); 44 | }; 45 | 46 | exports.testNerMulti = function (test) { 47 | var text = ["对于该小孩是不是郑尚金的孩子,目前已做亲子鉴定,结果还没出来,", "纪检部门仍在调查之中。成都商报记者 姚永忠"]; 48 | boson.ner(text, function (data) { 49 | data = JSON.parse(data); 50 | var entity = 
data[0].entity[0]; 51 | test.equal(getEntityWord(data[0].word, entity), "郑尚金"); 52 | test.equal(entity[ENTITY_TYPE], "person_name"); 53 | entity = data[1].entity[0] 54 | test.equal(getEntityWord(data[1].word, entity), "成都商报"); 55 | test.equal(entity[ENTITY_TYPE], "product_name"); 56 | entity = data[1].entity[1] 57 | test.equal(getEntityWord(data[1].word, entity), "记者"); 58 | test.equal(entity[ENTITY_TYPE], "job_title"); 59 | entity = data[1].entity[2] 60 | test.equal(getEntityWord(data[1].word, entity), "姚永忠"); 61 | test.equal(entity[ENTITY_TYPE], "person_name"); 62 | test.done(); 63 | }); 64 | }; 65 | 66 | exports.testTagSingle = function (test) { 67 | var text = "这个世界好复杂"; 68 | boson.tag(text, function (data) { 69 | data = JSON.parse(data)[0]; 70 | test.deepEqual(data.tag, ["r", "n", "d", "a"]); 71 | test.deepEqual(data.word, ["这个", "世界", "好", "复杂"]); 72 | test.done(); 73 | }); 74 | }; 75 | 76 | exports.testTagMulti = function (test) { 77 | var text = ['这个世界好复杂', '计算机是科学么']; 78 | boson.tag(text, function (data) { 79 | data = JSON.parse(data); 80 | test.deepEqual(data[0].tag, ["r", "n", "d", "a"]); 81 | test.deepEqual(data[0].word, ["这个", "世界", "好", "复杂"]); 82 | test.deepEqual(data[1].tag, ["n", "vshi", "n", "y"]); 83 | test.deepEqual(data[1].word, ["计算机", "是", "科学", "么"]); 84 | test.done(); 85 | }); 86 | }; 87 | 88 | exports.testExtractKeywordsSingle = function (test) { 89 | var text = ["病毒式媒体网站:让新闻迅速蔓延"]; 90 | var WORD_INDEX = 1; 91 | boson.extractKeywords(text, function (data) { 92 | data = JSON.parse(data)[0]; 93 | // result of data is [weight, word] 94 | test.equal(data[0][WORD_INDEX],'蔓延'); 95 | test.equal(data[1][WORD_INDEX],'病毒'); 96 | test.equal(data[2][WORD_INDEX],'迅速'); 97 | test.equal(data[3][WORD_INDEX],'网站'); 98 | test.equal(data[4][WORD_INDEX],'新闻'); 99 | test.equal(data[5][WORD_INDEX],'媒体'); 100 | test.equal(data[6][WORD_INDEX],'式'); 101 | test.equal(data[7][WORD_INDEX],'让'); 102 | test.done(); 103 | }); 104 | }; 105 | 106 | exports.testSentiment = 
function (test) { 107 | var text = ['他是个傻逼','美好的世界']; 108 | boson.sentiment(text, function (data) { 109 | test.equal(true, !!data) 110 | test.done(); 111 | }); 112 | }; 113 | 114 | exports.testDepparser = function (test) { 115 | var text = ['我以最快的速度吃了午饭'] 116 | boson.depparser(text, function (data) { 117 | data = JSON.parse(data)[0] 118 | var head = data.head; 119 | var role = data.role; 120 | var tag = data.tag; 121 | var word = data.word; 122 | test.deepEqual(head, [6, 6, 3, 4, 5, 1, -1, 6, 6]); 123 | test.deepEqual(role, ["SBJ", "MNR", "VMOD", "DEC", "NMOD", "POBJ", "ROOT", "VMOD", "OBJ"]); 124 | test.deepEqual(tag, ["PN", "P", "AD", "VA", "DEC", "NN", "VV", "AS", "NN"]); 125 | test.deepEqual(word, ["我", "以", "最", "快", "的", "速度", "吃", "了", "午饭"]); 126 | test.done(); 127 | }); 128 | }; 129 | 130 | exports.testClassify = function (test) { 131 | var text = ['俄否决安理会谴责叙军战机空袭阿勒颇平民', 132 | '邓紫棋谈男友林宥嘉:我觉得我比他唱得好', 133 | 'Facebook收购印度初创公司']; 134 | boson.classify(text, function (data) { 135 | test.deepEqual(JSON.parse(data), [5, 4, 8]); 136 | test.done(); 137 | }); 138 | }; 139 | 140 | exports.testSuggest = function (test) { 141 | var term = '粉丝'; 142 | var options = {}; 143 | options.top_k = 2; 144 | boson.suggest(term, options, function (data) { 145 | boson.suggest(term, function(data){ 146 | test.done(); 147 | }); 148 | }); 149 | }; 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | -------------------------------------------------------------------------------- /lib/bosonnlp.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var http = require('http'); 4 | 5 | var postOptions = { 6 | host: "api.bosonnlp.com", 7 | port: 80, 8 | method: "POST", 9 | headers: { 10 | "Content-Type": "application/json", 11 | "Accept": "application/json", 12 | } 13 | }; 14 | 15 | module.exports = BosonNLP; 16 | 17 | function BosonNLP (token) { 18 | 
postOptions.headers["X-Token"] = token; 19 | }; 20 | 21 | BosonNLP.prototype.tag = function (data, callback) { 22 | postOptions.path = "/tag/analysis"; 23 | var datas = parseDatas(data); 24 | sendPost(postOptions, datas, callback); 25 | }; 26 | 27 | BosonNLP.prototype.ner = function (data, callback) { 28 | postOptions.path = "/ner/analysis"; 29 | var datas = parseDatas(data); 30 | sendPost(postOptions, datas, callback); 31 | }; 32 | 33 | BosonNLP.prototype.extractKeywords = function (data, callback) { 34 | postOptions.path = "/keywords/analysis"; 35 | var datas = parseDatas(data); 36 | sendPost(postOptions, datas, callback); 37 | }; 38 | 39 | BosonNLP.prototype.sentiment = function (data, callback) { 40 | postOptions.path = "/sentiment/analysis"; 41 | var datas = parseDatas(data); 42 | sendPost(postOptions, datas, callback); 43 | }; 44 | 45 | BosonNLP.prototype.depparser = function (data, callback) { 46 | postOptions.path = "/depparser/analysis"; 47 | var datas = parseDatas(data); 48 | sendPost(postOptions, datas, callback); 49 | }; 50 | 51 | BosonNLP.prototype.classify = function (data, callback) { 52 | postOptions.path = "/classify/analysis"; 53 | var datas = parseDatas(data); 54 | sendPost(postOptions, datas, callback); 55 | }; 56 | 57 | BosonNLP.prototype.rate_limit_status = function ( callback) { 58 | postOptions.path = "/application/rate_limit_status.json"; 59 | // 查询API 频率限制 60 | sendPost(postOptions, null, callback); 61 | }; 62 | BosonNLP.prototype.suggest = function (data, options, callback) { 63 | if (!callback) { 64 | callback = options; 65 | options = {}; 66 | } 67 | callback = callback || function(){}; 68 | options.top_k = options.top_k || 10; 69 | postOptions.path = "/suggest/analysis?top_k=" + options.top_k; 70 | var datas = parseDatas(data); 71 | sendPost(postOptions, datas, callback); 72 | }; 73 | 74 | function parseDatas (data) { 75 | var datas = []; 76 | if (Array.isArray(data)) { 77 | for (var i = 0; i < data.length; i++) { 78 | 
datas.push(encodeString(data[i])); 79 | } 80 | } else { 81 | datas.push(encodeString(data)); 82 | } 83 | return '[' + datas.toString() + ']'; 84 | }; 85 | 86 | function sendPost (options, body, callback) { 87 | var postReq = http.request(options, function (res) { 88 | var data = []; 89 | res.setEncoding('utf8'); 90 | res.on('data', function (chunk) { 91 | data.push(chunk); 92 | }); 93 | res.on('end', function () { 94 | callback(data.join('')) 95 | }); 96 | }); 97 | 98 | postReq.end(body); 99 | }; 100 | 101 | function encodeString (data) { 102 | data = "\"" + escape(data).replace(/\%/g, '\\') + "\""; 103 | data = restorePunctuation(data); 104 | return data; 105 | }; 106 | 107 | function restorePunctuation (data) { 108 | data = data.replace(/\\A0/g, " "); 109 | data = data.replace(/\\A2/g, "\\u00A2"); 110 | data = data.replace(/\\A3/g, "\\u00A3"); 111 | data = data.replace(/\\A4/g, "\\u00A4"); 112 | data = data.replace(/\\A5/g, "\\u00A5"); 113 | data = data.replace(/\\A6/g, "\\u00A6"); 114 | data = data.replace(/\\A7/g, "\\u00A7"); 115 | data = data.replace(/\\A8/g, "\\u00A8"); 116 | data = data.replace(/\\A9/g, "\\u00A9"); 117 | data = data.replace(/\\AA/g, "\\u00AA"); 118 | data = data.replace(/\\AB/g, "\\u00AB"); 119 | data = data.replace(/\\AC/g, "\\u00AC"); 120 | data = data.replace(/\\AD/g, "\\u00AD"); 121 | data = data.replace(/\\AE/g, "\\u00AE"); 122 | data = data.replace(/\\AF/g, "\\u00AF"); 123 | data = data.replace(/\\B0/g, "\\u00B0"); 124 | data = data.replace(/\\B1/g, "\\u00B1"); 125 | data = data.replace(/\\B2/g, "\\u00B2"); 126 | data = data.replace(/\\B3/g, "\\u00B3"); 127 | data = data.replace(/\\B4/g, "\\u00B4"); 128 | data = data.replace(/\\B5/g, "\\u00B5"); 129 | data = data.replace(/\\B6/g, "\\u00B6"); 130 | data = data.replace(/\\B7/g, "."); 131 | data = data.replace(/\\B8/g, "\\u00B8"); 132 | data = data.replace(/\\B9/g, "\\u00B9"); 133 | data = data.replace(/\\BA/g, "\\u00BA"); 134 | data = data.replace(/\\BB/g, "\\u00BB"); 135 | data = 
data.replace(/\\BC/g, "\\u00BC"); 136 | data = data.replace(/\\BD/g, "\\u00BD"); 137 | data = data.replace(/\\BE/g, "\\u00BE"); 138 | data = data.replace(/\\BF/g, "\\u00BF"); 139 | data = data.replace(/\\D7/g, " "); 140 | data = data.replace(/\\0A/g, " "); 141 | data = data.replace(/\\0D/g, " "); 142 | data = data.replace(/\\20/g, " "); 143 | data = data.replace(/\\21/g, "!"); 144 | data = data.replace(/\\22/g, "\""); 145 | data = data.replace(/\\23/g, "#"); 146 | data = data.replace(/\\24/g, "$"); 147 | data = data.replace(/\\25/g, "%"); 148 | data = data.replace(/\\26/g, "&"); 149 | data = data.replace(/\\27/g, "\'"); 150 | data = data.replace(/\\28/g, "("); 151 | data = data.replace(/\\29/g, ")"); 152 | data = data.replace(/\\2C/g, ","); 153 | data = data.replace(/\\3A/g, ":"); 154 | data = data.replace(/\\3B/g, ";"); 155 | data = data.replace(/\\3C/g, "<"); 156 | data = data.replace(/\\3D/g, "="); 157 | data = data.replace(/\\3E/g, ">"); 158 | data = data.replace(/\\3F/g, "?"); 159 | data = data.replace(/\\5B/g, "["); 160 | data = data.replace(/\\5D/g, "]"); 161 | data = data.replace(/\\5E/g, "^"); 162 | data = data.replace(/\\7B/g, "{"); 163 | data = data.replace(/\\7C/g, "|"); 164 | data = data.replace(/\\7D/g, "}"); 165 | data = data.replace(/\\7E/g, "~"); 166 | return data; 167 | }; 168 | 169 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | BosonNLP 2 | ======== 3 | 4 | BosonNLP is a node sdk for http://bosonnlp.com . 
5 | 6 | [![Build Status](https://travis-ci.org/liwenzhu/bosonnlp.svg?branch=master)](https://travis-ci.org/liwenzhu/bosonnlp) 7 | [![npm](https://img.shields.io/npm/v/bosonnlp.svg)](https://npmjs.org/package/bosonnlp) 8 | [![NPM Downloads](https://img.shields.io/npm/dt/bosonnlp.svg)](https://npmjs.org/package/bosonnlp) 9 | 10 | 11 | Installation 12 | ------------ 13 | 14 | ```bash 15 | $ npm install bosonnlp 16 | ``` 17 | 18 | Usage 19 | ----- 20 | 21 | ```javascript 22 | var bosonnlp = require('bosonnlp'); 23 | var nlp = new bosonnlp.BosonNLP('YOUR_API_KEY'); 24 | nlp.ner('成都商报记者 姚永忠', function (result) { 25 | console.log(result); 26 | }); 27 | //[{"tag": ["ns", "n", "n", "nr"], 28 | // "word": ["成都", "商报", "记者", "姚永忠"], 29 | // "entity": [[0, 2, "product_name"], [3, 4, "person_name"]]}] 30 | ``` 31 | 32 | API 33 | --- 34 | 35 | * __tag(content, callback)__ - Tokenization and part-of-speech tagging. 36 | * __ner(content, callback)__ - Named-entity recognition. 37 | * __extractKeywords(content, callback)__ - Tokenization and compute word weight. 38 | * __sentiment(content, callback)__ - Automatic detection of opinions embodied in text. 39 | * __depparser(content, callback)__ - Work out the grammatical structure of sentences. 40 | * __classify(content, callback)__ - Categorize the given articles. 41 | * __suggest(term[, options], callback)__ - Get related words. 
42 | 43 | tag 44 | --- 45 | 46 | [POS Tagging DOC](http://docs.bosonnlp.com/tag_rule.html) 47 | 48 | ```javascript 49 | var bosonnlp = require('bosonnlp'); 50 | var nlp = new bosonnlp.BosonNLP('YOUR_API_KEY'); 51 | 52 | var text = "这个世界好复杂"; 53 | nlp.tag(text, function (data) { 54 | console.log(data); 55 | }); 56 | // [{"tag": ["r", "n", "d", "a"], 57 | // "word": ["这个", "世界", "好", "复杂"]}] 58 | 59 | var text = ['这个世界好复杂', '计算机是科学么']; 60 | nlp.tag(text, function (data) { 61 | data = JSON.parse(data); 62 | 63 | // ["r", "n", "d", "a"] 64 | console.log(data[0].tag); 65 | 66 | // ["这个", "世界", "好", "复杂"] 67 | console.log(data[0].word); 68 | 69 | // ["n", "vshi", "n", "y"] 70 | console.log(data[1].tag); 71 | 72 | // ["计算机", "是", "科学", "么"] 73 | console.log(data[1].word); 74 | }); 75 | ``` 76 | 77 | ner 78 | --- 79 | 80 | ```javascript 81 | var bosonnlp = require('bosonnlp'); 82 | var nlp = new bosonnlp.BosonNLP('YOUR_API_KEY'); 83 | nlp.ner('成都商报记者 姚永忠', function (result) { 84 | console.log(result); 85 | }); 86 | //[{"tag": ["ns", "n", "n", "nr"], 87 | // "word": ["成都", "商报", "记者", "姚永忠"], 88 | // "entity": [[0, 2, "product_name"], [3, 4, "person_name"]]}] 89 | 90 | var content = ["对于该小孩是不是郑尚金的孩子,目前已做亲子鉴定,结果还没出来,", 91 | "纪检部门仍在调查之中。成都商报记者 姚永忠"]; 92 | nlp.ner(content, function (result) { 93 | console.log(result); 94 | }); 95 | //[{"tag": ["p","r","n","vshi","d","vshi","nr","ude","n","wd","t","d","v","n","n","wd","n","d","d","v","wd"], 96 | // "word": ["对于","该","小孩","是","不","是","郑尚金","的","孩子",",","目前","已","做","亲子","鉴定",",", 97 | // "结果","还","没","出来",","], 98 | // "entity": [[6, 7, "person_name"]]}, 99 | // {"tag": ["n","n","d","p","v","f","wj","ns","n","n","nr"], 100 | // "word": ["纪检","部门","仍","在","调查","之中","。","成都","商报","记者","姚永忠"], 101 | // "entity": [[7,9,"product_name"],[9,10,"job_title"],[10,11,"person_name"]]}] 102 | ``` 103 | 104 | extractKeywords 105 | --------------- 106 | 107 | ```javascript 108 | var bosonnlp = require('bosonnlp'); 109 | var nlp = new
bosonnlp.BosonNLP('YOUR_API_KEY'); 110 | var text = ["病毒式媒体网站:让新闻迅速蔓延"]; 111 | nlp.extractKeywords(text, function (data) { 112 | data = JSON.parse(data); 113 | console.log(data); 114 | }); 115 | ``` 116 | 117 | sentiment 118 | --------- 119 | 120 | ```javascript 121 | var text = ['他是个傻逼','美好的世界']; 122 | boson.sentiment(text, function (data) { 123 | // [非负面概率, 负面概率] 124 | // [[0.6519134382562579, 0.34808656174374203], [0.92706110187413, 0.07293889812586994]] 125 | console.log(data); 126 | }); 127 | ``` 128 | 129 | depparser 130 | --------- 131 | [Depparser Doc](http://docs.bosonnlp.com/depparser.html) 132 | 133 | 名称 | 解释 |举例 134 | ----|--------|--- 135 | ROOT | 核心词 | 警察*打击*犯罪。 136 | SBJ | 主语成分 | *警察*打击犯罪。 137 | OBJ | 宾语成分 | 警察打击*犯罪*。 138 | PU | 标点符号 | 你好*!* 139 | TMP | 时间成分 | *昨天下午*下雨了。 140 | LOC | 位置成分 | 我*在北京*开会。 141 | MNR | 方式成分 | 我*以最快的速度*冲向了终点。 142 | POBJ | 介宾成分 | 他*对客人*很热情。 143 | PMOD | 介词修饰 | 这个产品*直*到今天才完成。 144 | NMOD | 名词修饰 | 这是一个*大*错误。 145 | VMOD | 动词修饰 | 我*狠狠地*打*了*他。 146 | VRD | 动结式 | (第二动词为第一动词结果) 福建省*涌现出*大批人才。 147 | DEG | 连接词| “的”结构 *我*的妈妈是超人。 148 | DEV | “地”结构| 他*狠狠*地看我一眼。 149 | LC | 位置词结构 | 我在*书房*里吃饭。 150 | M | 量词结构 | 我有*一*只小猪。 151 | AMOD | 副词修饰 | 一批*大*中企业折戟上海。 152 | PRN | 括号成分 | 北京(首都)很大。 153 | VC | 动词| “是”修饰 我把你*看做*是妹妹。 154 | COOR | 并列关系 | 希望能*贯彻* *执行*该方针 155 | CS | 从属连词成分 | 如果*可行*,我们进行推广。 156 | DEC | 关系从句| “的” 这是*以前不曾遇到过*的情况。 157 | 158 | ```javascript 159 | var text = ['我以最快的速度吃了午饭'] 160 | boson.depparser(text, function (data) { 161 | console.log("depparser:", data); 162 | }); 163 | ``` 164 | 165 | classify 166 | -------- 167 | [Classes Doc](http://docs.bosonnlp.com/classify.html) 168 | 169 | 编号 | 分类 170 | ----|---- 171 | 0 | 体育 172 | 1 | 教育 173 | 2 | 财经 174 | 3 | 社会 175 | 4 | 娱乐 176 | 5 | 军事 177 | 6 | 国内 178 | 7 | 科技 179 | 8 | 互联网 180 | 9 | 房产 181 | 10 | 国际 182 | 11 | 女人 183 | 12 | 汽车 184 | 13 | 游戏 185 | 186 | ```javascript 187 | var text = ['俄否决安理会谴责叙军战机空袭阿勒颇平民', 188 | '邓紫棋谈男友林宥嘉:我觉得我比他唱得好', 189 | 'Facebook收购印度初创公司']; 190 | boson.classify(text, 
function (data) { 191 | // [5, 4, 8] 192 | console.log("classify:", data); 193 | 194 | }); 195 | ``` 196 | 197 | suggest 198 | ------- 199 | 200 | ```javascript 201 | var term = '粉丝'; 202 | boson.suggest(term, function (data) { 203 | console.log("suggest:", data); 204 | }); 205 | 206 | var options = {}; 207 | // options.top_k default 10 208 | options.top_k = 2; 209 | boson.suggest(term, options, function (data) { 210 | console.log("suggest:", data); 211 | }); 212 | 213 | ``` 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | --------------------------------------------------------------------------------