├── .gitignore ├── tsconfig.json ├── .devcontainer ├── Dockerfile └── devcontainer.json ├── LICENSE ├── package.json ├── dist └── index.d.ts ├── index.js └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // Change this to match your project 3 | "include": [ 4 | "*" 5 | ], 6 | "compilerOptions": { 7 | // Tells TypeScript to read JS files, as 8 | // normally they are ignored as source files 9 | "allowJs": true, 10 | // Generate d.ts files 11 | "declaration": true, 12 | // This compiler run should 13 | // only output d.ts files 14 | "emitDeclarationOnly": true, 15 | // Types should go into this directory. 16 | // Removing this would place the .d.ts files 17 | // next to the .js files 18 | "outDir": "dist" 19 | } 20 | } -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | # See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.158.0/containers/javascript-node/.devcontainer/base.Dockerfile 2 | 3 | # [Choice] Node.js version: 14, 12, 10 4 | ARG VARIANT="14-buster" 5 | FROM mcr.microsoft.com/vscode/devcontainers/javascript-node:0-${VARIANT} 6 | 7 | # [Optional] Uncomment this section to install additional OS packages. 
8 | # RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ 9 | # && apt-get -y install --no-install-recommends 10 | 11 | # [Optional] Uncomment if you want to install an additional version of node using nvm 12 | # ARG EXTRA_NODE_VERSION=10 13 | # RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}" 14 | 15 | # [Optional] Uncomment if you want to install more global node modules 16 | # RUN su node -c "npm install -g " 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2021 NLP Cloud 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 
20 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: 2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.158.0/containers/javascript-node 3 | { 4 | "name": "Node.js", 5 | "build": { 6 | "dockerfile": "Dockerfile", 7 | // Update 'VARIANT' to pick a Node version: 10, 12, 14 8 | "args": { "VARIANT": "14" } 9 | }, 10 | 11 | // Set *default* container specific settings.json values on container create. 12 | "settings": { 13 | "terminal.integrated.shell.linux": "/bin/bash" 14 | }, 15 | 16 | // Add the IDs of extensions you want installed when the container is created. 17 | "extensions": [ 18 | "dbaeumer.vscode-eslint" 19 | ], 20 | 21 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 22 | // "forwardPorts": [], 23 | 24 | // Use 'postCreateCommand' to run commands after the container is created. 25 | // "postCreateCommand": "yarn install", 26 | 27 | // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 28 | "remoteUser": "node" 29 | } 30 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlpcloud", 3 | "version": "2.0.8", 4 | "description": "NLP Cloud serves high performance pre-trained or custom models for NER, sentiment-analysis, classification, summarization, paraphrasing, text generation, image generation, code generation, question answering, automatic speech recognition, machine translation, language detection, semantic search, semantic similarity, tokenization, POS tagging, speech synthesis, embeddings, and dependency parsing. 
It is ready for production, served through a REST API.\n\nThis is the Node.js client for the NLP Cloud API.\n\nMore details here: https://nlpcloud.com\n\nDocumentation: https://docs.nlpcloud.com", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "repository": { 10 | "type": "git", 11 | "url": "git+https://github.com/nlpcloud/nlpcloud-js.git" 12 | }, 13 | "keywords": [ 14 | "api", 15 | "NLP", 16 | "ai", 17 | "deep learning", 18 | "machine learning", 19 | "data science", 20 | "nlpcloud" 21 | ], 22 | "author": "Julien Salinas", 23 | "license": "MIT", 24 | "bugs": { 25 | "url": "https://github.com/nlpcloud/nlpcloud-js/issues" 26 | }, 27 | "homepage": "https://github.com/nlpcloud/nlpcloud-js#readme", 28 | "dependencies": { 29 | "axios": "^1.7.9" 30 | }, 31 | "devDependencies": { 32 | "typescript": "^4.4.3" 33 | }, 34 | "types": "./dist/index.d.ts" 35 | } 36 | -------------------------------------------------------------------------------- /dist/index.d.ts: -------------------------------------------------------------------------------- 1 | export = Client; 2 | declare class Client { 3 | constructor(params: { 4 | model: string; 5 | token: string; 6 | gpu?: boolean; 7 | lang?: string; 8 | async?: boolean; 9 | }); 10 | headers: { 11 | Authorization: string; 12 | 'User-Agent': string; 13 | }; 14 | rootURL: string; 15 | 16 | adGeneration(params: { 17 | keywords: string[] 18 | }): Promise<{ 19 | status: number; 20 | statusText: string; 21 | data: { 22 | generated_text: string; 23 | }; 24 | }>; 25 | 26 | asr(params: { 27 | url?: string, 28 | encodedFile?: string, 29 | inputLanguage?: string 30 | }): Promise<{ 31 | status: number; 32 | statusText: string; 33 | data: { 34 | text: string; 35 | duration: number; 36 | language: string; 37 | segments: { id: number, start: number, end: number, text: string }[]; 38 | words: { id: number, start: number, end: number, text: string, prob: number }[]; 39 | }; 40 | } | { 41 | 
status: number; 42 | statusText: string; 43 | data: { 44 | url: string; 45 | } 46 | }>; 47 | 48 | asyncResult(params: { 49 | url: string 50 | }): Promise<{ 51 | status: number; 52 | statusText: string; 53 | data: { 54 | created_on: string; 55 | finished_on: string; 56 | request_body: string; 57 | http_code: number; 58 | error_detail: string; 59 | content: string; 60 | }; 61 | } | null >; 62 | 63 | chatbot(params: { 64 | input: string, 65 | context?: string, 66 | history?: { input: string, response: string }[] 67 | }): Promise<{ 68 | status: number; 69 | statusText: string; 70 | data: { 71 | response: string; 72 | history: { input: string, response: string }[]; 73 | }; 74 | }>; 75 | 76 | classification(params: { 77 | text: string, 78 | labels?: string[], 79 | multiClass?: boolean 80 | }): Promise<{ 81 | status: number; 82 | statusText: string; 83 | data: { 84 | labels: string[]; 85 | scores: number[]; 86 | }; 87 | }>; 88 | 89 | codeGeneration(params: { 90 | instruction: string 91 | }): Promise<{ 92 | status: number; 93 | statusText: string; 94 | data: { 95 | generated_code: string; 96 | }; 97 | }>; 98 | 99 | dependencies(params: { 100 | text: string 101 | }): Promise<{ 102 | status: number; 103 | statusText: string; 104 | data: { 105 | words: { text: string, tag: string }[]; 106 | }; 107 | }>; 108 | 109 | embeddings(params: { 110 | sentences: string[] 111 | }): Promise<{ 112 | status: number; 113 | statusText: string; 114 | data: { 115 | embeddings: number[][]; 116 | }; 117 | }>; 118 | 119 | entities(params: { 120 | text: string, 121 | searchedEntity?: string 122 | }): Promise<{ 123 | status: number; 124 | statusText: string; 125 | data: { 126 | entities: { start: number; end: number; type: string; text: string }[]; 127 | }; 128 | }>; 129 | 130 | generation(params: {text: string, 131 | maxLength?: number, 132 | lengthNoInput?: boolean, 133 | endSequence?: string, 134 | removeInput?: boolean, 135 | numBeams?: number, 136 | numReturnSequences?: number, 137 | topK?: 
number, 138 | topP?: number, 139 | temperature?: number, 140 | repetitionPenalty?: number, 141 | removeEndSequence?: boolean 142 | }): Promise<{ 143 | status: number; 144 | statusText: string; 145 | data: { 146 | generated_text: string; 147 | nb_input_tokens: number; 148 | nb_generated_tokens: number; 149 | }; 150 | }>; 151 | 152 | gsCorrection(params: { 153 | text: string 154 | }): Promise<{ 155 | status: number; 156 | statusText: string; 157 | data: { 158 | correction: string; 159 | }; 160 | } | { 161 | status: number; 162 | statusText: string; 163 | data: { 164 | url: string; 165 | } 166 | }>; 167 | 168 | imageGeneration(params: { 169 | text: string 170 | }): Promise<{ 171 | status: number; 172 | statusText: string; 173 | data: { 174 | url: string; 175 | }; 176 | }>; 177 | 178 | intentClassification(params: { 179 | text: string 180 | }): Promise<{ 181 | status: number; 182 | statusText: string; 183 | data: { 184 | intent: string; 185 | }; 186 | }>; 187 | 188 | kwKpExtraction(params: { 189 | text: string 190 | }): Promise<{ 191 | status: number; 192 | statusText: string; 193 | data: { 194 | keywords_and_keyphrases: string[]; 195 | }; 196 | }>; 197 | 198 | langdetection(params: { 199 | text: string 200 | }): Promise<{ 201 | status: number; 202 | statusText: string; 203 | data: { 204 | languages: any[]; 205 | }; 206 | }>; 207 | 208 | paraphrasing(params: { 209 | text: string 210 | }): Promise<{ 211 | status: number; 212 | statusText: string; 213 | data: { 214 | paraphrased_text: string; 215 | }; 216 | } | { 217 | status: number; 218 | statusText: string; 219 | data: { 220 | url: string; 221 | } 222 | }>; 223 | 224 | question(params: { 225 | context?: string, 226 | question: string 227 | }): Promise<{ 228 | status: number; 229 | statusText: string; 230 | data: { 231 | answer: string; 232 | score: number; 233 | start: number; 234 | end: number; 235 | }; 236 | }>; 237 | 238 | semanticSearch(params: { 239 | text: string, 240 | numResults?: number 241 | }): Promise<{ 
242 | status: number; 243 | statusText: string; 244 | data: { 245 | search_results: { score: number, text: string }[]; 246 | }; 247 | }>; 248 | 249 | semanticSimilarity(params: { 250 | sentences: string[] 251 | }): Promise<{ 252 | status: number; 253 | statusText: string; 254 | data: { 255 | score: number; 256 | }; 257 | }>; 258 | 259 | sentenceDependencies(params: { 260 | text: string 261 | }): Promise<{ 262 | status: number; 263 | statusText: string; 264 | data: { 265 | sentence_dependencies: { sentence: string, dependencies: { words: { text: string, tag: string }[], arcs: { start: number, end: number, label: string, text: string, dir: string }[] } }[]; 266 | }; 267 | }>; 268 | 269 | sentiment(params: { 270 | text: string, 271 | target?: string 272 | }): Promise<{ 273 | status: number; 274 | statusText: string; 275 | data: { 276 | scored_labels: { label: string, score: number }[]; 277 | }; 278 | }>; 279 | speechSynthesis(params: { 280 | text: string, 281 | voice?: string 282 | }): Promise<{ 283 | status: number; 284 | statusText: string; 285 | data: { 286 | url: string; 287 | }; 288 | }>; 289 | 290 | summarization(params: { 291 | text: string, 292 | size?: string 293 | }): Promise<{ 294 | status: number; 295 | statusText: string; 296 | data: { 297 | summary_text: string; 298 | }; 299 | } | { 300 | status: number; 301 | statusText: string; 302 | data: { 303 | url: string; 304 | } 305 | }>; 306 | 307 | translation(params: { 308 | text: string, 309 | source?: string, 310 | target: string 311 | }): Promise<{ 312 | status: number; 313 | statusText: string; 314 | data: { 315 | translation_text: string; 316 | }; 317 | }>; 318 | tokens(text: string): Promise<{ 319 | status: number; 320 | statusText: string; 321 | data: { 322 | tokens: { start: number, end: number, index: number, text: string, lemma: string, ws_after: boolean }[]; 323 | }; 324 | }>; 325 | 326 | } 327 | -------------------------------------------------------------------------------- /index.js: 
const axios = require('axios')

const BASE_URL = 'https://api.nlpcloud.io'
const API_VERSION = 'v1'

/**
 * NLP Cloud API client (https://docs.nlpcloud.com).
 *
 * Every endpoint method takes a single `params` object and returns the
 * Axios promise for the underlying HTTP request: on success the result is
 * in `response.data`, on failure the status code is in
 * `err.response.status` and the message in `err.response.data.detail`.
 */
class Client {
  /**
   * @param {Object}  params
   * @param {string}  params.model Model to use, e.g. 'bart-large-cnn' or 'custom_model/2568'.
   * @param {string}  params.token API token from the NLP Cloud dashboard.
   * @param {boolean} [params.gpu=false] Route requests to a GPU endpoint.
   * @param {string}  [params.lang=''] Language code for the multilingual add-on.
   * @param {boolean} [params.async=false] Use the asynchronous API (responses contain a URL to poll with asyncResult()).
   */
  constructor(params) {
    const model = params.model
    const token = params.token
    const gpu = params.gpu ?? false
    const isAsync = params.async ?? false
    let lang = params.lang ?? ''

    this.headers = {
      'Authorization': 'Token ' + token,
      'User-Agent': 'nlpcloud-javascript-client'
    }

    this.rootURL = BASE_URL + '/' + API_VERSION + '/'

    // English is the server-side default, so both English codes are
    // omitted from the URL.
    if (lang === 'en' || lang === 'eng_Latn') {
      lang = ''
    }

    if (gpu) {
      this.rootURL += 'gpu/'
    }

    if (isAsync) {
      this.rootURL += 'async/'
    }

    if (lang !== '') {
      this.rootURL += lang + '/'
    }

    this.rootURL += model
  }

  // Internal POST helper shared by all endpoints (private by convention).
  _post(endpoint, payload) {
    return axios.post(this.rootURL + '/' + endpoint, payload, { headers: this.headers })
  }

  // Generate a product description or ad from a list of keywords.
  adGeneration(params) {
    return this._post('ad-generation', {
      'keywords': params.keywords
    })
  }

  // Automatic speech recognition on a hosted (url) or base64-encoded
  // (encodedFile) audio/video file; inputLanguage is an optional ISO code.
  asr(params) {
    return this._post('asr', {
      'url': params.url ?? null,
      'encoded_file': params.encodedFile ?? null,
      'input_language': params.inputLanguage ?? null
    })
  }

  // Poll the URL returned by an asynchronous request for its result.
  asyncResult(params) {
    return axios.get(params.url, { headers: this.headers })
  }

  // Chatbot response to an input, with optional context and conversation history.
  chatbot(params) {
    return this._post('chatbot', {
      'input': params.input,
      'context': params.context ?? null,
      'history': params.history ?? null
    })
  }

  // (Zero-shot) text classification against candidate labels.
  classification(params) {
    return this._post('classification', {
      'text': params.text,
      'labels': params.labels ?? null,
      'multi_class': params.multiClass ?? null
    })
  }

  // Generate source code from an instruction.
  codeGeneration(params) {
    return this._post('code-generation', {
      'instruction': params.instruction
    })
  }

  // Part-of-speech tagging + dependency arcs.
  dependencies(params) {
    return this._post('dependencies', {
      'text': params.text
    })
  }

  // Extract embeddings from a list of blocks of text.
  embeddings(params) {
    return this._post('embeddings', {
      'sentences': params.sentences
    })
  }

  // Named entity recognition, optionally restricted to one entity type.
  entities(params) {
    return this._post('entities', {
      'text': params.text,
      'searched_entity': params.searchedEntity ?? null
    })
  }

  // Text generation; all tuning parameters are optional and default to the
  // server-side values when null.
  generation(params) {
    return this._post('generation', {
      'text': params.text,
      'max_length': params.maxLength ?? null,
      'length_no_input': params.lengthNoInput ?? null,
      'end_sequence': params.endSequence ?? null,
      'remove_input': params.removeInput ?? null,
      'num_beams': params.numBeams ?? null,
      'num_return_sequences': params.numReturnSequences ?? null,
      'top_k': params.topK ?? null,
      'top_p': params.topP ?? null,
      'temperature': params.temperature ?? null,
      'repetition_penalty': params.repetitionPenalty ?? null,
      'bad_words': params.badWords ?? null,
      'remove_end_sequence': params.removeEndSequence ?? null
    })
  }

  // Grammar and spelling correction.
  gsCorrection(params) {
    return this._post('gs-correction', {
      'text': params.text
    })
  }

  // Generate an image from a text prompt; the response contains a URL.
  imageGeneration(params) {
    return this._post('image-generation', {
      'text': params.text
    })
  }

  // Detect the intent expressed in a text.
  intentClassification(params) {
    return this._post('intent-classification', {
      'text': params.text
    })
  }

  // Keywords and keyphrases extraction.
  kwKpExtraction(params) {
    return this._post('kw-kp-extraction', {
      'text': params.text
    })
  }

  // Detect the language(s) of a text.
  langdetection(params) {
    return this._post('langdetection', {
      'text': params.text
    })
  }

  // Paraphrase a text.
  paraphrasing(params) {
    return this._post('paraphrasing', {
      'text': params.text
    })
  }

  // Question answering, with an optional supporting context.
  question(params) {
    return this._post('question', {
      'question': params.question,
      'context': params.context ?? null
    })
  }

  // Semantic search; numResults optionally caps the number of hits.
  semanticSearch(params) {
    return this._post('semantic-search', {
      'text': params.text,
      'num_results': params.numResults ?? null
    })
  }

  // Semantic similarity between 2 blocks of text.
  semanticSimilarity(params) {
    return this._post('semantic-similarity', {
      'sentences': params.sentences
    })
  }

  // Per-sentence POS tagging + dependency arcs.
  sentenceDependencies(params) {
    return this._post('sentence-dependencies', {
      'text': params.text
    })
  }

  // Sentiment analysis, optionally targeted at a specific entity.
  sentiment(params) {
    return this._post('sentiment', {
      'text': params.text,
      'target': params.target ?? null
    })
  }

  // Speech synthesis (text to speech); the response contains a URL.
  // BUG FIX: this method previously had the signature
  // `speechSynthesis(text, voice = null)` while its body read from an
  // undefined `params`, so every call threw a ReferenceError. It now takes
  // a params object like every other method, matching dist/index.d.ts.
  speechSynthesis(params) {
    return this._post('speech-synthesis', {
      'text': params.text,
      'voice': params.voice ?? null
    })
  }

  // Summarize a text; size optionally controls the summary length.
  summarization(params) {
    return this._post('summarization', {
      'text': params.text,
      'size': params.size ?? null
    })
  }

  // Tokenize and lemmatize a text.
  tokens(params) {
    return this._post('tokens', {
      'text': params.text
    })
  }

  // Machine translation; source is optional (auto-detected when null).
  translation(params) {
    return this._post('translation', {
      'text': params.text,
      'source': params.source ?? null,
      'target': params.target
    })
  }

}

module.exports = Client
4 | 5 | NLP Cloud serves high performance pre-trained or custom models for NER, sentiment-analysis, classification, summarization, dialogue summarization, paraphrasing, intent classification, product description and ad generation, chatbot, grammar and spelling correction, keywords and keyphrases extraction, text generation, image generation, text generation, question answering, automatic speech recognition, machine translation, language detection, semantic search, semantic similarity, tokenization, POS tagging, embeddings, and dependency parsing. It is ready for production, served through a REST API. 6 | 7 | You can either use the NLP Cloud pre-trained models, fine-tune your own models, or deploy your own models. 8 | 9 | If you face an issue, don't hesitate to raise it as a Github issue. Thanks! 10 | 11 | ## Installation 12 | 13 | Install via npm. 14 | 15 | ```shell 16 | npm install nlpcloud --save 17 | ``` 18 | 19 | ## Returned Objects 20 | 21 | All objects returned by the library are [Axios](https://github.com/axios/axios) promises. 22 | 23 | In case of success, results are contained in `response.data`. In case of failure, you can retrieve the status code in `err.response.status` and the error message in `err.response.data.detail`. 24 | 25 | ## Examples 26 | 27 | Here is a full example that summarizes a text using Facebook's Bart Large CNN model, with a fake token: 28 | 29 | ```js 30 | const NLPCloudClient = require('nlpcloud'); 31 | 32 | const client = new NLPCloudClient({model:'bart-large-cnn', token:'4eC39HqLyjWDarjtT1zdp7dc'}) 33 | 34 | client.summarization(`One month after the United States began what has become a 35 | troubled rollout of a national COVID vaccination campaign, the effort is finally 36 | gathering real steam. Close to a million doses -- over 951,000, to be more exact -- 37 | made their way into the arms of Americans in the past 24 hours, the U.S. Centers 38 | for Disease Control and Prevention reported Wednesday. 
That's the largest number 39 | of shots given in one day since the rollout began and a big jump from the 40 | previous day, when just under 340,000 doses were given, CBS News reported. 41 | That number is likely to jump quickly after the federal government on Tuesday 42 | gave states the OK to vaccinate anyone over 65 and said it would release all 43 | the doses of vaccine it has available for distribution. Meanwhile, a number 44 | of states have now opened mass vaccination sites in an effort to get larger 45 | numbers of people inoculated, CBS News reported.`) 46 | .then(function (response) { 47 | console.log(response.data); 48 | }) 49 | .catch(function (err) { 50 | console.error(err.response.status); 51 | console.error(err.response.data.detail); 52 | }); 53 | ``` 54 | 55 | Here is a full example that does the same thing, but on a GPU: 56 | 57 | ```js 58 | const NLPCloudClient = require('nlpcloud'); 59 | 60 | const client = new NLPCloudClient({model:'bart-large-cnn', token:'4eC39HqLyjWDarjtT1zdp7dc', gpu:true}) 61 | 62 | client.summarization(`One month after the United States began what has become a 63 | troubled rollout of a national COVID vaccination campaign, the effort is finally 64 | gathering real steam. Close to a million doses -- over 951,000, to be more exact -- 65 | made their way into the arms of Americans in the past 24 hours, the U.S. Centers 66 | for Disease Control and Prevention reported Wednesday. That's the largest number 67 | of shots given in one day since the rollout began and a big jump from the 68 | previous day, when just under 340,000 doses were given, CBS News reported. 69 | That number is likely to jump quickly after the federal government on Tuesday 70 | gave states the OK to vaccinate anyone over 65 and said it would release all 71 | the doses of vaccine it has available for distribution. 
Meanwhile, a number 72 | of states have now opened mass vaccination sites in an effort to get larger 73 | numbers of people inoculated, CBS News reported.`) 74 | .then(function (response) { 75 | console.log(response.data); 76 | }) 77 | .catch(function (err) { 78 | console.error(err.response.status); 79 | console.error(err.response.data.detail); 80 | }); 81 | ``` 82 | 83 | Here is a full example that does the same thing, but on a French text: 84 | 85 | ```js 86 | const NLPCloudClient = require('nlpcloud'); 87 | 88 | const client = new NLPCloudClient({model:'bart-large-cnn', token:'4eC39HqLyjWDarjtT1zdp7dc', gpu:true, lang:'fra_Latn'}) 89 | 90 | client.summarization(`Sur des images aériennes, prises la veille par un vol de surveillance 91 | de la Nouvelle-Zélande, la côte d’une île est bordée d’arbres passés du vert 92 | au gris sous l’effet des retombées volcaniques. On y voit aussi des immeubles 93 | endommagés côtoyer des bâtiments intacts. « D’après le peu d’informations 94 | dont nous disposons, l’échelle de la dévastation pourrait être immense, 95 | spécialement pour les îles les plus isolées », avait déclaré plus tôt 96 | Katie Greenwood, de la Fédération internationale des sociétés de la Croix-Rouge. 97 | Selon l’Organisation mondiale de la santé (OMS), une centaine de maisons ont 98 | été endommagées, dont cinquante ont été détruites sur l’île principale de 99 | Tonga, Tongatapu. 
La police locale, citée par les autorités néo-zélandaises, 100 | a également fait état de deux morts, dont une Britannique âgée de 50 ans, 101 | Angela Glover, emportée par le tsunami après avoir essayé de sauver les chiens 102 | de son refuge, selon sa famille.`) 103 | .then(function (response) { 104 | console.log(response.data); 105 | }) 106 | .catch(function (err) { 107 | console.error(err.response.status); 108 | console.error(err.response.data.detail); 109 | }); 110 | ``` 111 | 112 | A JSON object is returned: 113 | 114 | ```json 115 | { 116 | "summary_text": "Over 951,000 doses were given in the past 24 hours. That's the largest number of shots given in one day since the rollout began. That number is likely to jump quickly after the federal government gave states the OK to vaccinate anyone over 65. A number of states have now opened mass vaccination sites." 117 | } 118 | ``` 119 | 120 | ## Usage 121 | 122 | ### Client Initialization 123 | 124 | Pass the model you want to use and the NLP Cloud token to the client during initialization. 125 | 126 | The model can either be a pretrained model like `en_core_web_lg`, `bart-large-mnli`... but also one of your custom models, using `custom_model/` (e.g. `custom_model/2568`). 127 | 128 | Your token can be retrieved from your [NLP Cloud dashboard](https://nlpcloud.com/home/token). 129 | 130 | ```js 131 | const NLPCloudClient = require('nlpcloud'); 132 | 133 | const client = new NLPCloudClient({model:'', token:''}) 134 | ``` 135 | 136 | If you want to use a GPU, pass `true` as the gpu argument. 137 | 138 | ```js 139 | const NLPCloudClient = require('nlpcloud'); 140 | 141 | const client = new NLPCloudClient({model:'', token:'', gpu:true}) 142 | ``` 143 | 144 | If you want to use the multilingual add-on in order to process non-English texts, set `''` as the lang argument. For example, if you want to process French text, you should set `lang:'fra_Latn'`. 
145 | 146 | ```js 147 | const NLPCloudClient = require('nlpcloud'); 148 | 149 | const client = new NLPCloudClient({model:'', token:'', lang:''}) 150 | ``` 151 | 152 | If you want to make asynchronous requests, pass `true` as the async argument. 153 | 154 | ```js 155 | const NLPCloudClient = require('nlpcloud'); 156 | 157 | const client = new NLPCloudClient({model:'', token:'', async:true}) 158 | ``` 159 | 160 | If you are making asynchronous requests, you will always receive a quick response containing a URL. You should then poll this URL with `asyncResult()` on a regular basis (every 10 seconds for example) in order to check if the result is available. Here is an example: 161 | 162 | ```js 163 | client.asyncResult({url:'https://api.nlpcloud.io/v1/get-async-result/21718218-42e8-4be9-a67f-b7e18e03b436'}) 164 | ``` 165 | 166 | The above returns an object if the response is available. It returns an empty response otherwise (`null`). 167 | 168 | ### Automatic Speech Recognition (Speech to Text) Endpoint 169 | 170 | Call the `asr()` method and pass the following arguments: 171 | 172 | 1. (Optional: either this or the encoded file should be set) `url`: a URL where your audio or video file is hosted 173 | 1. (Optional: either this or the url should be set) `encodedFile`: a base 64 encoded version of your file 174 | 1. (Optional) `inputLanguage`: the language of your file as ISO code 175 | 176 | ```js 177 | client.asr({url:'Your url'}) 178 | ``` 179 | 180 | ### Chatbot Endpoint 181 | 182 | Call the `chatbot()` method and pass the following arguments: 183 | 184 | 1. Your input 185 | 1. (Optional) `context` A general context about the conversation 186 | 1. (Optional) `history` The history of your previous exchanges with the model 187 | 188 | ```js 189 | client.chatbot({input:''}) 190 | ``` 191 | 192 | ### Classification Endpoint 193 | 194 | Call the `classification()` method and pass the following arguments: 195 | 196 | 1. The text you want to classify, as a string 197 | 1. 
The candidate labels for your text, as an array of strings 198 | 1. (Optional) `multiClass` Whether the classification should be multi-class or not, as a boolean 199 | 200 | ```js 201 | client.classification({text:'', labels:['label 1', 'label 2', ...]}) 202 | ``` 203 | 204 | ### Code Generation Endpoint 205 | 206 | Call the `codeGeneration()` method and pass the instruction for the code you want to generate. 207 | 208 | ```js 209 | client.codeGeneration({instruction:''}) 210 | ``` 211 | 212 | ### Dependencies Endpoint 213 | 214 | Call the `dependencies()` method and pass the text you want to perform part of speech tagging (POS) + arcs on. 215 | 216 | ```js 217 | client.dependencies({text:''}) 218 | ``` 219 | 220 | ### Embeddings Endpoint 221 | 222 | Call the `embeddings()` method and pass an array of blocks of text that you want to extract embeddings from. 223 | 224 | ```js 225 | client.embeddings({sentences:['', '', '', ...]}) 226 | ``` 227 | 228 | The above command returns a JSON object. 229 | 230 | ### Entities Endpoint 231 | 232 | Call the `entities()` method and pass the text you want to perform named entity recognition (NER) on. 233 | 234 | ```js 235 | client.entities({text:''}) 236 | ``` 237 | 238 | ### Generation Endpoint 239 | 240 | Call the `generation()` method and pass the following arguments: 241 | 242 | 1. The block of text that starts the generated text. 256 tokens maximum for GPT-J on CPU, 1024 tokens maximum for GPT-J and GPT-NeoX 20B on GPU, and 2048 tokens maximum for Fast GPT-J and Finetuned GPT-NeoX 20B on GPU. 243 | 1. (Optional) `maxLength`: Optional. The maximum number of tokens that the generated text should contain. 256 tokens maximum for GPT-J on CPU, 1024 tokens maximum for GPT-J and GPT-NeoX 20B on GPU, and 2048 tokens maximum for Fast GPT-J and Finetuned GPT-NeoX 20B on GPU. If `lengthNoInput` is false, the size of the generated text is the difference between `maxLength` and the length of your input text. 
If `lengthNoInput` is true, the size of the generated text simply is `maxLength`. Defaults to 50. 244 | 1. (Optional) `lengthNoInput`: Whether `minLength` and `maxLength` should not include the length of the input text, as a boolean. If false, `minLength` and `maxLength` include the length of the input text. If true, `minLength` and `maxLength` don't include the length of the input text. Defaults to false. 245 | 1. (Optional) `endSequence`: A specific token that should be the end of the generated sequence, as a string. For example it could be `.` or `\n` or `###` or anything else below 10 characters. 246 | 1. (Optional) `removeInput`: Whether you want to remove the input text from the result, as a boolean. Defaults to false. 247 | 1. (Optional) `numBeams`: Number of beams for beam search. 1 means no beam search. This is an integer. Defaults to 1. 248 | 1. (Optional) `numReturnSequences`: The number of independently computed returned sequences for each element in the batch, as an integer. Defaults to 1. 249 | 1. (Optional) `topK`: The number of highest probability vocabulary tokens to keep for top-k-filtering, as an integer. Maximum 1000 tokens. Defaults to 0. 250 | 1. (Optional) `topP`: If set to a float < 1, only the most probable tokens with probabilities that add up to `topP` or higher are kept for generation. This is a float. Should be between 0 and 1. Defaults to 0.7. 251 | 1. (Optional) `temperature`: The value used to modulate the next token probabilities, as a float. Should be between 0 and 1. Defaults to 1. 252 | 1. (Optional) `repetitionPenalty`: The parameter for repetition penalty, as a float. 1.0 means no penalty. Defaults to 1.0. 253 | 1. (Optional) `badWords`: List of tokens that are not allowed to be generated, as a list of strings. Defaults to null. 254 | 1. (Optional) `removeEndSequence`: Whether you want to remove the `endSequence` string from the result. Defaults to false. 
255 | 256 | ```js 257 | client.generation({text:''}) 258 | ``` 259 | 260 | ### Grammar and Spelling Correction Endpoint 261 | 262 | Call the `gsCorrection()` method and pass the text you want to correct. 263 | 264 | ```js 265 | client.gsCorrection({text:''}) 266 | ``` 267 | 268 | ### Image Generation Endpoint 269 | 270 | Call the `imageGeneration()` method and pass the text you want to use to generate your image. 271 | 272 | ```js 273 | client.imageGeneration({text:''}) 274 | ``` 275 | 276 | ### Intent Classification Endpoint 277 | 278 | Call the `intentClassification()` method and pass the text you want to analyze in order to detect the intent. 279 | 280 | ```js 281 | client.intentClassification({text:''}) 282 | ``` 283 | 284 | ### Keywords and Keyphrases Extraction Endpoint 285 | 286 | Call the `kwKpExtraction()` method and pass the text you want to extract keywords and keyphrases from. 287 | 288 | ```js 289 | client.kwKpExtraction({text:''}) 290 | ``` 291 | 292 | ### Language Detection Endpoint 293 | 294 | Call the `langdetection()` method and pass the text you want to analyze in order to detect the languages. 295 | 296 | ```js 297 | client.langdetection({text:''}) 298 | ``` 299 | 300 | ### Question Answering Endpoint 301 | 302 | Call the `question()` method and pass the following: 303 | 304 | 1. Your question 305 | 1. (Optional) A context that the model will use to try to answer your question 306 | 307 | ```js 308 | client.question({question:'', context:''}) 309 | ``` 310 | 311 | ### Semantic Search Endpoint 312 | 313 | Call the `semanticSearch()` method and pass your search query. 314 | 315 | ```js 316 | client.semanticSearch('Your search query') 317 | ``` 318 | 319 | The above command returns a JSON object. 320 | 321 | ### Semantic Similarity Endpoint 322 | 323 | Call the `semanticSimilarity()` method and pass an array made up of 2 blocks of text that you want to compare. 
324 | 325 | ```js 326 | client.semanticSimilarity({sentences:['', '']}) 327 | ``` 328 | 329 | The above command returns a JSON object. 330 | 331 | ### Sentence Dependencies Endpoint 332 | 333 | Call the `sentenceDependencies()` method and pass a block of text made up of several sentences you want to perform POS + arcs on. 334 | 335 | ```js 336 | client.sentenceDependencies({text:''}) 337 | ``` 338 | 339 | ### Sentiment Analysis Endpoint 340 | 341 | Call the `sentiment()` method and pass the following: 342 | 343 | 1. The text you want to get the sentiment of 344 | 1. (Optional) The target element that the sentiment should apply to 345 | 346 | ```js 347 | client.sentiment({text:'', target:''}) 348 | ``` 349 | 350 | ### Speech Synthesis Endpoint 351 | 352 | Call the `speechSynthesis()` method and pass the text you want to convert to audio: 353 | 354 | ```js 355 | client.speechSynthesis({text:""}) 356 | ``` 357 | 358 | The above command returns a JSON object. 359 | 360 | ### Summarization Endpoint 361 | 362 | Call the `summarization()` method and pass the text you want to summarize. 363 | 364 | ```js 365 | client.summarization({text:''}) 366 | ``` 367 | 368 | ### Paraphrasing Endpoint 369 | 370 | Call the `paraphrasing()` method and pass the text you want to paraphrase. 371 | 372 | ```js 373 | client.paraphrasing({text:''}) 374 | ``` 375 | 376 | ### Tokenization Endpoint 377 | 378 | Call the `tokens()` method and pass the text you want to tokenize. 379 | 380 | ```js 381 | client.tokens({text:''}) 382 | ``` 383 | 384 | ### Translation Endpoint 385 | 386 | Call the `translation()` method and pass the text you want to translate. 387 | 388 | ```js 389 | client.translation({text:''}) 390 | ``` 391 | --------------------------------------------------------------------------------