├── .gitignore
├── .npmignore
├── LICENSE
├── README.md
├── _config.yml
├── description.txt
├── index.js
└── package.json

/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | 
8 | # Runtime data
9 | pids
10 | *.pid
11 | *.seed
12 | *.pid.lock
13 | 
14 | # Directory for instrumented libs generated by jscoverage/JSCover
15 | lib-cov
16 | 
17 | # Coverage directory used by tools like istanbul
18 | coverage
19 | 
20 | # nyc test coverage
21 | .nyc_output
22 | 
23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
24 | .grunt
25 | 
26 | # Bower dependency directory (https://bower.io/)
27 | bower_components
28 | 
29 | # node-waf configuration
30 | .lock-wscript
31 | 
32 | # Compiled binary addons (https://nodejs.org/api/addons.html)
33 | build/Release
34 | 
35 | # Dependency directories
36 | node_modules/
37 | jspm_packages/
38 | 
39 | # TypeScript v1 declaration files
40 | typings/
41 | 
42 | # Optional npm cache directory
43 | .npm
44 | 
45 | # Optional eslint cache
46 | .eslintcache
47 | 
48 | # Optional REPL history
49 | .node_repl_history
50 | 
51 | # Output of 'npm pack'
52 | *.tgz
53 | 
54 | # Yarn Integrity file
55 | .yarn-integrity
56 | 
57 | # dotenv environment variables file
58 | .env
59 | 
60 | # next.js build output
61 | .next
62 | 
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
1 | # jekyll config
2 | _config.yml
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2018 Merferry
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Crawl Wikipedia pages and upload TTS to YouTube.
2 | > Do you want to:
3 | > - [Upload Wikipedia TTS videos on YouTube]?
4 | 
5 | 
6 | 
7 | ## setup
8 | 
9 | 1. Install [Node.js], if not installed.
10 | 2. Run `npm install -g wikipedia-tts` in [console] to install the CLI.
11 | 3. Or, run `npm install wikipedia-tts` to use this as a package.
12 | 4. Follow the setup at [extra-googletts].
13 | 5. Follow the setup at [extra-youtubeuploader].
14 | 
15 | 
16 | 
17 | ## console
18 | 
19 | ```bash
20 | wikipedia-tts [page] [options]
21 | # --help: show this help
22 | # -l, --log: enable log
23 | # -o, --output: set output file
24 | # -d, --db: set crawl database file (crawl.db)
25 | # -p, --priority: set page priority (0)
26 | # -r, --references: set page references (0)
27 | # -s, --status: set page status (0)
28 | # -t, --times: times to crawl/upload (1)
29 | # Environment variables:
30 | # WIKIPEDIATTS_LOG: enable logging (0)
31 | # WIKIPEDIATTS_DB: crawl database file (crawl.db)
32 | 
33 | wikipedia-tts "Ladakh"
34 | # "Ladakh" is uploaded to YouTube
35 | 
36 | wikipedia-tts add "Plant nutrition"
37 | # "Plant nutrition" is added to the crawl list
38 | 
39 | wikipedia-tts update "Plant nutrition" --priority 1
40 | # "Plant nutrition" priority is set to 1
41 | # this means it will be crawled/uploaded first,
42 | # even if other pages have a higher number of references
43 | 
44 | wikipedia-tts crawl
45 | # links on the "Plant nutrition" page are crawled
46 | # this is because it has the highest priority and reference count
47 | 
48 | wikipedia-tts crawl --times 10
49 | # Crawling is done recursively, 10 times
50 | 
51 | wikipedia-tts upload
52 | # Highest-ranking page is crawled and uploaded to YouTube
53 | 
54 | wikipedia-tts upload --times 10
55 | # Uploading is done recursively, 10 times
56 | ```
57 | 
58 | 
59 | 
60 | ## package
61 | 
62 | ```javascript
63 | const wikipediaTts = require('wikipedia-tts');
64 | // wikipediaTts.setup([db path]): db conn (promise)
65 | // wikipediaTts.get(<db>, <page>): {title, priority, references, status} (promise)
66 | // wikipediaTts.add(<db>, <page>): page (promise)
67 | // wikipediaTts.remove(<db>, <page>): page (promise)
68 | // wikipediaTts.update(<db>, <page>, [value]): page (promise)
69 | // wikipediaTts.crawl(<db>, [options]): times crawled (promise)
70 | // wikipediaTts.upload(<db>, [options]): times uploaded (promise)
71 | // wikipediaTts(<output>, <page>, [options]): upload page to YouTube
72 | // -> (promise)
73 | 
74 | /* More options: @wikipedia-tts/youtube */
75 | // [options]: {
76 | //   db: $WIKIPEDIATTS_DB||'crawl.db',
77 | //   input: {
78 | //     text: null,
79 | //     image: null,
80 | //     tags: null,
81 | //     description: null
82 | //   }
83 | // }
84 | 
85 | 
86 | wikipediaTts(null, 'Ladakh');
87 | // "Ladakh" is uploaded to YouTube
88 | 
89 | 
90 | var db = await wikipediaTts.setup();
91 | // crawl list is created (crawl.db)
92 | 
93 | await wikipediaTts.add(db, 'Plant nutrition');
94 | // "Plant nutrition" is added to the crawl list
95 | 
96 | await wikipediaTts.update(db, 'Plant nutrition', {priority: 1});
97 | // "Plant nutrition" priority is set to 1
98 | // this means it will be crawled/uploaded first,
99 | // even if other pages have a higher number of references
100 | 
101 | await wikipediaTts.crawl(db);
102 | // links on the "Plant nutrition" page are crawled
103 | // this is because it has the highest priority and reference count
104 | 
105 | await wikipediaTts.crawl(db, {times: 10});
106 | // Crawling is done recursively, 10 times
107 | 
108 | await wikipediaTts.upload(db);
109 | // Highest-ranking page is crawled and uploaded to YouTube
110 | 
111 | await wikipediaTts.upload(db, {times: 10});
112 | // Uploading is done recursively, 10 times
113 | ```
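
Putting it together, a typical flow is: set up the crawl database once, seed it with a few pages, then alternate crawling and uploading. A minimal sketch using only the calls documented above (the page title and `times` values are illustrative):

```javascript
const wikipediaTts = require('wikipedia-tts');

async function main() {
  // Open (or create) the crawl database.
  var db = await wikipediaTts.setup('crawl.db');
  // Seed the crawl list, and bump one page to the front of the queue.
  await wikipediaTts.add(db, 'Plant nutrition');
  await wikipediaTts.update(db, 'Plant nutrition', {priority: 1});
  // Gather linked pages for a few rounds, then upload the best-ranked ones.
  await wikipediaTts.crawl(db, {times: 5});
  await wikipediaTts.upload(db, {times: 2});
}
main();
```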
114 | 
115 | 
116 | [![wikipedia-tts](https://i.imgur.com/Uu0KJ1U.jpg)](https://www.npmjs.com/package/wikipedia-tts)
117 | 
118 | [Upload Wikipedia TTS videos on YouTube]: https://www.youtube.com/results?search_query=wikipedia+audio+article
119 | 
120 | ![](https://ga-beacon.deno.dev/G-RC63DPBH3P:SH3Eq-NoQ9mwgYeHWxu7cw/github.com/nodef/wikipedia-tts)
121 | 
122 | [Node.js]: https://nodejs.org/en/download/
123 | [console]: https://en.wikipedia.org/wiki/Shell_(computing)#Text_(CLI)_shells
124 | [extra-googletts]: https://www.npmjs.com/package/extra-googletts
125 | [extra-youtubeuploader]: https://www.npmjs.com/package/extra-youtubeuploader
126 | 
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-merlot
--------------------------------------------------------------------------------
/description.txt:
--------------------------------------------------------------------------------
1 | This is an audio version of the Wikipedia Article:
2 | ${fullurl}
3 | 
4 | 
5 | ${toc}
6 | 
7 | 
8 | Listening is a more natural way of learning, when compared to reading. Written language began only around 3200 BC, but spoken language has existed for far longer.
9 | 
10 | Learning by listening is a great way to:
11 | - increase imagination and understanding
12 | - improve your listening skills
13 | - improve your own spoken accent
14 | - learn while on the move
15 | - reduce eye strain
16 | 
17 | Now learn the vast amount of general knowledge available on Wikipedia through audio (audio article). You could even learn subconsciously by playing the audio while you are sleeping! If you are planning to listen a lot, you could try using bone-conduction headphones or a standard speaker instead of earphones.
18 | 
19 | Listen on Google Assistant through Extra Audio:
20 | https://assistant.google.com/services/invoke/uid/0000001a130b3f91
21 | Other Wikipedia audio articles at:
22 | https://www.youtube.com/results?search_query=wikipedia+tts
23 | Upload your own Wikipedia articles through:
24 | https://github.com/nodef/wikipedia-tts
25 | Speaking Rate: ${audio.audioConfig.speakingRate}
26 | Voice name: ${audio.voice.name}
27 | 
28 | 
29 | "I cannot teach anybody anything, I can only make them think."
30 | - Socrates
31 | 
32 | 
33 | SUMMARY
34 | =======
35 | ${description}
36 | 
--------------------------------------------------------------------------------
/index.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | const youtubeuploader = require('extra-youtubeuploader');
3 | const stillvideo = require('extra-stillvideo');
4 | const googletts = require('extra-googletts');
5 | const wiki = require('wikijs').default;
6 | const download = require('download');
7 | const isVideo = require('is-video');
8 | const boolean = require('boolean');
9 | const sqlite = require('sqlite');
10 | const tempy = require('tempy');
11 | const _ = require('lodash');
12 | const cp = require('child_process');
13 | const https = require('https');
14 | const path = require('path');
15 | const fs = require('fs');
16 | 
17 | 
18 | // Global variables
19 | const E = process.env;
20 | const OPTIONS = {
21 |   log: boolean(E['WIKIPEDIATTS_LOG']||'0'),
22 |   db: E['WIKIPEDIATTS_DB']||'crawl.db',
23 |   times: parseInt(E['WIKIPEDIATTS_TIMES']||'1', 10),
24 |   video: {
25 |     fitX: parseInt(E['STILLVIDEO_FITX']||'1024', 10),
26 |     fitY: parseInt(E['STILLVIDEO_FITY']||'1024', 10)
27 |   },
28 |   youtube: {
29 |     descriptionpath: E['YOUTUBEUPLOADER_DESCRIPTIONPATH']||path.join(__dirname, 'description.txt'),
30 |     title: E['YOUTUBEUPLOADER_TITLE']||'${title} | Wikipedia audio article',
31 |     tags: E['YOUTUBEUPLOADER_TAGS']||'${tags},wikipedia audio article,learning by listening,increases imagination and understanding,improves your listening skills,improves your own spoken accent,learn while on the move,reduce eye strain,text to speech',
32 |     privacystatus: E['YOUTUBEUPLOADER_PRIVACYSTATUS']||'public',
33 |     embeddable: boolean(E['YOUTUBEUPLOADER_EMBEDDABLE']||'true'),
34 |     license: E['YOUTUBEUPLOADER_LICENSE']||'creativeCommon',
35 |     publicstatsviewable: boolean(E['YOUTUBEUPLOADER_PUBLICSTATSVIEWABLE']||'true'),
36 |     category: E['YOUTUBEUPLOADER_CATEGORY']||'27'
37 |   }
38 | };
39 | const VALUE = {
40 |   priority: parseInt(E['WIKIPEDIATTS_PRIORITY']||'0', 10),
41 |   references: parseInt(E['WIKIPEDIATTS_REFERENCES']||'0', 10),
42 |   status: parseInt(E['WIKIPEDIATTS_STATUS']||'0', 10)
43 | };
44 | const AUDIO_VOICENAME = [
45 |   'en-AU-Wavenet-A', 'en-AU-Wavenet-B', 'en-AU-Wavenet-C', 'en-AU-Wavenet-D',
46 |   'en-GB-Wavenet-A', 'en-GB-Wavenet-B', 'en-GB-Wavenet-C', 'en-GB-Wavenet-D',
47 |   'en-US-Wavenet-A', 'en-US-Wavenet-B', 'en-US-Wavenet-C', 'en-US-Wavenet-D', 'en-US-Wavenet-E', 'en-US-Wavenet-F'
48 | ];
49 | const CATEGORY_EXC = /wikipedia|webarchive|infocard|infobox|chembox|article|page|dmy|cs1|[^\w\s\(\)]/i;
50 | const PAGEIMAGES_URL = 'https://en.wikipedia.org/w/api.php?action=query&prop=pageimages&format=json&piprop=original&titles=';
51 | const BLANKIMAGE_URL = 'https://upload.wikimedia.org/wikipedia/commons/thumb/8/8b/Wikipedia-logo-blank.svg/1000px-Wikipedia-logo-blank.svg.png';
52 | const COMMANDS = new Set(['setup', 'get', 'add', 'remove', 'update', 'upload', 'crawl']);
53 | const IMGFORMAT = /\.(png|jpe?g)$/i;
54 | const FN_NOP = () => 0;
55 | 
56 | 
57 | // Get random item from array.
58 | function randomItem(arr) {
59 |   return arr[Math.floor(arr.length*Math.random())];
60 | };
61 | 
62 | // Write to file, return promise.
63 | function fsWriteFile(pth, dat, o) {
64 |   return new Promise((fres, frej) => fs.writeFile(pth, dat, o, (err) => {
65 |     return err? frej(err):fres(pth);
66 |   }));
67 | };
68 | 
69 | // Make HTTPS GET request.
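// (Resolves with the full response body on HTTP 200, and rejects on any
//  other status; res.resume() drains the response so the socket is freed.)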
70 | function httpsGet(opt) {
71 |   return new Promise((fres, frej) => https.get(opt, (res) => {
72 |     var err = null, cod = res.statusCode, dat = '';
73 |     if(cod!==200) err = new Error(`HTTPS GET failed (${cod}).\n${opt}`);
74 |     if(err) { res.resume(); return frej(err); }
75 |     res.setEncoding('utf8');
76 |     res.on('data', (cnk) => dat+=cnk);
77 |     res.on('end', () => fres(dat));
78 |   }).on('error', frej));
79 | };
80 | 
81 | // Download file to temp.
82 | async function downloadTemp(url) {
83 |   var ext = path.extname(url);
84 |   var pth = tempy.file({extension: ext.substring(1)});
85 |   await download(url, path.dirname(pth), {filename: path.basename(pth)});
86 |   return pth;
87 | };
88 | 
89 | // Get page image from wikipedia pageimages API response.
90 | function wikiPageImage(res) {
91 |   var pages = res.query.pages;
92 |   if(!pages) return null;
93 |   var page = pages[Object.keys(pages)[0]];
94 |   if(!page.original) return null;
95 |   return page.original.source;
96 | };
97 | 
98 | // Get image for page.
99 | async function pageImage(pag) {
100 |   var wurl = PAGEIMAGES_URL+encodeURIComponent(pag.raw.title);
101 |   var img = wikiPageImage(JSON.parse(await httpsGet(wurl)));
102 |   if(IMGFORMAT.test(img)) return img;
103 |   var img = await pag.mainImage();
104 |   if(IMGFORMAT.test(img)) return img;
105 |   var imgs = await pag.images();
106 |   for(var i of imgs||[])
107 |     if(IMGFORMAT.test(i)) return i;
108 |   return img||BLANKIMAGE_URL;
109 | };
110 | 
111 | // Get thumb image for page.
112 | async function pageThumbImage(pag, o) {
113 |   var url = await pageImage(pag);
114 |   if(!url.endsWith('.svg')) return url;
115 |   var fx = _.get(o||{}, 'video.fitX', OPTIONS.video.fitX);
116 |   url = url.replace(/\/wikipedia\/(.*?)\/(thumb\/)?/, '/wikipedia/$1/thumb/');
117 |   return url+`/${fx}px-`+path.basename(url)+'.jpg';
118 | };
119 | 
120 | // Get categories for page.
121 | async function pageCategories(pag) {
122 |   var cats = await pag.categories(), z = [];
123 |   for(var cat of cats) {
124 |     var c = cat.replace('Category:', '');
125 |     if(!CATEGORY_EXC.test(c)) z.push(c);
126 |   }
127 |   return z;
128 | };
129 | 
130 | // Get table of contents for page.
131 | async function pageToc(pag) {
132 |   var reTopic = /\<span class="tocnumber"\>(.*?)\<\/span\>\W*?\<span class="toctext"\>(.*?)\<\/span\>/ig;
133 |   var top = null, tops = [], html = await pag.html();
134 |   while((top=reTopic.exec(html))!=null)
135 |     tops.push(top[1]+' '+top[2]);
136 |   return tops;
137 | };
138 | 
139 | // Get content for page.
140 | async function pageContent(pag) {
141 |   var txt = await pag.content();
142 |   return txt.replace(/\W*==\W*references\W*==[\s\S]*/i, '');
143 | };
144 | 
145 | // Get forward links for page.
146 | async function pageLinks(pag) {
147 |   var z = await pag.links();
148 |   return z;
149 | };
150 | 
151 | // Get page table of contents and audio.
152 | async function pageTocAudio(out, pag, txt, o) {
153 |   var tops = await pageToc(pag), toc = '';
154 |   var tt = await googletts(out, txt, o);
155 |   for(var i=0, I=tt.length; i<I; i++)
156 |     if(i>0) toc += tt[i].time+' '+(tops[i-1]||tt[i].title)+'\n';
157 |   return toc;
158 | };
159 | 
160 | // Run sql statement with map and join.
161 | function sqlRunMapJoin(db, pre, dat, map, sep) {
162 |   for(var i=0, I=dat.length, z=[]; i<I; i++)
163 |     z.push(map(dat[i]));
164 |   return db.run(pre+z.join(sep), ...dat);
165 | };
198 |   var audf = mod>=1? tempy.file({extension: 'mp3'}):out;
199 |   var vidf = mod>=2? tempy.file({extension: 'mp4'}):out;
200 |   var capf = mod>=2? tempy.file({extension: 'txt'}):null;
201 |   var metf = mod>=2? tempy.file({extension: 'json'}):null;
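  // (The mod>=N guards below run progressively more of the pipeline:
  //  audio is always synthesized; mod>=1 also renders a still-image video;
  //  mod>=2 also writes captions and metadata, and uploads to YouTube.
  //  Intermediates go to temp files; the final output goes to out.)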
202 |   var oaud = audioRandom(Object.assign({log: l}, o.audio));
203 |   if(mod>=0) var toc = await pageTocAudio(audf, p, txt, oaud);
204 |   if(mod>=1) await stillvideo(vidf, audf, imgf, Object.assign({log: l}, o.video));
205 |   if(mod>=2) await fsWriteFile(capf, txt);
206 |   var val = {title: nam, fullurl, description, tags, toc, audio: oaud};
207 |   if(mod>=2) await fsWriteFile(metf, JSON.stringify(val));
208 |   if(mod>=2) await youtubeuploader(Object.assign({log: l, video: vidf, caption: capf, meta: metf}, o.youtube));
209 |   if(imgf!==img) fs.unlink(imgf, FN_NOP);
210 |   if(mod>=1) fs.unlink(audf, FN_NOP);
211 |   if(mod>=2) fs.unlink(vidf, FN_NOP);
212 |   if(mod>=2) fs.unlink(capf, FN_NOP);
213 |   return p;
214 | };
215 | 
216 | // Get a page for crawl.
217 | async function getCrawl(db, o) {
218 |   var o = o||{};
219 |   if(o.log) console.log('-getCrawl:');
220 |   var whr = '"status" = 0', ord = '"priority" DESC, "references" DESC';
221 |   var row = await db.get(`SELECT * FROM "pages" WHERE ${whr} ORDER BY ${ord} LIMIT 1`);
222 |   if(o.log) console.log(' .row', row);
223 |   return row;
224 | };
225 | 
226 | // Get a page for upload.
227 | async function getUpload(db, o) {
228 |   var o = o||{};
229 |   if(o.log) console.log('-getUpload:');
230 |   var whr = '"status" = 0 OR "status" = 1', ord = '"priority" DESC, "references" DESC';
231 |   var row = await db.get(`SELECT * FROM "pages" WHERE ${whr} ORDER BY ${ord} LIMIT 1`);
232 |   if(o.log) console.log(' .row', row);
233 |   return row;
234 | };
235 | 
236 | // Upload page, if unique.
237 | async function uploadUnique(nam, o) {
238 |   var o = o||{};
239 |   if(o.log) console.log('-uploadUnique:', nam);
240 |   var qry = OPTIONS.youtube.title.replace(/\$\{title\}/g, nam);
241 |   var ids = await youtubeuploader.lines({title: qry});
242 |   if(ids.length) {
243 |     if(o.log) console.log(' .already exists:', ids);
244 |     return 2;
245 |   }
246 |   try { await wikipediaTts(null, nam, o); }
247 |   catch(e) {
248 |     console.error(e);
249 |     return e.message==='No article found'? -2:-4;
250 |   }
251 |   return 4;
252 | };
253 | 
254 | // Crawl one page.
255 | async function crawlOne(db, nam, o) {
256 |   var o = o||{};
257 |   if(o.log) console.log('-crawlOne:', nam);
258 |   var p = await wiki().page(nam);
259 |   var lnks = p? await pageLinks(p):[];
260 |   if(o.log) console.log(' .links:', lnks.length);
261 |   await sqlRunMapJoin(db, 'INSERT OR IGNORE INTO "pages" VALUES ', lnks, () => '(?, 0, 0, 0)', ', ');
262 |   await sqlRunMapJoin(db, 'UPDATE "pages" SET "references" = "references" + 1 WHERE ', lnks, () => '"title" = ?', ' OR ');
263 |   return p;
264 | };
265 | 
266 | // Setup crawl list.
267 | async function setup(pth, o) {
268 |   var o = _.merge({}, OPTIONS, o), pth = pth||o.db;
269 |   var db = await sqlite.open(pth);
270 |   if(o.log) console.log('-setup:', pth);
271 |   var col = '"title" TEXT PRIMARY KEY, "priority" INTEGER, "references" INTEGER, "status" INTEGER';
272 |   await db.exec(`CREATE TABLE IF NOT EXISTS "pages" (${col})`);
273 |   return db;
274 | };
275 | 
276 | // Get a page from crawl list.
277 | async function get(db, nam, o) {
278 |   var o = _.merge({}, OPTIONS, o), db = db||o.db;
279 |   db = typeof db==='string'? await setup(db, o):db;
280 |   var nam = nam||o.input||(await getUpload(db, o)).title;
281 |   if(o.log) console.log('-get:', nam);
282 |   var row = await db.get('SELECT * FROM "pages" WHERE "title" = ? LIMIT 1', nam);
283 |   if(o.log) console.log(' .row:', row);
284 |   return row;
285 | };
286 | 
287 | // Add a page to crawl list.
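// (Rows are (title, priority, references, status); INSERT OR IGNORE makes
//  re-adding an existing page a no-op, so its counts are not reset.)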
288 | async function add(db, nam, o) {
289 |   var o = _.merge({}, OPTIONS, VALUE, o), db = db||o.db;
290 |   db = typeof db==='string'? await setup(db, o):db;
291 |   var nam = nam||o.input, v = _.pick(o, ['priority', 'references', 'status']);
292 |   if(o.log) console.log('-add:', nam, v);
293 |   await db.run('INSERT OR IGNORE INTO "pages" VALUES (?, ?, ?, ?)', nam, v.priority, v.references, v.status);
294 |   return nam;
295 | };
296 | 
297 | // Remove a page from crawl list.
298 | async function remove(db, nam, o) {
299 |   var o = _.merge({}, OPTIONS, o), db = db||o.db;
300 |   db = typeof db==='string'? await setup(db, o):db;
301 |   var nam = nam||o.input||(await getUpload(db, o)).title;
302 |   if(o.log) console.log('-remove:', nam);
303 |   await db.run('DELETE FROM "pages" WHERE "title" = ?', nam);
304 |   return nam;
305 | };
306 | 
307 | // Update a page in crawl list.
308 | async function update(db, nam, o) {
309 |   var o = _.merge({}, OPTIONS, o), db = db||o.db;
310 |   db = typeof db==='string'? await setup(db, o):db;
311 |   var nam = nam||o.input||(await getUpload(db, o)).title;
312 |   var v = _.pick(o, ['priority', 'references', 'status']);
313 |   if(o.log) console.log('-update:', nam, v);
314 |   var val = {$title: nam};
315 |   for(var k in v) val['$'+k] = v[k];
316 |   var set = Object.keys(v).map(col => `"${col}" = $${col}`).join(', ');
317 |   await db.run(`UPDATE "pages" SET ${set} WHERE "title" = $title`, val);
318 |   return nam;
319 | };
320 | 
321 | // Upload a page.
322 | async function upload(db, o) {
323 |   var o = _.merge({}, OPTIONS, o), db = db||o.db;
324 |   db = typeof db==='string'? await setup(db, o):db;
325 |   if(o.log) console.log('-upload:', _.pick(o, ['times']));
326 |   for(var i=0, I=o.times||1; i