├── .github
│   └── FUNDING.yml
├── LICENSE
├── README.md
└── lib
    └── resty
        ├── moongoo.lua
        └── moongoo
            ├── auth
            │   ├── cr.lua
            │   └── scram.lua
            ├── collection.lua
            ├── connection.lua
            ├── cursor.lua
            ├── database.lua
            ├── gridfs.lua
            ├── gridfs
            │   └── file.lua
            └── utils.lua

/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | patreon: sarcasticat
2 | custom: https://paypal.me/sarcasticat
3 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
2 | Version 2, December 2004
3 | 
4 | Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
5 | 
6 | Everyone is permitted to copy and distribute verbatim or modified
7 | copies of this license document, and changing it is allowed as long
8 | as the name is changed.
9 | 
10 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
11 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
12 | 
13 | 0. You just DO WHAT THE FUCK YOU WANT TO.
14 | 
15 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## This project is on hiatus due to lack of free time (unless someone wants to sponsor its development)
2 | 
3 | # LUA-RESTY-MOONGOO
4 | 
5 | Adding some lua moondust to the mongo goo
6 | 
7 | ## What is it?
8 | 
9 | A Lua MongoDB driver, highly inspired by perl-mango.
10 | Also, möngö is the Mongolian currency, and mungu is god in Swahili.
11 | 
12 | ## Table of Contents
13 | 
14 | * [Requirements](#requirements)
15 | * [Installation](#installation)
16 | * [Usage](#usage)
17 |     * [Synopsis](#synopsis)
18 | * [Moongoo methods](#moongoo-methods)
19 |     * [new](#new)
20 |     * [close](#close)
21 |     * [db](#db)
22 | * [Database methods](#database-methods)
23 |     * [collection](#collection)
24 |     * [gridfs](#gridfs)
25 |     * [cmd](#cmd)
26 | * [Collection methods](#collection-methods)
27 |     * [create](#create)
28 |     * [drop](#drop)
29 |     * [rename](#rename)
30 |     * [options](#options)
31 |     * [full_name](#full_name)
32 |     * [stats](#stats)
33 |     * [index_information](#index_information)
34 |     * [ensure_index](#ensure_index)
35 |     * [drop_index](#drop_index)
36 |     * [find](#find)
37 |     * [find_one](#find_one)
38 |     * [find_and_modify](#find_and_modify)
39 |     * [insert](#insert)
40 |     * [update](#update)
41 |     * [remove](#remove)
42 |     * [save](#save)
43 |     * [map_reduce](#map_reduce)
44 |     * [aggregate](#aggregate)
45 | * [Cursor methods](#cursor-methods)
46 |     * [clone](#clone)
47 |     * [tailable](#tailable)
48 |     * [await](#await)
49 |     * [comment](#comment)
50 |     * [hint](#hint)
51 |     * [max_scan](#max_scan)
52 |     * [max_time_ms](#max_time_ms)
53 |     * [read_preference](#read_preference)
54 |     * [snapshot](#snapshot)
55 |     * [sort](#sort)
56 |     * [skip](#skip)
57 |     * [limit](#limit)
58 |     * [next](#next)
59 |     * [rewind](#rewind)
60 |     * [all](#all)
61 |     * [count](#count)
62 |     * [explain](#explain)
63 |     * [distinct](#distinct)
64 | * [GridFS methods](#gridfs-methods)
65 |     * [list](#list)
66 |     * [remove](#remove-1)
67 |     * [find_version](#find_version)
68 |     * [open](#open)
69 |     * [create](#create-1)
70 | * [GridFS file methods](#gridfs-file-methods)
71 |     * [content_type](#content_type)
72 |     * [filename](#filename)
73 |     * [md5](#md5)
74 |     * [metadata](#metadata)
75 |     * [date](#date)
76 |     * [length](#length)
77 |     * [chunk_size](#chunk_size)
78 |     * [seek](#seek)
79 |     * [tell](#tell)
80 |     * [read](#read)
81 |     * [slurp](#slurp)
82 |     * [write](#write)
83 |     * [close](#close-1)
84 | * [Authors](#authors)
85 | * [Sponsors](#sponsors)
86 | * [Copyright and License](#copyright-and-license)
87 | 
88 | ## Requirements
89 | 
90 | * LuaJIT or Lua with [BitOp](http://bitop.luajit.org/)
91 | * [lua-cbson](https://github.com/isage/lua-cbson)
92 | * lua-posix
93 | 
94 | To use outside of OpenResty you'll also need:
95 | * LuaSocket
96 | * LuaCrypto
97 | 
98 | ## Usage
99 | 
100 | ### Synopsis
101 | 
102 | ```lua
103 | local moongoo = require("resty.moongoo")
104 | local cbson = require("cbson")
105 | 
106 | local mg, err = moongoo.new("mongodb://user:password@hostname/?w=2")
107 | if not mg then
108 |   error(err)
109 | end
110 | 
111 | local col = mg:db("test"):collection("test")
112 | 
113 | -- Insert document
114 | local ids, err = col:insert({ foo = "bar"})
115 | 
116 | -- Find document
117 | local doc, err = col:find_one({ foo = "bar"})
118 | print(doc.foo)
119 | 
120 | -- Update document
121 | local doc, err = col:update({ foo = "bar"}, { baz = "yada"})
122 | 
123 | -- Remove document
124 | local status, err = col:remove({ baz = "yada"})
125 | 
126 | -- Close connection or put it into the OpenResty connection pool
127 | 
128 | mg:close()
129 | 
130 | ```
131 | 
132 | #### NOTE
133 | You **should** use cbson datatypes for anything other than strings, floats and bools.
134 | All Lua numbers are stored as floats.
135 | Empty arrays are treated and stored as empty documents (you can use cbson.array() to forcibly store an empty array in mongo).
136 | nil Lua values are ignored and not stored, due to the nature of Lua; use cbson.null() instead.
137 | nil values from mongo are decoded as cbson.null (use `somevar == cbson.null()` for comparison).
138 | 
139 | 
140 | ### Moongoo methods
141 | #### new
142 | `mgobj, error = moongoo.new(connection_string)`
143 | Creates a new Moongoo instance.
144 | Refer to the [Connection String URI Format](https://docs.mongodb.org/manual/reference/connection-string/).
145 | Currently supported options are:
146 | * w - default is 1
147 | * wtimeoutMS - default is 1000
148 | * journal - default is false
149 | * authMechanism - default depends on the mongodb version
150 | * socketTimeoutMS - default is nil. It controls **both** the connect and the read/write timeout.
151 | It's set to nil by default so you can control connect and read/write separately
152 | with the OpenResty lua_socket_connect_timeout, lua_socket_send_timeout and lua_socket_read_timeout directives.
153 | * ssl - default is false.
154 | Use ssl for the connection. Currently there's no support for CA or client certificates.
155 | 
156 | Moongoo tries really hard to be smart: it opens the connection only when needed,
157 | searches for the master node in a replica set, and picks an auth mechanism appropriate to the MongoDB version.
158 | The downside is that you currently can't issue queries to slave nodes.
159 | 
160 | #### close
161 | `mgobj:close()`
162 | Closes the mongodb connection (LuaSocket) or puts it into the connection pool (OpenResty).
163 | Issuing new read/write commands afterwards will reopen the connection.
164 | 
165 | #### db
166 | `dbobj = mgobj:db(name)`
167 | Selects the database to use.
168 | 
169 | [Back to TOC](#table-of-contents)
170 | 
171 | 
172 | ### Database methods
173 | #### collection
174 | `colobj = dbobj:collection(name)`
175 | Returns a new collection object to use.
176 | 
177 | #### gridfs
178 | `gridfsobj = dbobj:gridfs(prefix)`
179 | Returns a new gridfs object to use.
180 | The default prefix is 'fs'.
181 | 
182 | #### cmd
183 | `result, error = dbobj:cmd(command, params)`
184 | Runs a database command.
185 | command is either a string with the command name, or a table { command = value }.
186 | Params are the command parameters.
187 | For example, given the [distinct](https://docs.mongodb.org/manual/reference/command/distinct/) mongodb command:
188 | ```lua
189 | local result, error = dbobj:cmd( { distinct = "some.collection" }, { key = "somekey" } )
190 | ```
191 | 
192 | [Back to TOC](#table-of-contents)
193 | 
194 | 
195 | ### Collection methods
196 | #### create
197 | `new_colobj, error = colobj:create(name)`
198 | Creates a new collection and returns a new collection object for it.
199 | 
200 | #### drop
201 | `result, error = colobj:drop()`
202 | Drops the collection.
203 | 
204 | #### rename
205 | `new_colobj, error = colobj:rename(newname, drop)`
206 | Renames the collection, optionally dropping the target collection if it exists.
207 | 
208 | #### options
209 | `result, error = colobj:options()`
210 | Returns the collection options.
211 | 
212 | #### full_name
213 | `result = colobj:full_name()`
214 | Returns the full collection namespace (e.g. database.collection).
215 | 
216 | #### stats
217 | `result, error = colobj:stats()`
218 | Returns collection statistics.
219 | 
220 | #### index_information
221 | `result, error = colobj:index_information()`
222 | Returns info about the current indexes.
223 | 
224 | #### ensure_index
225 | `result, error = colobj:ensure_index(indexes)`
226 | Creates new indexes.
227 | indexes **should** be an array, even if it has only one value.
228 | Refer to [mongo docs](https://docs.mongodb.org/manual/reference/command/createIndexes/) for the index format.
229 | Note that in Moongoo index names are optional; Moongoo will generate them for you based on the keys.
230 | 
231 | #### drop_index
232 | `result, error = colobj:drop_index(index)`
233 | Drops the named index from the collection.
234 | Refer to [mongo docs](https://docs.mongodb.org/manual/reference/command/dropIndexes/) for the index format.
235 | 
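For example, creating and later dropping an index might look like this (a sketch: the field `foo`, the explicit index name `foo_idx`, and `col` — a collection object obtained as in the Synopsis — are placeholders):

```lua
-- create a unique single-field index with an explicit name
local ok, err = col:ensure_index({ { key = { foo = 1 }, name = "foo_idx", unique = true } })

-- later, drop it again by name
local ok, err = col:drop_index("foo_idx")
```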
236 | #### find
237 | `cursorobj = colobj:find(query, fields)`
238 | Returns a new cursor object for the query.
239 | 
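A small sketch of iterating the cursor returned by find (the query and projection fields are illustrative; `col` is a collection object as in the Synopsis):

```lua
-- select only the "foo" field of matching documents
local cursor = col:find({ foo = "bar" }, { _id = false, foo = true })

local doc = cursor:next()
while doc do
  print(doc.foo)
  doc = cursor:next()
end
```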
240 | #### find_one
241 | `doc, error = colobj:find_one(query, fields)`
242 | Returns the first document matching the query.
243 | 
244 | #### find_and_modify
245 | `doc, error = colobj:find_and_modify(query, opts)`
246 | Modifies a document according to [opts](https://docs.mongodb.org/manual/reference/command/findAndModify/)
247 | and returns (by default) the old document.
248 | 
249 | #### insert
250 | `ids, error_or_number = colobj:insert(docs)`
251 | Inserts new document(s) and returns their ids and the number of inserted documents.
252 | 
253 | #### update
254 | `number, error = colobj:update(query, update, flags)`
255 | Updates document(s) matching the query, according to the flags.
256 | Supported flags are:
257 | * multi - update multiple documents (default: false)
258 | * upsert - insert the document if it does not exist (default: false)
259 | 
260 | Returns the number of updated documents.
261 | 
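For example, an upsert that touches every matching document might look like this (a sketch; `col` is a collection object as in the Synopsis):

```lua
-- set a field on all matching documents, inserting one if nothing matches
local n, err = col:update(
  { foo = "bar" },
  { ["$set"] = { baz = "yada" } },
  { multi = true, upsert = true }
)
```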
262 | #### remove
263 | `number, error = colobj:remove(query, single)`
264 | Removes document(s) from the database.
265 | Returns the number of removed documents.
266 | 
267 | #### save
268 | `id = colobj:save(document)`
269 | Saves the document to the collection.
270 | Basically, this performs an update with upsert = true, generating an id if it does not exist.
271 | See [here](https://docs.mongodb.org/v3.0/reference/method/db.collection.save/) for an explanation.
272 | 
273 | #### map_reduce
274 | `doc, error = colobj:map_reduce(map, reduce,
flags)` 275 | `new_colobj, error = colobj:map_reduce(map, reduce,
flags)`
276 | Performs a map-reduce operation and returns either a document with the results,
277 | or a new collection object (if the map-reduce `out` flag is set to a collection name).
278 | 
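A sketch of both variants (the JavaScript snippets and the output collection name `mr_results` are illustrative; `col` is a collection object as in the Synopsis):

```lua
local map    = "function() { emit(this.foo, 1) }"
local reduce = "function(key, values) { return Array.sum(values) }"

-- inline results (the default)
local doc, err = col:map_reduce(map, reduce)

-- write results to a collection and get a collection object back
local new_col, err = col:map_reduce(map, reduce, { out = "mr_results" })
```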
279 | #### aggregate
280 | `explain, error = colobj:aggregate(pipeline, opts)`
281 | `new_colobj, error = colobj:aggregate(pipeline, opts)`
282 | `cursor, error = colobj:aggregate(pipeline, opts)`
283 | Performs an aggregation operation according to the pipeline stages.
284 | Returns a document if opts.explain is set to true.
285 | Returns a collection object if the pipeline has an `$out` command as its last stage.
286 | Returns a new cursor object otherwise.
287 | 
288 | [Back to TOC](#table-of-contents)
289 | 
290 | 
291 | ### Cursor methods
292 | 
293 | Note: you can chain-call the property/option functions.
294 | 
295 | #### clone
296 | `new_cursorobj = cursorobj:clone(explain)`
297 | Clones the cursor query, optionally setting the `explain` flag.
298 | 
299 | #### tailable
300 | `cursorobj = cursorobj:tailable(tailable)`
301 | Sets relevant query options.
302 | 
303 | #### await
304 | `cursorobj = cursorobj:await(wait)`
305 | Sets relevant query options.
306 | 
307 | #### comment
308 | `cursorobj = cursorobj:comment(comment)`
309 | Sets relevant query options.
310 | 
311 | #### hint
312 | `cursorobj = cursorobj:hint(hint)`
313 | Sets relevant query options.
314 | 
315 | #### max_scan
316 | `cursorobj = cursorobj:max_scan(max_scan)`
317 | Sets relevant query options.
318 | 
319 | #### max_time_ms
320 | `cursorobj = cursorobj:max_time_ms(max_time_ms)`
321 | Sets relevant query options.
322 | 
323 | #### read_preference
324 | `cursorobj = cursorobj:read_preference(read_preference)`
325 | Sets relevant query options.
326 | 
327 | #### snapshot
328 | `cursorobj = cursorobj:snapshot(snapshot)`
329 | Sets relevant query options.
330 | 
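Because every option setter returns the cursor itself, the setters can be chained. A typical combination of sort, skip and limit (described next) looks roughly like this (`col` and `cbson` are as in the Synopsis):

```lua
-- the last 10 matching documents by _id, skipping the first 20
local docs, err = col:find({ foo = "bar" })
  :sort({ _id = cbson.int(-1) })
  :skip(20)
  :limit(10)
  :all()
```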
331 | #### sort
332 | `cursorobj = cursorobj:sort(sort)`
333 | Sets query sorting.
334 | 
335 | #### skip
336 | `cursorobj = cursorobj:skip(skip)`
337 | Sets the cursor skip option.
338 | 
339 | #### limit
340 | `cursorobj = cursorobj:limit(limit)`
341 | Sets the cursor limit.
342 | 
343 | #### next
344 | `doc, error = cursorobj:next()`
345 | Returns the next document found by the query.
346 | 
347 | #### rewind
348 | `cursorobj = cursorobj:rewind()`
349 | Resets the cursor position.
350 | 
351 | #### all
352 | `documents, error = cursorobj:all()`
353 | Returns an array containing all documents found by the query.
354 | 
355 | #### count
356 | `number = cursorobj:count()`
357 | Returns the number of documents conforming to the query.
358 | 
359 | #### explain
360 | `doc, error = cursorobj:explain()`
361 | Returns a document with the query plan explanation.
362 | 
363 | #### distinct
364 | `doc, error = cursorobj:distinct(key)`
365 | Finds the distinct values for a specified field, according to the query.
366 | 
367 | [Back to TOC](#table-of-contents)
368 | 
369 | 
370 | 
371 | ### GridFS methods
372 | #### list
373 | `doc = gridfsobj:list()`
374 | Returns an array containing the unique filenames.
375 | 
376 | #### remove
377 | `num, err = gridfsobj:remove(id)`
378 | Removes a file from GridFS.
379 | Returns the number of chunks removed.
380 | 
381 | #### find_version
382 | `id, err = gridfsobj:find_version(name, version)`
383 | Finds a specific version of a file: versions 0 and upwards address versions starting from the oldest,
384 | while versions -1 and downwards address them starting from the most recently added one.
385 | E.g. for a file with 3 versions the indexes 0, 1, 2 are equivalent to -3, -2, -1:
386 | 0 or -3 is the oldest version, while 2 or -1 is the newest.
387 | 
388 | #### open
389 | `gridfsfile, err = gridfsobj:open(id)`
390 | Opens a GridFS file for reading.
391 | 
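A short sketch of a typical round trip using create (described next), find_version and open (`mg` is the Moongoo object from the Synopsis; the filename and contents are placeholders):

```lua
local gridfs = mg:db("test"):gridfs()

-- write a new file (safe mode buffers chunks until close)
local file = gridfs:create("hello.txt")
file:write("Hello, GridFS!")
local id, err = file:close()

-- fetch the most recent version of the file and read it back
local fid = gridfs:find_version("hello.txt", -1)
local f, err = gridfs:open(fid)
print(f:slurp())
```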
392 | #### create
393 | `gridfsfile = gridfsobj:create(filename, opts, safe)`
394 | Creates a new GridFS file for writing.
395 | If `safe` is true (the default), all chunks will be inserted only when you call gridfsfile:close().
396 | If `safe` is false, chunks will be inserted into the db with every gridfsfile:write(...),
397 | and the last chunk will be inserted on close.
398 | You **must** call gridfsfile:close(), or you'll end up with orphaned chunks.
399 | Safe mode is good for small files; however, as it stores the entire file in memory, it's bad for big files.
400 | Non-safe mode uses a maximum of (chunkSize*2-1) bytes for any file.
401 | As a side effect, in non-safe mode you can :read() or :slurp() the file while writing it (except for the last chunk).
402 | 
403 | [Back to TOC](#table-of-contents)
404 | 
405 | 
406 | 
407 | ### GridFS file methods
408 | #### content_type
409 | `val = gridfsfile:content_type()`
410 | Returns the file content-type.
411 | 
412 | #### filename
413 | `val = gridfsfile:filename()`
414 | Returns the file name.
415 | 
416 | #### md5
417 | `val = gridfsfile:md5()`
418 | Returns the file checksum.
419 | 
420 | #### metadata
421 | `val = gridfsfile:metadata()`
422 | Returns the file metadata document.
423 | 
424 | #### date
425 | `val = gridfsfile:date()`
426 | Returns the file upload date.
427 | 
428 | #### length
429 | `val = gridfsfile:length()`
430 | Returns the file size.
431 | 
432 | #### chunk_size
433 | `val = gridfsfile:chunk_size()`
434 | Returns the file chunk size.
435 | 
436 | #### seek
437 | `gridfsfile = gridfsfile:seek(pos)`
438 | Sets the offset for reading.
439 | 
440 | #### tell
441 | `pos = gridfsfile:tell()`
442 | Returns the current reading position.
443 | 
444 | #### read
445 | `val, error = gridfsfile:read()`
446 | Returns file data from GridFS, starting from the current position and running to the end of the matching chunk.
447 | 
448 | #### slurp
449 | `val, error = gridfsfile:slurp()`
450 | Returns the full file contents.
451 | 
452 | #### write
453 | `result, error = gridfsfile:write(data)`
454 | Writes data to the GridFS file.
455 | 
456 | #### close
457 | `id, error = gridfsfile:close()`
458 | Finalizes the file by writing the queued chunks and metadata.
459 | 
460 | [Back to TOC](#table-of-contents)
461 | 
462 | 
463 | 
464 | ## Authors
465 | 
466 | Epifanov Ivan
467 | 
468 | [Back to TOC](#table-of-contents)
469 | 
470 | 
471 | ## Sponsors
472 | 
473 | [@dev0pz](https://github.com/dev0pz)
474 | 
475 | [Back to TOC](#table-of-contents)
476 | 
477 | 
478 | ## Copyright and License
479 | 
480 | This module is licensed under the WTFPL license.
481 | (See LICENSE) 482 | 483 | [Back to TOC](#table-of-contents) 484 | -------------------------------------------------------------------------------- /lib/resty/moongoo.lua: -------------------------------------------------------------------------------- 1 | local cbson = require("cbson") 2 | local connection = require("resty.moongoo.connection") 3 | local database = require("resty.moongoo.database") 4 | local parse_uri = require("resty.moongoo.utils").parse_uri 5 | local auth_scram = require("resty.moongoo.auth.scram") 6 | local auth_cr = require("resty.moongoo.auth.cr") 7 | 8 | 9 | local _M = {} 10 | 11 | _M._VERSION = '0.1' 12 | _M.NAME = 'Moongoo' 13 | 14 | 15 | local mt = { __index = _M } 16 | 17 | function _M.new(uri) 18 | local conninfo = parse_uri(uri) 19 | 20 | if not conninfo.scheme or conninfo.scheme ~= "mongodb" then 21 | return nil, "Wrong scheme in connection uri" 22 | end 23 | 24 | local auth_algo = conninfo.query and conninfo.query.authMechanism or "SCRAM-SHA-1" 25 | local w = conninfo.query and conninfo.query.w or 1 26 | local wtimeout = conninfo.query and conninfo.query.wtimeoutMS or 1000 27 | local journal = conninfo.query and conninfo.query.journal or false 28 | local ssl = conninfo.query and conninfo.query.ssl or false 29 | 30 | local stimeout = conninfo.query.socketTimeoutMS and conninfo.query.socketTimeoutMS or nil 31 | 32 | return setmetatable({ 33 | connection = nil; 34 | w = w; 35 | wtimeout = wtimeout; 36 | journal = journal; 37 | stimeout = stimeout; 38 | hosts = conninfo.hosts; 39 | default_db = conninfo.database; 40 | user = conninfo.user or nil; 41 | password = conninfo.password or ""; 42 | auth_algo = auth_algo, 43 | ssl = ssl, 44 | version = nil 45 | }, mt) 46 | end 47 | 48 | function _M._auth(self, protocol) 49 | if not self.user then return 1 end 50 | 51 | if not protocol or protocol < cbson.int(3) or self.auth_algo == "MONGODB-CR" then 52 | return auth_cr(self:db(self.default_db), self.user, self.password) 53 | else 54 | return auth_scram(self:db(self.default_db), self.user, self.password) 55 | end 56 | 57 | end 58 | 59 | function _M.connect(self) 60 | if self.connection then return self end 61 | local query, err 62 | -- foreach host 63 | for k, v in ipairs(self.hosts) do 64 | -- connect 65 | self.connection, err = connection.new(v.host, v.port, self.stimeout) 66 | if not self.connection then 67 | return nil, err 68 | end 69 | local status, err = self.connection:connect() 70 | if status then 71 | if self.ssl then 72 | self.connection:handshake() 73 | end 74 | if not self.version then 75 | query = self:db(self.default_db):_cmd({ buildInfo = 1 }) 76 | if query then 77 | self.version = query.version 78 | end 79 | end 80 | 81 | local ismaster = self:db("admin"):_cmd("ismaster") 82 | if ismaster and ismaster.ismaster then 83 | -- auth 84 | local r, err = self:_auth(ismaster.maxWireVersion) 85 | if not r then 86 | return nil, err 87 | end 88 | return self 89 | else 90 | -- try to connect to master 91 | if ismaster.primary then 92 | local mhost, mport 93 | string.gsub(ismaster.primary, "([^:]+):([^:]+)", function(host,port) mhost=host; mport=port end) 94 | self.connection:close() 95 | self.connection = nil 96 | self.connection, err = connection.new(mhost, mport, self.stimeout) 97 | if not self.connection then 98 | return nil, err 99 | end 100 | local status, err = self.connection:connect() 101 | if not status then 102 | return nil, err 103 | end 104 | if self.ssl then 105 | self.connection:handshake() 106 | end 107 | if not self.version then 108 | query = 
self:db(self.default_db):_cmd({ buildInfo = 1 }) 109 | if query then 110 | self.version = query.version 111 | end 112 | end 113 | local ismaster = self:db("admin"):_cmd("ismaster") 114 | if ismaster and ismaster.ismaster then 115 | -- auth 116 | local r, err = self:_auth(ismaster.maxWireVersion) 117 | if not r then 118 | return nil, err 119 | end 120 | return self 121 | else 122 | return nil, "Can't connect to master server" 123 | end 124 | end 125 | end 126 | end 127 | end 128 | return nil, "Can't connect to any of servers" 129 | end 130 | 131 | function _M.close(self) 132 | if self.connection then 133 | self.connection:close() 134 | self.connection = nil 135 | end 136 | end 137 | 138 | function _M.get_reused_times(self) 139 | return self.connection:get_reused_times() 140 | end 141 | 142 | function _M.db(self, dbname) 143 | return database.new(dbname, self) 144 | end 145 | 146 | return _M 147 | -------------------------------------------------------------------------------- /lib/resty/moongoo/auth/cr.lua: -------------------------------------------------------------------------------- 1 | local pass_digest = require("resty.moongoo.utils").pass_digest 2 | 3 | local b64 = ngx and ngx.encode_base64 or require("mime").b64 4 | local unb64 = ngx and ngx.decode_base64 or require("mime").unb64 5 | 6 | local md5 = ngx and ngx.md5 or function(str) return require("crypto").digest("md5", str) end 7 | 8 | local cbson = require("cbson") 9 | 10 | 11 | local function auth(db, username, password) 12 | local r, err = db:_cmd("getnonce", {}) 13 | if not r then 14 | return nil, err 15 | end 16 | 17 | local digest = md5( r.nonce .. username .. pass_digest ( username , password ) ) 18 | 19 | r, err = db:_cmd("authenticate", { 20 | user = username ; 21 | nonce = r.nonce ; 22 | key = digest ; 23 | }) 24 | 25 | if not r then 26 | return nil, err 27 | end 28 | 29 | return 1 30 | end 31 | 32 | return auth -------------------------------------------------------------------------------- /lib/resty/moongoo/auth/scram.lua: -------------------------------------------------------------------------------- 1 | local Hi = require("resty.moongoo.utils").pbkdf2_hmac_sha1 2 | local saslprep = require("resty.moongoo.utils").saslprep 3 | local pass_digest = require("resty.moongoo.utils").pass_digest 4 | local xor_bytestr = require("resty.moongoo.utils").xor_bytestr 5 | 6 | local b64 = ngx and ngx.encode_base64 or require("mime").b64 7 | local unb64 = ngx and ngx.decode_base64 or require("mime").unb64 8 | 9 | local hmac_sha1 = ngx and ngx.hmac_sha1 or function(str, key) return require("crypto").hmac.digest("sha1", key, str, true) end 10 | local sha1_bin = ngx and ngx.sha1_bin or function(str) return require("crypto").digest("sha1", str, true) end 11 | 12 | local cbson = require("cbson") 13 | 14 | 15 | local function auth(db, username, password) 16 | local username = saslprep(username) 17 | local c_nonce = b64(string.sub(tostring(math.random()), 3 , 14)) 18 | 19 | local first_bare = "n=" .. username .. ",r=" .. c_nonce 20 | 21 | local sasl_start_payload = b64("n,," .. 
first_bare) 22 | 23 | local r, err 24 | r, err = db:_cmd("saslStart", { 25 | mechanism = "SCRAM-SHA-1" ; 26 | autoAuthorize = 1 ; 27 | payload = cbson.binary(sasl_start_payload); 28 | }) 29 | 30 | if not r then 31 | return nil, err 32 | end 33 | 34 | 35 | local conversationId = r['conversationId'] 36 | local server_first = r['payload']:raw() 37 | 38 | local parsed_t = {} 39 | for k, v in string.gmatch(server_first, "(%w+)=([^,]*)") do 40 | parsed_t[k] = v 41 | end 42 | 43 | local iterations = tonumber(parsed_t['i']) 44 | local salt = parsed_t['s'] 45 | local s_nonce = parsed_t['r'] 46 | 47 | if not string.sub(s_nonce, 1, 12) == c_nonce then 48 | return nil, 'Server returned an invalid nonce.' 49 | end 50 | 51 | local without_proof = "c=biws,r=" .. s_nonce 52 | 53 | local pbkdf2_key = pass_digest ( username , password ) 54 | local salted_pass = Hi(pbkdf2_key, iterations, unb64(salt), 20) 55 | 56 | local client_key = hmac_sha1(salted_pass, "Client Key") 57 | local stored_key = sha1_bin(client_key) 58 | local auth_msg = first_bare .. ',' .. server_first .. ',' .. without_proof 59 | local client_sig = hmac_sha1(stored_key, auth_msg) 60 | local client_key_xor_sig = xor_bytestr(client_key, client_sig) 61 | local client_proof = "p=" .. b64(client_key_xor_sig) 62 | local client_final = b64(without_proof .. ',' .. client_proof) 63 | local server_key = hmac_sha1(salted_pass, "Server Key") 64 | local server_sig = b64(hmac_sha1(server_key, auth_msg)) 65 | 66 | r, err = db:_cmd("saslContinue",{ 67 | conversationId = conversationId ; 68 | payload = cbson.binary(client_final); 69 | }) 70 | 71 | if not r then 72 | return nil, err 73 | end 74 | 75 | local parsed_s = r['payload']:raw() 76 | parsed_t = {} 77 | for k, v in string.gmatch(parsed_s, "(%w+)=([^,]*)") do 78 | parsed_t[k] = v 79 | end 80 | if parsed_t['v'] ~= server_sig then 81 | return nil, "Server returned an invalid signature." 82 | end 83 | 84 | if not r['done'] then 85 | r, err = db:_cmd("saslContinue", { 86 | conversationId = conversationId ; 87 | payload = ngx.encode_base64(""); 88 | }) 89 | 90 | if not r then 91 | return nil, err 92 | end 93 | 94 | if not r['done'] then 95 | return nil, 'SASL conversation failed to complete.' 96 | end 97 | 98 | return 1 99 | end 100 | 101 | return 1 102 | end 103 | 104 | return auth 105 | -------------------------------------------------------------------------------- /lib/resty/moongoo/collection.lua: -------------------------------------------------------------------------------- 1 | local cbson = require("cbson") 2 | local generate_oid = require("resty.moongoo.utils").generate_oid 3 | local cursor = require("resty.moongoo.cursor") 4 | 5 | local _M = {} 6 | 7 | local mt = { __index = _M } 8 | 9 | function _M.new(name, db) 10 | return setmetatable({_db = db, name = name}, mt) 11 | end 12 | 13 | function _M._build_write_concern(self) 14 | return { 15 | j = self._db._moongoo.journal; 16 | w = tonumber(self._db._moongoo.w) and cbson.int(self._db._moongoo.w) or self._db._moongoo.w; 17 | wtimeout = cbson.int(self._db._moongoo.wtimeout); 18 | } 19 | end 20 | 21 | local function check_write_concern(doc, ...) 22 | -- even if write concern failed we may still have successful operation 23 | -- so we check for number of affected docs, and only warn if its > 0 24 | -- otherwise, we just return nil and error 25 | 26 | if doc.writeConcernError then 27 | if not doc.n then 28 | return nil, doc.writeConcernError.errmsg 29 | else 30 | print(doc.writeConcernError.errmsg) 31 | end 32 | end 33 | return ... 
34 | end 35 | 36 | function _M._get_last_error(self) 37 | local write_concern = self:_build_write_concern() 38 | local cmd = { getLastError = cbson.int(1), j = write_concern.j, w = write_concern.w, wtimeout = write_concern.wtimeout } 39 | 40 | local doc, err = self._db:cmd(cmd) 41 | if not doc then 42 | return nil, err 43 | end 44 | 45 | return doc 46 | end 47 | 48 | function _M._check_last_error(self, ...) 49 | local cmd, err = self:_get_last_error() 50 | 51 | if not cmd then 52 | return nil, err 53 | end 54 | 55 | if tostring(cmd.err) == "null" then 56 | return ... 57 | end 58 | 59 | return nil, tostring(cmd.err) 60 | end 61 | 62 | local function ensure_oids(docs) 63 | local docs = docs 64 | local ids = {} 65 | for k,v in ipairs(docs) do 66 | if not docs[k]._id then 67 | docs[k]._id = cbson.oid(generate_oid()) 68 | end 69 | table.insert(ids, docs[k]._id) 70 | end 71 | return docs, ids 72 | end 73 | 74 | local function build_index_names(docs) 75 | local docs = docs 76 | for k,v in ipairs(docs) do 77 | if not v.name then 78 | local name = {} 79 | for n, d in pairs(v.key) do 80 | table.insert(name, n) 81 | end 82 | name = table.concat(name, '_') 83 | docs[k].name = name 84 | end 85 | end 86 | return docs 87 | end 88 | 89 | function _M.insert(self, docs) 90 | -- ensure we have oids 91 | if #docs == 0 then 92 | local newdocs = {} 93 | newdocs[1] = docs 94 | docs = newdocs 95 | end 96 | local docs, ids = ensure_oids(docs) 97 | 98 | self._db._moongoo:connect() 99 | 100 | local server_version = tonumber(string.sub(string.gsub(self._db._moongoo.version, "(%D)", ""), 1, 3)) 101 | 102 | if server_version < 254 then 103 | self._db:insert(self:full_name(), docs) 104 | return self:_check_last_error(ids) 105 | else 106 | local doc, err = self._db:cmd( 107 | { insert = self.name }, 108 | { 109 | documents = docs, 110 | ordered = true, 111 | writeConcern = self:_build_write_concern() 112 | } 113 | ) 114 | 115 | if not doc then 116 | return nil, err 117 | end 118 | 119 | return check_write_concern(doc, ids, doc.n) 120 | end 121 | end 122 | 123 | function _M.create(self, params) 124 | local params = params or {} 125 | local doc, err = self._db:cmd( 126 | { create = self.name }, 127 | params 128 | ) 129 | if not doc then 130 | return nil, err 131 | end 132 | return true 133 | end 134 | 135 | function _M.drop(self) 136 | local doc, err = self._db:cmd( 137 | { drop = self.name }, 138 | {} 139 | ) 140 | if not doc then 141 | return nil, err 142 | end 143 | return true 144 | end 145 | 146 | function _M.drop_index(self, name) 147 | local doc, err = self._db:cmd( 148 | { dropIndexes = self.name }, 149 | { index = name } 150 | ) 151 | if not doc then 152 | return nil, err 153 | end 154 | return true 155 | end 156 | 157 | function _M.ensure_index(self, docs) 158 | docs = build_index_names(docs) 159 | 160 | local doc, err = self._db:cmd( 161 | { createIndexes = self.name }, 162 | { indexes = docs } 163 | ) 164 | if not doc then 165 | return nil, err 166 | end 167 | return true 168 | end 169 | 170 | function _M.full_name(self) 171 | return self._db.name .. "." .. 
self.name 172 | end 173 | 174 | function _M.options(self) 175 | local doc, err = self._db:cmd( 176 | "listCollections", 177 | { 178 | filter = { name = self.name } 179 | } 180 | ) 181 | if not doc then 182 | return nil, err 183 | end 184 | return doc.cursor.firstBatch[1] 185 | end 186 | 187 | function _M.remove(self, query, single) 188 | local query = query or {} 189 | 190 | if getmetatable(cbson.oid("000000000000000000000000")) == getmetatable(query) then 191 | query = { _id = query } 192 | end 193 | 194 | local doc, err = self._db:cmd( 195 | { delete = self.name }, 196 | { 197 | deletes = {{q=query, limit = single and 1 or 0}}, 198 | ordered = true, 199 | writeConcern = self:_build_write_concern() 200 | } 201 | ) 202 | if not doc then 203 | return nil, err 204 | end 205 | 206 | return check_write_concern(doc, doc.n) 207 | end 208 | 209 | function _M.stats(self) 210 | local doc, err = self._db:cmd( 211 | {collstats = self.name}, 212 | {} 213 | ) 214 | if not doc then 215 | return nil, err 216 | end 217 | return doc 218 | end 219 | 220 | function _M.index_information(self) 221 | local doc, err = self._db:cmd( 222 | { listIndexes = self.name }, 223 | { } 224 | ) 225 | if not doc then 226 | return nil, err 227 | end 228 | return doc.cursor.firstBatch 229 | end 230 | 231 | function _M.rename(self, to_name, drop) 232 | local drop = drop or false 233 | -- rename 234 | local doc, err = self._db._moongoo:db("admin"):cmd( 235 | { renameCollection = self:full_name() }, 236 | { 237 | to = to_name, 238 | dropTarget = drop 239 | } 240 | ) 241 | if not doc then 242 | return nil, err 243 | end 244 | 245 | return self.new(to_name, self._db) 246 | end 247 | 248 | function _M.update(self, query, update, flags) 249 | local flags = flags or {} 250 | local query = query or {} 251 | 252 | if getmetatable(cbson.oid("000000000000000000000000")) == getmetatable(query) then 253 | query = { _id = query } 254 | end 255 | 256 | local update = { 257 | q = query, 258 | u = update, 259 | upsert = flags.upsert or false, 260 | multi = flags.multi or false 261 | } 262 | 263 | local doc, err = self._db:cmd( 264 | { update = self.name }, 265 | { 266 | updates = { update }, 267 | ordered = true, 268 | writeConcern = self:_build_write_concern() 269 | } 270 | ) 271 | if not doc then 272 | return nil, err 273 | end 274 | 275 | return doc.nModified 276 | end 277 | 278 | function _M.save(self, doc) 279 | if not doc._id then 280 | doc._id = cbson.oid(generate_oid()) 281 | end 282 | local r, err = self:update(doc._id, doc, {upsert = true}); 283 | if not r then 284 | return nil, err 285 | end 286 | 287 | return doc._id 288 | end 289 | 290 | function _M.map_reduce(self, map, reduce, flags) 291 | local flags = flags or {} 292 | flags.map = cbson.code(map) 293 | flags.reduce = cbson.code(reduce) 294 | flags.out = flags.out or { inline = true } 295 | 296 | local doc, err = self._db:cmd( 297 | { mapReduce = self.name }, 298 | flags 299 | ) 300 | if not doc then 301 | return nil, err 302 | end 303 | 304 | if doc.results then 305 | return doc.results 306 | end 307 | 308 | return self.new(doc.result, self._db) 309 | end 310 | 311 | function _M.find(self, query, fields) 312 | local query = query or {} 313 | if getmetatable(cbson.oid("000000000000000000000000")) == getmetatable(query) then 314 | query = { _id = query } 315 | end 316 | return cursor.new(self, query, fields) 317 | end 318 | 319 | function _M.find_one(self, query, fields) 320 | local query = query or {} 321 | if getmetatable(cbson.oid("000000000000000000000000")) == 
getmetatable(query) then 322 | query = { _id = query } 323 | end 324 | 325 | return self:find(query, fields):limit(-1):next() 326 | end 327 | 328 | function _M.find_and_modify(self, query, opts) 329 | local query = query or {} 330 | if getmetatable(cbson.oid("000000000000000000000000")) == getmetatable(query) then 331 | query = { _id = query } 332 | end 333 | 334 | local opts = opts or {} 335 | opts.query = query 336 | 337 | local doc, err = self._db:cmd( 338 | { findAndModify = self.name }, 339 | opts 340 | ) 341 | if not doc then 342 | return nil, err 343 | end 344 | return doc.value 345 | end 346 | 347 | function _M.aggregate(self, pipeline, opts) 348 | local opts = opts or {} 349 | opts.pipeline = pipeline 350 | if not opts.explain then 351 | opts.cursor = {} 352 | end 353 | 354 | local doc, err = self._db:cmd( 355 | { aggregate = self.name }, 356 | opts 357 | ) 358 | if not doc then 359 | return nil, err 360 | end 361 | 362 | if opts.explain then 363 | return doc 364 | end 365 | 366 | -- collection 367 | if opts.pipeline[#opts.pipeline]['$out'] then 368 | return self.new(opts.pipeline[#opts.pipeline]['$out'], self._db) 369 | end 370 | 371 | -- cursor 372 | return cursor.new(self, {}, {}, false, doc.cursor.id):add_batch(doc.cursor.firstBatch) 373 | end 374 | 375 | 376 | 377 | return _M -------------------------------------------------------------------------------- /lib/resty/moongoo/connection.lua: -------------------------------------------------------------------------------- 1 | local socket = ngx and ngx.socket.tcp or require("socket").tcp 2 | local cbson = require("cbson") 3 | 4 | local opcodes = { 5 | OP_REPLY = 1; 6 | OP_MSG = 1000; 7 | OP_UPDATE = 2001; 8 | OP_INSERT = 2002; 9 | RESERVED = 2003; 10 | OP_QUERY = 2004; 11 | OP_GET_MORE = 2005; 12 | OP_DELETE = 2006; 13 | OP_KILL_CURSORS = 2007; 14 | } 15 | 16 | local _M = {} 17 | 18 | local mt = { __index = _M } 19 | 20 | function _M.new(host, port, timeout) 21 | local sock = socket() 22 | if timeout then 23 | sock:settimeout(timeout) 24 | end 25 | 26 | return setmetatable({ 27 | sock = sock; 28 | host = host; 29 | port = port; 30 | _id = 0; 31 | }, mt) 32 | end 33 | 34 | function _M.connect(self, host, port) 35 | self.host = host or self.host 36 | self.port = port or self.port 37 | return self.sock:connect(self.host, self.port) 38 | end 39 | 40 | function _M.handshake(self) 41 | if ngx then 42 | self.sock:sslhandshake() 43 | else 44 | local ssl = require("ssl") 45 | self.sock = ssl.wrap(self.sock, {mode = "client", protocol = "tlsv1_2"}) 46 | assert(self.sock) 47 | self.sock:dohandshake() 48 | end 49 | end 50 | 51 | function _M.close(self) 52 | if ngx then 53 | self.sock:setkeepalive() 54 | else 55 | self.sock:close() 56 | end 57 | end 58 | 59 | function _M.get_reused_times(self) 60 | if not self.sock then 61 | return nil, "not initialized" 62 | end 63 | 64 | return self.sock:getreusedtimes() 65 | end 66 | 67 | function _M.settimeout(self, ms) 68 | self.sock:settimeout(ms) 69 | end 70 | 71 | function _M.send(self, data) 72 | return self.sock:send(data) 73 | end 74 | 75 | function _M.receive(self, pat) 76 | return self.sock:receive(pat) 77 | end 78 | 79 | function _M._handle_reply(self) 80 | local header = assert ( self.sock:receive ( 16 ) ) 81 | 82 | local length = cbson.raw_to_uint( string.sub(header , 1 , 4 )) 83 | local r_id = cbson.raw_to_uint( string.sub(header , 5 , 8 )) 84 | local r_to = cbson.raw_to_uint( string.sub(header , 9 , 12 )) 85 | local opcode = cbson.raw_to_uint( string.sub(header , 13 , 16 )) 86 | 87 | assert 
( opcode == cbson.uint(opcodes.OP_REPLY ) ) 88 | assert ( r_to == cbson.uint(self._id) ) 89 | 90 | local data = assert ( self.sock:receive ( tostring(length-16 ) ) ) 91 | 92 | local flags = cbson.raw_to_uint( string.sub(data , 1 , 4 )) 93 | local cursor_id = cbson.raw_to_uint( string.sub(data , 5 , 12 )) 94 | local from = cbson.raw_to_uint( string.sub(data , 13 , 16 )) 95 | local number = tonumber(tostring(cbson.raw_to_uint( string.sub(data , 17 , 20 )))) 96 | 97 | local docs = string.sub(data , 21) 98 | 99 | local pos = 1 100 | local index = 0 101 | local r_docs = {} 102 | while index < number do 103 | local bson_size = tonumber(tostring(cbson.raw_to_uint(docs:sub(pos, pos+3)))) 104 | 105 | local dt = docs:sub(pos,pos+bson_size-1) -- get bson data according to size 106 | 107 | table.insert(r_docs, cbson.decode(dt)) 108 | 109 | pos = pos + bson_size 110 | index = index + 1 111 | end 112 | 113 | return flags, cursor_id, from, number, r_docs 114 | end 115 | 116 | function _M._build_header(self, op, payload_size) 117 | local size = cbson.uint_to_raw(cbson.uint(payload_size+16), 4) 118 | local op = cbson.uint_to_raw(cbson.uint(op), 4) 119 | self._id = self._id+1 120 | local id = cbson.uint_to_raw(cbson.uint(self._id), 4) 121 | local reply_to = "\0\0\0\0" 122 | return size .. id .. reply_to .. op 123 | end 124 | 125 | function _M._query(self, collection, query, to_skip, to_return, selector, flags) 126 | local flags = { 127 | tailable = flags and flags.tailable and 1 or 0, 128 | slaveok = flags and flags.slaveok and 1 or 0, 129 | notimeout = flags and flags.notimeout and 1 or 0, 130 | await = flags and flags.await and 1 or 0, 131 | exhaust = flags and flags.exhaust and 1 or 0, 132 | partial = flags and flags.partial and 1 or 0 133 | } 134 | 135 | local flagset = cbson.int_to_raw( 136 | cbson.int( 137 | 2 * flags["tailable"] + 138 | 2^2 * flags["slaveok"] + 139 | 2^4 * flags["notimeout"] + 140 | 2^5 * flags["await"] + 141 | 2^6 * flags["exhaust"] + 142 | 2^7 * flags["partial"] 143 | ), 144 | 4) 145 | 146 | local selector = selector and #selector and cbson.encode(selector) or "" 147 | 148 | local to_skip = cbson.int_to_raw(cbson.int(to_skip), 4) 149 | local to_return = cbson.int_to_raw(cbson.int(to_return), 4) 150 | 151 | local size = 4 + #collection + 1 + 4 + 4 + #query + #selector 152 | 153 | local header = self:_build_header(opcodes["OP_QUERY"], size) 154 | 155 | local data = header .. flagset .. collection .. "\0" .. to_skip .. to_return .. query .. selector 156 | 157 | assert(self:send(data)) 158 | return self:_handle_reply() 159 | end 160 | 161 | function _M._insert(self, collection, docs, flags) 162 | local encoded_docs = {} 163 | for k, doc in ipairs(docs) do 164 | encoded_docs[k] = cbson.encode(doc) 165 | end 166 | string_docs = table.concat(encoded_docs) 167 | 168 | local flags = { 169 | continue_on_error = flags and flags.continue_on_error and 1 or 0 170 | } 171 | 172 | local flagset = cbson.int_to_raw( 173 | cbson.int( 174 | 2 * flags["continue_on_error"] 175 | ), 176 | 4) 177 | 178 | local size = 4 + 1 + #collection + #string_docs 179 | local header = self:_build_header(opcodes["OP_INSERT"], size) 180 | 181 | local data = header .. flagset .. collection .. "\0" .. 
string_docs 182 | 183 | assert(self:send(data)) 184 | 185 | return true -- Mongo doesn't send a reply 186 | end 187 | 188 | function _M._kill_cursors(self, id) 189 | local id = cbson.uint_to_raw(id, 8) 190 | local num = cbson.int_to_raw(cbson.int(1), 4) 191 | local zero = cbson.int_to_raw(cbson.int(0), 4) 192 | local size = 8+4+4 193 | local header = self:_build_header(opcodes["OP_KILL_CURSORS"], size) 194 | local data = header .. zero .. num .. id 195 | assert(self:send(data)) 196 | return true -- Mongo doesn't send a reply 197 | end 198 | 199 | function _M._get_more(self, collection, number, cursor) 200 | local num = cbson.int_to_raw(cbson.int(number), 4) 201 | local zero = cbson.int_to_raw(cbson.int(0), 4) 202 | local cursor = cbson.uint_to_raw(cursor, 8) 203 | local size = 4+#collection+1+4+8 204 | local header = self:_build_header(opcodes["OP_GET_MORE"], size) 205 | local data = header .. zero .. collection .. '\0' .. num .. cursor 206 | assert(self:send(data)) 207 | return self:_handle_reply() 208 | end 209 | 210 | return _M -------------------------------------------------------------------------------- /lib/resty/moongoo/cursor.lua: -------------------------------------------------------------------------------- 1 | local cbson = require("cbson") 2 | local bit = require("bit") 3 | 4 | 5 | local function check_bit(num, bitnum) 6 | return bit.band(num,math.pow(2,bitnum)) ~= 0 -- and true or false 7 | end 8 | 9 | local _M = {} 10 | 11 | local mt = { __index = _M } 12 | 13 | function _M.new(collection, query, fields, explain, id) 14 | return setmetatable( 15 | { 16 | _collection = collection, 17 | _query = query, 18 | _fields = fields, 19 | _id = id or cbson.uint(0), 20 | _skip = 0, 21 | _limit = 0, 22 | _docs = {}, 23 | _started = false, 24 | _cnt = 0, 25 | _comment = nil, 26 | _hint = nil, 27 | _max_scan = nil , 28 | _max_time_ms = nil, 29 | _read_preference = nil, 30 | _snapshot = nil, 31 | _sort = nil, 32 | _await = false, 33 | _tailable = false, 34 | _explain = explain or false 35 | }, 36 | mt) 37 | end 38 | 39 | function _M.tailable(self, tailable) 40 | self._tailable = tailable 41 | return self 42 | end 43 | 44 | function _M.await(self, await) 45 | self._await = await 46 | return self 47 | end 48 | 49 | 50 | function _M.comment(self, comment) 51 | self._comment = comment 52 | return self 53 | end 54 | 55 | function _M.hint(self, hint) 56 | self._hint = hint 57 | return self 58 | end 59 | 60 | function _M.max_scan(self, max_scan) 61 | self._max_scan = max_scan 62 | return self 63 | end 64 | 65 | function _M.max_time_ms(self, max_time_ms) 66 | self._max_time_ms = max_time_ms 67 | return self 68 | end 69 | 70 | function _M.read_preference(self, read_preference) 71 | self._read_preference = read_preference 72 | return self 73 | end 74 | 75 | function _M.snapshot(self, snapshot) 76 | self._snapshot = snapshot 77 | return self 78 | end 79 | 80 | function _M.sort(self, sort) 81 | self._sort = sort 82 | return self 83 | end 84 | 85 | 86 | function _M.clone(self, explain) 87 | local clone = self.new(self._collection, self._query, self._fields, explain) 88 | clone:limit(self._limit) 89 | clone:skip(self._skip) 90 | 91 | clone:comment(self._comment) 92 | clone:hint(self._hint) 93 | clone:max_scan(self._max_scan) 94 | clone:max_time_ms(self._max_time_ms) 95 | clone:read_preference(self._read_preference) 96 | clone:snapshot(self._snapshot) 97 | clone:sort(self._sort) 98 | 99 | return clone 100 | end 101 | 102 | function _M.skip(self, skip) 103 | if self._started then 104 | print("Can's set 
skip after starting cursor") 105 | else 106 | self._skip = skip 107 | end 108 | return self 109 | end 110 | 111 | function _M.limit(self, limit) 112 | if self._started then 113 | print("Can's set limit after starting cursor") 114 | else 115 | self._limit = limit 116 | end 117 | return self 118 | end 119 | 120 | function _M._build_query(self) 121 | local ext = {} 122 | if self._comment then ext['$comment'] = self._comment end 123 | if self._explain then ext['$explain'] = true end 124 | 125 | 126 | if self._hint then ext['$hint'] = self._hint end 127 | if self._max_scan then ext['$maxScan'] = self._max_scan end 128 | if self._max_time_ms then ext['$maxTimeMS'] = self._max_time_ms end 129 | if self._read_preference then ext['$readPreference'] = self._read_preference end 130 | if self._snapshot then ext['$snapshot'] = true end 131 | if self._sort then ext['$orderby'] = self._sort end 132 | 133 | ext['$query'] = self._query 134 | 135 | return cbson.encode(ext) 136 | end 137 | 138 | function _M.next(self) 139 | local moongoo, err = self._collection._db._moongoo:connect() 140 | if not moongoo then 141 | return nil, err 142 | end 143 | 144 | if self:_finished() then 145 | if self._id ~= cbson.uint(0) then 146 | self._collection._db._moongoo.connection:_kill_cursors(self._id) 147 | self._id = cbson.uint(0) 148 | end 149 | return nil, "no more data" 150 | end 151 | 152 | if (not self._started) and (self._id == cbson.uint(0)) then 153 | 154 | -- query and add id and batch 155 | local flags, id, from, number, docs = self._collection._db._moongoo.connection:_query(self._collection:full_name(), self:_build_query(), self._skip, self._limit, self._fields, {tailable = self._tailable, await = self._await}) 156 | 157 | flags = tonumber(tostring(flags)) -- bitop can't work with cbson.int, so... 158 | 159 | if check_bit(flags, 1) then -- QueryFailure 160 | return nil, docs[1]['$err'] -- why is this $err and not errmsg, like others?? 161 | end 162 | self._id = id 163 | self:add_batch(docs) 164 | elseif #self._docs == 0 and self._id ~= cbson.uint(0) then 165 | -- we have something to fetch - get_more and add_batch 166 | local flags, id, from, number, docs = self._collection._db._moongoo.connection:_get_more(self._collection:full_name(), self._limit, self._id) 167 | 168 | flags = tonumber(tostring(flags)) -- bitop can't work with cbson.int, so... 
169 | 170 | if check_bit(flags, 0) then -- QueryFailure 171 | return nil, "wrong cursor id" 172 | end 173 | self:add_batch(docs) 174 | self._id = id 175 | 176 | elseif #self._docs == 0 then--or self._id == cbson.uint(0) then 177 | return nil, "no more data" 178 | end 179 | self._cnt = self._cnt+1 180 | return table.remove(self._docs, 1) or nil, 'No more data' 181 | end 182 | 183 | function _M.all(self) 184 | local docs = {} 185 | while true do 186 | local doc = self:next() 187 | if doc == nil then break end 188 | table.insert(docs, doc) 189 | end 190 | return docs 191 | end 192 | 193 | function _M.rewind(self) 194 | self._started = false 195 | self._docs = {} 196 | self._collection._db._moongoo.connection:_kill_cursors(self._id) 197 | self._id = cbson.uint(0) 198 | return self 199 | end 200 | 201 | function _M.count(self) 202 | local doc, err = self._collection._db:cmd( 203 | { count = self._collection.name }, 204 | { 205 | query = self._query, 206 | skip = self._skip, 207 | limit = self._limit 208 | } 209 | ) 210 | if not doc then 211 | return nil, err 212 | end 213 | 214 | return doc and doc.n or 0 215 | end 216 | 217 | function _M.distinct(self, key) 218 | local doc, err = self._collection._db:cmd( 219 | { distinct = self._collection.name }, 220 | { 221 | query = self._query, 222 | key = key 223 | } 224 | ) 225 | if not doc then 226 | return nil, err 227 | end 228 | 229 | return doc and doc.values or {} 230 | end 231 | 232 | function _M.explain(self) 233 | return self:clone(true):sort(nil):next() 234 | end 235 | 236 | function _M.add_batch(self, docs) 237 | self._started = true 238 | for k,v in ipairs(docs) do 239 | table.insert(self._docs, v) 240 | end 241 | return self 242 | end 243 | 244 | function _M._finished(self) 245 | if self._limit == 0 then 246 | return false 247 | else 248 | if self._cnt >= math.abs(self._limit) then 249 | return true 250 | else 251 | return false 252 | end 253 | end 254 | end 255 | 256 | return _M 257 | -------------------------------------------------------------------------------- /lib/resty/moongoo/database.lua: -------------------------------------------------------------------------------- 1 | local cbson = require("cbson") 2 | local collection = require("resty.moongoo.collection") 3 | local gridfs = require("resty.moongoo.gridfs") 4 | 5 | local _M = {} 6 | 7 | local mt = { __index = _M } 8 | 9 | function _M.new(name, moongoo) 10 | return setmetatable({name = name, _moongoo = moongoo}, mt) 11 | end 12 | 13 | function _M.collection(self, name) 14 | return collection.new(name, self) 15 | end 16 | 17 | function _M.gridfs(self, name) 18 | return gridfs.new(self,name) 19 | end 20 | 21 | function _M.cmd(self, cmd, params) 22 | local r, err = self._moongoo:connect() 23 | if not r then 24 | return nil, err 25 | end 26 | return self:_cmd(cmd, params) 27 | end 28 | 29 | function _M._cmd(self, cmd, params) 30 | local params = params or {} 31 | if type(cmd) == "table" then 32 | local tmpcmd = '' 33 | for k,v in pairs(cmd) do 34 | params[k] = v 35 | tmpcmd = k 36 | end 37 | cmd = tmpcmd 38 | else 39 | params[cmd] = true 40 | end 41 | local cmd = cbson.encode_first(cmd, params) 42 | 43 | local _,_,_,_,docs = self._moongoo.connection:_query(self.name..".$cmd", cmd, 0, 1) 44 | 45 | if not docs[1] then 46 | return nil, "Empty reply from mongodb" 47 | end 48 | 49 | if not docs[1].ok or docs[1].ok == 0 then 50 | return nil, docs[1].errmsg 51 | end 52 | 53 | return docs[1] 54 | end 55 | 56 | function _M.insert(self, collection, docs) 57 | local r, err = 
self._moongoo:connect() 58 | if not r then 59 | return nil, err 60 | end 61 | return self:_insert(collection, docs) 62 | end 63 | 64 | function _M._insert(self, collection, docs) 65 | self._moongoo.connection:_insert(collection, docs) 66 | return 67 | end 68 | 69 | 70 | return _M -------------------------------------------------------------------------------- /lib/resty/moongoo/gridfs.lua: -------------------------------------------------------------------------------- 1 | local cbson = require("cbson") 2 | local gfsfile = require("resty.moongoo.gridfs.file") 3 | 4 | local _M = {} 5 | 6 | local mt = { __index = _M } 7 | 8 | function _M.new(db, name) 9 | local name = name or 'fs' 10 | return setmetatable( 11 | { 12 | _db = db, 13 | _name = name, 14 | _files = db:collection(name .. '.files'), 15 | _chunks = db:collection(name .. '.chunks') 16 | }, 17 | mt) 18 | end 19 | 20 | function _M.list(self) 21 | return self._files:find({}):distinct('filename') 22 | end 23 | 24 | function _M.remove(self, id) 25 | local r,err = self._files:remove({_id = id}) 26 | if not r then 27 | return nil, "Failed to remove file metadata: "..err 28 | end 29 | r,err = self._chunks:remove({files_id = id}) 30 | if not r then 31 | return nil, "Failed to remove file chunks: "..err 32 | end 33 | return r 34 | end 35 | 36 | function _M.find_version(self, name, version) 37 | -- Positive numbers are absolute and negative ones relative 38 | local cursor = self._files:find({filename = name}, {_id = 1}):limit(-1) 39 | cursor:sort({uploadDate = (version < 0) and cbson.int(-1) or cbson.int(1)}):skip(version < 0 and (math.abs(version) - 1) or version) 40 | local doc, err = cursor:next() 41 | if not doc then 42 | return nil, "No such file/version" 43 | end 44 | return doc._id 45 | end 46 | 47 | function _M.open(self, id) 48 | return gfsfile.open(self, id) 49 | end 50 | 51 | 52 | function _M.create(self, name, opts, safe) 53 | return gfsfile.new(self, name, opts, safe) 54 | end 55 | 56 | return _M 57 | 58 | -------------------------------------------------------------------------------- /lib/resty/moongoo/gridfs/file.lua: -------------------------------------------------------------------------------- 1 | local cbson = require("cbson") 2 | local generate_oid = require("resty.moongoo.utils").generate_oid 3 | 4 | 5 | local _M = {} 6 | 7 | local mt = { __index = _M } 8 | 9 | function _M.new(gridfs, name, opts, safe, read_only) 10 | local read_only = read_only or false 11 | local safe = safe == nil and true or safe 12 | 13 | opts = opts or {} 14 | opts.filename = name 15 | opts.length = opts.length or cbson.uint(0) 16 | opts.chunkSize = opts.chunkSize or cbson.uint(261120) 17 | 18 | local write_only = true 19 | if not safe then 20 | write_only = false 21 | end 22 | 23 | return setmetatable( 24 | { 25 | _gridfs = gridfs, 26 | _meta = opts, 27 | _write_only = write_only, 28 | _read_only = read_only, 29 | _pos = 0, 30 | _chunks = {}, 31 | _closed = false, 32 | _buffer = '', 33 | _n = 0 34 | }, 35 | mt) 36 | end 37 | 38 | function _M.open(gridfs, id) 39 | -- try to fetch 40 | local doc, err = gridfs._files:find_one({ _id = id}) 41 | if not doc then 42 | return nil, "No such file" 43 | else 44 | return _M.new(gridfs, doc.filename, doc, false, true) 45 | end 46 | end 47 | 48 | -- props 49 | 50 | function _M.content_type(self) return self._meta.contentType end 51 | function _M.filename(self) return self._meta.filename end 52 | function _M.md5(self) return self._meta.md5 end 53 | function _M.metadata(self) return self._meta.metadata end 54 | 
function _M.raw_length(self) return self._meta.length end 55 | function _M.raw_chunk_size(self) return self._meta.chunkSize end 56 | function _M.date(self) return self._meta.uploadDate end 57 | 58 | function _M.length(self) return tonumber(tostring(self._meta.length)) end 59 | function _M.chunk_size(self) return tonumber(tostring(self._meta.chunkSize)) end 60 | 61 | -- reading 62 | 63 | function _M.read(self) 64 | if self._write_only then 65 | return nil, "Can't read from write-only file" 66 | end 67 | 68 | if self._pos >= (self:length() or 0) then 69 | return nil, "EOF" 70 | end 71 | 72 | local n = math.modf(self._pos / self:chunk_size()) 73 | local query = {files_id = self._meta._id, n = n} 74 | local fields = {_id = false, data = true} 75 | 76 | local chunk = self._gridfs._chunks:find_one(query, fields) 77 | if not chunk then 78 | return nil, "EOF?" 79 | end 80 | 81 | return self:_slice(n, chunk.data) 82 | end 83 | 84 | function _M.seek(self, pos) 85 | self._pos = pos 86 | return self 87 | end 88 | 89 | function _M.tell(self) 90 | return self._pos 91 | end 92 | 93 | function _M.slurp(self) 94 | local data = {} 95 | local pos = self._pos 96 | self:seek(0) 97 | while true do 98 | local chunk = self:read() 99 | if not chunk then break end 100 | table.insert(data, chunk) 101 | end 102 | self:seek(pos) 103 | return table.concat(data) 104 | end 105 | 106 | -- writing 107 | 108 | function _M.write(self, data) 109 | if self._read_only then 110 | return false, "Can't write to read-only file" 111 | end 112 | 113 | if self._closed then 114 | return false, "Can't write to closed file" 115 | end 116 | 117 | self._buffer = self._buffer .. data 118 | self._meta.length = self._meta.length + data:len() 119 | 120 | while self._buffer:len() >= self:chunk_size() do 121 | local r, err = self:_chunk() 122 | if not r then 123 | return false, err 124 | end 125 | end 126 | return true 127 | end 128 | 129 | function _M.close(self) 130 | 131 | if self._closed then 132 | return nil, "File already closed" 133 | end 134 | self._closed = true 135 | 136 | local r, err = self:_chunk() -- enqueue/write last chunk of data 137 | if not r then 138 | return nil, err 139 | end 140 | 141 | if self._write_only then 142 | -- insert all collected chunks 143 | for k, v in ipairs(self._chunks) do 144 | local r, err = self._gridfs._chunks:insert(v) 145 | if not r then 146 | return nil, err 147 | end 148 | end 149 | end 150 | 151 | -- ensure indexes 152 | self._gridfs._files:ensure_index({{ key = {filename = true}}}) 153 | self._gridfs._chunks:ensure_index({ { key = {files_id = 1, n = 1}, unique = true } }); 154 | -- compute md5 155 | local res, err = self._gridfs._db:cmd({filemd5 = self:_files_id()}, {root = self._gridfs._name}) 156 | if not res then 157 | return nil, err 158 | end 159 | local file_md5 = res.md5 160 | -- insert metadata 161 | local ids, n = self._gridfs._files:insert(self:_metadata(file_md5)) 162 | if not ids then 163 | return nil, n 164 | end 165 | if n < 1 then 166 | return nil, "Duplicate file ID" 167 | end 168 | -- return metadata 169 | return ids[1] 170 | end 171 | 172 | -- private 173 | 174 | function _M._files_id(self) 175 | if not self._meta._id then 176 | self._meta._id = cbson.oid(generate_oid()) 177 | end 178 | return self._meta._id 179 | end 180 | 181 | function _M._metadata(self, file_md5) 182 | local doc = { 183 | _id = self:_files_id(), 184 | length = self:raw_length(), 185 | chunkSize = self:raw_chunk_size(), 186 | uploadDate = cbson.date(os.time()*1000), 187 | md5 = file_md5, 188 | filename = 
self:filename() or nil, 189 | content_type = self:content_type() or nil, 190 | metadata = self:metadata() or nil 191 | } 192 | 193 | return doc 194 | end 195 | 196 | function _M._slice(self, n, chunk) 197 | local offset = self._pos - (n * self:chunk_size()) 198 | local chunk = chunk:raw() 199 | self._pos = self._pos + chunk:len() 200 | return chunk:sub(offset+1); 201 | end 202 | 203 | function _M._chunk(self) 204 | local chunk = self._buffer:sub(1,self:chunk_size()) 205 | if chunk:len() == 0 then 206 | return true 207 | end 208 | self._buffer = self._buffer:sub(self:chunk_size()+1) 209 | local n = self._n 210 | self._n = self._n+1 211 | local data = cbson.binary("") 212 | data:raw(chunk, chunk:len()) 213 | if self._write_only then 214 | -- collect chunks for insert 215 | table.insert(self._chunks, {files_id = self:_files_id(), n = cbson.uint(n), data = data}) 216 | else 217 | -- insert immidiately, so we can read back (ugh) 218 | local ids, num = self._gridfs._chunks:insert({files_id = self:_files_id(), n = cbson.uint(n), data = data}) 219 | if not ids then 220 | return false, num 221 | end 222 | if num < 1 then 223 | return false, "Duplicate file ID" 224 | end 225 | end 226 | return true 227 | end 228 | 229 | 230 | 231 | return _M 232 | -------------------------------------------------------------------------------- /lib/resty/moongoo/utils.lua: -------------------------------------------------------------------------------- 1 | local bit = require("bit") 2 | local cbson = require("cbson") 3 | 4 | local md5 = ngx and ngx.md5 or function(str) return require("crypto").digest("md5", str) end 5 | local hmac_sha1 = ngx and ngx.hmac_sha1 or function(str, key) return require("crypto").hmac.digest("sha1", key, str, true) end 6 | local hasposix , posix = pcall(require, "posix") 7 | 8 | local machineid 9 | if hasposix then 10 | machineid = posix.uname("%n") 11 | else 12 | machineid = assert(io.popen("uname -n")):read("*l") 13 | end 14 | machineid = md5(machineid):sub(1, 6) 15 | 16 | local function uint_to_hex(num, len, be) 17 | local len = len or 4 18 | local be = be or 0 19 | local num = cbson.uint(num) 20 | local raw = cbson.uint_to_raw(num, len, be) 21 | local out = '' 22 | for i = 1, #raw do 23 | out = out .. string.format("%02x", raw:byte(i,i)) 24 | end 25 | return out 26 | end 27 | 28 | local counter = 0 29 | 30 | if not ngx then 31 | math.randomseed(os.time()) 32 | counter = math.random(100) 33 | else 34 | local resty_random = require "resty.random" 35 | local resty_string = require "resty.string" 36 | local strong_random = resty_random.bytes(4,true) 37 | while strong_random == nil do 38 | strong_random = resty_random.bytes(4,true) 39 | end 40 | counter = tonumber(resty_string.to_hex(strong_random), 16) 41 | end 42 | 43 | local function generate_oid() 44 | local pid = ngx and ngx.worker.pid() or nil 45 | if not pid then 46 | if hasposix then 47 | pid = posix.getpid("pid") 48 | else 49 | pid = 1 50 | end 51 | end 52 | 53 | pid = uint_to_hex(pid,2) 54 | 55 | counter = counter + 1 56 | local time = os.time() 57 | 58 | return uint_to_hex(time, 4, 1) .. machineid .. pid .. 
uint_to_hex(counter, 4, 1):sub(3,8) 59 | end 60 | 61 | local function print_r(t, indent) 62 | local indent=indent or '' 63 | if #indent > 5 then return end 64 | if type(t) ~= "table" then 65 | print(t) 66 | return 67 | end 68 | for key,value in pairs(t) do 69 | io.write(indent,'[',tostring(key),']') 70 | if type(value)=="table" then io.write(':\n') print_r(value,indent..'\t') 71 | else io.write(' = ',tostring(value),'\n') end 72 | end 73 | end 74 | 75 | local function parse_uri(url) 76 | -- initialize default parameters 77 | local parsed = {} 78 | -- empty url is parsed to nil 79 | if not url or url == "" then return nil, "invalid url" end 80 | -- remove whitespace 81 | url = string.gsub(url, "%s", "") 82 | -- get fragment 83 | url = string.gsub(url, "#(.*)$", function(f) 84 | parsed.fragment = f 85 | return "" 86 | end) 87 | -- get scheme 88 | url = string.gsub(url, "^([%w][%w%+%-%.]*)%:", 89 | function(s) parsed.scheme = s; return "" end) 90 | 91 | -- get authority 92 | local location 93 | url = string.gsub(url, "^//([^/]*)", function(n) 94 | location = n 95 | return "" 96 | end) 97 | 98 | -- get query stringing 99 | url = string.gsub(url, "%?(.*)", function(q) 100 | parsed.query_string = q 101 | return "" 102 | end) 103 | -- get params 104 | url = string.gsub(url, "%;(.*)", function(p) 105 | parsed.params = p 106 | return "" 107 | end) 108 | -- path is whatever was left 109 | if url ~= "" then parsed.database = string.gsub(url,"^/([^/]*).*","%1") end 110 | if not parsed.database or #parsed.database == 0 then parsed.database = "admin" end 111 | 112 | if not location then return parsed end 113 | 114 | location = string.gsub(location,"^([^@]*)@", 115 | function(u) parsed.userinfo = u; return "" end) 116 | 117 | parsed.hosts = {} 118 | string.gsub(location, "([^,]+)", function(u) 119 | local pr = { host = "localhost", port = 27017 } 120 | u = string.gsub(u, ":([^:]*)$", 121 | function(p) pr.port = p; return "" end) 122 | if u ~= "" then pr.host = u end 123 | table.insert(parsed.hosts, pr) 124 | end) 125 | if #parsed.hosts == 0 then parsed.hosts = {{ host = "localhost", port = 27017 }} end 126 | 127 | parsed.query = {} 128 | if parsed.query_string then 129 | string.gsub(parsed.query_string, "([^&]+)", function(u) 130 | u = string.gsub(u, "([^=]*)=([^=]*)$", 131 | function(k,v) parsed.query[k] = v; return "" end) 132 | end) 133 | end 134 | 135 | local userinfo = parsed.userinfo 136 | if not userinfo then return parsed end 137 | userinfo = string.gsub(userinfo, ":([^:]*)$", 138 | function(p) parsed.password = p; return "" end) 139 | parsed.user = userinfo 140 | return parsed 141 | end 142 | 143 | local function xor_bytestr( a, b ) 144 | local res = "" 145 | for i=1,#a do 146 | res = res .. string.char(bit.bxor(string.byte(a,i,i), string.byte(b, i, i))) 147 | end 148 | return res 149 | end 150 | 151 | local function xor_bytestr( a, b ) 152 | local res = "" 153 | for i=1,#a do 154 | res = res .. string.char(bit.bxor(string.byte(a,i,i), string.byte(b, i, i))) 155 | end 156 | return res 157 | end 158 | 159 | -- A simple implementation of PBKDF2_HMAC_SHA1 160 | local function pbkdf2_hmac_sha1( pbkdf2_key, iterations, salt, len ) 161 | local u1 = hmac_sha1(pbkdf2_key, salt .. "\0\0\0\1") 162 | local ui = u1 163 | for i=1,iterations-1 do 164 | u1 = hmac_sha1(pbkdf2_key, u1) 165 | ui = xor_bytestr(ui, u1) 166 | end 167 | if #ui < len then 168 | for i=1,len-(#ui) do 169 | ui = string.char(0) .. 
ui 170 | end 171 | end 172 | return ui 173 | end 174 | 175 | -- not full implementation, but oh well 176 | local function saslprep(username) 177 | return string.gsub(string.gsub(username, '=', '=3D'), ',' , '=2C') 178 | end 179 | 180 | local function pass_digest ( username , password ) 181 | return md5(username .. ":mongo:" .. password) 182 | end 183 | 184 | return { 185 | parse_uri = parse_uri; 186 | print_r = print_r; 187 | pbkdf2_hmac_sha1 = pbkdf2_hmac_sha1; 188 | saslprep = saslprep; 189 | pass_digest = pass_digest; 190 | xor_bytestr = xor_bytestr; 191 | generate_oid = generate_oid; 192 | } 193 | --------------------------------------------------------------------------------