├── .github ├── PULL_REQUEST_TEMPLATE.md ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md └── CODE_OF_CONDUCT.md ├── tests ├── nimongotest.nim ├── timeit.nim ├── bsontest.nim └── mongotest.nim ├── .travis.yml ├── .gitignore ├── nimongo ├── private │ ├── auth.nim │ ├── errors.nim │ ├── writeconcern.nim │ ├── proto.nim │ ├── reply.nim │ ├── single.nim │ ├── async.nim │ ├── threaded.nim │ └── clientbase.nim ├── mongo.nim └── bson.nim ├── .vscode └── tasks.json ├── nimongo.nimble ├── LICENSE └── README.md /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Adding new super-awesome feature... 2 | -------------------------------------------------------------------------------- /tests/nimongotest.nim: -------------------------------------------------------------------------------- 1 | ## Nimongo whole test suite 2 | 3 | import bsontest 4 | import mongotest 5 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | services: 3 | - docker 4 | before_install: 5 | - docker pull yglukhov/nim-base 6 | script: 7 | - docker run -v "$(pwd):/project" -w /project yglukhov/nim-base bash -c "nimble install -y && nimble test_ci" 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | nimcache 2 | mongo 3 | bson 4 | mongotest 5 | bsontest 6 | query 7 | nimongotest 8 | profile_results.txt 9 | tests/nimongo 10 | *.save 11 | Contribute.md 12 | *.ilk 13 | *.pdb 14 | *.exe 15 | *.cfg 16 | 17 | # ignore big blob for gridfs 18 | *.mkv -------------------------------------------------------------------------------- /nimongo/private/auth.nim: -------------------------------------------------------------------------------- 1 | 
type 2 | AuthenticationMethod* = enum 3 | NoAuth 4 | ScramSHA1 ## + 5 | MongodbCr 6 | MongodbX509 7 | Kerberos ## Enterprise-only 8 | Ldap ## Enterprise-only 9 | -------------------------------------------------------------------------------- /tests/timeit.nim: -------------------------------------------------------------------------------- 1 | ## Naive module for performance-testing 2 | import times 3 | 4 | template timeIt*(name: string, p: untyped): untyped = 5 | ## Performs timing of the block call, and makes output into stdout. 6 | let timeStart = cpuTime() 7 | p 8 | echo name, ": ", formatFloat((cpuTime() - timeStart) * 1000000, precision=12), " μs" 9 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | CONTRIBUTING 2 | ------------ 3 | 4 | 1. Any kind of contribution is welcomed to the `nimongo` project, either a feature implementation, bugfix or documentation update. 5 | 2. Please create all your contributions via Pull Requests mechanism with brief description that will show the usefulness of your contribution. 6 | 3. Please remember that the project is limited to a MongoDB driver thus consists only with BSON marashaler and MongoDB client, and any unrelated features can be not accepted. 
7 | 8 | -------------------------------------------------------------------------------- /nimongo/private/errors.nim: -------------------------------------------------------------------------------- 1 | type 2 | CommunicationError* = object of CatchableError 3 | ## Raises on communication problems with MongoDB server 4 | 5 | NimongoError* = object of CatchableError 6 | ## Base exception for nimongo error (for simplifying error handling) 7 | 8 | NotFound* = object of NimongoError 9 | ## Raises when querying of one documents returns empty result 10 | 11 | ReplyFieldMissing* = object of NimongoError 12 | ## Raises when reqired field in reply is missing 13 | 14 | OperationTimeout* = object of NimongoError 15 | ## Raises when operation required error occures -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | 5 | --- 6 | 7 | **Is your feature request related to a problem? Please describe.** 8 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 9 | 10 | **Describe the solution you'd like** 11 | A clear and concise description of what you want to happen. 12 | 13 | **Describe alternatives you've considered** 14 | A clear and concise description of any alternative solutions or features you've considered. 15 | 16 | **Additional context** 17 | Add any other context or screenshots about the feature request here. 
18 | -------------------------------------------------------------------------------- /nimongo/private/writeconcern.nim: -------------------------------------------------------------------------------- 1 | # This module implements MongoDB WriteConcern support 2 | import ../bson 3 | 4 | const Journaled*: bool = true 5 | 6 | type WriteConcern* = Bson 7 | 8 | proc writeConcern*(w: int32, j: bool, wtimeout: int = 0): WriteConcern = 9 | ## Custom write concern creation 10 | result = %*{"w": w, "j": j} 11 | if wtimeout > 0: 12 | result["wtimeout"] = wtimeout.toBson() 13 | 14 | proc writeConcernDefault*(): Bson = 15 | ## Default value for write concern at MongoDB server 16 | writeConcern(1, not Journaled) 17 | 18 | proc writeConcernMajority*(wtimeout: int = 0): WriteConcern = 19 | ## Majority of replica set members must approve 20 | ## that write operation was successful 21 | result = %*{"w": "majority", "j": Journaled} 22 | if wtimeout > 0: 23 | result["wtimeout"] = wtimeout.toBson() 24 | -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0.0", 3 | "tasks": [ 4 | { 5 | "label": "Build", 6 | "command": "nimble", 7 | "args": ["build"], 8 | "options": { 9 | "cwd": "${workspaceRoot}" 10 | }, 11 | "type": "shell", 12 | "group": { 13 | "kind": "build", 14 | "isDefault": true 15 | } 16 | }, 17 | { 18 | "label": "Test", 19 | "command": "nimble", 20 | "args": ["test"], 21 | "options": { 22 | "cwd": "${workspaceRoot}" 23 | }, 24 | "type": "shell", 25 | "group": { 26 | "kind": "test", 27 | "isDefault": true 28 | } 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /nimongo.nimble: -------------------------------------------------------------------------------- 1 | # Package 2 | description = "Pure Nim driver for MongoDB with support of synchronous and asynchronous I/O 
modes" 3 | version = "0.3" 4 | license = "MIT" 5 | author = "Rostyslav Dzinko " 6 | 7 | # Dependencies 8 | requires "scram >= 0.1.13" 9 | 10 | proc runTest(input: string) = 11 | let cmd = "nim c -r " & input 12 | echo "running: " & cmd 13 | exec cmd 14 | 15 | proc runTestThreaded(input: string) = 16 | let cmd = "nim c --threads:on -r " & input 17 | echo "running: " & cmd 18 | exec cmd 19 | 20 | proc testNoMongod() = 21 | runTest "nimongo/bson.nim" 22 | runTest "tests/bsontest.nim" 23 | runTestThreaded "nimongo/bson.nim" 24 | runTestThreaded "tests/bsontest.nim" 25 | 26 | task test, "tests": 27 | testNoMongod() 28 | runTest "tests/nimongotest.nim" 29 | runTestThreaded "tests/nimongotest.nim" 30 | 31 | task test_ci, "tests for CI": 32 | testNoMongod() 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | 5 | --- 6 | 7 | **Describe the bug** 8 | A clear and concise description of what the bug is. 9 | 10 | **To Reproduce** 11 | Steps to reproduce the behavior: 12 | 1. Go to '...' 13 | 2. Click on '....' 14 | 3. Scroll down to '....' 15 | 4. See error 16 | 17 | **Expected behavior** 18 | A clear and concise description of what you expected to happen. 19 | 20 | **Screenshots** 21 | If applicable, add screenshots to help explain your problem. 22 | 23 | **Desktop (please complete the following information):** 24 | - OS: [e.g. iOS] 25 | - Browser [e.g. chrome, safari] 26 | - Version [e.g. 22] 27 | 28 | **Smartphone (please complete the following information):** 29 | - Device: [e.g. iPhone6] 30 | - OS: [e.g. iOS8.1] 31 | - Browser [e.g. stock browser, safari] 32 | - Version [e.g. 22] 33 | 34 | **Additional context** 35 | Add any other context about the problem here. 
36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Rostyslav Dzinko 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /nimongo/private/proto.nim: -------------------------------------------------------------------------------- 1 | import ../bson 2 | 3 | ## Wire protocol codes 4 | const OP_REPLY* = 1'i32 ## OP_REPLY operation code. Reply to a client request. responseTo is set. 5 | const OP_UPDATE* = 2001'i32 ## OP_UPDATE operation code. Update document. 6 | const OP_INSERT* = 2002'i32 ## OP_INSERT operation code. Insert new document. 7 | const RESERVED* = 2003'i32 ## RESERVED operation code. Formerly used for OP_GET_BY_OID. 8 | const OP_QUERY* = 2004'i32 ## OP_QUERY operation code. 
Query a collection. 9 | const OP_GET_MORE* = 2005'i32 ## OP_GET_MORE operation code. Get more data from a query. See Cursors. 10 | const OP_DELETE* = 2006'i32 ## OP_DELETE operation code. Delete documents. 11 | const OP_KILL_CURSORS* = 2007'i32 ## OP_KILL_CURSORS operation code. Notify database that the client has finished with the cursor. 12 | const OP_MSG* = 2013'i32 ## OP_MSG operation code. Send a message using the format introduced in MongoDB 3.6. 13 | 14 | const HEADER_LENGTH* = 16'i32 ## Message Header size in bytes 15 | 16 | proc buildMessageHeader*(messageLength, requestId, responseTo: int32, opCode: int32, res: var string) = 17 | ## Build Mongo message header as a series of bytes 18 | int32ToBytes(messageLength+HEADER_LENGTH, res) 19 | int32ToBytes(requestId, res) 20 | int32ToBytes(responseTo, res) 21 | int32ToBytes(opCode, res) 22 | 23 | proc buildMessageQuery*(flags: int32, fullCollectionName: string, numberToSkip, numberToReturn: int32, res: var string) = 24 | ## Build Mongo query message 25 | int32ToBytes(flags, res) 26 | res &= fullCollectionName 27 | res &= char(0) 28 | int32ToBytes(numberToSkip, res) 29 | int32ToBytes(numberToReturn, res) 30 | 31 | proc buildMessageMore*(fullCollectionName: string, cursorId: int64, numberToReturn: int32, res: var string) = 32 | ## Build Mongo get more message 33 | int32ToBytes(0'i32, res) 34 | res &= fullCollectionName 35 | res &= char(0) 36 | int32ToBytes(numberToReturn, res) 37 | int64ToBytes(cursorId, res) 38 | 39 | proc buildMessageKillCursors*(cursorIds: seq[int64], res: var string) = 40 | ## Build Mongo kill cursors message 41 | let ncursorIds: int32 = cursorIds.len().int32 42 | if ncursorIds > 0: 43 | int32ToBytes(0'i32, res) 44 | int32ToBytes(ncursorIds, res) 45 | for cursorId in cursorIds: 46 | int64ToBytes(cursorId, res) 47 | -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: 
-------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at rostislav.dzinko@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /nimongo/private/reply.nim: -------------------------------------------------------------------------------- 1 | import ../bson 2 | import ./errors 3 | 4 | type StatusReply* = object ## Database Reply 5 | ok*: bool 6 | n*: int 7 | err*: string 8 | inserted_ids*: seq[Bson] 9 | bson*: Bson 10 | 11 | template parseReplyField(b: untyped, field: untyped, default: untyped, body: untyped): untyped = 12 | ## Take field from BSON. If field is missing and required than generate 13 | ## "ReplyFieldMissing" exception. If field is missing and not required 14 | ## than apply provided default value. If field exists than apply provided 15 | ## calculations body code. 16 | let b = reply[field] 17 | if b == nil.Bson: 18 | if isRequired: 19 | raise newException(ReplyFieldMissing, "Required field \"" & field & "\" missing in reply") 20 | else: 21 | result = default 22 | else: 23 | body 24 | 25 | proc parseReplyOk(reply: Bson, isRequired: bool): bool {.raises: [ReplyFieldMissing, Exception]} = 26 | ## Parse "ok" field in database reply. 27 | parseReplyField(val, "ok", false): 28 | case val.kind 29 | of BsonKindDouble: 30 | result = val.toFloat64 == 1.0'f64 31 | else: 32 | result = val.toInt == 1 33 | 34 | proc parseReplyN(reply: Bson, isRequired: bool): int {.raises: [ReplyFieldMissing, Exception]} = 35 | ## Parse "n" field in database reply. 
36 | parseReplyField(val, "n", 0): 37 | case val.kind 38 | of BsonKindDouble: 39 | result = val.toFloat64.int 40 | else: 41 | result = val.toInt 42 | 43 | proc parseReplyErrmsg(reply: Bson, isRequired: bool): string {.raises: [ReplyFieldMissing, Exception]} = 44 | ## Parse "errmsg" field in database reply. 45 | parseReplyField(val, "errmsg", ""): 46 | if val.kind == BsonKindStringUTF8: 47 | result = val.toString 48 | else: 49 | result = "" 50 | 51 | proc toStatusReply*(reply: Bson, inserted_ids: seq[Bson] = @[]): StatusReply = 52 | ## Create StatusReply object from database reply BSON document and 53 | ## an optional list of OIDs. 54 | ## "ok" field is considered required. "n" and "errmsg" fields 55 | ## are optional and they are parsed if exist in reply 56 | result.bson = reply 57 | result.ok = parseReplyOk(reply, true) 58 | result.n = parseReplyN(reply, false) 59 | result.err = parseReplyErrmsg(reply, false) 60 | result.inserted_ids = inserted_ids 61 | 62 | proc isReplyOk*(reply: Bson): bool = 63 | ## This function is useful if we would like to check only "ok" field 64 | ## in reply and do not like to create full StatusReply object. Field 65 | ## is considered required 66 | result = parseReplyOk(reply, true) 67 | 68 | proc getReplyN*(reply: Bson): int = 69 | ## This function is useful if we would like to check only "n" field 70 | ## in reply and do not like to create full StatusReply object. Field 71 | ## is considered required 72 | result = parseReplyN(reply, true) 73 | 74 | proc getReplyErrmsg*(reply: Bson): string = 75 | ## This function is useful if we would like to check only "errmsg" field 76 | ## in reply and do not like to create full StatusReply object. Field 77 | ## is considered required 78 | result = parseReplyErrmsg(reply, true) 79 | 80 | converter toBool*(sr: StatusReply): bool = sr.ok 81 | ## If StatusReply.ok field is true = then StatusReply is considered 82 | ## to be successful. 
It is a convinience wrapper for the situation 83 | ## when we are not interested in no more status information than 84 | ## just a flag of success. 85 | -------------------------------------------------------------------------------- /tests/bsontest.nim: -------------------------------------------------------------------------------- 1 | ## Tests for bson.nim module 2 | import unittest 3 | 4 | import nimongo/bson 5 | 6 | suite "BSON serializer/deserializer test suite": 7 | 8 | echo "\n BSON serializer/deserializer test suite\n" 9 | 10 | test "Creating empty document with constructor": 11 | let doc = newBsonDocument() 12 | check($doc == "{\n}") 13 | 14 | test "Creating empty document with `%*` operator": 15 | let doc = %*{} 16 | check($doc == "{\n}") 17 | 18 | test "Creating document with all available types": 19 | let doc = %*{ 20 | "double": 5436.5436, 21 | "stringkey": "stringvalue", 22 | "document": { 23 | "double": 5436.5436, 24 | "key": "value" 25 | }, 26 | "array": [1, 2, 3], 27 | "int32": 5436'i32, 28 | "int64": 5436, 29 | } 30 | check(doc["double"].toFloat64 == 5436.5436) 31 | check(doc["stringkey"].toString == "stringvalue") 32 | check doc["stringkey"] is Bson 33 | check(doc["stringkey"].toString == "stringvalue") 34 | check(doc["document"]["double"].toFloat64 == 5436.5436) 35 | check(doc["document"]["key"].toString == "value") 36 | check(doc["array"][0].toInt64 == 1'i64) 37 | check(doc["int32"].toInt32 == 5436'i32) 38 | check(doc["int64"].toInt64 == 5436'i64) 39 | 40 | test "Document modification usin `[]=` operator": 41 | let doc = %*{ 42 | "int32": 1'i32, 43 | "array": [1, 2, 3] 44 | } 45 | check(doc["int32"].toInt32 == 1'i32) 46 | doc["int32"] = toBson(2'i32) 47 | check(doc["int32"].toInt32 == 2'i32) 48 | doc["array"][0] = toBson(10'i32) 49 | check(doc["array"][0].toInt32 == 10'i32) 50 | doc["newfield"] = "newvalue".toBson 51 | check(doc["newfield"].toString == "newvalue") 52 | 53 | 54 | test "Check if document has specific field with `in` operator": 
55 | let doc = %*{ 56 | "field1": "string", 57 | "field2": 1'i32 58 | } 59 | check("field1" in doc) 60 | check(not ("field3" in doc)) 61 | 62 | test "Document inside array": 63 | let doc = %*{ 64 | "field": "value", 65 | "ar": [ 66 | { 67 | "field1": 5'i32, 68 | "field2": "gello" 69 | }, 70 | { 71 | "field": "hello" 72 | } 73 | ] 74 | } 75 | check(doc["ar"][0]["field1"].toInt() == 5) 76 | 77 | test "Document's merge": 78 | let a = %*{ 79 | "field1": "value1", 80 | "field2": [ 81 | {"ar0": "1"}, 82 | {"ar1": "2"}, 83 | {"ar2": "3"} 84 | ] 85 | } 86 | let b = %*{ 87 | "field3": "value2", 88 | "field0": 5'i32 89 | } 90 | 91 | let abm = merge(a, b) 92 | check(abm["field0"].toInt32 == 5'i32) 93 | check(abm["field2"][0]["ar0"].toString == "1") 94 | 95 | test "Document update": 96 | let a = %*{ 97 | "field1": "value1", 98 | "field2": [ 99 | {"ar0": "1"}, 100 | {"ar1": "2"}, 101 | {"ar2": "3"} 102 | ] 103 | } 104 | 105 | let b = %*{ 106 | "field3": "value2", 107 | "field0": 5'i32 108 | } 109 | 110 | b.update(a) 111 | check(b["field0"].toInt32 == 5'i32) 112 | check(b["field2"][0]["ar0"].toString == "1") 113 | 114 | test "array length": 115 | let arr = newBsonArray() 116 | arr.add(%*{ 117 | "field3": "value2", 118 | "field0": 5'i32 119 | }) 120 | 121 | check(arr.len == 1) 122 | 123 | -------------------------------------------------------------------------------- /nimongo/private/single.nim: -------------------------------------------------------------------------------- 1 | when compileOption("threads"): 2 | {.error: "This module is available only when --threads:off".} 3 | 4 | import os 5 | import net 6 | import uri 7 | import streams 8 | import md5 9 | import strutils 10 | import scram/client 11 | import ../bson 12 | import ./clientbase 13 | import ./errors 14 | 15 | type 16 | Mongo* = ref object of MongoBase ## Mongo client object 17 | pool: seq[LockedSocket] 18 | 19 | LockedSocket = ref object of LockedSocketBase 20 | sock: Socket 21 | 22 | 23 | proc newLockedSocket(): 
LockedSocket = 24 | ## Constructor for "locked" socket 25 | result.new() 26 | result.init() 27 | result.sock = newSocket() 28 | 29 | proc initPool(m: var Mongo) = 30 | m.pool = newSeq[LockedSocket](1) 31 | m.pool[0] = newLockedSocket() 32 | 33 | proc newMongo*(host: string = "127.0.0.1", port: uint16 = DefaultMongoPort, secure=false, maxConnections=16): Mongo = 34 | ## Mongo client constructor 35 | result.new() 36 | result.init(host, port) 37 | result.initPool() 38 | 39 | 40 | proc newMongoWithURI*(u: Uri, maxConnections=16): Mongo = 41 | result.new() 42 | result.init(u) 43 | result.initPool() 44 | 45 | proc newMongoWithURI*(u: string, maxConnections=16): Mongo = newMongoWithURI(parseUri(u), maxConnections) 46 | 47 | proc authenticateScramSha1(db: Database[Mongo], username: string, password: string, ls: LockedSocket): bool {.discardable.} 48 | proc acquire*(m: Mongo): LockedSocket = 49 | ## Retrieves next non-in-use socket for request 50 | while true: 51 | let s = m.pool[0] 52 | if not s.inuse: 53 | if not s.connected: 54 | try: 55 | s.sock.connect(m.host, Port(m.port)) 56 | s.connected = true 57 | if m.needsAuth() and not s.authenticated: 58 | s.authenticated = m[m.authDb()].authenticateScramSha1(m.username, m.password, s) 59 | m.authenticated = s.authenticated 60 | except OSError: 61 | continue 62 | s.inuse = true 63 | return s 64 | sleep(1000) 65 | 66 | proc release*(m: Mongo, ls: LockedSocket) = 67 | if ls.inuse: 68 | ls.inuse = false 69 | else: 70 | raise newException(ValueError, "Socket can't be released twice") 71 | 72 | method kind*(sm: Mongo): ClientKind = ClientKindSync ## Sync Mongo client 73 | 74 | proc connect*(m: Mongo): bool = 75 | ## Establish connection with Mongo server 76 | let s = m.acquire() 77 | m.release(s) 78 | result = s.connected 79 | 80 | proc newMongoDatabase*(u: Uri): Database[Mongo] {.deprecated.} = 81 | ## Create new Mongo sync client using URI type 82 | let m = newMongoWithURI(u) 83 | if m.connect(): 84 | result = 
m[u.path.extractFilename()] 85 | m.pool[0].authenticated = result.authenticateScramSha1(m.username, m.password, m.pool[0]) 86 | 87 | proc refresh*(f: Cursor[Mongo], lockedSocket: LockedSocket = nil): seq[Bson] = 88 | ## Private procedure for performing actual query to Mongo 89 | template releaseSocket(ls: LockedSocket) = 90 | if lockedSocket.isNil: 91 | f.connection.release(ls) 92 | # Template for disconnection handling 93 | template handleDisconnect(size: int, ls: LockedSocket) = 94 | if size == 0: 95 | ls.connected = false 96 | releaseSocket(ls) 97 | raise newException(CommunicationError, "Disconnected from MongoDB server") 98 | 99 | if f.isClosed(): 100 | raise newException(CommunicationError, "Cursor can't be closed while requesting") 101 | 102 | var res: string 103 | let numberToReturn = calcReturnSize(f) 104 | if f.isClosed(): 105 | return @[] 106 | 107 | let reqID = f.connection().nextRequestId() 108 | if f.cursorId == 0: 109 | res = prepareQuery(f, reqID, numberToReturn, f.nskip) 110 | else: 111 | res = prepareMore(f, reqID, numberToReturn) 112 | 113 | var ls = lockedSocket 114 | if ls.isNil: 115 | ls = f.connection.acquire() 116 | if ls.sock.trySend(res): 117 | var data: string = newStringOfCap(4) 118 | var received: int = ls.sock.recv(data, 4) 119 | handleDisconnect(received, ls) 120 | var stream: Stream = newStringStream(data) 121 | 122 | ## Read data 123 | let messageLength: int32 = stream.readInt32() - 4 124 | 125 | data = newStringOfCap(messageLength) 126 | received = ls.sock.recv(data, messageLength) 127 | handleDisconnect(received, ls) 128 | stream = newStringStream(data) 129 | 130 | discard stream.readInt32() ## requestId 131 | discard stream.readInt32() ## responseTo 132 | discard stream.readInt32() ## opCode 133 | let responceFlags = stream.readInt32() ## responseFlags 134 | let cursorId = stream.readInt64() ## cursorID 135 | discard stream.readInt32() ## startingFrom 136 | let numberReturned: int32 = stream.readInt32() ## numberReturned 137 | 
138 | if f.cursorId == 0 or (f.queryFlags and TailableCursor) == 0: 139 | f.cursorId = cursorId 140 | if cursorId == 0: 141 | f.close() 142 | if (responceFlags and RFCursorNotFound) != 0: 143 | f.close() 144 | if numberReturned > 0: 145 | f.updateCount(numberReturned) 146 | for i in 0.. bool: x 90 | 91 | 92 | proc handleResponses(ls: AsyncLockedSocket): Future[void] {.async.} = 93 | # Template for disconnection handling 94 | template handleDisconnect(response: string, sock: AsyncLockedSocket) = 95 | if response == "": 96 | ls.connected = false 97 | ls.inuse = false 98 | raise newException(CommunicationError, "Disconnected from MongoDB server") 99 | 100 | while ls.queue.len > 0: 101 | var data: string = await ls.sock.recv(4) 102 | handleDisconnect(data, ls) 103 | 104 | var stream: Stream = newStringStream(data) 105 | let messageLength: int32 = stream.readInt32() - 4 106 | 107 | ## Read data 108 | data = "" 109 | while data.len < messageLength: 110 | let chunk: string = await ls.sock.recv(messageLength - data.len) 111 | handleDisconnect(chunk, ls) 112 | data &= chunk 113 | stream = newStringStream(data) 114 | 115 | 116 | discard stream.readInt32() ## requestID 117 | let responseTo = stream.readInt32() ## responseTo 118 | discard stream.readInt32() ## opCode 119 | let responceFlags = stream.readInt32() ## responseFlags 120 | let cursorId = stream.readInt64() ## cursorID 121 | discard stream.readInt32() ## startingFrom 122 | let numberReturned: int32 = stream.readInt32() ## numberReturned 123 | 124 | let futCur = ls.queue[responseTo] 125 | var res: seq[Bson] = @[] 126 | 127 | if futCur.cur.cursorId == 0 or (futCur.cur.queryFlags and TailableCursor) == 0: 128 | futCur.cur.cursorId = cursorId 129 | if cursorId == 0: 130 | futCur.cur.close() 131 | if (responceFlags and RFCursorNotFound) != 0: 132 | futCur.cur.close() 133 | if numberReturned > 0: 134 | futCur.cur.updateCount(numberReturned) 135 | for i in 0.. 0: 171 | for i in 0.. 
0: 205 | f.updateCount(data.len.int32) 206 | for doc in data: 207 | if doc.contains("$err"): 208 | if doc["code"].toInt == 50: 209 | raise newException(OperationTimeout, "Command " & $f & " has timed out") 210 | elif data.len == 0 and numberToReturn == 1: 211 | raise newException(NotFound, "No documents matching query were found") 212 | else: 213 | discard 214 | return data 215 | 216 | proc one(f: Cursor[Mongo], ls: LockedSocket): Bson = 217 | # Internal proc used for sending authentication requests on particular socket 218 | let docs = f.limit(1).refresh(ls) 219 | if docs.len == 0: 220 | raise newException(NotFound, "No documents matching query were found") 221 | return docs[0] 222 | 223 | proc authenticateScramSha1(db: Database[Mongo], username: string, password: string, ls: LockedSocket): bool {.discardable, gcsafe.} = 224 | ## Authenticate connection (sync): using SCRAM-SHA-1 auth method 225 | if username == "" or password == "": 226 | return false 227 | 228 | var scramClient = newScramClient[SHA1Digest]() 229 | let clientFirstMessage = scramClient.prepareFirstMessage(username) 230 | 231 | let requestStart = %*{ 232 | "saslStart": 1'i32, 233 | "mechanism": "SCRAM-SHA-1", 234 | "payload": bin(clientFirstMessage), 235 | "autoAuthorize": 1'i32 236 | } 237 | let responseStart = db["$cmd"].makeQuery(requestStart).one(ls) 238 | ## line to check if connect worked 239 | if isNil(responseStart) or not isNil(responseStart["code"]): return false #connect failed or auth failure 240 | db.client.authenticated = true 241 | let 242 | responsePayload = binstr(responseStart["payload"]) 243 | passwordDigest = $toMd5("$#:mongo:$#" % [username, password]) 244 | clientFinalMessage = scramClient.prepareFinalMessage(passwordDigest, responsePayload) 245 | let requestContinue1 = %*{ 246 | "saslContinue": 1'i32, 247 | "conversationId": toInt32(responseStart["conversationId"]), 248 | "payload": bin(clientFinalMessage) 249 | } 250 | let responseContinue1 = 
db["$cmd"].makeQuery(requestContinue1).one(ls) 251 | 252 | if responseContinue1["ok"].toFloat64() == 0.0: 253 | db.client.authenticated = false 254 | return false 255 | 256 | if not scramClient.verifyServerFinalMessage(binstr(responseContinue1["payload"])): 257 | raise newException(Exception, "Server returned an invalid signature.") 258 | 259 | # Depending on how it's configured, Cyrus SASL (which the server uses) 260 | # requires a third empty challenge. 261 | if not responseContinue1["done"].toBool(): 262 | let requestContinue2 = %*{ 263 | "saslContinue": 1'i32, 264 | "conversationId": responseContinue1["conversationId"], 265 | "payload": "" 266 | } 267 | let responseContinue2 = db["$cmd"].makeQuery(requestContinue2).one(ls) 268 | if not responseContinue2["done"].toBool(): 269 | raise newException(Exception, "SASL conversation failed to complete.") 270 | return true -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | nimongo - Pure Nim MongoDB Driver [![Build Status](https://travis-ci.org/SSPkrolik/nimongo.svg?branch=master)](https://travis-ci.org/SSPkrolik/nimongo) [![nimble](https://raw.githubusercontent.com/yglukhov/nimble-tag/master/nimble.png)](https://github.com/yglukhov/nimble-tag) 2 | =================================== 3 | 4 | `nimongo` has a main intention to provide developer-friendly way to interact 5 | with MongoDB using Nim programming language without any other dependencies. 6 | 7 | You can find a table of supported features at the bottom of the document. 8 | 9 | `nimongo` is tested on MongoDB `3.x.x` and `4.0.x` versions with GCC and MSVS2017 compiler backends and the latest stable version of Nim (0.19.0). 10 | 11 | Installation 12 | ------------ 13 | You can use `nimble` package manager to install `nimongo`. 
The most recent 14 | version of the library can be installed like this: 15 | 16 | ```bash 17 | $ nimble install nimongo 18 | ``` 19 | 20 | or directly from Git repo: 21 | 22 | ```bash 23 | $ nimble install https://github.com/SSPkrolik/nimongo.git 24 | ``` 25 | 26 | Current status (briefly) 27 | ------------------------ 28 | 29 | Currently `nimongo.mongo` implements connection to single MongoDB server, and 30 | support for most widely used queries (whole CRUD with some exceptions), 31 | 32 | `nimongo.bson` gives full support of current BSON specification. As for 33 | performance, it is comparable with `pymongo` Python driver on rough timeit-style 34 | tests. 35 | 36 | Usage of synchronous client 37 | --------------------------- 38 | `nimongo.mongo.Mongo` synchronous client perform interaction with MongoDB 39 | over network using sockets and __blocking__ I/O which stops the thread it is 40 | used on from executing while MongoDB operation is not finished: either data 41 | is sent over network (_insert_, _update_, _remove_), or query (_find_) is done, 42 | and answer (or portion of it) is waited for. 43 | 44 | Mongo synchronous client is __thread-safe__. It uses simple `Lock` when 45 | executing commands and queries. 
46 | 47 | ```nim 48 | import oids 49 | 50 | import nimongo.bson ## MongoDB BSON serialization/deserialization 51 | import nimongo.mongo ## MongoDB client 52 | 53 | ## Create new Mongo client 54 | var m = newMongo().slaveOk(true).allowPartial(false) 55 | 56 | ## Connect to Mongo server 57 | let connectResult = m.connect() 58 | 59 | ## Specify collection 60 | let collection = m["db"]["collectionName"] 61 | 62 | ## Create new bson document 63 | let doc = %*{ 64 | "name": "John" 65 | } 66 | 67 | 68 | 69 | ## Insert document into DB 70 | collection.insert(doc) 71 | 72 | ## Update [single] document 73 | let reply = collection.update(%*{ 74 | "name": "John" 75 | }, %*{ 76 | "$set": { 77 | "surname": "Smith" 78 | } 79 | }) 80 | 81 | # Check command execution status 82 | if reply.ok: 83 | echo "Modified a document." 84 | 85 | ## Delete multiple documents 86 | let removeResult = collection.remove(%*{"name": "John"}) 87 | 88 | ## Check how many documents were removed 89 | if removeResult.ok: 90 | echo "Removed ", removeResult.n, " documents." 91 | 92 | ## Delete single document 93 | collection.remove(%{"name": "John"}, limit=1) 94 | 95 | ## Delete collection 96 | collection.drop() 97 | 98 | ## Delete single document 99 | collection.remove(%{"name": "John"}, limit=1) 100 | 101 | ## Fetch number of documents in collection 102 | collection.count() 103 | 104 | ## Fetch number of documents in query 105 | let tally = collection.find(%*{"name": "John"}).count() 106 | 107 | ## Fetch one document from DB returning only one field: "name". 108 | let fetched = collection.find(%*{"name": "John"}, @["name"]).one() 109 | 110 | ## Fetch all matching documents from DB receiving seq[Bson] 111 | let documents = collection.find(%*{"name": "John"}).all() 112 | 113 | ## Fetch all matching documents as a iterator 114 | for document in collection.find(%*{"name": "John"}).items(): 115 | echo document 116 | 117 | ## Force cursor to return only distinct documents by specified field. 
118 | let documents = collection.find(%*{"name": "John"}).unique("name").all() 119 | ``` 120 | 121 | Usage of async client 122 | --------------------- 123 | `nimongo.mongo.Mongo` can also work in an asynchronous mode based on 124 | `ayncdispatch` standard async mechanisms, and `asyncnet.AsyncSocket` sockets. It 125 | performs non-blocking I/O via `{.async.}` procedures. 126 | 127 | Mongo async client is __thread-safe__. It uses simple `Lock` when 128 | executing commands and queries. 129 | 130 | ```nim 131 | import asyncdispatch ## Nim async-supportive functions here 132 | import oids 133 | 134 | import nimongo.bson ## MongoDB BSON serialization/deserialization 135 | import nimongo.mongo ## MongoDB client 136 | 137 | ## Create new Mongo client 138 | var m: AsyncMongo = newAsyncMongo().slaveOk(false) ## Still Mongo type 139 | 140 | ## Connect to Mongo server with asynchronous socket 141 | let connected = waitFor(m.connect()) 142 | 143 | ## Testing connection establishing result 144 | echo "Async connection established: ", connected 145 | 146 | ## Inserting single document into MongoDB 147 | waitFor(m.insert(B("hello-async", "victory"))) 148 | 149 | ## Inserting multiple documents into MongoDB 150 | let 151 | doc1 = %*{"doc1": 15} 152 | doc2 = %*{"doc2": "string"} 153 | 154 | waitFor(m.insert(@[doc1, doc2])) 155 | 156 | ## Removing single document from MongoDB 157 | waitFor(m.remove(B("doc1", 15), limit=1)) 158 | 159 | ## Removing multiple documents from MongoDB 160 | waitFor(m.remove(B("doc1", 15))) 161 | ``` 162 | 163 | Currently Supported Features 164 | ============================ 165 | Here's a list of supported features with appropriate status icons: 166 | 167 | * :white_check_mark: - implemented feature 168 | * :red_circle: - __not__ implemented feature 169 | * :warning: - __partly__ supported or __unstable__ 170 | 171 | BSON 172 | ---- 173 | 174 | `nimongo.bson` module implements full BSON specification, and includes means 175 | for developer-friendly BSON 
creation, modification and 176 | deserialization. 177 | 178 | You can use either __B(...)__ template or __`%*`__ for documents creation 179 | depending on what is more convenient for you. 180 | 181 | 182 | ```nim 183 | let doc = B("name", "John")("surname", "Smith")("salary", 100) 184 | let doc2 = B( 185 | "name", "Sam")( 186 | "surname", "Uncle")( 187 | "salary", 1000)( 188 | "skills", @["power", "government", "army"] 189 | ) 190 | ``` 191 | 192 | Authentication 193 | --------------- 194 | `nimongo` supports the new SCRAM-SHA-1 challenge-response user authentication mechanism 195 | 196 | ```nim 197 | 198 | var db: Database[Mongo] 199 | try: 200 | db = newMongoDatabase("mongodb://$1:$2@localhost:27017/db" % [db_user,db_pass]) 201 | if not db.client.authenticated: raise newException(AUTHError, "Unable to authenticate to db") 202 | except: 203 | logging.error(getCurrentExceptionMsg()) 204 | raise 205 | 206 | ``` 207 | 208 | MongoDB Features 209 | ---------------- 210 | This table represents MongoDB features and their implementation status within 211 | `nimongo.mongo` Nim module. 
212 | 213 | | Block | Feature | Status (sync) | Status (async) | Notes | 214 | |-----------:|:----------------|:------------------:|:------------------:|:------| 215 | |Connection | | __2__ / __7__ | __2__ / __7__ | | 216 | | | Single server | :white_check_mark: | :white_check_mark: | | 217 | | | Replica set | :red_circle: | :red_circle: | | 218 | | | Socket Timeout | :red_circle: | :red_circle: | | 219 | | | SSL | :red_circle: | :red_circle: | | 220 | | | Connect Timeout | :red_circle: | :red_circle: | | 221 | | | Write Concern | :white_check_mark: | :white_check_mark: | | 222 | | | Read Preference | :red_circle: | :red_circle: | | 223 | |Operations | Insert (Single/Multiple), Remove (Single/Multiple), Update (Single/Multiple/Upsert) | :white_check_mark: | :white_check_mark: | | 224 | |Querying | | __6__ / __10__ | __5__ / __9__ | | 225 | | | Find one | :white_check_mark: | :white_check_mark: | | 226 | | | Find all | :white_check_mark: | :white_check_mark: | | 227 | | | Find iterator | :white_check_mark: | :white_check_mark: | | 228 | | | Skip | :white_check_mark: | :white_check_mark: | | 229 | | | Limit | :white_check_mark: | :white_check_mark: | | 230 | | | Count | :white_check_mark: | :white_check_mark: | | 231 | | | Tailable | :white_check_mark: | :white_check_mark: | | 232 | | | Partial | :red_circle: | :red_circle: | | 233 | | | FindAndModify | :red_circle: | :red_circle: | | 234 | | | parallelCollectionScan | :red_circle:| :red_circle: | | 235 | | | getLastError | :white_check_mark: | :white_check_mark: | | 236 | |Authentication | | __1__ / __7__ | __1__ / __7__ | | 237 | | | authenticate | :red_circle: | :red_circle: | | 238 | | | SCRAM-SHA-1 | :white_check_mark: | :white_check_mark: | | 239 | | | MONGODB-CR | :red_circle: | :red_circle: | | 240 | | | MONGODB-X509 | :red_circle: | :red_circle: | | 241 | | | GSSAPI (Kerberos)|:red_circle: | :red_circle: | | 242 | | | PLAIN (LDAP SASL)|:red_circle: | :red_circle: | | 243 | | | logout | :red_circle: | 
:red_circle: | | 244 | |User Management | | __2__ / __7__ | __2__ / __7__ | | 245 | | | Create User | :white_check_mark: | :white_check_mark: | | 246 | | | Update User | :red_circle: | :red_circle: | | 247 | | | Drop User | :white_check_mark: | :white_check_mark: | | 248 | | | Drop all users | :red_circle: | :red_circle: | | 249 | | | Grant roles | :red_circle: | :red_circle: | | 250 | | | Revoke roles | :red_circle: | :red_circle: | | 251 | | | Users info | :red_circle: | :red_circle: | | 252 | |Role Management | | __0__ / __0__ | __0__ / __0__ | | 253 | |Replication | | __1__ / __1__ | __1__ / __1__ | | 254 | | | Is Master | :white_check_mark: | :white_check_mark: | | 255 | |Sharding | | __0__ / __0__ | __0__ / __0__ | | 256 | |Instance Administration Commands| | __6__ / __7__ | __6__ / __7__ | | 257 | | | Copy DB | :red_circle: | :red_circle: | | 258 | | | List databases | :white_check_mark: | :white_check_mark: | | 259 | | | Drop database | :white_check_mark: | :white_check_mark: | | 260 | | | List collections| :white_check_mark: | :white_check_mark: | | 261 | | | Rename collection|:white_check_mark: | :white_check_mark: | | 262 | | | Drop collection | :white_check_mark: | :white_check_mark: | | 263 | | | Create collection|:white_check_mark: | :white_check_mark: | | 264 | |Diagnostic | | __0__ / __0__ | __0__ / __0__ | | 265 | |GridFS | | __0__ / __0__ | __0__ / __0__ | | 266 | |Indices | | __0__ / __4__ | __0__ / __4__ | | 267 | | | Create Index | :red_circle: | :red_circle: | | 268 | | | Drop Index | :red_circle: | :red_circle: | | 269 | | | Drop Indices | :red_circle: | :red_circle: | | 270 | | | Ensure Index | :red_circle: | :red_circle: | | 271 | |Aggregation | | __3__ / __6__ | __3__ / __6__ | | 272 | | | aggregate | :red_circle: | :red_circle: | | 273 | | | count | :white_check_mark: | :white_check_mark: | | 274 | | | distinct | :white_check_mark: | :white_check_mark: | __Cursor.unique__ proc | 275 | | | group | :red_circle: | :red_circle: | | 276 | | | 
mapReduce | :red_circle: | :red_circle: | | 277 | | | orderBy | :white_check_mark: | :white_check_mark: | | 278 | |Geospatial | | __0__ /__3__ | __0__ / __3__ | | 279 | | | geoNear | :red_circle: | :red_circle: | | 280 | | | geoSearch | :red_circle: | :red_circle: | | 281 | | | geoWalk | :red_circle: | :red_circle: | | 282 | |Auditing | | __0__ / __1__ | __0__ / __1__ | | 283 | | |logApplicationMessage|:red_circle: | :red_circle: | | 284 | 285 | __P.S.__ Contribution is welcomed :) 286 | -------------------------------------------------------------------------------- /nimongo/private/clientbase.nim: -------------------------------------------------------------------------------- 1 | import os 2 | import strutils 3 | import uri 4 | import ../bson except `()` 5 | import writeconcern 6 | import proto 7 | 8 | when compileOption("threads"): 9 | import locks 10 | 11 | const 12 | DefaultMongoHost* = "127.0.0.1" 13 | DefaultMongoPort* = 27017'u16 ## Default MongoDB IP Port 14 | 15 | TailableCursor* = 1'i32 shl 1 ## Leave cursor alive on MongoDB side 16 | SlaveOk* = 1'i32 shl 2 ## Allow to query replica set slaves 17 | NoCursorTimeout* = 1'i32 shl 4 ## 18 | AwaitData* = 1'i32 shl 5 ## 19 | Exhaust* = 1'i32 shl 6 ## 20 | Partial* = 1'i32 shl 7 ## Get info only from running shards 21 | 22 | RFCursorNotFound* = 1'i32 ## CursorNotFound. Is set when getMore is called but the cursor id is not valid at the server. Returned with zero results. 23 | RFQueryFailure* = 1'i32 shl 1 ## QueryFailure. Is set when query failed. Results consist of one document containing an “$err” field describing the failure. 24 | RFShardConfigStale* = 1'i32 shl 2 ## ShardConfigStale. Drivers should ignore this. Only mongos will ever see this set, in which case, it needs to update config from the server. 25 | RFAwaitCapable* = 1'i32 shl 3 ## AwaitCapable. Is set when the server supports the AwaitData Query option. If it doesn’t, a client should sleep a little between getMore’s of a Tailable cursor. 
Mongod version 1.6 supports AwaitData and thus always sets AwaitCapable. 26 | 27 | type 28 | ClientKind* = enum ## Kind of client communication type 29 | ClientKindBase = 0 30 | ClientKindSync = 1 31 | ClientKindAsync = 2 32 | 33 | MongoBase* = ref object of RootObj ## Base for Mongo clients 34 | when compileOption("threads"): 35 | reqIdLock: Lock 36 | requestId {.guard: reqIdLock.}: int32 37 | else: 38 | requestId: int32 39 | host: string 40 | port: uint16 41 | queryFlags: int32 42 | username: string 43 | password: string 44 | db: string 45 | needAuth: bool 46 | authenticated: bool 47 | replicas: seq[tuple[host: string, port: uint16]] 48 | writeConcern: WriteConcern 49 | 50 | Database*[T] = ref DatabaseObj[T] 51 | DatabaseObj*[T] = object ## MongoDB database object 52 | name: string 53 | client*: T 54 | 55 | Collection*[T] = ref CollectionObj[T] 56 | CollectionObj*[T] = object ## MongoDB collection object 57 | name: string 58 | db: Database[T] 59 | client: T 60 | 61 | CollectionInfo* = ref CollectionInfoObj 62 | CollectionInfoObj* = object ## Collection information (for manual creation) 63 | disableIdIndex*: bool 64 | forceIdIndex*: bool 65 | capped: bool 66 | maxBytes: int 67 | maxDocs: int 68 | 69 | Cursor*[T] = ref CursorObj[T] 70 | CursorObj*[T] = object ## MongoDB cursor: manages queries object lazily 71 | collection: Collection[T] 72 | query: Bson 73 | fields: seq[string] 74 | queryFlags*: int32 75 | nskip: int32 76 | nlimit: int32 77 | nbatchSize: int32 78 | sorting: Bson 79 | cursorId: int64 80 | count: int32 81 | closed: bool 82 | 83 | GridFS*[T] = ref GridFSObj[T] 84 | GridFSObj*[T] = object 85 | ## GridFS is collection which namespaced to .files and .chunks 86 | name*: string # bucket name 87 | files*: Collection[T] 88 | chunks*: Collection[T] 89 | 90 | LockedSocketBase* {.inheritable.} = ref LockedSocketBaseObj 91 | LockedSocketBaseObj* {.inheritable.} = object 92 | inuse: bool 93 | authenticated: bool 94 | connected: bool 95 | 96 | template 
lockIfThreads(body: untyped): untyped = 97 | when compileOption("threads"): 98 | mb.reqIdLock.acquire() 99 | try: 100 | {.locks: [mb.reqIdLock].}: 101 | body 102 | finally: 103 | mb.reqIdLock.release() 104 | else: 105 | body 106 | 107 | method init*(mb: MongoBase, host: string, port: uint16) {.base.} = 108 | mb.host = host 109 | mb.port = port 110 | lockIfThreads: 111 | mb.requestID = 0 112 | mb.queryFlags = 0 113 | mb.replicas = @[] 114 | mb.username = "" 115 | mb.password = "" 116 | mb.db = "admin" 117 | mb.needAuth = false 118 | mb.authenticated = false 119 | mb.writeConcern = writeConcernDefault() 120 | 121 | method init*(b: MongoBase, u: Uri) {.base.} = 122 | let port = if u.port.len > 0: parseInt(u.port).uint16 else: DefaultMongoPort 123 | b.init(u.hostname, port) 124 | b.username = u.username 125 | b.password = u.password 126 | let db = u.path.extractFilename() 127 | if db != "": 128 | b.db = db 129 | b.needAuth = (b.username != "" and b.db != "") 130 | 131 | proc host*(mb: MongoBase): string = mb.host 132 | ## Connected server host 133 | 134 | proc port*(mb: MongoBase): uint16 = mb.port 135 | ## Connected server port 136 | 137 | proc username*(mb: MongoBase): string = mb.username 138 | ## Username to authenticate at Mongo Server 139 | 140 | proc password*(mb: MongoBase): string = mb.password 141 | ## Password to authenticate at Mongo Server 142 | 143 | proc authDb*(mb: MongoBase): string = mb.db 144 | ## Database for authentication 145 | 146 | proc needsAuth*(mb: MongoBase): bool = mb.needAuth 147 | ## Check if connection needs to be authenticated 148 | 149 | proc queryFlags*(mb: MongoBase): int32 = mb.queryFlags 150 | ## Query flags perform query flow and connection settings 151 | 152 | proc `queryFlags=`*(mb: MongoBase, flags: int32) = mb.queryFlags = flags 153 | ## Query flags perform query flow and connection settings 154 | 155 | proc nextRequestId*(mb: MongoBase): int32 = 156 | ## Return next request id for current MongoDB client 157 | lockIfThreads: 
158 | mb.requestId = (mb.requestId + 1) mod (int32.high - 1'i32) 159 | result = mb.requestId 160 | 161 | proc writeConcern*(mb: MongoBase): WriteConcern = mb.writeConcern 162 | ## Getter for currently setup client's write concern 163 | 164 | proc `writeConcern=`*(mb: MongoBase, concern: WriteConcern) = 165 | ## Set client-wide write concern for sync client 166 | assert "w" in concern 167 | mb.writeConcern = concern 168 | 169 | proc authenticated*(mb: MongoBase): bool = mb.authenticated 170 | ## Query authenticated flag 171 | 172 | proc `authenticated=`*(mb: MongoBase, authenticated: bool) = mb.authenticated = authenticated 173 | ## Enable/disable authenticated flag for database 174 | 175 | method kind*(mb: MongoBase): ClientKind {.base.} = ClientKindBase 176 | ## Base Mongo client 177 | 178 | proc tailableCursor*(m: MongoBase, enable: bool = true): MongoBase {.discardable.} = 179 | ## Enable/disable tailable behaviour for the cursor (cursor is not 180 | ## removed immediately after the query) 181 | result = m 182 | m.queryFlags = if enable: m.queryFlags or TailableCursor else: m.queryFlags and (not TailableCursor) 183 | 184 | proc slaveOk*(m: MongoBase, enable: bool = true): MongoBase {.discardable.} = 185 | ## Enable/disable querying from slaves in replica sets 186 | result = m 187 | m.queryFlags = if enable: m.queryFlags or SlaveOk else: m.queryFlags and (not SlaveOk) 188 | 189 | proc noCursorTimeout*(m: MongoBase, enable: bool = true): MongoBase {.discardable.} = 190 | ## Enable/disable cursor idle timeout 191 | result = m 192 | m.queryFlags = if enable: m.queryFlags or NoCursorTimeout else: m.queryFlags and (not NoCursorTimeout) 193 | 194 | proc awaitData*(m: MongoBase, enable: bool = true): MongoBase {.discardable.} = 195 | ## Enable/disable data waiting behaviour (along with tailable cursor) 196 | result = m 197 | m.queryFlags = if enable: m.queryFlags or AwaitData else: m.queryFlags and (not AwaitData) 198 | 199 | proc exhaust*(m: MongoBase, enable: bool = 
true): MongoBase {.discardable.} = 200 | ## Enable/disable exhaust flag which forces database to give away 201 | ## all data for the query in form of "get more" packages. 202 | result = m 203 | m.queryFlags = if enable: m.queryFlags or Exhaust else: m.queryFlags and (not Exhaust) 204 | 205 | proc allowPartial*(m: MongoBase, enable: bool = true): MongoBase {.discardable} = 206 | ## Enable/disable allowance for partial data retrieval from mongos when 207 | ## one or more shards are down. 208 | result = m 209 | m.queryFlags = if enable: m.queryFlags or Partial else: m.queryFlags and (not Partial) 210 | 211 | proc `$`*(m: MongoBase): string = 212 | ## Return full DSN for the Mongo connection 213 | return "mongodb://$#:$#" % [m.host, $m.port] 214 | 215 | proc `[]`*[T: MongoBase](client: T, dbName: string): Database[T] = 216 | ## Retrieves database from Mongo 217 | result.new() 218 | result.name = dbName 219 | result.client = client 220 | 221 | # === Locked Sockets API === # 222 | 223 | method init*(ls: LockedSocketBase) {.base.} = 224 | ls.inuse = false 225 | ls.authenticated = false 226 | ls.connected = false 227 | 228 | proc inuse*(ls: LockedSocketBase): bool = ls.inuse 229 | ## Return inuse 230 | 231 | proc `inuse=`*(ls: LockedSocketBase, inuse: bool) = 232 | ## Enable/disable inuse flag for socket 233 | ls.inuse = inuse 234 | 235 | proc authenticated*(ls: LockedSocketBase): bool = ls.authenticated 236 | ## Return authenticated 237 | 238 | proc `authenticated=`*(ls: LockedSocketBase, authenticated: bool) = 239 | ## Enable/disable authenticated flag for socket 240 | ls.authenticated = authenticated 241 | 242 | proc connected*(ls: LockedSocketBase): bool = ls.connected 243 | ## Return connected 244 | 245 | proc `connected=`*(ls: LockedSocketBase, connected: bool) = 246 | ## Enable/disable connected flag for socket 247 | ls.connected = connected 248 | 249 | # === Database API === # 250 | 251 | proc `$`*(db: Database): string = 252 | ## Database name string representation 
253 | return db.name 254 | 255 | proc `[]`*[T: MongoBase](db: Database[T], collectionName: string): Collection[T] = 256 | ## Retrieves collection from Mongo Database 257 | result.new() 258 | result.name = collectionName 259 | result.client = db.client 260 | result.db = db 261 | 262 | proc name*(db: Database): string = db.name 263 | ## Return name of database 264 | 265 | proc `name=`*(db: Database, name: string) = 266 | ## Set new database name 267 | db.name = name 268 | 269 | # === Collection API === # 270 | 271 | proc `$`*(c: Collection): string = 272 | ## String representation of collection name 273 | return c.db.name & "." & c.name 274 | 275 | proc newCursor[T](c: Collection[T]): Cursor[T] = 276 | ## Private constructor for the Find object. Find acts by taking 277 | ## client settings (flags) that can be overriden when actual 278 | ## query is performed. 279 | result.new() 280 | result.collection = c 281 | result.fields = @[] 282 | result.queryFlags = c.client.queryFlags 283 | result.nskip = 0 284 | result.nlimit = 0 285 | result.nbatchSize = 0 286 | result.cursorId = 0 287 | result.count = 0 288 | result.closed = false 289 | 290 | proc makeQuery*[T: MongoBase](c: Collection[T], query: Bson, fields: seq[string] = @[], maxTime: int32 = 0): Cursor[T] = 291 | ## Create lazy query object to MongoDB that can be actually run 292 | ## by one of the Find object procedures: `one()` or `all()`. 
293 | result = c.newCursor() 294 | result.query = query 295 | result.fields = fields 296 | if maxTime > 0: 297 | result.query["$maxTimeMS"] = maxTime.toBson() 298 | 299 | 300 | proc db*[T: MongoBase](c: Collection[T]): Database[T] = c.db 301 | ## Return the database from collection 302 | 303 | proc name*(c: Collection): string = c.name 304 | ## Return name of collection 305 | 306 | proc `name=`*(c: Collection, name: string) = 307 | ## Set new collection name 308 | c.name = name 309 | 310 | proc writeConcern*(c: Collection): WriteConcern = c.client.writeConcern 311 | ## Return write concern for collection 312 | 313 | # === Find API === # 314 | 315 | proc prepareQuery*(f: Cursor, requestId: int32, numberToReturn: int32, numberToSkip: int32): string = 316 | ## Prepare query and request queries for making OP_QUERY 317 | var bfields: Bson = newBsonDocument() 318 | if f.fields.len() > 0: 319 | for field in f.fields.items(): 320 | bfields[field] = 1'i32.toBson() 321 | let squery = f.query.bytes() 322 | let sfields: string = if f.fields.len() > 0: bfields.bytes() else: "" 323 | let colName = $(f.collection) 324 | result = "" 325 | var msg = "" 326 | buildMessageQuery(f.queryFlags, colName, numberToSkip, numberToReturn, msg) 327 | msg &= squery 328 | msg &= sfields 329 | buildMessageHeader(msg.len().int32, requestId, 0, OP_QUERY, result) 330 | result &= msg 331 | 332 | proc prepareMore*(f: Cursor, requestId: int32, numberToReturn: int32): string = 333 | ## Prepare query and request queries for making OP_GET_MORE 334 | let colName = $(f.collection) 335 | result = "" 336 | var msg = "" 337 | buildMessageMore(colName, f.cursorId, numberToReturn, msg) 338 | buildMessageHeader(msg.len().int32, requestId, 0, OP_GET_MORE, result) 339 | result &= msg 340 | 341 | proc orderBy*(f: Cursor, order: Bson): Cursor = 342 | ## Add sorting setting to query 343 | result = f 344 | f.query["$orderby"] = order 345 | 346 | proc tailableCursor*(f: Cursor, enable: bool = true): Cursor 
{.discardable.} = 347 | ## Enable/disable tailable behaviour for the cursor (cursor is not 348 | ## removed immediately after the query) 349 | result = f 350 | f.queryFlags = if enable: f.queryFlags or TailableCursor else: f.queryFlags and (not TailableCursor) 351 | 352 | proc slaveOk*(f: Cursor, enable: bool = true): Cursor {.discardable.} = 353 | ## Enable/disable querying from slaves in replica sets 354 | result = f 355 | f.queryFlags = if enable: f.queryFlags or SlaveOk else: f.queryFlags and (not SlaveOk) 356 | 357 | proc noCursorTimeout*(f: Cursor, enable: bool = true): Cursor {.discardable.} = 358 | ## Enable/disable cursor idle timeout 359 | result = f 360 | f.queryFlags = if enable: f.queryFlags or NoCursorTimeout else: f.queryFlags and (not NoCursorTimeout) 361 | 362 | proc awaitData*(f: Cursor, enable: bool = true): Cursor {.discardable.} = 363 | ## Enable/disable data waiting behaviour (along with tailable cursor) 364 | result = f 365 | f.queryFlags = if enable: f.queryFlags or AwaitData else: f.queryFlags and (not AwaitData) 366 | 367 | proc exhaust*(f: Cursor, enable: bool = true): Cursor {.discardable.} = 368 | ## Enable/disable exhaust flag which forces database to give away 369 | ## all data for the query in form of "get more" packages. 370 | result = f 371 | f.queryFlags = if enable: f.queryFlags or Exhaust else: f.queryFlags and (not Exhaust) 372 | 373 | proc allowPartial*(f: Cursor, enable: bool = true): Cursor {.discardable.} = 374 | ## Enable/disable allowance for partial data retrieval from mongo when 375 | ## one or more shards are down. 
376 | result = f 377 | f.queryFlags = if enable: f.queryFlags or Partial else: f.queryFlags and (not Partial) 378 | 379 | proc skip*(f: Cursor, numSkip: int32): Cursor {.discardable.} = 380 | ## Specify number of documents from return sequence to skip 381 | result = f 382 | result.nskip = numSkip 383 | 384 | proc limit*(f: Cursor, numLimit: int32): Cursor {.discardable.} = 385 | ## Specify number of documents to return from database 386 | result = f 387 | result.nlimit = numLimit # Should be negative if hard limit, else soft limit used 388 | 389 | proc batchSize*(f: Cursor, numBatchSize: int32): Cursor {.discardable.} = 390 | ## Specify number of documents in first reply. Conflicts with limit 391 | result = f 392 | result.nbatchSize = numBatchSize 393 | 394 | proc calcReturnSize*(f: Cursor): int32 = 395 | if f.nlimit == 0: 396 | result = f.nbatchSize 397 | elif f.nlimit < 0: 398 | result = f.nlimit 399 | else: 400 | result = f.nlimit - f.count 401 | if result <= 0: 402 | f.closed = true 403 | # TODO Add kill cursor functionality here 404 | if f.nbatchSize > 0: 405 | result = min(result, f.nbatchSize).int32 406 | 407 | proc updateCount*(f: Cursor, count: int32) = 408 | ## Increasing the count of returned documents 409 | f.count += count 410 | 411 | proc isClosed*(f: Cursor): bool = f.closed 412 | ## Return status of cursor 413 | 414 | proc close*(f: Cursor) = 415 | ## Close cursor 416 | f.closed = true 417 | 418 | proc `$`*(f: Cursor): string = $f.query 419 | ## Return query of cursor as a string 420 | 421 | proc connection*[T: MongoBase](f: Cursor[T]): T = f.collection.client 422 | ## Get connection of cursor 423 | 424 | proc collection*[T: MongoBase](f: Cursor[T]): Collection[T] = f.collection 425 | ## Get collection from cursor 426 | 427 | proc cursorId*(f: Cursor): int64 = f.cursorId 428 | ## Return cursor ID 429 | 430 | proc `cursorId=`*(f: Cursor, cursorId: int64) = 431 | ## Set cursor ID 432 | f.cursorId = cursorId 433 | 434 | proc nskip*(f: Cursor): int32 = 
f.nskip 435 | ## Return amount of documents to skip 436 | 437 | proc filter*(f: Cursor): Bson = f.query["$query"] 438 | ## Return filter of query from cursor 439 | -------------------------------------------------------------------------------- /tests/mongotest.nim: -------------------------------------------------------------------------------- 1 | import asyncdispatch 2 | import oids 3 | import strutils 4 | import times 5 | import os 6 | import unittest 7 | 8 | import nimongo/bson 9 | import nimongo/mongo 10 | 11 | # TODO: unused 12 | import timeit 13 | 14 | {.hint[XDeclaredButNotUsed]: off.} 15 | {.warning[UnusedImport]: off.} 16 | 17 | const 18 | TestDB = "testdb" 19 | TestSyncCol = "sync" 20 | TestAsyncCol = "async" 21 | blob {.strdefine.} : string = "" 22 | bucket {.strdefine.}: string = "test_bucket" 23 | upload {.strdefine.}: string = "" 24 | 25 | var 26 | sm: Mongo = newMongo(maxConnections=2) ## Mongo synchronous client 27 | am: AsyncMongo = newAsyncMongo() ## Mongo asynchronous client 28 | 29 | let 30 | sdb: Database[Mongo] = sm[TestDB] 31 | adb: Database[AsyncMongo] = am[TestDB] 32 | sco: Collection[Mongo] = sdb[TestSyncCol] 33 | aco: Collection[AsyncMongo] = adb[TestAsyncCol] 34 | 35 | # Connection is required for running tests 36 | require(sm.connect()) 37 | require(waitFor(am.connect())) 38 | 39 | 40 | suite "Mongo instance administration commands test suite": 41 | 42 | echo "\n Mongo instance administration commands test suite\n" 43 | 44 | setup: 45 | discard 46 | 47 | test "[ASYNC] [SYNC] Init": 48 | check: 49 | sm.writeConcern["w"].toInt32() == writeConcernDefault()["w"].toInt32() 50 | am.writeConcern["j"].toBool() == writeConcernDefault()["j"].toBool() 51 | 52 | test "[ASYNC] [SYNC] Command: 'isMaster'": 53 | var m: bool 54 | m = sm.isMaster() 55 | m = waitFor(am.isMaster()) 56 | 57 | test "[ASYNC] [SYNC] Command: 'dropDatabase'": 58 | check(sdb.drop()) 59 | check(waitFor(adb.drop())) 60 | 61 | test "[ASYNC] [SYNC] Command: 'listDatabases'": 62 | 
sco.insert(%*{"test": "test"}) 63 | check("testdb" in sm.listDatabases()) 64 | check("testdb" in waitFor(am.listDatabases())) 65 | sco.remove(%*{"test": "test"}, limit=1) 66 | 67 | test "[ASYNC] [SYNC] Command: 'create' collection": 68 | discard sdb.createCollection("smanual") 69 | check("smanual" in sdb.listCollections()) 70 | 71 | discard waitFor(adb.createCollection("amanual")) 72 | check("amanual" in waitFor(adb.listCollections())) 73 | 74 | test "[ASYNC] [SYNC] Command: 'listCollections'": 75 | let sclist = sdb.listCollections() 76 | check("amanual" in sclist) 77 | check("smanual" in sclist) 78 | 79 | let aclist = waitFor(adb.listCollections()) 80 | check("amanual" in aclist) 81 | check("smanual" in aclist) 82 | 83 | test "[ASYNC] [SYNC] Command: 'renameCollection'": 84 | check(sco.insert(%*{})) 85 | check(waitFor(aco.insert(%*{}))) 86 | 87 | check(sco.rename("syncnew")) 88 | check(waitFor(aco.rename("asyncnew"))) 89 | 90 | check(sco.rename("sync")) 91 | check(waitFor(aco.rename("async"))) 92 | 93 | suite "Mongo connection error-handling operations": 94 | 95 | echo "\n Mongo connection error-handling operations\n" 96 | 97 | setup: 98 | discard 99 | 100 | test "[ASYNC] [SYNC] Command: 'getLastError'": 101 | check(sm.getLastError().ok) 102 | check(waitFor(am.getLastError()).ok) 103 | 104 | test "[ASYNC] [SYNC] Write operations error handling": 105 | discard sdb.createCollection("smanual") 106 | let sReplyCreate = sdb.createCollection("smanual") 107 | check(not sReplyCreate.ok) 108 | check(sReplyCreate.err.contains("already exists")) 109 | 110 | discard waitFor(adb.createCollection("amanual")) 111 | let aReplyCreate = waitFor(adb.createCollection("amanual")) 112 | check(not aReplyCreate.ok) 113 | check(aReplyCreate.err.contains("already exists")) 114 | 115 | suite "Authentication": 116 | 117 | echo "\n Authentication\n" 118 | 119 | setup: 120 | discard 121 | 122 | test "[ASYNC] [SYNC] Command: 'authenticate', method: 'SCRAM-SHA-1'": 123 | 
check(sdb.createUser("test1", "test")) 124 | let authtest = newMongoWithURI("mongodb://test1:test@localhost:27017/testdb") 125 | check(authtest.authDb == TestDB) 126 | authtest[TestDB][TestSyncCol].insert(%*{"data": "auth"}) 127 | check(authtest.authenticated == true) 128 | check(sdb.dropUser("test1")) 129 | 130 | check(waitFor(adb.createUser("test2", "test2"))) 131 | let authtest2 = newAsyncMongoWithURI("mongodb://test2:test2@localhost:27017/testdb") 132 | check(authtest2.authDb == TestDB) 133 | discard waitFor(authtest2[TestDB][TestAsyncCol].insert(%*{"data": "auth"})) 134 | check(authtest2.authenticated == true) 135 | check(waitFor(adb.dropUser("test2"))) 136 | 137 | suite "User Management": 138 | 139 | echo "\n User management\n" 140 | 141 | setup: 142 | discard 143 | 144 | test "[ASYNC][SYNC] Command: 'createUser' without roles and custom data": 145 | check(sdb.createUser("testuser", "testpass")) 146 | check(waitFor(adb.createUser("testuser2", "testpass2"))) 147 | check(sdb.dropUser("testuser")) 148 | check(waitFor(adb.dropUser("testuser2"))) 149 | 150 | test "[ASYNC][SYNC] Command: 'dropUser'": 151 | check(sdb.createUser("testuser2", "testpass2")) 152 | check(sdb.dropUser("testuser2")) 153 | check(waitFor(adb.createUser("testuser2", "testpass2"))) 154 | check(waitFor(adb.dropUser("testuser2"))) 155 | 156 | suite "Mongo collection-level operations": 157 | 158 | echo "\n Mongo collection-level operations\n" 159 | 160 | setup: 161 | discard sco.drop() 162 | discard waitFor(aco.drop()) 163 | 164 | test "[ASYNC] [SYNC] 'count' documents in collection": 165 | 166 | check(sco.insert( 167 | @[ 168 | %*{"iter": 0.int32, "label": "l"}, 169 | %*{"iter": 1.int32, "label": "l"}, 170 | %*{"iter": 2.int32, "label": "l"}, 171 | %*{"iter": 3.int32, "label": "l"}, 172 | %*{"iter": 4.int32, "label": "l"}, 173 | ] 174 | )) 175 | check(sco.count() == 5) 176 | 177 | check(waitFor(aco.insert( 178 | @[ 179 | %*{"iter": 0.int32, "label": "l"}, 180 | %*{"iter": 1.int32, "label": "l"}, 
181 | %*{"iter": 2.int32, "label": "l"}, 182 | %*{"iter": 3.int32, "label": "l"}, 183 | %*{"iter": 4.int32, "label": "l"}, 184 | ] 185 | ))) 186 | check(waitFor(aco.count()) == 5) 187 | 188 | test "[ASYNC] [SYNC] 'drop' collection": 189 | check(sco.insert(%*{"svalue": "hello"})) 190 | discard sco.drop() 191 | check(sco.find(%*{"svalue": "hello"}).all().len() == 0) 192 | 193 | check(waitFor(aco.insert(%*{"svalue": "hello"}))) 194 | discard waitFor(aco.drop()) 195 | check(waitFor(aco.find(%*{"svalue": "hello"}).all()).len() == 0) 196 | 197 | 198 | suite "Mongo client operations test suite": 199 | 200 | echo "\n Mongo client operations \n" 201 | 202 | setup: 203 | discard sco.drop() 204 | discard waitFor(aco.drop()) 205 | 206 | test "[ASYNC] [SYNC] Mongo object `$` operator": 207 | check($sm == "mongodb://127.0.0.1:27017") 208 | check($am == "mongodb://127.0.0.1:27017") 209 | 210 | test "[ASYNC] [SYNC] Taking database": 211 | check($sdb == "testdb") 212 | check($adb == "testdb") 213 | 214 | test "[ASYNC] [SYNC] Taking collection": 215 | check($sco == "testdb.sync") 216 | check($aco == "testdb.async") 217 | 218 | test "[ASYNC] [SYNC] Inserting single document": 219 | check(sco.insert(%*{"double": 3.1415})) 220 | check(waitFor(aco.insert(%*{"double": 3.1415}))) 221 | 222 | check(sco.find(%*{"double": 3.1415}).all().len() == 1) 223 | check(waitFor(aco.find(%*{"double": 3.1415}).all()).len() == 1) 224 | 225 | test "[ASYNC] [SYNC] Inserting multiple documents": 226 | let 227 | doc1 = %*{"integer": 100'i32} 228 | doc2 = %*{"string": "hello", "subdoc": {"name": "John"}} 229 | doc3 = %*{"array": ["element1", "element2", "element3"]} 230 | 231 | check(sco.insert(@[doc1, doc2, doc3])) 232 | check(waitFor(aco.insert(@[doc1, doc2, doc3]))) 233 | 234 | test "[ASYNC] [SYNC] Update single document": 235 | let 236 | selector = %*{"integer": "integer"} 237 | updater = %*{"$set": {"integer": "string"}} 238 | 239 | check(sco.insert(@[selector, selector])) 240 | 
check(waitFor(aco.insert(@[selector, selector]))) 241 | 242 | check(sco.update(selector, updater, false, false)) 243 | check(waitFor(aco.update(selector, updater, false, false))) 244 | 245 | check(sco.find(%*{"integer": "string"}).all().len() == 1) 246 | check(waitFor(aco.find(%*{"integer": "string"}).all()).len() == 1) 247 | 248 | test "[ASYNC] [SYNC] Update multiple documents": 249 | let 250 | selector = %*{"integer": 100'i32} 251 | doc1 = %*{"integer": 100'i32} 252 | doc2 = %*{"integer": 100'i32} 253 | doc3 = %*{"integer": 100'i32} 254 | doc4 = %*{"integer": 100'i32} 255 | updater = %*{"$set": {"integer": 200'i32}} 256 | 257 | check(sco.insert(@[doc1, doc2])) 258 | check(waitFor(aco.insert(@[doc3, doc4]))) 259 | 260 | check(sco.update(selector, updater, true, false)) 261 | check(waitFor(aco.update(selector, updater, true, false))) 262 | 263 | check(sco.find(%*{"integer": 200'i32}).all().len() == 2) 264 | check(waitFor(aco.find(%*{"integer": 200'i32}).all()).len() == 2) 265 | 266 | test "[ASYNC] [SYNC] Upsert": 267 | let 268 | selector = %*{"integer": 100'i64} 269 | updater = %*{"$set": {"integer": 200'i64}} 270 | 271 | check(sco.update(selector, updater, false, true)) 272 | check(waitFor(aco.update(selector, updater, false, true))) 273 | 274 | check(sco.find(%*{"integer": 200}).all().len() == 1) 275 | check(waitFor(aco.find(%*{"integer": 200}).all()).len() == 1) 276 | 277 | test "[ASYNC] [SYNC] Remove single document": 278 | let doc = %*{"string": "hello"} 279 | check(sco.insert(doc)) 280 | check(sco.remove(doc, limit=1).ok) 281 | check(waitFor(aco.insert(doc))) 282 | check(waitFor(aco.remove(doc, limit=1)).ok) 283 | 284 | test "[ASYNC] [SYNC] Remove multiple documents": 285 | check(sco.insert(@[%*{"string": "value"}, %*{"string": "value"}])) 286 | check(sco.remove(%*{"string": "value"}).ok) 287 | check(sco.find(%*{"string": "value"}).all().len() == 0) 288 | 289 | check(waitFor(aco.insert(@[%*{"string": "value"}, %*{"string": "value"}]))) 290 | 
check(waitFor(aco.remove(%*{"string": "value"})).ok) 291 | check(waitFor(aco.find(%*{"string": "value"}).all()).len() == 0) 292 | 293 | 294 | suite "Mongo aggregation commands": 295 | 296 | echo "\n Mongo aggregation commands\n" 297 | 298 | setup: 299 | discard sco.drop() 300 | discard waitFor(aco.drop()) 301 | 302 | test "[ASYNC] [SYNC] Count documents in query result": 303 | sco.insert(@[%*{"string": "value"}, %*{"string": "value"}]) 304 | check(sco.find(%*{"string": "value"}).count() == 2) 305 | 306 | check(waitFor(aco.insert(@[%*{"string": "value"}, %*{"string": "value"}]))) 307 | check(waitFor(aco.find(%*{"string": "value"}).count()) == 2) 308 | 309 | test "[ASYNC] [SYNC] Query distinct values by field in collection documents": 310 | sco.insert(@[%*{"string": "value", "int": 1'i64}, %*{"string": "value", "double": 2.0}]) 311 | check(sco.find(%*{"string": "value"}).unique("string") == @["value"]) 312 | 313 | check(waitFor(aco.insert(@[%*{"string": "value", "int": 1'i64}, %*{"string": "value", "double": 2.0}]))) 314 | check(waitFor(aco.find(%*{"string": "value"}).unique("string")) == @["value"]) 315 | 316 | test "[ASYNC] [SYNC] Sort query results": 317 | sco.insert(@[%*{"i": 5}, %*{"i": 3}, %*{"i": 4}, %*{"i": 2}]) 318 | let res = sco.find(%*{}).orderBy(%*{"i": 1}).all() 319 | check: 320 | res[0]["i"].toInt == 2 321 | res[^1]["i"].toInt == 5 322 | 323 | discard waitFor(aco.insert(@[%*{"i": 5}, %*{"i": 3}, %*{"i": 4}, %*{"i": 2}])) 324 | let ares = waitFor(aco.find(%*{}).orderBy(%*{"i": 1}).all()) 325 | check: 326 | ares[0]["i"].toInt == 2 327 | ares[^1]["i"].toInt == 5 328 | 329 | 330 | suite "Mongo client querying test suite": 331 | 332 | echo "\n Mongo client querying\n" 333 | 334 | setup: 335 | discard sco.drop() 336 | discard waitFor(aco.drop()) 337 | 338 | test "[ASYNC] [SYNC] Query single document": 339 | let myId = genOid() 340 | check(sco.insert(%*{"string": "somedoc", "myid": myId})) 341 | check(sco.find(%*{"myid": myId}).one()["myid"].toOid() == myId) 
342 | 343 | check(waitFor(aco.insert(%*{"string": "somedoc", "myid": myId}))) 344 | check(waitFor(aco.find(%*{"myid": myId}).one())["myid"].toOid() == myId) 345 | 346 | test "[ASYNC] [SYNC] Query multiple documents as a sequence": 347 | check(sco.insert(@[%*{"string": "value"}, %*{"string": "value"}])) 348 | check(sco.find(%*{"string": "value"}).all().len() == 2) 349 | 350 | check(waitFor(aco.insert(@[%*{"string": "value"}, %*{"string": "value"}]))) 351 | check(waitFor(aco.find(%*{"string": "value"}).all()).len() == 2) 352 | 353 | test "[ASYNC] [SYNC] Query multiple documents as iterator": 354 | check(sco.insert(%*{"string": "hello"})) 355 | check(sco.insert(%*{"string": "hello"})) 356 | for document in sco.find(%*{"string": "hello"}).items(): 357 | check(document["string"].toString == "hello") 358 | 359 | check(waitFor(aco.insert(%*{"string": "hello"}))) 360 | check(waitFor(aco.insert(%*{"string": "hello"}))) 361 | proc testIterAsync() {.async.} = 362 | let cur = aco.find(%*{"string": "hello"}) 363 | for document in cur: 364 | check(document["string"].toString == "hello") 365 | waitFor(testIterAsync()) 366 | 367 | test "[ASYNC] [SYNC] Query multiple documents up to limit": 368 | check(sco.insert( 369 | @[ 370 | %*{"iter": 0.int32, "label": "l"}, 371 | %*{"iter": 1.int32, "label": "l"}, 372 | %*{"iter": 2.int32, "label": "l"}, 373 | %*{"iter": 3.int32, "label": "l"}, 374 | %*{"iter": 4.int32, "label": "l"} 375 | ] 376 | )) 377 | check(sco.find(%*{"label": "l"}).limit(3).all().len() == 3) 378 | 379 | check(waitFor(aco.insert( 380 | @[ 381 | %*{"iter": 0.int32, "label": "l"}, 382 | %*{"iter": 1.int32, "label": "l"}, 383 | %*{"iter": 2.int32, "label": "l"}, 384 | %*{"iter": 3.int32, "label": "l"}, 385 | %*{"iter": 4.int32, "label": "l"} 386 | ] 387 | ))) 388 | check(waitFor(aco.find(%*{"label": "l"}).limit(3).all()).len() == 3) 389 | 390 | test "[ASYNC] [SYNC] Skip documents": 391 | check(sco.insert( 392 | @[ 393 | %*{"iter": 0.int32, "label": "l"}, 394 | %*{"iter": 
1.int32, "label": "l"}, 395 | %*{"iter": 2.int32, "label": "l"}, 396 | %*{"iter": 3.int32, "label": "l"}, 397 | %*{"iter": 4.int32, "label": "l"}, 398 | ] 399 | )) 400 | check(sco.find(%*{"label": "l"}).skip(3).all().len() == 2) 401 | 402 | check(waitFor(aco.insert( 403 | @[ 404 | %*{"iter": 0.int32, "label": "l"}, 405 | %*{"iter": 1.int32, "label": "l"}, 406 | %*{"iter": 2.int32, "label": "l"}, 407 | %*{"iter": 3.int32, "label": "l"}, 408 | %*{"iter": 4.int32, "label": "l"}, 409 | ] 410 | ))) 411 | check(waitFor(aco.find(%*{"label": "l"}).skip(3).all()).len() == 2) 412 | 413 | suite "Mongo tailable cursor operations": 414 | 415 | echo "\n Mongo tailable cursor operations\n" 416 | 417 | setup: 418 | discard sco.drop() 419 | discard waitFor(aco.drop()) 420 | discard sdb["capped"].drop() 421 | discard waitFor(adb["capped"].drop()) 422 | 423 | when not compileOption("threads"): 424 | test "[ASYNC] [ SYNC ] Read documents one by one in collection": 425 | discard sdb.createCollection("capped", capped=true, maxSize=10000) 426 | let sccoll = sdb["capped"] 427 | let cur = sccoll.find(%*{"label": "t"}, maxTime=1500).tailableCursor().awaitData() 428 | discard sccoll.insert(%*{"iter": 0.int32, "label": "t"}) 429 | var data: seq[Bson] = @[] 430 | try: 431 | data = cur.next() 432 | check(data.len == 1) 433 | check(data[0]["iter"].toInt32 == 0.int32) 434 | discard sccoll.insert(%*{"iter": 1.int32, "label": "t"}) 435 | data = cur.next() 436 | check(data.len == 1) 437 | check(data[0]["iter"].toInt32 == 1.int32) 438 | discard sccoll.insert(%*{"iter": 2.int32, "label": "t"}) 439 | data = cur.next() 440 | check(data.len == 1) 441 | check(data[0]["iter"].toInt32 == 2.int32) 442 | discard sccoll.insert(%*{"iter": 3.int32, "label": "t"}) 443 | data = cur.next() 444 | check(data.len == 1) 445 | check(data[0]["iter"].toInt32 == 3.int32) 446 | data = cur.next() 447 | check(data.len == 0) 448 | except OperationTimeout: 449 | echo "Operation timed out" 450 | discard sccoll.drop() 451 | 452 | 
discard waitFor(adb.createCollection("capped", capped=true, maxSize=10000)) 453 | let accoll = adb["capped"] 454 | discard waitFor(accoll.insert(%*{"iter": 0.int32, "label": "t1"})) 455 | 456 | proc inserterAsync() {.async.} = 457 | await sleepAsync(1) 458 | discard await accoll.insert(%*{"iter": 1.int32, "label": "t1"}) 459 | discard await accoll.insert(%*{"iter": 2.int32, "label": "t1"}) 460 | await sleepAsync(0.5) 461 | discard await accoll.insert(%*{"iter": 3.int32, "label": "t1"}) 462 | 463 | proc readerAsync() {.async.} = 464 | let cur = accoll.find(%*{"label": "t1"}, maxTime=1500).tailableCursor().awaitData() 465 | var counter = 0 466 | while counter < 4: 467 | try: 468 | let data = await cur.next() 469 | if data.len > 0: 470 | check(data[0]["iter"].toInt32 < 4.int32) 471 | counter += 1 472 | except OperationTimeout: 473 | echo "Operation timed out" 474 | break 475 | try: 476 | let data = await cur.next() 477 | check(data.len == 0) 478 | except OperationTimeout: 479 | echo "Operation timed out" 480 | 481 | proc testTailableAsync() {.async.} = 482 | let fut = readerAsync() 483 | await inserterAsync() 484 | await fut 485 | 486 | waitFor(testTailableAsync()) 487 | discard waitFor(accoll.drop()) 488 | else: 489 | test "[ASYNC] [SYNC] Read documents from capped collection": 490 | discard sdb.createCollection("capped", capped=true, maxSize=10000) 491 | let sccoll = sdb["capped"] 492 | discard sccoll.insert(%*{"iter": 0.int32, "label": "t"}) 493 | 494 | proc inserterSync(sccoll: Collection[Mongo]) {.thread.} = 495 | sleep(1000) 496 | discard sccoll.insert(%*{"iter": 1.int32, "label": "t"}) 497 | discard sccoll.insert(%*{"iter": 2.int32, "label": "t"}) 498 | sleep(500) 499 | discard sccoll.insert(%*{"iter": 3.int32, "label": "t"}) 500 | 501 | proc readerSync(sccoll: Collection[Mongo]) {.thread.} = 502 | let cur = sccoll.find(%*{"label": "t"}, maxTime=1500).tailableCursor().awaitData() 503 | var counter = 0 504 | while counter < 4: 505 | try: 506 | let data = 
cur.next() 507 | if data.len > 0: 508 | check(data[0]["iter"].toInt32 < 4.int32) 509 | counter += 1 510 | except OperationTimeout: 511 | echo "Operation timed out" 512 | break 513 | try: 514 | let data = cur.next() 515 | check(data.len == 0) 516 | except OperationTimeout: 517 | echo "Operation timed out" 518 | 519 | var thr: array[2, Thread[Collection[Mongo]]] 520 | createThread[Collection[Mongo]](thr[1], readerSync, sccoll) 521 | createThread[Collection[Mongo]](thr[0], inserterSync, sccoll) 522 | joinThreads(thr) 523 | discard sccoll.drop() 524 | 525 | discard waitFor(adb.createCollection("capped", capped=true, maxSize=10000)) 526 | let accoll = adb["capped"] 527 | discard waitFor(accoll.insert(%*{"iter": 0.int32, "label": "t1"})) 528 | 529 | proc inserterAsync() {.async.} = 530 | await sleepAsync(1) 531 | discard await accoll.insert(%*{"iter": 1.int32, "label": "t1"}) 532 | discard await accoll.insert(%*{"iter": 2.int32, "label": "t1"}) 533 | await sleepAsync(0.5) 534 | discard await accoll.insert(%*{"iter": 3.int32, "label": "t1"}) 535 | 536 | proc readerAsync() {.async.} = 537 | let cur = accoll.find(%*{"label": "t1"}, maxTime=1500).tailableCursor().awaitData() 538 | var counter = 0 539 | while counter < 4: 540 | try: 541 | let data = await cur.next() 542 | if data.len > 0: 543 | check(data[0]["iter"].toInt32 < 4.int32) 544 | counter += 1 545 | except: 546 | echo "Operation timed out" 547 | break 548 | let data = await cur.next() 549 | check(data.len == 0) 550 | 551 | proc testTailableAsync() {.async.} = 552 | let fut = readerAsync() 553 | await inserterAsync() 554 | await fut 555 | 556 | waitFor(testTailableAsync()) 557 | discard waitFor(accoll.drop()) 558 | 559 | 560 | if blob == "": 561 | echo() 562 | echo "Cannot run test for GridFS." 563 | echo "No file given, re-run with -d:blob=" 564 | echo "to test uploading file add -d:upload=." 
565 | echo "Optionally define the bucket name with -d:bucket=, default 'test_bucket'" 566 | else: 567 | suite "Mongo GridFS test suite": 568 | var sbcon = newMongo() 569 | require(sbcon.connect) 570 | var sbdb = sbcon["temptest"] 571 | var sbuck: GridFS[Mongo] 572 | var abuck: GridFS[AsyncMongo] 573 | test "[SYNC] Create Bucket": 574 | sbuck = sbdb.createBucket(bucket) 575 | let colllist = sbdb.listCollections 576 | check(not sbuck.isNil) 577 | check(($sbuck & ".files") in colllist) 578 | check(($sbuck & ".chunks") in colllist) 579 | test "[SYNC] Get the bucket": 580 | sbuck = sbdb.getBucket(bucket) 581 | check(not sbuck.isNil) 582 | check(not sbuck.files.isNil) 583 | check(not sbuck.chunks.isNil) 584 | when upload != "": 585 | test "[SYNC] Upload file": 586 | let upsucc = waitFor sbuck.uploadFile(upload, chunksize = 1024 * 1024) 587 | check upsucc 588 | test "[SYNC] Download file": 589 | var downfile = blob 590 | if upload != "": 591 | let (_, fname, ext) = splitFile upload 592 | downfile = fname & ext 593 | let downsucc = waitFor sbuck.downloadFile(downfile) 594 | check downsucc 595 | 596 | echo "" 597 | 598 | # Collections must not exist before tests in the suite 599 | discard sco.drop() 600 | discard waitFor(aco.drop()) 601 | -------------------------------------------------------------------------------- /nimongo/mongo.nim: -------------------------------------------------------------------------------- 1 | import asyncdispatch 2 | import asyncnet 3 | import random 4 | import md5 5 | import net 6 | import oids 7 | import sequtils 8 | import strutils 9 | import tables 10 | import typetraits 11 | import times 12 | import uri 13 | import asyncfile 14 | import strformat 15 | import mimetypes 16 | import os 17 | 18 | import bson except `()` 19 | 20 | import ./private/auth 21 | import ./private/clientbase 22 | import ./private/errors 23 | import ./private/reply 24 | import ./private/writeconcern 25 | import ./private/async 26 | when compileOption("threads"): 27 | 
import ./private/threaded as sync 28 | else: 29 | import ./private/single as sync 30 | 31 | randomize() 32 | 33 | export auth 34 | export clientbase except nextRequestId, init, calcReturnSize, updateCount, makeQuery, prepareMore, prepareQuery 35 | export errors 36 | export reply 37 | export writeconcern 38 | export async except acquire, release, refresh 39 | export sync except acquire, release, refresh 40 | 41 | # === Mongo client API === # 42 | 43 | proc replica*[T:Mongo|AsyncMongo](mb: T, nodes: seq[tuple[host: string, port: uint16]]) = 44 | for node in nodes: 45 | when T is Mongo: 46 | mb.replicas.add((host: node.host, port: sockets.Port(node.port))) 47 | when T is AsyncMongo: 48 | mb.replicas.add((host: node.host, port: asyncnet.Port(node.port))) 49 | 50 | # === Database API === # 51 | 52 | proc newMongoDatabase*(u: string): Database[Mongo] {.deprecated.} = 53 | ## Create new Mongo sync client using URI as string 54 | return newMongoDatabase(parseUri(u)) 55 | 56 | proc newAsyncMongoDatabase*(u: string, maxConnections = 16): Future[Database[AsyncMongo]] {.deprecated.} = 57 | ## Create new Mongo async client using URI as string 58 | return newAsyncMongoDatabase(parseUri(u), maxConnections) 59 | 60 | # === Collection API === # 61 | 62 | proc find*[T:Mongo|AsyncMongo](c: Collection[T], filter: Bson, fields: seq[string] = @[], maxTime: int32 = 0): Cursor[T] = 63 | ## Find query 64 | result = c.makeQuery( 65 | %*{ 66 | "$query": filter 67 | }, 68 | fields, 69 | maxTime 70 | ) 71 | 72 | # === Find API === # 73 | 74 | proc all*(f: Cursor[Mongo]): seq[Bson] = 75 | ## Perform MongoDB query and return all matching documents 76 | while not f.isClosed(): 77 | result.add(f.refresh()) 78 | 79 | proc all*(f: Cursor[AsyncMongo]): Future[seq[Bson]] {.async.} = 80 | ## Perform MongoDB query asynchronously and return all matching documents. 
81 | while not f.isClosed(): 82 | let ret = await f.refresh() 83 | result.add(ret) 84 | 85 | proc one*(f: Cursor[Mongo]): Bson = 86 | ## Perform MongoDB query and return first matching document 87 | let docs = f.limit(1).refresh() 88 | if docs.len == 0: 89 | raise newException(NotFound, "No documents matching query were found") 90 | return docs[0] 91 | 92 | proc one*(f: Cursor[AsyncMongo]): Future[Bson] {.async.} = 93 | ## Perform MongoDB query asynchronously and return first matching document. 94 | let docs = await f.limit(1).refresh() 95 | if docs.len == 0: 96 | raise newException(NotFound, "No documents matching query were found") 97 | return docs[0] 98 | 99 | proc oneOrNone*(f: Cursor[Mongo]): Bson = 100 | ## Perform MongoDB query and return first matching document or 101 | ## nil if not found. 102 | let docs = f.limit(1).refresh() 103 | if docs.len > 0: 104 | result = docs[0] 105 | 106 | proc oneOrNone*(f: Cursor[AsyncMongo]): Future[Bson] {.async.} = 107 | ## Perform MongoDB query asynchronously and return first matching document or 108 | ## nil if not found. 
109 | let docs = await f.limit(1).refresh() 110 | if docs.len > 0: 111 | result = docs[0] 112 | 113 | iterator items*(f: Cursor[Mongo]): Bson = 114 | ## Perform MongoDB query and return iterator for all matching documents 115 | while not f.isClosed(): 116 | let docs = f.refresh() 117 | for doc in docs: 118 | yield doc 119 | 120 | iterator items*(f: Cursor[AsyncMongo]): Bson = 121 | ## Perform MongoDB query and return iterator for all matching documents 122 | while not f.isClosed(): 123 | let docs = waitFor f.refresh() 124 | for doc in docs: 125 | yield doc 126 | 127 | iterator itemsForceSync*(f: Cursor[AsyncMongo]): Bson = 128 | while not f.isClosed(): 129 | let docs = waitFor f.refresh() 130 | for doc in docs: 131 | yield doc 132 | 133 | proc next*(f: Cursor[Mongo]): seq[Bson] = 134 | ## Perform MongoDB query for next batch of documents 135 | return f.refresh() 136 | 137 | proc next*(f: Cursor[AsyncMongo]): Future[seq[Bson]] {.async.} = 138 | ## Perform MongoDB query for next batch of documents 139 | let docs = await f.refresh() 140 | result = docs 141 | 142 | proc isMaster*(sm: Mongo): bool = 143 | ## Perform query in order to check if connected Mongo instance is a master 144 | return sm["admin"]["$cmd"].makeQuery(%*{"isMaster": 1}).one()["ismaster"].toBool 145 | 146 | proc isMaster*(am: AsyncMongo): Future[bool] {.async.} = 147 | ## Perform query in order to check if ocnnected Mongo instance is a master 148 | ## via async connection. 
149 | let response = await am["admin"]["$cmd"].makeQuery(%*{"isMaster": 1}).one() 150 | return response["ismaster"].toBool 151 | 152 | proc listDatabases*(sm: Mongo): seq[string] = 153 | ## Return list of databases on the server 154 | let response = sm["admin"]["$cmd"].makeQuery(%*{"listDatabases": 1}).one() 155 | if response.isReplyOk: 156 | for db in response["databases"].items(): 157 | result.add(db["name"].toString()) 158 | 159 | proc listDatabases*(am: AsyncMongo): Future[seq[string]] {.async.} = 160 | ## Return list of databases on the server via async client 161 | let response = await am["admin"]["$cmd"].makeQuery(%*{"listDatabases": 1}).one() 162 | if response.isReplyOk: 163 | for db in response["databases"].items(): 164 | result.add(db["name"].toString()) 165 | 166 | proc createCollection*(db: Database[Mongo], name: string, capped: bool = false, autoIndexId: bool = true, maxSize: int = 0, maxDocs: int = 0): StatusReply = 167 | ## Create collection inside database via sync connection 168 | var request = %*{"create": name} 169 | 170 | if capped: request["capped"] = capped.toBson() 171 | if autoIndexId: request["autoIndexId"] = true.toBson() 172 | if maxSize > 0: request["size"] = maxSize.toBson() 173 | if maxDocs > 0: request["max"] = maxDocs.toBson() 174 | 175 | let response = db["$cmd"].makeQuery(request).one() 176 | return response.toStatusReply 177 | 178 | proc createCollection*(db: Database[AsyncMongo], name: string, capped: bool = false, autoIndexId: bool = true, maxSize: int = 0, maxDocs: int = 0): Future[StatusReply] {.async.} = 179 | ## Create collection inside database via async connection 180 | var request = %*{"create": name} 181 | 182 | if capped: request["capped"] = capped.toBson() 183 | if autoIndexId: request["autoIndexId"] = true.toBson() 184 | if maxSize > 0: request["size"] = maxSize.toBson() 185 | if maxDocs > 0: request["max"] = maxDocs.toBson() 186 | 187 | let response = await db["$cmd"].makeQuery(request).one() 188 | return 
response.toStatusReply 189 | 190 | proc listCollections*(db: Database[Mongo], filter: Bson = %*{}): seq[string] = 191 | ## List collections inside specified database 192 | let response = db["$cmd"].makeQuery(%*{"listCollections": 1'i32}).one() 193 | if response.isReplyOk: 194 | for col in response["cursor"]["firstBatch"]: 195 | result.add(col["name"].toString) 196 | 197 | proc listCollections*(db: Database[AsyncMongo], filter: Bson = %*{}): Future[seq[string]] {.async.} = 198 | ## List collections inside specified database via async connection 199 | let 200 | request = %*{"listCollections": 1'i32} 201 | response = await db["$cmd"].makeQuery(request).one() 202 | if response.isReplyOk: 203 | for col in response["cursor"]["firstBatch"]: 204 | result.add(col["name"].toString) 205 | 206 | proc rename*(c: Collection[Mongo], newName: string, dropTarget: bool = false): StatusReply = 207 | ## Rename collection 208 | let 209 | request = %*{ 210 | "renameCollection": $c, 211 | "to": "$#.$#" % [c.db.name, newName], 212 | "dropTarget": dropTarget 213 | } 214 | response = c.db.client["admin"]["$cmd"].makeQuery(request).one() 215 | c.name = newName 216 | return response.toStatusReply 217 | 218 | proc rename*(c: Collection[AsyncMongo], newName: string, dropTarget: bool = false): Future[StatusReply] {.async.} = 219 | ## Rename collection via async connection 220 | let 221 | request = %*{ 222 | "renameCollection": $c, 223 | "to": "$#.$#" % [c.db.name, newName], 224 | "dropTarget": dropTarget 225 | } 226 | response = await c.db.client["admin"]["$cmd"].makeQuery(request).one() 227 | c.name = newName 228 | return response.toStatusReply 229 | 230 | proc drop*(db: Database[Mongo]): bool = 231 | ## Drop database from server 232 | let response = db["$cmd"].makeQuery(%*{"dropDatabase": 1}).one() 233 | return response.isReplyOk 234 | 235 | proc drop*(db: Database[AsyncMongo]): Future[bool] {.async.} = 236 | ## Drop database from server via async connection 237 | let response = await 
db["$cmd"].makeQuery(%*{"dropDatabase": 1}).one() 238 | return response.isReplyOk 239 | 240 | proc drop*(c: Collection[Mongo]): tuple[ok: bool, message: string] = 241 | ## Drop collection from database 242 | let response = c.db["$cmd"].makeQuery(%*{"drop": c.name}).one() 243 | let status = response.toStatusReply 244 | return (ok: status.ok, message: status.err) 245 | 246 | proc drop*(c: Collection[AsyncMongo]): Future[tuple[ok: bool, message: string]] {.async.} = 247 | ## Drop collection from database via async clinet 248 | let response = await c.db["$cmd"].makeQuery(%*{"drop": c.name}).one() 249 | let status = response.toStatusReply 250 | return (ok: status.ok, message: status.err) 251 | 252 | proc stats*(c: Collection[Mongo]): Bson = 253 | return c.db["$cmd"].makeQuery(%*{"collStats": c.name}).one() 254 | 255 | proc stats*(c: Collection[AsyncMongo]): Future[Bson] {.async.} = 256 | return await c.db["$cmd"].makeQuery(%*{"collStats": c.name}).one() 257 | 258 | proc count*(c: Collection[Mongo]): int = 259 | ## Return number of documents in collection 260 | return c.db["$cmd"].makeQuery(%*{"count": c.name}).one().getReplyN 261 | 262 | proc count*(c: Collection[AsyncMongo]): Future[int] {.async.} = 263 | ## Return number of documents in collection via async client 264 | return (await c.db["$cmd"].makeQuery(%*{"count": c.name}).one()).getReplyN 265 | 266 | proc count*(f: Cursor[Mongo]): int = 267 | ## Return number of documents in find query result 268 | return f.collection.db["$cmd"].makeQuery(%*{"count": f.collection.name, "query": f.filter}).one().getReplyN 269 | 270 | proc count*(f: Cursor[AsyncMongo]): Future[int] {.async.} = 271 | ## Return number of document in find query result via async connection 272 | let 273 | response = await f.collection.db["$cmd"].makeQuery(%*{ 274 | "count": f.collection.name, 275 | "query": f.filter 276 | }).one() 277 | return response.getReplyN 278 | 279 | proc sort*[T:Mongo|AsyncMongo](f: Cursor[T], criteria: Bson): Cursor[T] = 280 | 
## Setup sorting criteria 281 | f.sorting = criteria 282 | return f 283 | 284 | proc unique*(f: Cursor[Mongo], key: string): seq[string] = 285 | ## Force cursor to return only distinct documents by specified field. 286 | ## Corresponds to '.distinct()' MongoDB command. If Nim we use 'unique' 287 | ## because 'distinct' is Nim's reserved keyword. 288 | let 289 | request = %*{ 290 | "distinct": f.collection.name, 291 | "query": f.filter, 292 | "key": key 293 | } 294 | response = f.collection.db["$cmd"].makeQuery(request).one() 295 | 296 | if response.isReplyOk: 297 | for item in response["values"].items(): 298 | result.add(item.toString()) 299 | 300 | proc unique*(f: Cursor[AsyncMongo], key: string): Future[seq[string]] {.async.} = 301 | ## Force cursor to return only distinct documents by specified field. 302 | ## Corresponds to '.distinct()' MongoDB command. If Nim we use 'unique' 303 | ## because 'distinct' is Nim's reserved keyword. 304 | let 305 | request = %*{ 306 | "distinct": f.collection.name, 307 | "query": f.filter, 308 | "key": key 309 | } 310 | response = await f.collection.db["$cmd"].makeQuery(request).one() 311 | 312 | if response.isReplyOk: 313 | for item in response["values"].items(): 314 | result.add(item.toString()) 315 | 316 | proc getLastError*(m: Mongo): StatusReply = 317 | ## Get last error happened in current connection 318 | let response = m["admin"]["$cmd"].makeQuery(%*{"getLastError": 1'i32}).one() 319 | return response.toStatusReply 320 | 321 | proc getLastError*(am: AsyncMongo): Future[StatusReply] {.async.} = 322 | ## Get last error happened in current connection 323 | let response = await am["admin"]["$cmd"].makeQuery(%*{"getLastError": 1'i32}).one() 324 | return response.toStatusReply 325 | 326 | # ============= # 327 | # Insert API # 328 | # ============= # 329 | 330 | proc insert*(c: Collection[Mongo], documents: seq[Bson], ordered: bool = true, writeConcern: Bson = nil): StatusReply {.discardable.} = 331 | ## Insert several new 
documents into MongoDB using one request 332 | 333 | # 334 | # insert any missing _id fields 335 | # 336 | var inserted_ids: seq[Bson] = @[] 337 | for doc in documents: 338 | if not doc.contains("_id"): 339 | doc["_id"] = toBson(genOid()) 340 | inserted_ids.add(doc["_id"]) 341 | 342 | # 343 | # build & send Mongo query 344 | # 345 | let 346 | request = %*{ 347 | "insert": c.name, 348 | "documents": documents, 349 | "ordered": ordered, 350 | "writeConcern": if writeConcern == nil.Bson: c.writeConcern else: writeConcern 351 | } 352 | response = c.db["$cmd"].makeQuery(request).one() 353 | 354 | return response.toStatusReply(inserted_ids=inserted_ids) 355 | 356 | proc insert*(c: Collection[Mongo], document: Bson, ordered: bool = true, writeConcern: Bson = nil): StatusReply {.discardable.} = 357 | ## Insert new document into MongoDB via sync connection 358 | return c.insert(@[document], ordered, if writeConcern == nil.Bson: c.writeConcern else: writeConcern) 359 | 360 | proc insert*(c: Collection[AsyncMongo], documents: seq[Bson], ordered: bool = true, writeConcern: Bson = nil): Future[StatusReply] {.async.} = 361 | ## Insert new documents into MongoDB via async connection 362 | 363 | # 364 | # insert any missing _id fields 365 | # 366 | var inserted_ids: seq[Bson] = @[] 367 | for doc in documents: 368 | if not doc.contains("_id"): 369 | doc["_id"] = toBson(genOid()) 370 | inserted_ids.add(doc["_id"]) 371 | 372 | # 373 | # build & send Mongo query 374 | # 375 | let 376 | request = %*{ 377 | "insert": c.name, 378 | "documents": documents, 379 | "ordered": ordered, 380 | "writeConcern": if writeConcern == nil.Bson: c.writeConcern else: writeConcern 381 | } 382 | response = await c.db["$cmd"].makeQuery(request).one() 383 | 384 | return response.toStatusReply(inserted_ids=inserted_ids) 385 | 386 | proc insert*(c: Collection[AsyncMongo], document: Bson, ordered: bool = true, writeConcern: Bson = nil): Future[StatusReply] {.async.} = 387 | ## Insert new document into MongoDB 
via async connection 388 | result = await c.insert(@[document], ordered, if writeConcern == nil.Bson: c.writeConcern else: writeConcern) 389 | 390 | # =========== # 391 | # Update API # 392 | # =========== # 393 | 394 | proc update*(c: Collection[Mongo], selector: Bson, update: Bson, multi: bool, upsert: bool): StatusReply {.discardable.} = 395 | ## Update MongoDB document[s] 396 | let 397 | request = %*{ 398 | "update": c.name, 399 | "updates": [%*{"q": selector, "u": update, "upsert": upsert, "multi": multi}], 400 | "ordered": true 401 | } 402 | response = c.db["$cmd"].makeQuery(request).one() 403 | return response.toStatusReply 404 | 405 | proc update*(c: Collection[AsyncMongo], selector: Bson, update: Bson, multi: bool, upsert: bool): Future[StatusReply] {.async.} = 406 | ## Update MongoDB document[s] via async connection 407 | let request = %*{ 408 | "update": c.name, 409 | "updates": [%*{"q": selector, "u": update, "upsert": upsert, "multi": multi}], 410 | "ordered": true 411 | } 412 | let response = await c.db["$cmd"].makeQuery(request).one() 413 | return response.toStatusReply 414 | 415 | # ==================== # 416 | # Find and modify API # 417 | # ==================== # 418 | 419 | proc findAndModify*(c: Collection[Mongo], selector: Bson, sort: Bson, update: Bson, afterUpdate: bool, upsert: bool, writeConcern: Bson = nil, remove: bool = false): Future[StatusReply] {.async.} = 420 | ## Finds and modifies MongoDB document 421 | let request = %*{ 422 | "findAndModify": c.name, 423 | "query": selector, 424 | "new": afterUpdate, 425 | "upsert": upsert, 426 | "writeConcern": if writeConcern == nil.Bson: c.writeConcern else: writeConcern 427 | } 428 | if not sort.isNil: 429 | request["sort"] = sort 430 | if remove: 431 | request["remove"] = remove.toBson() 432 | else: 433 | request["update"] = update 434 | let response = c.db["$cmd"].makeQuery(request).one() 435 | return response.toStatusReply 436 | 437 | proc findAndModify*(c: Collection[AsyncMongo], selector: 
Bson, sort: Bson, update: Bson, afterUpdate: bool, upsert: bool, writeConcern: Bson = nil, remove: bool = false): Future[StatusReply] {.async.} = 438 | ## Finds and modifies MongoDB document via async connection 439 | let request = %*{ 440 | "findAndModify": c.name, 441 | "query": selector, 442 | "new": afterUpdate, 443 | "upsert": upsert, 444 | "writeConcern": if writeConcern == nil.Bson: c.writeConcern else: writeConcern 445 | } 446 | if not sort.isNil: 447 | request["sort"] = sort 448 | if remove: 449 | request["remove"] = remove.toBson() 450 | else: 451 | request["update"] = update 452 | let response = await c.db["$cmd"].makeQuery(request).one() 453 | return response.toStatusReply 454 | 455 | # ============ # 456 | # Remove API # 457 | # ============ # 458 | 459 | proc remove*(c: Collection[Mongo], selector: Bson, limit: int = 0, ordered: bool = true, writeConcern: Bson = nil): StatusReply {.discardable.} = 460 | ## Delete document[s] from MongoDB 461 | let 462 | request = %*{ 463 | "delete": c.name, 464 | "deletes": [%*{"q": selector, "limit": limit}], 465 | "ordered": true, 466 | "writeConcern": if writeConcern == nil.Bson: c.writeConcern else: writeConcern 467 | } 468 | response = c.db["$cmd"].makeQuery(request).one() 469 | return response.toStatusReply 470 | 471 | proc remove*(c: Collection[AsyncMongo], selector: Bson, limit: int = 0, ordered: bool = true, writeConcern: Bson = nil): Future[StatusReply] {.async.} = 472 | ## Delete document[s] from MongoDB via async connection 473 | let 474 | request = %*{ 475 | "delete": c.name, 476 | "deletes": [%*{"q": selector, "limit": limit}], 477 | "ordered": true, 478 | "writeConcern": if writeConcern == nil.Bson: c.writeConcern else: writeConcern 479 | } 480 | response = await c.db["$cmd"].makeQuery(request).one() 481 | return response.toStatusReply 482 | 483 | # =============== # 484 | # User management 485 | # =============== # 486 | 487 | proc createUser*(db: DataBase[Mongo], username: string, pwd: string,
customData: Bson = newBsonDocument(), roles: Bson = newBsonArray()): bool = 488 | ## Create new user for the specified database 489 | let createUserRequest = %*{ 490 | "createUser": username, 491 | "pwd": pwd, 492 | "customData": customData, 493 | "roles": roles, 494 | "writeConcern": db.client.writeConcern 495 | } 496 | let response = db["$cmd"].makeQuery(createUserRequest).one() 497 | return response.isReplyOk 498 | 499 | proc createUser*(db: Database[AsyncMongo], username: string, pwd: string, customData: Bson = newBsonDocument(), roles: Bson = newBsonArray()): Future[bool] {.async.} = 500 | ## Create new user for the specified database via async client 501 | let 502 | createUserRequest = %*{ 503 | "createUser": username, 504 | "pwd": pwd, 505 | "customData": customData, 506 | "roles": roles, 507 | "writeConcern": db.client.writeConcern 508 | } 509 | response = await db["$cmd"].makeQuery(createUserRequest).one() 510 | return response.isReplyOk 511 | 512 | proc dropUser*(db: Database[Mongo], username: string): bool = 513 | ## Drop user from the db 514 | let 515 | dropUserRequest = %*{ 516 | "dropUser": username, 517 | "writeConcern": db.client.writeConcern 518 | } 519 | response = db["$cmd"].makeQuery(dropUserRequest).one() 520 | return response.isReplyOk 521 | 522 | proc dropUser*(db: Database[AsyncMongo], username: string): Future[bool] {.async.} = 523 | ## Drop user from the db via async client 524 | let 525 | dropUserRequest = %*{ 526 | "dropUser": username, 527 | "writeConcern": db.client.writeConcern 528 | } 529 | response = await db["$cmd"].makeQuery(dropUserRequest).one() 530 | return response.isReplyOk 531 | 532 | # ============== # 533 | # Authentication # 534 | # ============== # 535 | 536 | proc authenticate*(db: Database[Mongo], username: string, password: string): bool {.discardable.} = 537 | ## Authenticate connection (sync): using MONGODB-CR auth method 538 | if username == "" or password == "": 539 | return false 540 | 541 | let nonce = 
db["$cmd"].makeQuery(%*{"getnonce": 1'i32}).one()["nonce"].toString 542 | let passwordDigest = $toMd5("$#:mongo:$#" % [username, password]) 543 | let key = $toMd5("$#$#$#" % [nonce, username, passwordDigest]) 544 | let request = %*{ 545 | "authenticate": 1'i32, 546 | "mechanism": "MONGODB-CR", 547 | "user": username, 548 | "nonce": nonce, 549 | "key": key, 550 | "autoAuthorize": 1'i32 551 | } 552 | let response = db["$cmd"].makeQuery(request).one() 553 | return response.isReplyOk 554 | 555 | # === GridFS API === # 556 | 557 | proc createBucket*(db: Database[Mongo], name: string): GridFs[Mongo] = 558 | ## Create a grid-fs bucket collection name. Grid-fs actually just simply a collection consists of two 559 | ## 1. .files 560 | ## 2. .chunks 561 | ## Hence creating bucket is creating two collections at the same time. 562 | new result 563 | result.name = name 564 | let fcolname = name & ".files" 565 | let ccolname = name & ".chunks" 566 | let filecstat = db.createCollection(fcolname) 567 | let chunkcstat = db.createCollection(ccolname) 568 | if filecstat.ok and chunkcstat.ok: 569 | result.files = db[fcolname] 570 | result.chunks = db[ccolname] 571 | 572 | proc createBucket*(db: Database[AsyncMongo], name: string): Future[GridFs[AsyncMongo]]{.async.} = 573 | ## Create a grid-fs bucket collection name async version 574 | new result 575 | result.name = name 576 | let fcolname = name & ".files" 577 | let ccolname = name & ".chunks" 578 | let collops = @[ 579 | db.createCollection(fcolname), 580 | db.createCollection(ccolname) 581 | ] 582 | let statR = await all(collops) 583 | if statR.allIt( it.ok ): 584 | result.files = db[fcolname] 585 | result.chunks = db[ccolname] 586 | 587 | proc getBucket*[T: Mongo|AsyncMongo](db: Database[T], name: string): GridFs[T] = 588 | ## Get the bucket (GridFS) instead of collection. 
589 | let fcolname = name & ".files" 590 | let ccolname = name & ".chunks" 591 | new result 592 | result.files = db[fcolname] 593 | result.chunks = db[ccolname] 594 | result.name = name 595 | 596 | proc `$`*(g: GridFS): string = 597 | #result = &"{g.files.db.name}.{g.name}" 598 | result = g.name 599 | 600 | proc uploadFile*[T: Mongo|AsyncMongo](bucket: GridFs[T], f: AsyncFile, filename = "", 601 | metadata = null(), chunksize = 255 * 1024): Future[bool] {.async, discardable.} = 602 | ## Upload opened asyncfile with defined chunk size which defaulted at 255 KB 603 | let foid = genoid() 604 | let fsize = getFileSize f 605 | let fileentry = %*{ 606 | "_id": foid, 607 | "chunkSize": chunkSize, 608 | "length": fsize, 609 | "uploadDate": now().toTime.timeUTC, 610 | "filename": filename, 611 | "metadata": metadata 612 | } 613 | when T is Mongo: 614 | let entrystatus = bucket.files.insert(fileentry) 615 | else: 616 | let entrystatus = await bucket.files.insert(fileentry) 617 | if not entrystatus.ok: 618 | echo &"cannot upload (unknown): {entrystatus.err}" 619 | return 620 | 621 | var chunkn = 0 622 | for _ in countup(0, int(fsize-1), chunksize): 623 | var chunk = %*{ 624 | "files_id": foid, 625 | "n": chunkn 626 | } 627 | let data = await f.read(chunksize) 628 | chunk["data"] = bin data 629 | when T is Mongo: 630 | let chunkstatus = bucket.chunks.insert(chunk) 631 | else: 632 | let chunkstatus = await bucket.chunks.insert(chunk) 633 | if not chunkstatus.ok: 634 | echo &"problem happened when uploading: {chunkstatus.err}" 635 | return 636 | inc chunkn 637 | result = true 638 | 639 | proc uploadFile*[T: Mongo|AsyncMongo](bucket: GridFS[T], filename: string, 640 | metadata = null(), chunksize = 255 * 1024): Future[bool] {.async, discardable.} = 641 | ## A higher uploadFile which directly open and close file from filename.
642 | var f: AsyncFile 643 | try: 644 | f = openAsync filename 645 | except IOError: 646 | echo getCurrentExceptionMsg() 647 | return 648 | defer: close f 649 | 650 | let (_, fname, ext) = splitFile filename 651 | let m = newMimeTypes() 652 | var filemetadata = metadata 653 | if filemetadata.kind != BsonKindNull and filemetadata.kind == BsonKindDocument: 654 | filemetadata["mime"] = m.getMimeType(ext).toBson 655 | filemetadata["ext"] = ext.toBson 656 | else: 657 | filemetadata = %*{ 658 | "mime": m.getMimeType(ext), 659 | "ext": ext 660 | } 661 | result = await bucket.uploadFile(f, fname & ext, 662 | metadata = filemetadata, chunksize = chunksize) 663 | 664 | proc downloadFile*[T: Mongo|AsyncMongo](bucket: GridFS[T], f: AsyncFile, 665 | filename = ""): Future[bool] 666 | {.async, discardable.} = 667 | ## Download given filename and write it to f asyncfile. This only download 668 | ## the latest uploaded file in the same name. 669 | let q = %*{ "filename": filename } 670 | let uploadDesc = %*{ "uploadDate": -1 } 671 | let fdata = bucket.files.find(q, @["_id", "length"]).orderBy(uploadDesc).one 672 | if fdata.isNil: 673 | echo &"cannot download (unknown) to file: {getCurrentExceptionMsg()}" 674 | return 675 | 676 | let qchunk = %*{ "files_id": fdata["_id"] } 677 | let fsize = fdata["length"].toInt 678 | let selector = @["data"] 679 | let sort = %* { "n": 1 } 680 | var currsize = 0 681 | var skipdoc = 0 682 | while currsize < fsize: 683 | var chunks = bucket.chunks.find(qchunk, selector).skip(skipdoc.int32).orderBy(sort).all() 684 | skipdoc += chunks.len 685 | for chunk in chunks: 686 | let data = binstr chunk["data"] 687 | currsize += data.len 688 | await f.write(data) 689 | 690 | if currsize < fsize: 691 | echo &"incomplete file download; only at {currsize.float / fsize.float * 100}%" 692 | return 693 | 694 | result = true 695 | 696 | proc downloadFile*[T: Mongo|AsyncMongo](bucket: GridFS[T], filename: string): 697 | Future[bool]{.async, discardable.} = 698 | ##
Higher version for downloadFile. Ensure the destination file path has 699 | ## writing permission 700 | var f: AsyncFile 701 | try: 702 | f = openAsync(filename, fmWrite) 703 | except IOError: 704 | echo getCurrentExceptionMsg() 705 | return 706 | defer: close f 707 | let (dir, fname, ext) = splitFile filename 708 | result = await bucket.downloadFile(f, fname & ext) 709 | -------------------------------------------------------------------------------- /nimongo/bson.nim: -------------------------------------------------------------------------------- 1 | import base64 2 | import macros 3 | import md5 4 | import oids 5 | import streams 6 | import strutils 7 | import times 8 | import tables 9 | 10 | # ------------- type: BsonKind -------------------# 11 | 12 | type BsonKind* = char 13 | 14 | const 15 | BsonKindUnknown* = 0x00.BsonKind ## 16 | BsonKindDouble* = 0x01.BsonKind ## 64-bit floating-point 17 | BsonKindStringUTF8* = 0x02.BsonKind ## UTF-8 encoded C string 18 | BsonKindDocument* = 0x03.BsonKind ## Embedded document 19 | BsonKindArray* = 0x04.BsonKind ## Embedded array of Bson values 20 | BsonKindBinary* = 0x05.BsonKind ## Generic binary data 21 | BsonKindUndefined* = 0x06.BsonKind ## Some undefined value (deprecated) 22 | BsonKindOid* = 0x07.BsonKind ## Mongo Object ID 23 | BsonKindBool* = 0x08.BsonKind ## boolean value 24 | BsonKindTimeUTC* = 0x09.BsonKind ## int64 milliseconds (Unix epoch time) 25 | BsonKindNull* = 0x0A.BsonKind ## nil value stored in Mongo 26 | BsonKindRegexp* = 0x0B.BsonKind ## Regular expression and options 27 | BsonKindDBPointer* = 0x0C.BsonKind ## Pointer to 'db.col._id' 28 | BsonKindJSCode* = 0x0D.BsonKind ## - 29 | BsonKindDeprecated* = 0x0E.BsonKind ## - 30 | BsonKindJSCodeWithScope* = 0x0F.BsonKind ## - 31 | BsonKindInt32* = 0x10.BsonKind ## 32-bit integer number 32 | BsonKindTimestamp* = 0x11.BsonKind ## - 33 | BsonKindInt64* = 0x12.BsonKind ## 64-bit integer number 34 | BsonKindMaximumKey* = 0x7F.BsonKind ## Maximum MongoDB 
comparable value 35 | BsonKindMinimumKey* = 0xFF.BsonKind ## Minimum MongoDB comparable value 36 | 37 | type BsonSubtype* = char 38 | 39 | const 40 | BsonSubtypeGeneric* = 0x00.BsonSubtype ## 41 | BsonSubtypeFunction* = 0x01.BsonSubtype ## 42 | BsonSubtypeBinaryOld* = 0x02.BsonSubtype ## 43 | BsonSubtypeUuidOld* = 0x03.BsonSubtype ## 44 | BsonSubtypeUuid* = 0x04.BsonSubtype ## 45 | BsonSubtypeMd5* = 0x05.BsonSubtype ## 46 | BsonSubtypeUserDefined* = 0x80.BsonSubtype ## 47 | 48 | # ------------- type: Bson -----------------------# 49 | 50 | type 51 | BsonTimestamp* = object ## Internal MongoDB type used by mongos instances 52 | increment*: int32 53 | timestamp*: int32 54 | 55 | Bson* = ref object of RootObj ## Bson Node 56 | case kind*: BsonKind 57 | of BsonKindDouble: valueFloat64: float64 58 | of BsonKindStringUTF8: valueString: string 59 | of BsonKindDocument: valueDocument: OrderedTable[string, Bson] 60 | of BsonKindArray: valueArray: seq[Bson] 61 | of BsonKindBinary: 62 | case subtype: BsonSubtype 63 | of BsonSubtypeGeneric: valueGeneric: string 64 | of BsonSubtypeFunction: valueFunction: string 65 | of BsonSubtypeBinaryOld: valueBinOld: string 66 | of BsonSubtypeUuidOld: valueUuidOld: string 67 | of BsonSubtypeUuid: valueUuid: string 68 | of BsonSubtypeMd5: valueDigest: MD5Digest 69 | of BsonSubtypeUserDefined: valueUserDefined: string 70 | else: discard 71 | of BsonKindUndefined: discard 72 | of BsonKindOid: valueOid: Oid 73 | of BsonKindBool: valueBool: bool 74 | of BsonKindTimeUTC: valueTime: Time 75 | of BsonKindNull: discard 76 | of BsonKindRegexp: 77 | regex: string 78 | options: string 79 | of BsonKindDBPointer: 80 | refCol: string 81 | refOid: Oid 82 | of BsonKindJSCode: valueCode: string 83 | of BsonKindDeprecated: valueDepr: string 84 | of BsonKindJSCodeWithScope: valueCodeWS: string 85 | of BsonKindInt32: valueInt32: int32 86 | of BsonKindTimestamp: valueTimestamp: BsonTimestamp 87 | of BsonKindInt64: valueInt64: int64 88 | of BsonKindMaximumKey: 
discard 89 | of BsonKindMinimumKey: discard 90 | else: discard 91 | 92 | GeoPoint = array[0..1, float64] ## Represents Mongo Geo Point 93 | 94 | proc raiseWrongNodeException(bs: Bson) = 95 | raise newException(Exception, "Wrong node kind: " & $ord(bs.kind)) 96 | 97 | proc toBson*(x: Oid): Bson = 98 | ## Convert Mongo Object Id to Bson object 99 | return Bson(kind: BsonKindOid, valueOid: x) 100 | 101 | proc toOid*(x: Bson): Oid = 102 | ## Convert Bson to Mongo Object ID 103 | return x.valueOid 104 | 105 | proc toBson*(x: float64): Bson = 106 | ## Convert float64 to Bson object 107 | return Bson(kind: BsonKindDouble, valueFloat64: x) 108 | 109 | proc toFloat64*(x: Bson): float64 = 110 | ## Convert Bson object to float64 111 | return x.valueFloat64 112 | 113 | proc toBson*(x: string): Bson = 114 | ## Convert string to Bson object 115 | return Bson(kind: BsonKindStringUTF8, valueString: x) 116 | 117 | proc toString*(x: Bson): string = 118 | ## Convert Bson to UTF8 string 119 | case x.kind 120 | of BsonKindStringUTF8: 121 | return x.valueString 122 | else: 123 | raiseWrongNodeException(x) 124 | 125 | proc toBson*(x: int64): Bson = 126 | ## Convert int64 to Bson object 127 | return Bson(kind: BsonKindInt64, valueInt64: x) 128 | 129 | proc toInt64*(x: Bson): int64 = 130 | ## Convert Bson object to int 131 | case x.kind 132 | of BsonKindInt64: 133 | return int64(x.valueInt64) 134 | of BsonKindInt32: 135 | return int64(x.valueInt32) 136 | else: 137 | raiseWrongNodeException(x) 138 | 139 | proc toBson*(x: int32): Bson = 140 | ## Convert int32 to Bson object 141 | return Bson(kind: BsonKindInt32, valueInt32: x) 142 | 143 | proc toInt32*(x: Bson): int32 = 144 | ## Convert Bson to int32 145 | case x.kind 146 | of BsonKindInt64: 147 | return int32(x.valueInt64) 148 | of BsonKindInt32: 149 | return int32(x.valueInt32) 150 | else: 151 | raiseWrongNodeException(x) 152 | 153 | proc toInt*(x: Bson): int = 154 | ## Convert Bson to int whether it is int32 or int64 155 | case x.kind 156 
| of BsonKindInt64: 157 | return int(x.valueInt64) 158 | of BsonKindInt32: 159 | return int(x.valueInt32) 160 | else: 161 | raiseWrongNodeException(x) 162 | 163 | proc toBson*(x: int): Bson = 164 | ## Convert int to Bson object 165 | return Bson(kind: BsonKindInt64, valueInt64: x) 166 | 167 | proc toBson*(x: bool): Bson = 168 | ## Convert bool to Bson object 169 | return Bson(kind: BsonKindBool, valueBool: x) 170 | 171 | proc toBool*(x: Bson): bool = 172 | ## Convert Bson object to bool 173 | return x.valueBool 174 | 175 | proc toBson*(x: Time): Bson = 176 | ## Convert Time to Bson object 177 | return Bson(kind: BsonKindTimeUTC, valueTime: x) 178 | 179 | proc toTime*(x: Bson): Time = 180 | ## Convert Bson object to Time 181 | return x.valueTime 182 | 183 | proc toBson*(x: BsonTimestamp): Bson = 184 | ## Convert inner BsonTimestamp to Bson object 185 | return Bson(kind: BsonKindTimestamp, valueTimestamp: x) 186 | 187 | proc toTimestamp*(x: Bson): BsonTimestamp = 188 | ## Convert Bson object to inner BsonTimestamp type 189 | return x.valueTimestamp 190 | 191 | proc toBson*(x: MD5Digest): Bson = 192 | ## Convert MD5Digest to Bson object 193 | return Bson(kind: BsonKindBinary, subtype: BsonSubtypeMd5, valueDigest: x) 194 | 195 | proc toBson*(x: var MD5Context): Bson = 196 | ## Convert MD5Context to Bson object (still digest from current context). 197 | ## :WARNING: MD5Context is finalized during conversion. 
198 | var digest: MD5Digest 199 | x.md5Final(digest) 200 | return Bson(kind: BsonKindBinary, subtype: BsonSubtypeMd5, valueDigest: digest) 201 | 202 | proc podValueToBytesAtOffset[T](x: T, res: var string, off: int) {.inline.} = 203 | assert(off + sizeof(x) <= res.len) 204 | copyMem(addr res[off], unsafeAddr x, sizeof(x)) 205 | 206 | proc podValueToBytes[T](x: T, res: var string) {.inline.} = 207 | let off = res.len 208 | res.setLen(off + sizeof(x)) 209 | podValueToBytesAtOffset(x, res, off) 210 | 211 | proc int32ToBytesAtOffset*(x: int32, res: var string, off: int) = 212 | podValueToBytesAtOffset(x, res, off) 213 | 214 | proc int32ToBytes*(x: int32, res: var string) {.inline.} = 215 | ## Convert int32 data piece into series of bytes 216 | podValueToBytes(x, res) 217 | 218 | proc float64ToBytes*(x: float64, res: var string) {.inline.} = 219 | ## Convert float64 data piece into series of bytes 220 | podValueToBytes(x, res) 221 | 222 | proc int64ToBytes*(x: int64, res: var string) {.inline.} = 223 | ## Convert int64 data piece into series of bytes 224 | podValueToBytes(x, res) 225 | 226 | proc boolToBytes*(x: bool, res: var string) {.inline.} = 227 | ## Convert bool data piece into series of bytes 228 | podValueToBytes(if x: 1'u8 else: 0'u8, res) 229 | 230 | proc oidToBytes*(x: Oid, res: var string) {.inline.} = 231 | ## Convert Mongo Object ID data piece into series to bytes 232 | podValueToBytes(x, res) 233 | 234 | proc toBytes*(bs: Bson, res: var string) = 235 | ## Serialize Bson object into byte-stream 236 | case bs.kind 237 | of BsonKindDouble: 238 | float64ToBytes(bs.valueFloat64, res) 239 | of BsonKindStringUTF8: 240 | int32ToBytes(int32(bs.valueString.len + 1), res) 241 | res &= bs.valueString 242 | res &= char(0) 243 | of BsonKindDocument: 244 | let off = res.len 245 | res.setLen(off + sizeof(int32)) # We shall write the length in here... 
246 | for key, val in bs.valueDocument: 247 | res &= val.kind 248 | res &= key 249 | res &= char(0) 250 | val.toBytes(res) 251 | res &= char(0) 252 | int32ToBytesAtOffset(int32(res.len - off), res, off) 253 | of BsonKindArray: 254 | let off = res.len 255 | res.setLen(off + sizeof(int32)) # We shall write the length in here... 256 | for i, val in bs.valueArray: 257 | res &= val.kind 258 | res &= $i 259 | res &= char(0) 260 | val.toBytes(res) 261 | res &= char(0) 262 | int32ToBytesAtOffset(int32(res.len - off), res, off) 263 | of BsonKindBinary: 264 | case bs.subtype 265 | of BsonSubtypeMd5: 266 | var sdig: string = newStringOfCap(16) 267 | for i in 0.. 0: 685 | s.readStr(ds, buf) 686 | else: 687 | buf = "" 688 | case st: 689 | of BsonSubtypeMd5: 690 | sub[] = cast[ptr MD5Digest](buf.cstring)[].toBson() 691 | of BsonSubtypeGeneric: 692 | sub[] = bin(buf) 693 | of BsonSubtypeUserDefined: 694 | sub[] = binuser(buf) 695 | of BsonSubtypeUuid: 696 | sub[] = bin(buf) 697 | else: 698 | raise newException(Exception, "Unexpected subtype: " & $(st.int)) 699 | of BsonKindUndefined: 700 | sub[] = undefined() 701 | of BsonKindOid: 702 | s.readStr(12, buf) 703 | sub[] = cast[ptr Oid](buf.cstring)[].toBson() 704 | of BsonKindBool: 705 | sub[] = if s.readChar() == 0.char: false.toBson() else: true.toBson() 706 | of BsonKindTimeUTC: 707 | let timeUTC: Bson = Bson(kind: BsonKindTimeUTC, valueTime: fromUnix((s.readInt64().float64 / 1000).int64)) 708 | sub[] = timeUTC 709 | of BsonKindNull: 710 | sub[] = null() 711 | of BsonKindRegexp: 712 | # sub[] = regex(s.readLine().string(), seqCharToString(sorted(s.readLine().string, system.cmp))) 713 | sub[] = regex(s.readLine().string(), s.readLine().string) 714 | of BsonKindDBPointer: 715 | let 716 | refcol: string = s.readStr(s.readInt32() - 1) 717 | refoid: Oid = cast[ptr Oid](s.readStr(12).cstring)[] 718 | discard s.readChar() 719 | sub[] = dbref(refcol, refoid) 720 | of BsonKindJSCode: 721 | s.readStr(s.readInt32() - 1, buf) 722 | discard 
s.readChar() 723 | sub[] = js(buf) 724 | of BsonKindInt32: 725 | sub[] = s.readInt32().toBson() 726 | of BsonKindTimestamp: 727 | sub[] = cast[BsonTimestamp](s.readInt64()).toBson() 728 | of BsonKindInt64: 729 | sub[] = s.readInt64().toBson() 730 | of BsonKindMinimumKey: 731 | sub[] = minkey() 732 | of BsonKindMaximumKey: 733 | sub[] = maxkey() 734 | else: 735 | raise newException(Exception, "Unexpected kind: " & $kind) 736 | 737 | proc initBsonDocument*(stream: Stream): Bson {.deprecated.} = 738 | return newBsonDocument(stream) 739 | 740 | proc initBsonDocument*(bytes: string): Bson {.deprecated.} = 741 | ## Create new Bson document from byte string 742 | newBsonDocument(newStringStream(bytes)) 743 | 744 | proc newBsonDocument*(bytes: string): Bson = 745 | ## Create new Bson document from byte string 746 | newBsonDocument(newStringStream(bytes)) 747 | 748 | ## Serialization/deserialization ext 749 | 750 | template dbKey*(name: string) {.pragma.} 751 | 752 | proc to*(b: Bson, T: typedesc): T = 753 | when T is seq: 754 | result.setLen(b.len) 755 | var i = 0 756 | for c in b: 757 | result[i] = c.to(type(result[i])) 758 | inc i 759 | elif T is string: 760 | result = b.toString 761 | elif T is int|int8|int16|int32|uint|uint8|uint16|uint32: 762 | when b.toInt is T: 763 | result = b.toInt 764 | else: 765 | result = b.toInt.T 766 | elif T is int64|uint64: 767 | when b.toInt64 is T: 768 | result = b.toInt64 769 | else: 770 | result = b.toInt64.T 771 | elif T is float: 772 | result = b.toFloat64 773 | elif T is bool: 774 | result = b.toBool 775 | elif T is enum: 776 | result = parseEnum[T](b.toString) 777 | elif T is TableRef|Table: 778 | proc valAUXType[K, V](arr: Table[K, V] | TableRef[K, V]): V = discard 779 | proc keyAUXType[K, V](arr: Table[K, V] | TableRef[K, V]): K = discard 780 | when T is TableRef: 781 | result = newTable[type(result.keyAUXType), type(result.valAUXType)]() 782 | for k, v in b: 783 | result[k] = v.to(type(result.valAUXType)) 784 | elif T is ref 
object: 785 | if b.kind == BsonKindNull: 786 | result = nil 787 | return 788 | result.new() 789 | for k, val in fieldPairs(result[]): 790 | var key = k 791 | when val.hasCustomPragma(dbKey): 792 | key = val.getCustomPragmaVal(dbKey) 793 | if key notin b: 794 | raise newException(Exception, "Key " & key & " not found for " & $T) 795 | val = b[key].to(type(val)) 796 | elif T is object|tuple: 797 | for k, val in fieldPairs(result): 798 | var key = k 799 | when val.hasCustomPragma(dbKey): 800 | key = val.getCustomPragmaVal(dbKey) 801 | if key notin b: 802 | raise newException(Exception, "Key " & key & " not found for " & $T) 803 | val = b[key].to(type(val)) 804 | 805 | else: 806 | {.error: "Unknown type".} 807 | 808 | proc toBson*[T](entry: T): Bson = 809 | when T is array | seq | set: 810 | result = newBsonArray() 811 | for v in entry: 812 | result.add(toBson(v)) 813 | elif T is ref: 814 | if entry.isNil: 815 | result = null() 816 | else: 817 | result = entry[].toBson() 818 | elif T is Table|TableRef: 819 | result = newBsonDocument() 820 | for k, v in entry: 821 | result[k] = toBson(v) 822 | elif T is object | tuple: 823 | result = newBsonDocument() 824 | for k, v in fieldPairs(entry): 825 | when v.hasCustomPragma(dbKey): 826 | result[v.getCustomPragmaVal(dbKey)] = toBson(v) 827 | else: 828 | result[k] = toBson(v) 829 | elif T is enum: 830 | result = toBson($entry) 831 | elif T is int8|int16|uint8|uint16|uint32: 832 | result = toBson(entry.int32) 833 | elif T is uint64: 834 | result = toBson(entry.int64) 835 | else: 836 | {.error: "toBson " & $T & " can't serialize".} 837 | 838 | proc merge*(a, b: Bson): Bson = 839 | 840 | proc m_rec(a,b,r: Bson)= 841 | for k, v in a: 842 | if not b[k].isNil: 843 | r[k] = v.merge(b[k]) 844 | else: 845 | r[k] = v 846 | 847 | for k, v in b: 848 | if a[k].isNil: 849 | r[k] = v 850 | 851 | if (a.kind == BsonKindDocument or a.kind == BsonKindArray) and 852 | (b.kind == BsonKindDocument or b.kind == BsonKindArray): 853 | result = 
newBsonDocument() 854 | m_rec(a,b,result) 855 | else: 856 | result = a 857 | 858 | proc update*(a, b: Bson)= 859 | if (a.kind == BsonKindDocument or a.kind == BsonKindArray) and 860 | (b.kind == BsonKindDocument or b.kind == BsonKindArray): 861 | 862 | for k, v in a: 863 | if not b[k].isNil: 864 | a[k] = v.merge(b[k]) 865 | 866 | for k, v in b: 867 | if a[k].isNil: 868 | a[k] = v 869 | 870 | when isMainModule: 871 | echo "Testing nimongo/bson.nim module..." 872 | let oid = genOid() 873 | let bdoc: Bson = %*{ 874 | "image": bin("12312l3jkalksjslkvdsdas"), 875 | "balance": 1000.23, 876 | "name": "John", 877 | "someId": oid, 878 | "someTrue": true, 879 | "surname": "Smith", 880 | "someNull": null(), 881 | "minkey": minkey(), 882 | "maxkey": maxkey(), 883 | "digest": "".toMd5(), 884 | "regexp-field": regex("pattern", "ismx"), 885 | "undefined": undefined(), 886 | "someJS": js("function identity(x) {return x;}"), 887 | "someRef": dbref("db.col", genOid()), 888 | "userDefined": binuser("some-binary-data"), 889 | "someTimestamp": BsonTimestamp(increment: 1, timestamp: 1), 890 | "utcTime": timeUTC(getTime()), 891 | "subdoc": %*{ 892 | "salary": 500 893 | }, 894 | "array": [ 895 | %*{"string": "hello"}, 896 | %*{"string" : "world"} 897 | ] 898 | } 899 | 900 | echo bdoc 901 | let bbytes = bdoc.bytes() 902 | let recovered = newBsonDocument(bbytes) 903 | echo "RECOVERED: ", recovered 904 | 905 | var bdoc2 = newBsonArray() 906 | bdoc2 = bdoc2.add(2) 907 | bdoc2 = bdoc2.add(2) 908 | echo bdoc2 909 | --------------------------------------------------------------------------------