├── .gitignore ├── .travis.yml ├── ChangeLog ├── LICENSE ├── MANIFEST ├── README.md ├── asyncmongo ├── __init__.py ├── asyncjobs.py ├── backends │ ├── __init__.py │ ├── glib2_backend.py │ ├── glib3_backend.py │ └── tornado_backend.py ├── client.py ├── connection.py ├── cursor.py ├── errors.py ├── helpers.py ├── message.py └── pool.py ├── setup.py └── test ├── __init__.py ├── sample_app ├── README ├── sample_app.py ├── sample_app2.py └── settings.py ├── test_authentication.py ├── test_command.py ├── test_connection.py ├── test_duplicate_insert.py ├── test_insert_delete.py ├── test_pooled_db.py ├── test_query.py ├── test_replica_set.py ├── test_safe_updates.py ├── test_shunt.py ├── test_slave_only.py ├── testgtk2 └── test.py └── testgtk3 └── test.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | build 3 | dist 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.5" 4 | - "2.6" 5 | - "2.7" 6 | services: mongodb 7 | install: 8 | - "pip install tornado==2.4.1 --use-mirrors" 9 | - "pip install pymongo --use-mirrors" 10 | env: 11 | - TEST_REPLICA_SETS="true" 12 | - TEST_REPLICA_SETS="false" 13 | script: 14 | - if [[ $TEST_REPLICA_SETS == 'true' ]]; then py.test test/test_replica_set.py; else py.test --ignore=test/test_replica_set.py; fi 15 | notifications: 16 | email: false -------------------------------------------------------------------------------- /ChangeLog: -------------------------------------------------------------------------------- 1 | Version 1.3 5/19/2014 2 | * #65 fix connection leak 3 | * #64 fix error handling for connections and callbacks 4 | * #62 support for connecting to replica secondary instances 5 | * #58 replica set support 6 | 7 | Version 1.2.2 1/15/2013 8 | * #55 updated tests; enabled travis-ci 9 | * #54 cleanup logging 10 | * #50 allow None for limit 11 | * #49 add support for $comment in find 12 | * #44 helper function to list all collections 13 | * #40 add ability to pass arguments to ioLoop backend 14 | 15 | Version 1.2.1 1/30/2012 16 | * #37 include asyncmongo.backends in setup.py 17 | * #38 README.md not included in .tar.gz 18 | 19 | Version 1.2 1/25/2012 20 | * #29 support for glib mainloop 21 | * #35 support for hint to find() 22 | * #32 close connections in unit tests 23 | * #34 unittest updates 24 | * #31 fix typo in example 25 | 26 | Version 1.1.1 11/8/2011 27 | * fix cursor close in cases where IntegrityError is raised 28 | 29 | Version 1.1 11/8/2011 30 | * #26 close cursors after find() 31 | 32 | Version 1.0.3 10/21/2011 33 | * #22 fix missing README.md in MANIFEST 34 | * python 2.5 compatibility 35 | * #23 update TooManyConnections error wording 36 | 37 | Version 1.0 10/8/2011 38 | * #12 authentication support 39 | * #14 support for slave_okay=True 40 | * #6 fix port typo in example code 41 | * implement command method 42 | 43 | Version 0.1.3 2/8/2011 44 | * fix cleanup for connections that aren't closed cleanly 45 | * #1 implement mincached 46 | 47 | Version 0.1.2 12/6/2010 48 | * fixes for querying with safe=False 49 | 50 | Version 0.1 11/17/2010 51 | * Initial release 52 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /MANIFEST: -------------------------------------------------------------------------------- 1 | setup.py 2 | LICENSE 3 | README.md 4 | asyncmongo/__init__.py 5 | asyncmongo/connection.py 6 | asyncmongo/cursor.py 7 | asyncmongo/client.py 8 | asyncmongo/errors.py 9 | asyncmongo/helpers.py 10 | asyncmongo/message.py 11 | asyncmongo/pool.py 12 | asyncmongo/asyncjobs.py 13 | asyncmongo/backends/__init__.py 14 | asyncmongo/backends/glib2_backend.py 15 | asyncmongo/backends/glib3_backend.py 16 | asyncmongo/backends/tornado_backend.py 17 | asyncmongo/client.py 18 | test/__init__.py 19 | test/sample_app/sample_app.py 20 | test/sample_app/sample_app2.py 21 | test/sample_app/settings.py 22 | test/test_authentication.py 23 | test/test_command.py 24 | test/test_connection.py 25 | test/test_duplicate_insert.py 26 | test/test_insert_delete.py 27 | test/test_pooled_db.py 28 | test/test_query.py 29 | test/test_safe_updates.py 30 | test/test_shunt.py 31 | test/test_slave_only.py 32 | test/test_replica_set.py 33 | test/testgtk2/test.py 34 | test/testgtk3/test.py 35 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | asyncmongo 2 | ========== 3 | 4 | AsyncMongo is an asynchronous library for accessing mongo 5 | which is built on the tornado ioloop. 6 | 7 | [![Build Status](https://travis-ci.org/bitly/asyncmongo.png?branch=master)](https://travis-ci.org/bitly/asyncmongo) 8 | 9 | Installation 10 | ------------ 11 | 12 | Installing: `pip install asyncmongo` 13 | 14 | Usage 15 | ----- 16 | 17 | asyncmongo syntax strives to be similar to [pymongo](http://api.mongodb.org/python/current/api/pymongo/collection.html). 18 | 19 | import asyncmongo 20 | import tornado.web 21 | 22 | class Handler(tornado.web.RequestHandler): 23 | @property 24 | def db(self): 25 | if not hasattr(self, '_db'): 26 | self._db = asyncmongo.Client(pool_id='mydb', host='127.0.0.1', port=27017, maxcached=10, maxconnections=50, dbname='test') 27 | return self._db 28 | 29 | @tornado.web.asynchronous 30 | def get(self): 31 | self.db.users.find({'username': self.current_user}, limit=1, callback=self._on_response) 32 | # or 33 | # conn = self.db.connection(collectionname="...", dbname="...") 34 | # conn.find(..., callback=self._on_response) 35 | 36 | def _on_response(self, response, error): 37 | if error: 38 | raise tornado.web.HTTPError(500) 39 | self.render('template', full_name=response['full_name']) 40 | 41 | About 42 | ----- 43 | 44 | Some features are not currently implemented: 45 | 46 | * directly interfacing with indexes, dropping collections 47 | * retrieving results in batches instead of all at once 48 | (asyncmongo's nature means that no calls are blocking regardless of the number of results you are retrieving) 49 | * tailable cursors #15 50 | 51 | 52 | Requirements 53 | ------------ 54 | The following two python libraries are required 55 | 56 | * [pymongo](http://github.com/mongodb/mongo-python-driver) version 1.9+ for bson library 57 | * [tornado](http://github.com/facebook/tornado) 58 | 59 | Issues 60 | ------ 61 | 62 | Please report any issues via [github issues](https://github.com/bitly/asyncmongo/issues) 63 | -------------------------------------------------------------------------------- /asyncmongo/__init__.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # 3 | # Copyright 2010 bit.ly 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | """ 18 | AsyncMongo is an asynchronous library for accessing mongo 19 | http://github.com/bitly/asyncmongo 20 | """ 21 | try: 22 | import bson 23 | except ImportError: 24 | raise ImportError("bson library not installed. Install pymongo >= 1.9 https://github.com/mongodb/mongo-python-driver") 25 | 26 | # also update in setup.py 27 | version = "1.3" 28 | version_info = (1, 3) 29 | 30 | ASCENDING = 1 31 | """Ascending sort order.""" 32 | DESCENDING = -1 33 | """Descending sort order.""" 34 | GEO2D = "2d" 35 | """Index specifier for a 2-dimensional `geospatial index`""" 36 | 37 | from errors import (Error, InterfaceError, AuthenticationError, DatabaseError, RSConnectionError, 38 | DataError, IntegrityError, ProgrammingError, NotSupportedError) 39 | 40 | from client import Client 41 | -------------------------------------------------------------------------------- /asyncmongo/asyncjobs.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # 3 | # Copyright 2013 bit.ly 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | """Tools for creating `messages 18 | `_ to be sent to 19 | MongoDB. 20 | 21 | .. note:: This module is for internal use and is generally not needed by 22 | application developers. 23 | """ 24 | 25 | import logging 26 | import random 27 | from bson import SON 28 | 29 | import message 30 | import helpers 31 | from errors import AuthenticationError, RSConnectionError, InterfaceError 32 | 33 | 34 | class AsyncMessage(object): 35 | def __init__(self, connection, message, callback): 36 | super(AsyncMessage, self).__init__() 37 | self.connection = connection 38 | self.message = message 39 | self.callback = callback 40 | 41 | def process(self, *args, **kwargs): 42 | try: 43 | self.connection._send_message(self.message, self.callback) 44 | except Exception, e: 45 | if self.callback is None: 46 | logging.error("Error occurred in safe update mode: %s", e) 47 | else: 48 | self.callback(None, e) 49 | 50 | 51 | class AsyncJob(object): 52 | def __init__(self, connection, state, err_callback): 53 | super(AsyncJob, self).__init__() 54 | self.connection = connection 55 | self._err_callback = err_callback 56 | self._state = state 57 | 58 | def _error(self, e): 59 | self.connection.close() 60 | if self._err_callback: 61 | self._err_callback(e) 62 | 63 | def update_err_callback(self, err_callback): 64 | self._err_callback = err_callback 65 | 66 | def __repr__(self): 67 | return "%s at 0x%X, state = %r" % (self.__class__.__name__, id(self), self._state) 68 | 69 | 70 | class AuthorizeJob(AsyncJob): 71 | def __init__(self, connection, dbuser, dbpass, pool, err_callback): 72 | super(AuthorizeJob, self).__init__(connection, "start", err_callback) 73 | self.dbuser = dbuser 74 | self.dbpass = dbpass 75 | self.pool = pool 76 | 77 | def process(self, response=None, error=None): 78 | if error: 79 | logging.debug("Error during authentication: %r", error) 80 | self._error(AuthenticationError(error)) 81 | return 82 | 83 | if self._state == "start": 84 | self._state = "nonce" 85 | logging.debug("Sending nonce") 86 | msg = message.query( 87 | 0, 88 | "%s.$cmd" % self.pool._dbname, 89 | 0, 90 | 1, 91 | SON({'getnonce': 1}), 92 | SON({}) 93 | ) 94 | self.connection._send_message(msg, self.process) 95 | elif self._state == "nonce": 96 | # this is the nonce response 97 | self._state = "finish" 98 | try: 99 | nonce = response['data'][0]['nonce'] 100 | logging.debug("Nonce received: %r", nonce) 101 | key = helpers._auth_key(nonce, self.dbuser, self.dbpass) 102 | except Exception, e: 103 | self._error(AuthenticationError(e)) 104 | return 105 | 106 | msg = message.query( 107 | 0, 108 | "%s.$cmd" % self.pool._dbname, 109 | 0, 110 | 1, 111 | SON([('authenticate', 1), 112 | ('user', self.dbuser), 113 | ('nonce', nonce), 114 | ('key', key)]), 115 | SON({}) 116 | ) 117 | self.connection._send_message(msg, self.process) 118 | elif self._state == "finish": 119 | self._state = "done" 120 | try: 121 | assert response['number_returned'] == 1 122 | response = response['data'][0] 123 | except Exception, e: 124 | self._error(AuthenticationError(e)) 125 | return 126 | 127 | if response.get("ok") != 1: 128 | logging.debug("Failed authentication %s", response.get("errmsg")) 129 | self._error(AuthenticationError(response.get("errmsg"))) 130 | return 131 | self.connection._next_job() 132 | else: 133 | self._error(ValueError("Unexpected state: %s" % self._state)) 134 | 135 | 136 | class ConnectRSJob(AsyncJob): 137 | def __init__(self, connection, seed, rs, secondary_only, err_callback): 138 | super(ConnectRSJob, self).__init__(connection, "seed", err_callback) 139 | self.known_hosts = set(seed) 140 | self.rs = rs 141 | self._blacklisted = set() 142 | self._primary = None 143 | self._sec_only = secondary_only 144 | 145 | def process(self, response=None, error=None): 146 | if error: 147 | logging.debug("Problem connecting: %s", error) 148 | 149 | if self._state == "ismaster": 150 | self._state = "seed" 151 | 152 | if self._state == "seed": 153 | if self._sec_only and self._primary: 154 | # Add primary host to blacklisted to avoid connecting to it 155 | self._blacklisted.add(self._primary) 156 | 157 | fresh = self.known_hosts ^ self._blacklisted 158 | logging.debug("Working through the rest of the host list: %r", fresh) 159 | 160 | while fresh: 161 | if self._primary and self._primary not in self._blacklisted: 162 | # Try primary first 163 | h = self._primary 164 | else: 165 | h = random.choice(list(fresh)) 166 | 167 | if h in fresh: 168 | fresh.remove(h) 169 | 170 | # Add tried host to blacklisted 171 | self._blacklisted.add(h) 172 | 173 | logging.debug("Connecting to %s:%s", *h) 174 | self.connection._host, self.connection._port = h 175 | try: 176 | self.connection._socket_connect() 177 | logging.debug("Connected to %s", h) 178 | except InterfaceError, e: 179 | logging.error("Failed to connect to the host: %s", e) 180 | else: 181 | break 182 | 183 | else: 184 | self._error(RSConnectionError("No more hosts to try, tried: %s" % self.known_hosts)) 185 | return 186 | 187 | self._state = "ismaster" 188 | msg = message.query( 189 | options=0, 190 | collection_name="admin.$cmd", 191 | num_to_skip=0, 192 | num_to_return=-1, 193 | query=SON([("ismaster", 1)]) 194 | ) 195 | self.connection._send_message(msg, self.process) 196 | 197 | elif self._state == "ismaster": 198 | logging.debug("ismaster response: %r", response) 199 | 200 | try: 201 | assert len(response["data"]) == 1 202 | res = response["data"][0] 203 | except Exception, e: 204 | self._error(RSConnectionError("Invalid response data: %r" % response.get("data"))) 205 | return 206 | 207 | rs_name = res.get("setName") 208 | if rs_name and rs_name != self.rs: 209 | self._error(RSConnectionError("Wrong replica set: %s, expected: %s" % (rs_name, self.rs))) 210 | return 211 | 212 | hosts = res.get("hosts") 213 | if hosts: 214 | self.known_hosts.update(helpers._parse_host(h) for h in hosts) 215 | 216 | ismaster = res.get("ismaster") 217 | hidden = res.get("hidden") 218 | try: 219 | if ismaster and not self._sec_only: # master and required to connect to primary 220 | assert not hidden, "Primary cannot be hidden" 221 | logging.debug("Connected to master (%s)", res.get("me", "unknown")) 222 | self._state = "done" 223 | self.connection._next_job() 224 | elif not ismaster and self._sec_only and not hidden: # not master and required to connect to secondary 225 | assert res.get("secondary"), "Secondary must self-report as secondary" 226 | logging.debug("Connected to secondary (%s)", res.get("me", "unknown")) 227 | self._state = "done" 228 | self.connection._next_job() 229 | else: # either not master and primary connection required or master and secondary required 230 | primary = res.get("primary") 231 | if primary: 232 | self._primary = helpers._parse_host(primary) 233 | self._state = "seed" 234 | self.process() 235 | except Exception, e: 236 | self._error(RSConnectionError(e)) 237 | return 238 | 239 | -------------------------------------------------------------------------------- /asyncmongo/backends/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitly/asyncmongo/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/backends/__init__.py -------------------------------------------------------------------------------- /asyncmongo/backends/glib2_backend.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # 3 | # Copyright 2010 bit.ly 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | import glib 18 | 19 | class Glib2Stream(object): 20 | def __init__(self, socket, **kwargs): 21 | self.__socket = socket 22 | self.__close_id = None 23 | self.__read_id = None 24 | self.__read_queue = [] 25 | 26 | def write(self, data): 27 | self.__socket.send(data) 28 | 29 | def read(self, size, callback): 30 | self.__read_queue.append((size, callback)) 31 | 32 | if not self.__read_id: 33 | self.set_waiting() 34 | 35 | def set_waiting(self): 36 | if self.__read_id: 37 | glib.source_remove(self.__read_id) 38 | 39 | self.__read_id = glib.io_add_watch( 40 | self.__socket, 41 | glib.IO_IN, 42 | self.__on_read_callback) 43 | 44 | def set_idle(self): 45 | if self.__read_id: 46 | glib.source_remove(self.__read_id) 47 | 48 | def __on_read_callback(self, source, condition): 49 | if not self.__read_queue: 50 | self.set_idle() 51 | return False 52 | 53 | size, callback = self.__read_queue.pop(0) 54 | data = self.__socket.recv(size) 55 | callback(data) 56 | return True 57 | 58 | def set_close_callback(self, callback): 59 | if self.__close_id: 60 | glib.source_remove(self.__close_id) 61 | 62 | self.__close_callback = callback 63 | self.__close_id = glib.io_add_watch(self.__socket, 64 | glib.IO_HUP|glib.IO_ERR, 65 | self.__on_close_callback) 66 | 67 | def __on_close_callback(self, source, cb_condition, *args, **kwargs): 68 | self.__close_callback() 69 | 70 | def close(self): 71 | if self.__close_id: 72 | glib.source_remove(self.__close_id) 73 | 74 | self.__socket.close() 75 | 76 | class AsyncBackend(object): 77 | _instance = None 78 | def __new__(cls, *args, **kwargs): 79 | if not cls._instance: 80 | cls._instance = super(AsyncBackend, cls).__new__( 81 | cls, *args, **kwargs) 82 | return cls._instance 83 | 84 | def register_stream(self, socket, **kwargs): 85 | return Glib2Stream(socket, **kwargs) 86 | -------------------------------------------------------------------------------- /asyncmongo/backends/glib3_backend.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # 3 | # Copyright 2010 bit.ly 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | from gi.repository import GObject 18 | 19 | class Glib3Stream(object): 20 | def __init__(self, socket, **kwargs): 21 | self.__socket = socket 22 | self.__close_id = None 23 | self.__read_id = None 24 | self.__read_queue = [] 25 | 26 | def write(self, data): 27 | self.__socket.send(data) 28 | 29 | def read(self, size, callback): 30 | self.__read_queue.append((size, callback)) 31 | 32 | if not self.__read_id: 33 | self.set_waiting() 34 | 35 | def set_waiting(self): 36 | if self.__read_id: 37 | GObject.source_remove(self.__read_id) 38 | 39 | self.__read_id = GObject.io_add_watch( 40 | self.__socket, 41 | GObject.IO_IN, 42 | self.__on_read_callback) 43 | 44 | def set_idle(self): 45 | if self.__read_id: 46 | GObject.source_remove(self.__read_id) 47 | 48 | def __on_read_callback(self, source, condition): 49 | if not self.__read_queue: 50 | self.set_idle() 51 | return False 52 | 53 | size, callback = self.__read_queue.pop(0) 54 | data = self.__socket.recv(size) 55 | callback(data) 56 | return True 57 | 58 | def set_close_callback(self, callback): 59 | if self.__close_id: 60 | GObject.source_remove(self.__close_id) 61 | 62 | self.__close_callback = callback 63 | self.__close_id = GObject.io_add_watch(self.__socket, 64 | GObject.IO_HUP|GObject.IO_ERR, 65 | self.__on_close_callback) 66 | 67 | def __on_close_callback(self, source, cb_condition, *args, **kwargs): 68 | self.__close_callback() 69 | 70 | def close(self): 71 | if self.__close_id: 72 | GObject.source_remove(self.__close_id) 73 | 74 | self.__socket.close() 75 | 76 | class AsyncBackend(object): 77 | _instance = None 78 | def __new__(cls, *args, **kwargs): 79 | if not cls._instance: 80 | cls._instance = super(AsyncBackend, cls).__new__( 81 | cls, *args, **kwargs) 82 | return cls._instance 83 | 84 | def register_stream(self, socket, **kwargs): 85 | return Glib3Stream(socket, **kwargs) 86 | -------------------------------------------------------------------------------- /asyncmongo/backends/tornado_backend.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # 3 | # Copyright 2010 bit.ly 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | import tornado.iostream 18 | 19 | class TornadoStream(object): 20 | def __init__(self, socket, **kwargs): 21 | """ 22 | :Parameters: 23 | - `socket`: TCP socket 24 | - `**kwargs`: passed to `tornado.iostream.IOStream` 25 | - `io_loop` (optional): Tornado IOLoop instance. 26 | - `max_buffer_size` (optional): 27 | - `read_chunk_size` (optional): 28 | """ 29 | self.__stream = tornado.iostream.IOStream(socket, **kwargs) 30 | 31 | def write(self, data): 32 | self.__stream.write(data) 33 | 34 | def read(self, size, callback): 35 | self.__stream.read_bytes(size, callback=callback) 36 | 37 | def set_close_callback(self, callback): 38 | self.__stream.set_close_callback(callback) 39 | 40 | def close(self): 41 | self.__stream._close_callback = None 42 | self.__stream.close() 43 | 44 | class AsyncBackend(object): 45 | _instance = None 46 | def __new__(cls, *args, **kwargs): 47 | if not cls._instance: 48 | cls._instance = super(AsyncBackend, cls).__new__( 49 | cls, *args, **kwargs) 50 | return cls._instance 51 | 52 | def register_stream(self, socket, **kwargs): 53 | """ 54 | :Parameters: 55 | - `socket`: TCP socket 56 | - `**kwargs`: passed to `tornado.iostream.IOStream` 57 | - `io_loop` (optional): Tornado IOLoop instance. 58 | - `max_buffer_size` (optional): 59 | - `read_chunk_size` (optional): 60 | """ 61 | return TornadoStream(socket, **kwargs) 62 | -------------------------------------------------------------------------------- /asyncmongo/client.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # 3 | # Copyright 2010 bit.ly 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | from errors import DataError 18 | from pool import ConnectionPools 19 | from cursor import Cursor 20 | from bson.son import SON 21 | from functools import partial 22 | 23 | class Client(object): 24 | """ 25 | Client connection to represent a remote database. 26 | 27 | Internally Client maintains a pool of connections that will live beyond the life of this object. 28 | 29 | :Parameters: 30 | - `pool_id`: unique id for this connection pool 31 | - `**kwargs`: passed to `pool.ConnectionPool` 32 | - `mincached` (optional): minimum connections to open on instantiation. 0 to open connections on first use 33 | - `maxcached` (optional): maximum inactive cached connections for this pool. 0 for unlimited 34 | - `maxconnections` (optional): maximum open connections for this pool. 0 for unlimited 35 | - `maxusage` (optional): number of requests allowed on a connection before it is closed. 0 for unlimited 36 | - `dbname`: mongo database name 37 | - `backend': async loop backend, default = tornado 38 | - `**kwargs`: passed to `connection.Connection` 39 | - `host`: hostname or ip of mongo host 40 | - `port`: port to connect to 41 | - `slave_okay` (optional): is it okay to connect directly to and perform queries on a slave instance 42 | - `autoreconnect` (optional): auto reconnect on interface errors 43 | 44 | @returns a `Client` instance that wraps a `pool.ConnectionPool` 45 | 46 | Usage: 47 | >>> db = asyncmongo.Client(pool_id, host=host, port=port, dbname=dbname) 48 | >>> db.collectionname.find({...}, callback=...) 49 | 50 | """ 51 | def __init__(self, pool_id=None, **kwargs): 52 | self._pool = ConnectionPools.get_connection_pool(pool_id, **kwargs) 53 | 54 | def __getattr__(self, name): 55 | """Get a collection by name. 56 | 57 | :Parameters: 58 | - `name`: the name of the collection 59 | """ 60 | return self.connection(name) 61 | 62 | def __getitem__(self, name): 63 | """Get a collection by name. 64 | :Parameters: 65 | - `name`: the name of the collection to get 66 | """ 67 | return self.connection(name) 68 | 69 | def connection(self, collectionname, dbname=None): 70 | """Get a cursor to a collection by name. 71 | 72 | raises `DataError` on names with unallowable characters. 73 | 74 | :Parameters: 75 | - `collectionname`: the name of the collection 76 | - `dbname`: (optional) overide the default db for a connection 77 | 78 | """ 79 | if not collectionname or ".." in collectionname: 80 | raise DataError("collection names cannot be empty") 81 | if "$" in collectionname and not (collectionname.startswith("oplog.$main") or 82 | collectionname.startswith("$cmd")): 83 | raise DataError("collection names must not " 84 | "contain '$': %r" % collectionname) 85 | if collectionname.startswith(".") or collectionname.endswith("."): 86 | raise DataError("collecion names must not start " 87 | "or end with '.': %r" % collectionname) 88 | if "\x00" in collectionname: 89 | raise DataError("collection names must not contain the " 90 | "null character") 91 | return Cursor(dbname or self._pool._dbname, collectionname, self._pool) 92 | 93 | def collection_names(self, callback): 94 | """Get a list of all the collection names in selected database""" 95 | callback = partial(self._collection_names_result, callback) 96 | self["system.namespaces"].find(_must_use_master=True, callback=callback) 97 | 98 | def _collection_names_result(self, callback, results, error=None): 99 | """callback to for collection names query, filters out collection names""" 100 | names = [r['name'] for r in results if r['name'].count('.') == 1] 101 | assert error == None, repr(error) 102 | strip = len(self._pool._dbname) + 1 103 | callback([name[strip:] for name in names]) 104 | 105 | def command(self, command, value=1, callback=None, 106 | check=True, allowable_errors=[], **kwargs): 107 | """Issue a MongoDB command. 108 | 109 | Send command `command` to the database and return the 110 | response. If `command` is an instance of :class:`basestring` 111 | then the command {`command`: `value`} will be sent. Otherwise, 112 | `command` must be an instance of :class:`dict` and will be 113 | sent as is. 114 | 115 | Any additional keyword arguments will be added to the final 116 | command document before it is sent. 117 | 118 | For example, a command like ``{buildinfo: 1}`` can be sent 119 | using: 120 | 121 | >>> db.command("buildinfo") 122 | 123 | For a command where the value matters, like ``{collstats: 124 | collection_name}`` we can do: 125 | 126 | >>> db.command("collstats", collection_name) 127 | 128 | For commands that take additional arguments we can use 129 | kwargs. So ``{filemd5: object_id, root: file_root}`` becomes: 130 | 131 | >>> db.command("filemd5", object_id, root=file_root) 132 | 133 | :Parameters: 134 | - `command`: document representing the command to be issued, 135 | or the name of the command (for simple commands only). 136 | 137 | .. note:: the order of keys in the `command` document is 138 | significant (the "verb" must come first), so commands 139 | which require multiple keys (e.g. `findandmodify`) 140 | should use an instance of :class:`~bson.son.SON` or 141 | a string and kwargs instead of a Python `dict`. 142 | 143 | - `value` (optional): value to use for the command verb when 144 | `command` is passed as a string 145 | - `**kwargs` (optional): additional keyword arguments will 146 | be added to the command document before it is sent 147 | 148 | .. mongodoc:: commands 149 | """ 150 | 151 | if isinstance(command, basestring): 152 | command = SON([(command, value)]) 153 | 154 | command.update(kwargs) 155 | 156 | self.connection("$cmd").find_one(command,callback=callback, 157 | _must_use_master=True, 158 | _is_command=True) 159 | -------------------------------------------------------------------------------- /asyncmongo/connection.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # 3 | # Copyright 2010 bit.ly 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | import sys 18 | import socket 19 | import struct 20 | import logging 21 | from types import NoneType 22 | import functools 23 | 24 | from errors import ProgrammingError, IntegrityError, InterfaceError 25 | import helpers 26 | import asyncjobs 27 | 28 | 29 | class Connection(object): 30 | """ 31 | :Parameters: 32 | - `host`: hostname or ip of mongo host (not allowed when replica sets are used) 33 | - `port`: port to connect to (not allowed when replica sets are used) 34 | - `dbuser`: db user to connect with 35 | - `dbpass`: db password 36 | - `autoreconnect` (optional): auto reconnect on interface errors 37 | - `rs`: replica set name (required when replica sets are used) 38 | - `seed`: seed list to connect to a replica set (required when replica sets are used) 39 | - `secondary_only`: (optional, only useful for replica set connections) 40 | if true, connect to a secondary member only 41 | - `**kwargs`: passed to `backends.AsyncBackend.register_stream` 42 | 43 | """ 44 | def __init__(self, 45 | host=None, 46 | port=None, 47 | dbuser=None, 48 | dbpass=None, 49 | autoreconnect=True, 50 | pool=None, 51 | backend="tornado", 52 | rs=None, 53 | seed=None, 54 | secondary_only=False, 55 | **kwargs): 56 | assert isinstance(autoreconnect, bool) 57 | assert isinstance(dbuser, (str, unicode, NoneType)) 58 | assert isinstance(dbpass, (str, unicode, NoneType)) 59 | assert isinstance(rs, (str, NoneType)) 60 | assert pool 61 | assert isinstance(secondary_only, bool) 62 | 63 | if rs: 64 | assert host is None 65 | assert port is None 66 | assert isinstance(seed, (set, list)) 67 | else: 68 | assert isinstance(host, (str, unicode)) 69 | assert isinstance(port, int) 70 | assert seed is None 71 | 72 | self._host = host 73 | self._port = port 74 | self.__rs = rs 75 | self.__seed = seed 76 | self.__secondary_only = secondary_only 77 | self.__dbuser = dbuser 78 | self.__dbpass = dbpass 79 | self.__stream = None 80 | self.__callback = None 81 | self.__alive = False 82 | self.__autoreconnect = autoreconnect 83 | self.__pool = pool 84 | self.__kwargs = kwargs 85 | self.__backend = self.__load_backend(backend) 86 | self.__job_queue = [] 87 | self.usage_count = 0 88 | 89 | self.__connect(self.connection_error) 90 | 91 | def connection_error(self, error): 92 | raise error 93 | 94 | def __load_backend(self, name): 95 | __import__('asyncmongo.backends.%s_backend' % name) 96 | mod = sys.modules['asyncmongo.backends.%s_backend' % name] 97 | return mod.AsyncBackend() 98 | 99 | def __connect(self, err_callback): 100 | # The callback is only called in case of exception by async jobs 101 | if self.__dbuser and self.__dbpass: 102 | self._put_job(asyncjobs.AuthorizeJob(self, self.__dbuser, self.__dbpass, self.__pool, err_callback)) 103 | 104 | if self.__rs: 105 | self._put_job(asyncjobs.ConnectRSJob(self, self.__seed, self.__rs, self.__secondary_only, err_callback)) 106 | # Mark the connection as alive, even though it's not alive yet to prevent double-connecting 107 | self.__alive = True 108 | else: 109 | self._socket_connect() 110 | 111 | def _socket_connect(self): 112 | """create a socket, connect, register a stream with the async backend""" 113 | self.usage_count = 0 114 | try: 115 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) 116 | s.connect((self._host, self._port)) 117 | self.__stream = self.__backend.register_stream(s, **self.__kwargs) 118 | self.__stream.set_close_callback(self._socket_close) 119 | self.__alive = True 120 | except socket.error, error: 121 | raise InterfaceError(error) 122 | 123 | def _socket_close(self): 124 | """cleanup after the socket is closed by the other end""" 125 | callback = self.__callback 126 | self.__callback = None 127 | try: 128 | if callback: 129 | callback(None, InterfaceError('connection closed')) 130 | finally: 131 | # Flush the job queue, don't call the callbacks associated with the remaining jobs 132 | # since they have already been called as error callback on connection closing 133 | self.__job_queue = [] 134 | self.__alive = False 135 | self.__pool.cache(self) 136 | 137 | def _close(self): 138 | """close the socket and cleanup""" 139 | callback = self.__callback 140 | self.__callback = None 141 | try: 142 | if callback: 143 | callback(None, InterfaceError('connection closed')) 144 | finally: 145 | # Flush the job queue, don't call the callbacks associated with the remaining jobs 146 | # since they have already been called as error callback on connection closing 147 | self.__job_queue = [] 148 | self.__alive = False 149 | self.__stream.close() 150 | 151 | def close(self): 152 | """close this connection; re-cache this connection object""" 153 | try: 154 | self._close() 155 | finally: 156 | self.__pool.cache(self) 157 | 158 | def send_message(self, message, callback): 159 | """ send a message over the wire; callback=None indicates a safe=False call where we write and forget about it""" 160 | 161 | if self.__callback is not None: 162 | raise ProgrammingError('connection already in use') 163 | 164 | if callback: 165 | err_callback = functools.partial(callback, None) 166 | else: 167 | err_callback = None 168 | 169 | # Go and update err_callback for async jobs in queue if any 170 | for job in self.__job_queue: 171 | # this is a dirty hack and I hate it, but there is no way of setting the correct 172 | # err_callback during the connection time 173 | if isinstance(job, asyncjobs.AsyncJob): 174 | job.update_err_callback(err_callback) 175 | 176 | if not self.__alive: 177 | if self.__autoreconnect: 178 | self.__connect(err_callback) 179 | else: 180 | raise InterfaceError('connection invalid. autoreconnect=False') 181 | 182 | # Put the current message on the bottom of the queue 183 | self._put_job(asyncjobs.AsyncMessage(self, message, callback), 0) 184 | self._next_job() 185 | 186 | def _put_job(self, job, pos=None): 187 | if pos is None: 188 | pos = len(self.__job_queue) 189 | self.__job_queue.insert(pos, job) 190 | 191 | def _next_job(self): 192 | """execute the next job from the top of the queue""" 193 | if self.__job_queue: 194 | # Produce message from the top of the queue 195 | job = self.__job_queue.pop() 196 | # logging.debug("queue = %s, popped %r", self.__job_queue, job) 197 | job.process() 198 | 199 | def _send_message(self, message, callback): 200 | # logging.debug("_send_message, msg = %r: queue = %r, self.__callback = %r, callback = %r", 201 | # message, self.__job_queue, self.__callback, callback) 202 | 203 | self.__callback = callback 204 | self.usage_count +=1 205 | # __request_id used by get_more() 206 | (self.__request_id, data) = message 207 | try: 208 | self.__stream.write(data) 209 | if self.__callback: 210 | self.__stream.read(16, callback=self._parse_header) 211 | else: 212 | self.__request_id = None 213 | self.__pool.cache(self) 214 | 215 | except IOError: 216 | self.__alive = False 217 | raise 218 | # return self.__request_id 219 | 220 | def _parse_header(self, header): 221 | # return self.__receive_data_on_socket(length - 16, sock) 222 | length = int(struct.unpack(">> db.test.insert({"x": "y", "a": "b"}) 178 | ObjectId('...') 179 | >>> list(db.test.find()) 180 | [{u'a': u'b', u'x': u'y', u'_id': ObjectId('...')}] 181 | >>> db.test.update({"x": "y"}, {"$set": {"a": "c"}}) 182 | >>> list(db.test.find()) 183 | [{u'a': u'c', u'x': u'y', u'_id': ObjectId('...')}] 184 | 185 | If `safe` is ``True`` returns the response to the *lastError* 186 | command. Otherwise, returns ``None``. 187 | 188 | # Any additional keyword arguments imply ``safe=True``, and will 189 | # be used as options for the resultant `getLastError` 190 | # command. For example, to wait for replication to 3 nodes, pass 191 | # ``w=3``. 192 | 193 | :Parameters: 194 | - `spec`: a ``dict`` or :class:`~bson.son.SON` instance 195 | specifying elements which must be present for a document 196 | to be updated 197 | - `document`: a ``dict`` or :class:`~bson.son.SON` 198 | instance specifying the document to be used for the update 199 | or (in the case of an upsert) insert - see docs on MongoDB 200 | `update modifiers`_ 201 | - `upsert` (optional): perform an upsert if ``True`` 202 | - `manipulate` (optional): manipulate the document before 203 | updating? If ``True`` all instances of 204 | :mod:`~pymongo.son_manipulator.SONManipulator` added to 205 | this :class:`~pymongo.database.Database` will be applied 206 | to the document before performing the update. 207 | - `safe` (optional): check that the update succeeded? 208 | - `multi` (optional): update all documents that match 209 | `spec`, rather than just the first matching document. The 210 | default value for `multi` is currently ``False``, but this 211 | might eventually change to ``True``. It is recommended 212 | that you specify this argument explicitly for all update 213 | operations in order to prepare your code for that change. 214 | - `**kwargs` (optional): any additional arguments imply 215 | ``safe=True``, and will be used as options for the 216 | `getLastError` command 217 | 218 | .. _update modifiers: http://www.mongodb.org/display/DOCS/Updating 219 | 220 | .. mongodoc:: update 221 | """ 222 | if not isinstance(spec, dict): 223 | raise TypeError("spec must be an instance of dict") 224 | if not isinstance(document, dict): 225 | raise TypeError("document must be an instance of dict") 226 | if not isinstance(upsert, bool): 227 | raise TypeError("upsert must be an instance of bool") 228 | if not isinstance(safe, bool): 229 | raise TypeError("safe must be an instance of bool") 230 | # TODO: apply SON manipulators 231 | # if upsert and manipulate: 232 | # document = self.__database._fix_incoming(document, self) 233 | 234 | if kwargs: 235 | safe = True 236 | 237 | if safe and not callable(callback): 238 | raise TypeError("callback must be callable") 239 | if not safe and callback is not None: 240 | raise TypeError("callback can not be used with safe=False") 241 | 242 | if callback: 243 | callback = functools.partial(self._handle_response, orig_callback=callback) 244 | 245 | self.__limit = None 246 | connection = self.__pool.connection() 247 | try: 248 | connection.send_message( 249 | message.update(self.full_collection_name, upsert, multi, 250 | spec, document, safe, kwargs), callback=callback) 251 | except: 252 | connection.close() 253 | raise 254 | 255 | 256 | def find_one(self, spec_or_id, **kwargs): 257 | """Get a single document from the database. 258 | 259 | All arguments to :meth:`find` are also valid arguments for 260 | :meth:`find_one`, although any `limit` argument will be 261 | ignored. Returns a single document, or ``None`` if no matching 262 | document is found. 263 | """ 264 | if spec_or_id is not None and not isinstance(spec_or_id, dict): 265 | spec_or_id = {"_id": spec_or_id} 266 | kwargs['limit'] = -1 267 | self.find(spec_or_id, **kwargs) 268 | 269 | def find(self, spec=None, fields=None, skip=0, limit=0, 270 | timeout=True, snapshot=False, tailable=False, sort=None, 271 | max_scan=None, slave_okay=False, 272 | _must_use_master=False, _is_command=False, hint=None, debug=False, 273 | comment=None, callback=None): 274 | """Query the database. 275 | 276 | The `spec` argument is a prototype document that all results 277 | must match. For example: 278 | 279 | >>> db.test.find({"hello": "world"}, callback=...) 280 | 281 | only matches documents that have a key "hello" with value 282 | "world". Matches can have other keys *in addition* to 283 | "hello". The `fields` argument is used to specify a subset of 284 | fields that should be included in the result documents. By 285 | limiting results to a certain subset of fields you can cut 286 | down on network traffic and decoding time. 287 | 288 | Raises :class:`TypeError` if any of the arguments are of 289 | improper type. 290 | 291 | :Parameters: 292 | - `spec` (optional): a SON object specifying elements which 293 | must be present for a document to be included in the 294 | result set 295 | - `fields` (optional): a list of field names that should be 296 | returned in the result set ("_id" will always be 297 | included), or a dict specifying the fields to return 298 | - `skip` (optional): the number of documents to omit (from 299 | the start of the result set) when returning the results 300 | - `limit` (optional): the maximum number of results to 301 | return 302 | - `timeout` (optional): if True, any returned cursor will be 303 | subject to the normal timeout behavior of the mongod 304 | process. Otherwise, the returned cursor will never timeout 305 | at the server. Care should be taken to ensure that cursors 306 | with timeout turned off are properly closed. 307 | - `snapshot` (optional): if True, snapshot mode will be used 308 | for this query. Snapshot mode assures no duplicates are 309 | returned, or objects missed, which were present at both 310 | the start and end of the query's execution. For details, 311 | see the `snapshot documentation 312 | `_. 313 | - `tailable` (optional): the result of this find call will 314 | be a tailable cursor - tailable cursors aren't closed when 315 | the last data is retrieved but are kept open and the 316 | cursors location marks the final document's position. if 317 | more data is received iteration of the cursor will 318 | continue from the last document received. For details, see 319 | the `tailable cursor documentation 320 | `_. 321 | - `sort` (optional): a list of (key, direction) pairs 322 | specifying the sort order for this query. See 323 | :meth:`~pymongo.cursor.Cursor.sort` for details. 324 | - `max_scan` (optional): limit the number of documents 325 | examined when performing the query 326 | - `slave_okay` (optional): is it okay to connect directly 327 | to and perform queries on a slave instance 328 | 329 | .. mongodoc:: find 330 | """ 331 | 332 | if spec is None: 333 | spec = {} 334 | 335 | if limit is None: 336 | limit = 0 337 | 338 | if not isinstance(spec, dict): 339 | raise TypeError("spec must be an instance of dict") 340 | if not isinstance(skip, int): 341 | raise TypeError("skip must be an instance of int") 342 | if not isinstance(limit, int): 343 | raise TypeError("limit must be an instance of int or None") 344 | if not isinstance(timeout, bool): 345 | raise TypeError("timeout must be an instance of bool") 346 | if not isinstance(snapshot, bool): 347 | raise TypeError("snapshot must be an instance of bool") 348 | if not isinstance(tailable, bool): 349 | raise TypeError("tailable must be an instance of bool") 350 | if not callable(callback): 351 | raise TypeError("callback must be callable") 352 | 353 | if fields is not None: 354 | if not fields: 355 | fields = {"_id": 1} 356 | if not isinstance(fields, dict): 357 | fields = helpers._fields_list_to_dict(fields) 358 | 359 | self.__spec = spec 360 | self.__fields = fields 361 | self.__skip = skip 362 | self.__limit = limit 363 | self.__batch_size = 0 364 | 365 | self.__timeout = timeout 366 | self.__tailable = tailable 367 | self.__snapshot = snapshot 368 | self.__ordering = sort and helpers._index_document(sort) or None 369 | self.__max_scan = max_scan 370 | self.__slave_okay = slave_okay 371 | self.__explain = False 372 | self.__hint = hint 373 | self.__comment = comment 374 | self.__debug = debug 375 | # self.__as_class = as_class 376 | self.__tz_aware = False #collection.database.connection.tz_aware 377 | self.__must_use_master = _must_use_master 378 | self.__is_command = _is_command 379 | 380 | connection = self.__pool.connection() 381 | try: 382 | if self.__debug: 383 | logging.debug('QUERY_SPEC: %r' % self.__query_spec()) 384 | 385 | connection.send_message( 386 | message.query(self.__query_options(), 387 | self.full_collection_name, 388 | self.__skip, 389 | self.__limit, 390 | self.__query_spec(), 391 | self.__fields), 392 | callback=functools.partial(self._handle_response, orig_callback=callback)) 393 | except Exception, e: 394 | logging.debug('Error sending query %s' % e) 395 | connection.close() 396 | raise 397 | 398 | def _handle_response(self, result, error=None, orig_callback=None): 399 | if result and result.get('cursor_id'): 400 | connection = self.__pool.connection() 401 | try: 402 | connection.send_message( 403 | message.kill_cursors([result['cursor_id']]), 404 | callback=None) 405 | except Exception, e: 406 | logging.debug('Error killing cursor %s: %s' % (result['cursor_id'], e)) 407 | connection.close() 408 | raise 409 | 410 | if error: 411 | logging.debug('%s %s' % (self.full_collection_name , error)) 412 | orig_callback(None, error=error) 413 | else: 414 | if self.__limit == -1 and len(result['data']) == 1: 415 | # handle the find_one() call 416 | orig_callback(result['data'][0], error=None) 417 | else: 418 | orig_callback(result['data'], error=None) 419 | 420 | 421 | def __query_options(self): 422 | """Get the query options string to use for this query.""" 423 | options = 0 424 | if self.__tailable: 425 | options |= _QUERY_OPTIONS["tailable_cursor"] 426 | if self.__slave_okay or self.__pool._slave_okay: 427 | options |= _QUERY_OPTIONS["slave_okay"] 428 | if not self.__timeout: 429 | options |= _QUERY_OPTIONS["no_timeout"] 430 | return options 431 | 432 | def __query_spec(self): 433 | """Get the spec to use for a query.""" 434 | spec = self.__spec 435 | if not self.__is_command and "$query" not in self.__spec: 436 | spec = SON({"$query": self.__spec}) 437 | if self.__ordering: 438 | spec["$orderby"] = self.__ordering 439 | if self.__explain: 440 | spec["$explain"] = True 441 | if self.__hint: 442 | spec["$hint"] = self.__hint 443 | if self.__comment: 444 | spec["$comment"] = self.__comment 445 | if self.__snapshot: 446 | spec["$snapshot"] = True 447 | if self.__max_scan: 448 | spec["$maxScan"] = self.__max_scan 449 | return spec 450 | 451 | 452 | -------------------------------------------------------------------------------- /asyncmongo/errors.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # 3 | # Copyright 2010 bit.ly 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | # StandardError 18 | # |__Error 19 | # |__InterfaceError 20 | # |__DatabaseError 21 | # |__DataError 22 | # |__IntegrityError 23 | # |__ProgrammingError 24 | # |__NotSupportedError 25 | 26 | class Error(StandardError): 27 | pass 28 | 29 | class InterfaceError(Error): 30 | pass 31 | 32 | class RSConnectionError(InterfaceError): 33 | pass 34 | 35 | class DatabaseError(Error): 36 | pass 37 | 38 | class DataError(DatabaseError): 39 | pass 40 | 41 | class IntegrityError(DatabaseError): 42 | def __init__(self, msg, code=None): 43 | self.code = code 44 | self.msg = msg 45 | 46 | def __unicode__(self): 47 | return u'IntegrityError: %s code:%s' % (self.msg, self.code or '') 48 | 49 | def __str__(self): 50 | return str(self.__unicode__()) 51 | 52 | class ProgrammingError(DatabaseError): 53 | pass 54 | 55 | class NotSupportedError(DatabaseError): 56 | pass 57 | 58 | class TooManyConnections(Error): 59 | pass 60 | 61 | class AuthenticationError(Error): 62 | pass 63 | -------------------------------------------------------------------------------- /asyncmongo/helpers.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | 3 | import bson 4 | from bson.son import SON 5 | import struct 6 | from asyncmongo import ASCENDING, DESCENDING, GEO2D 7 | from asyncmongo.errors import (DatabaseError, InterfaceError) 8 | 9 | 10 | def _parse_host(h): 11 | try: 12 | host, port = h.split(":", 1) 13 | port = int(port) 14 | except ValueError: 15 | raise ValueError("Wrong host:port value: %s" % h) 16 | 17 | return host, port 18 | 19 | def _unpack_response(response, cursor_id=None, as_class=dict, tz_aware=False): 20 | """Unpack a response from the database. 21 | 22 | Check the response for errors and unpack, returning a dictionary 23 | containing the response data. 24 | 25 | :Parameters: 26 | - `response`: byte string as returned from the database 27 | - `cursor_id` (optional): cursor_id we sent to get this response - 28 | used for raising an informative exception when we get cursor id not 29 | valid at server response 30 | - `as_class` (optional): class to use for resulting documents 31 | """ 32 | response_flag = struct.unpack("`_ to be sent to 17 | MongoDB. 18 | 19 | .. note:: This module is for internal use and is generally not needed by 20 | application developers. 21 | """ 22 | 23 | import random 24 | import struct 25 | 26 | import bson 27 | from bson.son import SON 28 | try: 29 | from pymongo import _cbson 30 | _use_c = True 31 | except ImportError: 32 | _use_c = False 33 | from pymongo.errors import InvalidOperation 34 | 35 | 36 | __ZERO = "\x00\x00\x00\x00" 37 | 38 | 39 | def __last_error(args): 40 | """Data to send to do a lastError. 41 | """ 42 | cmd = SON([("getlasterror", 1)]) 43 | cmd.update(args) 44 | return query(0, "admin.$cmd", 0, -1, cmd) 45 | 46 | 47 | def __pack_message(operation, data): 48 | """Takes message data and adds a message header based on the operation. 49 | 50 | Returns the resultant message string. 51 | """ 52 | request_id = random.randint(-2 ** 31 - 1, 2 ** 31) 53 | message = struct.pack("= maxcached 87 | assert maxconnections >= mincached 88 | self._args, self._kwargs = args, kwargs 89 | self._maxusage = maxusage 90 | self._mincached = mincached 91 | self._maxcached = maxcached 92 | self._maxconnections = maxconnections 93 | self._idle_cache = [] # the actual connections that can be used 94 | self._condition = Condition() 95 | self._dbname = dbname 96 | self._slave_okay = slave_okay 97 | self._connections = 0 98 | 99 | 100 | # Establish an initial number of idle database connections: 101 | idle = [self.connection() for i in range(mincached)] 102 | while idle: 103 | self.cache(idle.pop()) 104 | 105 | def new_connection(self): 106 | kwargs = self._kwargs 107 | kwargs['pool'] = self 108 | return Connection(*self._args, **kwargs) 109 | 110 | def connection(self): 111 | """ get a cached connection from the pool """ 112 | 113 | self._condition.acquire() 114 | try: 115 | if (self._maxconnections and self._connections >= self._maxconnections): 116 | raise TooManyConnections("%d connections are already equal to the max: %d" % (self._connections, self._maxconnections)) 117 | # connection limit not reached, get a dedicated connection 118 | try: # first try to get it from the idle cache 119 | con = self._idle_cache.pop(0) 120 | except IndexError: # else get a fresh connection 121 | con = self.new_connection() 122 | self._connections += 1 123 | finally: 124 | self._condition.release() 125 | return con 126 | 127 | def cache(self, con): 128 | """Put a dedicated connection back into the idle cache.""" 129 | if self._maxusage and con.usage_count > self._maxusage: 130 | self._connections -=1 131 | logging.debug('dropping connection %s uses past max usage %s' % (con.usage_count, self._maxusage)) 132 | con._close() 133 | return 134 | self._condition.acquire() 135 | if con in self._idle_cache: 136 | # called via socket close on a connection in the idle cache 137 | self._condition.release() 138 | return 139 | try: 140 | if not self._maxcached or len(self._idle_cache) < self._maxcached: 141 | # the idle cache is not full, so put it there 142 | self._idle_cache.append(con) 143 | else: # if the idle cache is already full, 144 | logging.debug('dropping connection. connection pool (%s) is full. maxcached %s' % (len(self._idle_cache), self._maxcached)) 145 | con._close() # then close the connection 146 | self._condition.notify() 147 | finally: 148 | self._connections -= 1 149 | self._condition.release() 150 | 151 | def close(self): 152 | """Close all connections in the pool.""" 153 | self._condition.acquire() 154 | try: 155 | while self._idle_cache: # close all idle connections 156 | con = self._idle_cache.pop(0) 157 | try: 158 | con._close() 159 | except Exception: 160 | pass 161 | self._connections -=1 162 | self._condition.notifyAll() 163 | finally: 164 | self._condition.release() 165 | 166 | 167 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from distutils.core import setup 3 | 4 | # also update version in __init__.py 5 | version = '1.3' 6 | 7 | setup( 8 | name="asyncmongo", 9 | version=version, 10 | keywords=["mongo", "mongodb", "pymongo", "asyncmongo", "tornado"], 11 | long_description=open(os.path.join(os.path.dirname(__file__),"README.md"), "r").read(), 12 | description="Asynchronous library for accessing mongodb built upon the tornado IOLoop.", 13 | author="Jehiah Czebotar", 14 | author_email="jehiah@gmail.com", 15 | url="http://github.com/bitly/asyncmongo", 16 | license="Apache Software License", 17 | classifiers=[ 18 | "License :: OSI Approved :: Apache Software License", 19 | ], 20 | packages=['asyncmongo', 'asyncmongo.backends'], 21 | requires=['pymongo (>=1.9)', 'tornado'], 22 | download_url="https://bitly-downloads.s3.amazonaws.com/asyncmongo/asyncmongo-%s.tar.gz" % version, 23 | ) 24 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitly/asyncmongo/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/test/__init__.py -------------------------------------------------------------------------------- /test/sample_app/README: -------------------------------------------------------------------------------- 1 | this tornado test application is used to ensure that asyncmongo is correctly cleaning up connections 2 | under various disconnect scenarios 3 | 4 | while true; do sh /path/to/mongo/initscript stop; sleep 0.5; sh /path/to/mongo/initscript start; sleep 2; done 5 | 6 | ab -n 10000000 -c 5 'http://127.0.0.1:5150/put' 7 | -------------------------------------------------------------------------------- /test/sample_app/sample_app.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import tornado.httpserver 5 | import tornado.ioloop 6 | import tornado.web 7 | import tornado.options 8 | import logging 9 | import simplejson as json 10 | import asyncmongo 11 | import pymongo.json_util 12 | import base64 13 | import settings 14 | 15 | 16 | class BaseHandler(tornado.web.RequestHandler): 17 | @property 18 | def db(self): 19 | if not hasattr(self, "_db"): 20 | self._db = asyncmongo.Client(pool_id='test_pool', **settings.get('mongo_database')) 21 | return self._db 22 | 23 | def api_response(self, data): 24 | """return an api response in the proper output format with status_code == 200""" 25 | self.set_header("Content-Type", "application/javascript; charset=UTF-8") 26 | data = json.dumps(data, default=pymongo.json_util.default) 27 | self.finish(data) 28 | 29 | 30 | class Put(BaseHandler): 31 | @tornado.web.asynchronous 32 | def get(self): 33 | rand = base64.b64encode(os.urandom(32)) 34 | try: 35 | self.db.test.insert({ 'blah': rand }, callback=self.async_callback(self.finish_save)) 36 | except Exception, e: 37 | logging.error(e) 38 | return self.api_response({'status':'ERROR', 'status_string': '%s' % e}) 39 | 40 | def finish_save(self, response, error): 41 | if error or response[0].get('ok') != 1: 42 | logging.error(error) 43 | raise tornado.web.HTTPError(500, 'QUERY_ERROR') 44 | 45 | self.api_response({'status':'OK', 'status_string': 'record(%s) saved' % response}) 46 | 47 | 48 | class Application(tornado.web.Application): 49 | def __init__(self): 50 | debug = tornado.options.options.environment == "dev" 51 | app_settings = { 'debug':debug } 52 | 53 | handlers = [ 54 | (r"/put", Put) 55 | ] 56 | 57 | tornado.web.Application.__init__(self, handlers, **app_settings) 58 | 59 | 60 | if __name__ == "__main__": 61 | tornado.options.define("port", type=int, default=5150, help="Listen port") 62 | tornado.options.parse_command_line() 63 | 64 | logging.info("starting webserver on 0.0.0.0:%d" % tornado.options.options.port) 65 | http_server = tornado.httpserver.HTTPServer(request_callback=Application()) 66 | http_server.listen(tornado.options.options.port) 67 | tornado.ioloop.IOLoop.instance().start() 68 | -------------------------------------------------------------------------------- /test/sample_app/sample_app2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # mkdir /tmp/asyncmongo_sample_app2 4 | # mongod --port 27017 --oplogSize 10 --dbpath /tmp/asyncmongo_sample_app2 5 | 6 | # $mongo 7 | # >>>use test; 8 | # db.addUser("testuser", "testpass"); 9 | 10 | # ab -n 1000 -c 16 http://127.0.0.1:8888/ 11 | 12 | import sys 13 | import logging 14 | import os 15 | app_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) 16 | if app_dir not in sys.path: 17 | logging.debug('adding %r to sys.path' % app_dir) 18 | sys.path.insert(0, app_dir) 19 | 20 | import asyncmongo 21 | # make sure we get the local asyncmongo 22 | assert asyncmongo.__file__.startswith(app_dir) 23 | 24 | import tornado.ioloop 25 | import tornado.web 26 | import tornado.options 27 | 28 | class MainHandler(tornado.web.RequestHandler): 29 | @tornado.web.asynchronous 30 | def get(self): 31 | db.users.find_one({"user_id" : 1}, callback=self._on_response) 32 | 33 | def _on_response(self, response, error): 34 | assert not error 35 | self.write(str(response)) 36 | self.finish() 37 | 38 | 39 | if __name__ == "__main__": 40 | tornado.options.parse_command_line() 41 | application = tornado.web.Application([ 42 | (r"/?", MainHandler) 43 | ]) 44 | application.listen(8888) 45 | db = asyncmongo.Client(pool_id="test", 46 | host='127.0.0.1', 47 | port=27017, 48 | mincached=5, 49 | maxcached=15, 50 | maxconnections=30, 51 | dbname='test', 52 | dbuser='testuser', 53 | dbpass='testpass') 54 | tornado.ioloop.IOLoop.instance().start() 55 | -------------------------------------------------------------------------------- /test/sample_app/settings.py: -------------------------------------------------------------------------------- 1 | import tornado.options 2 | import random 3 | tornado.options.define("environment", default="dev", help="environment") 4 | 5 | def randomize(values): 6 | """ this is a wrapper that returns a function which when called returns a random value""" 7 | def picker(): 8 | return random.choice(values) 9 | return picker 10 | 11 | options = { 12 | 'dev' : { 13 | 'mongo_database' : {'host' : '127.0.0.1', 'port' : 27017, 'dbname' : 'testdb', 'maxconnections':5} 14 | } 15 | } 16 | 17 | default = {} 18 | 19 | def get(key): 20 | env = tornado.options.options.environment 21 | if env not in options: 22 | raise Exception("Invalid Environment (%s)" % env) 23 | v = options.get(env).get(key) or default.get(key) 24 | if callable(v): 25 | return v() 26 | return v 27 | -------------------------------------------------------------------------------- /test/test_authentication.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import time 3 | import logging 4 | import subprocess 5 | 6 | import test_shunt 7 | import asyncmongo 8 | 9 | TEST_TIMESTAMP = int(time.time()) 10 | 11 | class AuthenticationTest(test_shunt.MongoTest): 12 | def setUp(self): 13 | super(AuthenticationTest, self).setUp() 14 | logging.info('creating user') 15 | pipe = subprocess.Popen('''echo -e 'use test;\n db.addUser("testuser", "testpass");\n exit;' | mongo --port 27018 --host 127.0.0.1''', shell=True) 16 | pipe.wait() 17 | 18 | def test_authentication(self): 19 | try: 20 | test_shunt.setup() 21 | db = asyncmongo.Client(pool_id='testauth', host='127.0.0.1', port=27018, dbname='test', dbuser='testuser', 22 | dbpass='testpass', maxconnections=2) 23 | 24 | def update_callback(response, error): 25 | logging.info("UPDATE:") 26 | tornado.ioloop.IOLoop.instance().stop() 27 | logging.info(response) 28 | assert len(response) == 1 29 | test_shunt.register_called('update') 30 | 31 | db.test_stats.update({"_id" : TEST_TIMESTAMP}, {'$inc' : {'test_count' : 1}}, upsert=True, 32 | callback=update_callback) 33 | 34 | tornado.ioloop.IOLoop.instance().start() 35 | test_shunt.assert_called('update') 36 | 37 | def query_callback(response, error): 38 | tornado.ioloop.IOLoop.instance().stop() 39 | logging.info(response) 40 | logging.info(error) 41 | assert error is None 42 | assert isinstance(response, dict) 43 | assert response['_id'] == TEST_TIMESTAMP 44 | assert response['test_count'] == 1 45 | test_shunt.register_called('retrieved') 46 | 47 | db.test_stats.find_one({"_id" : TEST_TIMESTAMP}, callback=query_callback) 48 | tornado.ioloop.IOLoop.instance().start() 49 | test_shunt.assert_called('retrieved') 50 | except: 51 | tornado.ioloop.IOLoop.instance().stop() 52 | raise 53 | 54 | def test_failed_auth(self): 55 | try: 56 | test_shunt.setup() 57 | db = asyncmongo.Client(pool_id='testauth_f', host='127.0.0.1', port=27018, dbname='test', dbuser='testuser', 58 | dbpass='wrong', maxconnections=2) 59 | 60 | def query_callback(response, error): 61 | tornado.ioloop.IOLoop.instance().stop() 62 | logging.info(response) 63 | logging.info(error) 64 | assert isinstance(error, asyncmongo.AuthenticationError) 65 | assert response is None 66 | test_shunt.register_called('auth_failed') 67 | 68 | db.test_stats.find_one({"_id" : TEST_TIMESTAMP}, callback=query_callback) 69 | tornado.ioloop.IOLoop.instance().start() 70 | test_shunt.assert_called('auth_failed') 71 | except: 72 | tornado.ioloop.IOLoop.instance().stop() 73 | raise 74 | -------------------------------------------------------------------------------- /test/test_command.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | 3 | import test_shunt 4 | import asyncmongo 5 | 6 | 7 | class CommandTest( 8 | test_shunt.MongoTest, 9 | test_shunt.SynchronousMongoTest, 10 | ): 11 | mongod_options = [('--port', '27018')] 12 | 13 | def setUp(self): 14 | super(CommandTest, self).setUp() 15 | self.pymongo_conn.test.foo.insert({'_id': 1}) 16 | 17 | def test_find_and_modify(self): 18 | db = asyncmongo.Client(pool_id='test_query', host='127.0.0.1', port=int(self.mongod_options[0][1]), dbname='test', mincached=3) 19 | 20 | results = [] 21 | 22 | def callback(response, error): 23 | tornado.ioloop.IOLoop.instance().stop() 24 | self.assert_(error is None) 25 | results.append(response['value']) 26 | 27 | before = self.get_open_cursors() 28 | 29 | # First findAndModify creates doc with i: 2 and s: 'a' 30 | db.command('findAndModify', 'foo', 31 | callback=callback, 32 | query={'_id': 2}, 33 | update={'$set': {'s': 'a'}}, 34 | upsert=True, 35 | new=True, 36 | ) 37 | 38 | tornado.ioloop.IOLoop.instance().start() 39 | self.assertEqual( 40 | {'_id': 2, 's': 'a'}, 41 | results[0] 42 | ) 43 | 44 | # Second findAndModify updates doc with i: 2, sets s to 'b' 45 | db.command('findAndModify', 'foo', 46 | callback=callback, 47 | query={'_id': 2}, 48 | update={'$set': {'s': 'b'}}, 49 | upsert=True, 50 | new=True, 51 | ) 52 | 53 | tornado.ioloop.IOLoop.instance().start() 54 | self.assertEqual( 55 | {'_id': 2, 's': 'b'}, 56 | results[1] 57 | ) 58 | 59 | # check cursors 60 | after = self.get_open_cursors() 61 | assert before == after, "%d cursors left open (should be 0)" % (after - before) 62 | 63 | if __name__ == '__main__': 64 | import unittest 65 | unittest.main() 66 | -------------------------------------------------------------------------------- /test/test_connection.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import logging 3 | import time 4 | 5 | import test_shunt 6 | import asyncmongo 7 | from asyncmongo.errors import DataError 8 | 9 | TEST_TIMESTAMP = int(time.time()) 10 | 11 | class ConnectionTest(test_shunt.MongoTest): 12 | def test_getitem(self): 13 | db = asyncmongo.Client(pool_id='test_query', host='127.0.0.1', port=27018, dbname='test', mincached=3) 14 | self.assert_( 15 | repr(db['foo']) == repr(db.foo), 16 | "dict-style access of a collection should be same as property access" 17 | ) 18 | 19 | def test_connection(self): 20 | db = asyncmongo.Client(pool_id='test_query', host='127.0.0.1', port=27018, dbname='test', mincached=3) 21 | for connection_name in [ 22 | '.', 23 | '..', 24 | '.foo', 25 | 'foo.', 26 | '.foo.', 27 | 'foo\x00' 28 | '\x00foo' 29 | ]: 30 | self.assertRaises( 31 | DataError, 32 | lambda: db.connection(connection_name) 33 | ) 34 | 35 | def test_query(self): 36 | logging.info('in test_query') 37 | test_shunt.setup() 38 | db = asyncmongo.Client(pool_id='test_query', host='127.0.0.1', port=27018, dbname='test', mincached=3) 39 | 40 | def insert_callback(response, error): 41 | tornado.ioloop.IOLoop.instance().stop() 42 | logging.info(response) 43 | assert len(response) == 1 44 | test_shunt.register_called('inserted') 45 | 46 | db.test_users.insert({"_id" : "test_connection.%d" % TEST_TIMESTAMP}, safe=True, callback=insert_callback) 47 | 48 | tornado.ioloop.IOLoop.instance().start() 49 | test_shunt.assert_called('inserted') 50 | 51 | def callback(response, error): 52 | tornado.ioloop.IOLoop.instance().stop() 53 | assert len(response) == 1 54 | test_shunt.register_called('got_record') 55 | 56 | db.test_users.find({}, limit=1, callback=callback) 57 | 58 | tornado.ioloop.IOLoop.instance().start() 59 | test_shunt.assert_called("got_record") 60 | -------------------------------------------------------------------------------- /test/test_duplicate_insert.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import time 3 | import logging 4 | 5 | import test_shunt 6 | import asyncmongo 7 | 8 | TEST_TIMESTAMP = int(time.time()) 9 | 10 | class DuplicateInsertTest(test_shunt.MongoTest): 11 | def test_duplicate_insert(self): 12 | test_shunt.setup() 13 | db = asyncmongo.Client(pool_id='dup_insert', host='127.0.0.1', port=27018, dbname='test') 14 | 15 | def insert_callback(response, error): 16 | tornado.ioloop.IOLoop.instance().stop() 17 | logging.info(response) 18 | assert len(response) == 1 19 | test_shunt.register_called('inserted') 20 | 21 | db.test_users.insert({"_id" : "duplicate_insert.%d" % TEST_TIMESTAMP}, callback=insert_callback) 22 | 23 | tornado.ioloop.IOLoop.instance().start() 24 | test_shunt.assert_called('inserted') 25 | 26 | def duplicate_callback(response, error): 27 | tornado.ioloop.IOLoop.instance().stop() 28 | logging.info(response) 29 | if error: 30 | test_shunt.register_called('dupe') 31 | 32 | db.test_users.insert({"_id" : "duplicate_insert.%d" % TEST_TIMESTAMP}, callback=duplicate_callback) 33 | 34 | tornado.ioloop.IOLoop.instance().start() 35 | test_shunt.assert_called('dupe') 36 | 37 | -------------------------------------------------------------------------------- /test/test_insert_delete.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import time 3 | import logging 4 | 5 | import test_shunt 6 | import asyncmongo 7 | 8 | TEST_TIMESTAMP = int(time.time()) 9 | 10 | class InsertDeleteTest(test_shunt.MongoTest): 11 | def test_insert(self): 12 | test_shunt.setup() 13 | db = asyncmongo.Client(pool_id='testinsert', host='127.0.0.1', port=27018, dbname='test') 14 | 15 | def insert_callback(response, error): 16 | tornado.ioloop.IOLoop.instance().stop() 17 | logging.info(response) 18 | assert len(response) == 1 19 | test_shunt.register_called('inserted') 20 | 21 | db.test_users.insert({"_id" : "insert.%d" % TEST_TIMESTAMP}, callback=insert_callback) 22 | 23 | tornado.ioloop.IOLoop.instance().start() 24 | test_shunt.assert_called('inserted') 25 | 26 | def query_callback(response, error): 27 | tornado.ioloop.IOLoop.instance().stop() 28 | logging.info(response) 29 | assert len(response) == 1 30 | test_shunt.register_called('retrieved') 31 | 32 | db.test_users.find_one({"_id" : "insert.%d" % TEST_TIMESTAMP}, callback=query_callback) 33 | tornado.ioloop.IOLoop.instance().start() 34 | test_shunt.assert_called('retrieved') 35 | 36 | 37 | def delete_callback(response, error): 38 | tornado.ioloop.IOLoop.instance().stop() 39 | logging.info(response) 40 | assert len(response) == 1 41 | test_shunt.register_called('deleted') 42 | 43 | db.test_users.remove({"_id" : "insert.%d" % TEST_TIMESTAMP}, callback=delete_callback) 44 | tornado.ioloop.IOLoop.instance().start() 45 | test_shunt.assert_called('deleted') 46 | 47 | -------------------------------------------------------------------------------- /test/test_pooled_db.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import logging 3 | import time 4 | from asyncmongo.errors import TooManyConnections 5 | 6 | import test_shunt 7 | import asyncmongo 8 | TEST_TIMESTAMP = int(time.time()) 9 | 10 | class PooledDBTest(test_shunt.MongoTest): 11 | def test_pooled_db(self): 12 | """ 13 | This tests simply verifies that we can grab two different connections from the pool 14 | and use them independantly. 15 | """ 16 | print asyncmongo.__file__ 17 | test_shunt.setup() 18 | client = asyncmongo.Client('id1', maxconnections=5, host='127.0.0.1', port=27018, dbname='test') 19 | test_users_collection = client.connection('test_users') 20 | 21 | def insert_callback(response, error): 22 | tornado.ioloop.IOLoop.instance().stop() 23 | logging.info(response) 24 | assert len(response) == 1 25 | test_shunt.register_called('inserted') 26 | 27 | test_users_collection.insert({"_id" : "record_test.%d" % TEST_TIMESTAMP}, safe=True, callback=insert_callback) 28 | 29 | tornado.ioloop.IOLoop.instance().start() 30 | test_shunt.assert_called('inserted') 31 | 32 | def pool_callback(response, error): 33 | if test_shunt.is_called('pool2'): 34 | tornado.ioloop.IOLoop.instance().stop() 35 | assert len(response) == 1 36 | test_shunt.register_called('pool1') 37 | 38 | def pool_callback2(response, error): 39 | if test_shunt.is_called('pool1'): 40 | # don't expect 2 finishes second 41 | tornado.ioloop.IOLoop.instance().stop() 42 | assert len(response) == 1 43 | test_shunt.register_called('pool2') 44 | 45 | test_users_collection.find({}, limit=1, callback=pool_callback) 46 | test_users_collection.find({}, limit=1, callback=pool_callback2) 47 | 48 | tornado.ioloop.IOLoop.instance().start() 49 | test_shunt.assert_called('pool1') 50 | test_shunt.assert_called('pool2') 51 | 52 | def too_many_connections(self): 53 | clients = [ 54 | asyncmongo.Client('id2', maxconnections=2, host='127.0.0.1', port=27018, dbname='test') 55 | for i in range(3) 56 | ] 57 | 58 | def callback(response, error): 59 | pass 60 | 61 | for client in clients[:2]: 62 | client.connection('foo').find({}, callback=callback) 63 | 64 | self.assertRaises( 65 | TooManyConnections, 66 | lambda: clients[2].connection('foo').find({}, callback=callback) 67 | ) 68 | 69 | -------------------------------------------------------------------------------- /test/test_query.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import logging 3 | import time 4 | 5 | import test_shunt 6 | import asyncmongo 7 | 8 | 9 | class QueryTest(test_shunt.MongoTest, test_shunt.SynchronousMongoTest): 10 | mongod_options = [('--port', '27018')] 11 | 12 | def setUp(self): 13 | super(QueryTest, self).setUp() 14 | self.pymongo_conn.test.foo.insert([{'i': i} for i in xrange(200)]) 15 | 16 | def test_query(self): 17 | db = asyncmongo.Client(pool_id='test_query', host='127.0.0.1', port=int(self.mongod_options[0][1]), dbname='test', mincached=3) 18 | 19 | def noop_callback(response, error): 20 | logging.info(response) 21 | loop = tornado.ioloop.IOLoop.instance() 22 | # delay the stop so kill cursor has time on the ioloop to get pushed through to mongo 23 | loop.add_timeout(time.time() + .1, loop.stop) 24 | 25 | before = self.get_open_cursors() 26 | 27 | # run 2 queries 28 | db.foo.find({}, callback=noop_callback) 29 | tornado.ioloop.IOLoop.instance().start() 30 | db.foo.find({}, callback=noop_callback) 31 | tornado.ioloop.IOLoop.instance().start() 32 | 33 | # check cursors 34 | after = self.get_open_cursors() 35 | assert before == after, "%d cursors left open (should be 0)" % (after - before) 36 | 37 | if __name__ == '__main__': 38 | import unittest 39 | unittest.main() 40 | -------------------------------------------------------------------------------- /test/test_replica_set.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import time 3 | import logging 4 | import subprocess 5 | 6 | import test_shunt 7 | import asyncmongo 8 | import asyncmongo.connection 9 | 10 | TEST_TIMESTAMP = int(time.time()) 11 | 12 | class ReplicaSetTest(test_shunt.MongoTest): 13 | mongod_options = [ 14 | ('--port', '27018', '--replSet', 'rs0'), 15 | ('--port', '27019', '--replSet', 'rs0'), 16 | ('--port', '27020', '--replSet', 'rs0'), 17 | ] 18 | 19 | def mongo_cmd(self, cmd, port=27018, res='"ok" : 1'): 20 | logging.info("mongo_cmd: %s", cmd) 21 | pipe = subprocess.Popen("mongo --port %d" % port, shell=True, 22 | stdout=subprocess.PIPE, stdin=subprocess.PIPE) 23 | reply = pipe.communicate(cmd)[0] 24 | assert reply.find(res) > 0 25 | return reply 26 | 27 | def wait_master(self, port): 28 | while True: 29 | if self.mongo_cmd("db.isMaster();", port).find('"ismaster" : true') > 0: 30 | logging.info("%d is a master", port) 31 | break 32 | else: 33 | logging.info("Waiting for %d to become master", port) 34 | time.sleep(5) 35 | 36 | def wait_secondary(self, port): 37 | while True: 38 | if self.mongo_cmd("db.isMaster();", port).find('"secondary" : true') > 0: 39 | logging.info("%d is a secondary", port) 40 | break 41 | else: 42 | logging.info("Waiting for %d to become secondary", port) 43 | time.sleep(5) 44 | 45 | def setUp(self): 46 | super(ReplicaSetTest, self).setUp() 47 | logging.info("configuring a replica set at 127.0.0.1") 48 | cfg = """ 49 | { 50 | "_id" : "rs0", 51 | "members" : [ 52 | { 53 | "_id" : 0, 54 | "host" : "127.0.0.1:27018" 55 | }, 56 | { 57 | "_id" : 1, 58 | "host" : "127.0.0.1:27019", 59 | "priority" : 2 60 | }, 61 | { 62 | "_id" : 2, 63 | "host" : "127.0.0.1:27020", 64 | "priority" : 0, 65 | "hidden": true 66 | } 67 | ] 68 | } 69 | """ 70 | self.mongo_cmd("rs.initiate(%s);" % cfg, 27019) 71 | logging.info("waiting for replica set to finish configuring") 72 | self.wait_master(27019) 73 | self.wait_secondary(27018) 74 | 75 | def test_connection(self): 76 | class Pool(object): 77 | def __init__(self): 78 | super(Pool, self).__init__() 79 | self._cache = [] 80 | 81 | def cache(self, c): 82 | self._cache.append(c) 83 | 84 | class AsyncClose(object): 85 | def process(self, *args, **kwargs): 86 | tornado.ioloop.IOLoop.instance().stop() 87 | 88 | try: 89 | for i in xrange(10): 90 | conn = asyncmongo.connection.Connection(pool=Pool(), 91 | seed=[('127.0.0.1', 27018), ('127.0.0.1', 27020)], 92 | rs="rs0") 93 | 94 | conn._put_job(AsyncClose(), 0) 95 | conn._next_job() 96 | tornado.ioloop.IOLoop.instance().start() 97 | 98 | assert conn._host == '127.0.0.1' 99 | assert conn._port == 27019 100 | 101 | for i in xrange(10): 102 | conn = asyncmongo.connection.Connection(pool=Pool(), 103 | seed=[('127.0.0.1', 27018), ('127.0.0.1', 27020)], 104 | rs="rs0", secondary_only=True) 105 | 106 | conn._put_job(AsyncClose(), 0) 107 | conn._next_job() 108 | tornado.ioloop.IOLoop.instance().start() 109 | 110 | assert conn._host == '127.0.0.1' 111 | assert conn._port == 27018 112 | 113 | except: 114 | tornado.ioloop.IOLoop.instance().stop() 115 | raise 116 | 117 | def test_update(self): 118 | try: 119 | test_shunt.setup() 120 | 121 | db = asyncmongo.Client(pool_id='testrs_f', rs="wrong_rs", seed=[("127.0.0.1", 27020)], dbname='test', maxconnections=2) 122 | 123 | # Try to update with a wrong replica set name 124 | def update_callback(response, error): 125 | tornado.ioloop.IOLoop.instance().stop() 126 | logging.info(response) 127 | logging.info(error) 128 | assert isinstance(error, asyncmongo.RSConnectionError) 129 | test_shunt.register_called('update_f') 130 | 131 | db.test_stats.update({"_id" : TEST_TIMESTAMP}, {'$inc' : {'test_count' : 1}}, callback=update_callback) 132 | 133 | tornado.ioloop.IOLoop.instance().start() 134 | test_shunt.assert_called('update_f') 135 | 136 | db = asyncmongo.Client(pool_id='testrs', rs="rs0", seed=[("127.0.0.1", 27020)], dbname='test', maxconnections=2) 137 | 138 | # Update 139 | def update_callback(response, error): 140 | logging.info("UPDATE:") 141 | tornado.ioloop.IOLoop.instance().stop() 142 | logging.info(response) 143 | assert len(response) == 1 144 | test_shunt.register_called('update') 145 | 146 | db.test_stats.update({"_id" : TEST_TIMESTAMP}, {'$inc' : {'test_count' : 1}}, upsert=True, callback=update_callback) 147 | 148 | tornado.ioloop.IOLoop.instance().start() 149 | test_shunt.assert_called('update') 150 | 151 | # Retrieve the updated value 152 | def query_callback(response, error): 153 | tornado.ioloop.IOLoop.instance().stop() 154 | logging.info(response) 155 | logging.info(error) 156 | assert error is None 157 | assert isinstance(response, dict) 158 | assert response['_id'] == TEST_TIMESTAMP 159 | assert response['test_count'] == 1 160 | test_shunt.register_called('retrieved') 161 | 162 | db.test_stats.find_one({"_id" : TEST_TIMESTAMP}, callback=query_callback) 163 | tornado.ioloop.IOLoop.instance().start() 164 | test_shunt.assert_called('retrieved') 165 | 166 | # Switch the master 167 | self.mongo_cmd( 168 | "cfg = rs.conf(); cfg.members[1].priority = 1; cfg.members[0].priority = 2; rs.reconfig(cfg);", 169 | 27019, "reconnected to server") 170 | self.wait_master(27018) 171 | 172 | # Expect the connection to be closed 173 | def query_err_callback(response, error): 174 | tornado.ioloop.IOLoop.instance().stop() 175 | logging.info(response) 176 | logging.info(error) 177 | assert isinstance(error, Exception) 178 | 179 | db.test_stats.find_one({"_id" : TEST_TIMESTAMP}, callback=query_err_callback) 180 | tornado.ioloop.IOLoop.instance().start() 181 | 182 | # Retrieve the updated value again, from the new master 183 | def query_again_callback(response, error): 184 | tornado.ioloop.IOLoop.instance().stop() 185 | logging.info(response) 186 | logging.info(error) 187 | assert error is None 188 | assert isinstance(response, dict) 189 | assert response['_id'] == TEST_TIMESTAMP 190 | assert response['test_count'] == 1 191 | test_shunt.register_called('retrieved_again') 192 | 193 | db.test_stats.find_one({"_id" : TEST_TIMESTAMP}, callback=query_again_callback) 194 | tornado.ioloop.IOLoop.instance().start() 195 | test_shunt.assert_called('retrieved_again') 196 | except: 197 | tornado.ioloop.IOLoop.instance().stop() 198 | raise 199 | -------------------------------------------------------------------------------- /test/test_safe_updates.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import time 3 | import logging 4 | 5 | import test_shunt 6 | import asyncmongo 7 | 8 | TEST_TIMESTAMP = int(time.time()) 9 | 10 | class SafeUpdatesTest(test_shunt.MongoTest): 11 | def test_update_safe(self): 12 | test_shunt.setup() 13 | db = asyncmongo.Client(pool_id='testinsert', host='127.0.0.1', port=27018, dbname='test', maxconnections=2) 14 | 15 | def update_callback(response, error): 16 | tornado.ioloop.IOLoop.instance().stop() 17 | logging.info(response) 18 | assert len(response) == 1 19 | test_shunt.register_called('update') 20 | 21 | # all of these should be called, but only one should have a callback 22 | # we also are checking that connections in the pool never increases >1 with max_connections=2 23 | # this is because connections for safe=False calls get put back in the pool immediated 24 | db.test_stats.update({"_id" : TEST_TIMESTAMP}, {'$inc' : {'test_count' : 1}}, safe=False, upsert=True) 25 | db.test_stats.update({"_id" : TEST_TIMESTAMP}, {'$inc' : {'test_count' : 1}}, safe=False, upsert=True) 26 | db.test_stats.update({"_id" : TEST_TIMESTAMP}, {'$inc' : {'test_count' : 1}}, safe=False, upsert=True) 27 | db.test_stats.update({"_id" : TEST_TIMESTAMP}, {'$inc' : {'test_count' : 1}}, safe=False, upsert=True) 28 | db.test_stats.update({"_id" : TEST_TIMESTAMP}, {'$inc' : {'test_count' : 1}}, upsert=True, callback=update_callback) 29 | 30 | tornado.ioloop.IOLoop.instance().start() 31 | test_shunt.assert_called('update') 32 | 33 | def query_callback(response, error): 34 | tornado.ioloop.IOLoop.instance().stop() 35 | logging.info(response) 36 | assert isinstance(response, dict) 37 | assert response['_id'] == TEST_TIMESTAMP 38 | assert response['test_count'] == 5 39 | test_shunt.register_called('retrieved') 40 | 41 | db.test_stats.find_one({"_id" : TEST_TIMESTAMP}, callback=query_callback) 42 | tornado.ioloop.IOLoop.instance().start() 43 | test_shunt.assert_called('retrieved') 44 | -------------------------------------------------------------------------------- /test/test_shunt.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | import os 4 | import unittest 5 | import subprocess 6 | import signal 7 | import time 8 | 9 | import tornado.ioloop 10 | import pymongo 11 | 12 | logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, 13 | format='%(asctime)s %(process)d %(filename)s %(lineno)d %(levelname)s #| %(message)s', 14 | datefmt='%H:%M:%S') 15 | 16 | # add the path to the local asyncmongo 17 | # there is probably a better way to do this that doesn't require magic 18 | app_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) 19 | if app_dir not in sys.path: 20 | logging.debug('adding %r to sys.path' % app_dir) 21 | sys.path.insert(0, app_dir) 22 | 23 | import asyncmongo 24 | import asyncmongo.pool 25 | # make sure we get the local asyncmongo 26 | assert asyncmongo.__file__.startswith(app_dir) 27 | 28 | class PuritanicalIOLoop(tornado.ioloop.IOLoop): 29 | """ 30 | A loop that quits when it encounters an Exception -- makes errors in 31 | callbacks easier to debug and prevents them from hanging the unittest 32 | suite. 33 | """ 34 | def handle_callback_exception(self, callback): 35 | exc_type, exc_value, tb = sys.exc_info() 36 | raise exc_value 37 | 38 | class MongoTest(unittest.TestCase): 39 | """ 40 | Starts and stops a mongod 41 | """ 42 | mongod_options = [('--port', str(27018))] 43 | def setUp(self): 44 | """setup method that starts up mongod instances using `self.mongo_options`""" 45 | # So any function that calls IOLoop.instance() gets the 46 | # PuritanicalIOLoop instead of a default loop. 47 | if not tornado.ioloop.IOLoop.initialized(): 48 | self.loop = PuritanicalIOLoop() 49 | tornado.ioloop.IOLoop._instance = self.loop 50 | else: 51 | self.loop = tornado.ioloop.IOLoop.instance() 52 | self.assert_( 53 | isinstance(self.loop, PuritanicalIOLoop), 54 | "Couldn't install IOLoop" 55 | ) 56 | 57 | self.temp_dirs = [] 58 | self.mongods = [] 59 | for options in self.mongod_options: 60 | dirname = os.tempnam() 61 | os.makedirs(dirname) 62 | self.temp_dirs.append(dirname) 63 | 64 | options = ['mongod', '--oplogSize', '2', '--dbpath', dirname, 65 | '--smallfiles', '-v', '--nojournal', '--bind_ip', '0.0.0.0'] + list(options) 66 | logging.debug(options) 67 | pipe = subprocess.Popen(options) 68 | self.mongods.append(pipe) 69 | logging.debug('started mongod %s' % pipe.pid) 70 | sleep_time = 1 + (len(self.mongods) * 2) 71 | logging.info('waiting for mongod to start (sleeping %d seconds)' % sleep_time) 72 | time.sleep(sleep_time) 73 | 74 | def tearDown(self): 75 | """teardown method that cleans up child mongod instances, and removes their temporary data files""" 76 | logging.debug('teardown') 77 | asyncmongo.pool.ConnectionPools.close_idle_connections() 78 | for mongod in self.mongods: 79 | logging.debug('killing mongod %s' % mongod.pid) 80 | os.kill(mongod.pid, signal.SIGKILL) 81 | mongod.wait() 82 | for dirname in self.temp_dirs: 83 | logging.debug('cleaning up %s' % dirname) 84 | pipe = subprocess.Popen(['rm', '-rf', dirname]) 85 | pipe.wait() 86 | 87 | 88 | class SynchronousMongoTest(unittest.TestCase): 89 | """ 90 | Convenience class: a test case that can make synchronous calls to the 91 | official pymongo to ease setup code, via the pymongo_conn property. 92 | """ 93 | mongod_options = [('--port', str(27018))] 94 | @property 95 | def pymongo_conn(self): 96 | if not hasattr(self, '_pymongo_conn'): 97 | self._pymongo_conn = pymongo.Connection(port=int(self.mongod_options[0][1])) 98 | return self._pymongo_conn 99 | 100 | def get_open_cursors(self): 101 | output = self.pymongo_conn.admin.command('serverStatus') 102 | return output.get('cursors', {}).get('totalOpen') 103 | 104 | results = {} 105 | 106 | def setup(): 107 | global results 108 | results = {} 109 | 110 | def register_called(key, data=None): 111 | assert key not in results 112 | results[key] = data 113 | 114 | def assert_called(key, data=None): 115 | assert key in results 116 | assert results[key] == data 117 | 118 | def is_called(key): 119 | return key in results 120 | -------------------------------------------------------------------------------- /test/test_slave_only.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import time 3 | import logging 4 | 5 | import test_shunt 6 | import asyncmongo 7 | 8 | TEST_TIMESTAMP = int(time.time()) 9 | 10 | class SlaveOnlyTest(test_shunt.MongoTest): 11 | mongod_options = [ 12 | ('--port', '27018', '--master'), 13 | ('--port', '27019', '--slave', '--source', '127.0.0.1:27018'), 14 | ] 15 | def test_query_slave(self): 16 | try: 17 | test_shunt.setup() 18 | masterdb = asyncmongo.Client(pool_id='testquerymaster', host='127.0.0.1', port=27018, dbname='test', maxconnections=2) 19 | slavedb = asyncmongo.Client(pool_id='testqueryslave', host='127.0.0.1', port=27019, dbname='test', maxconnections=2, slave_okay=True) 20 | logging.debug('waiting for replication to start (sleeping 4 seconds)') 21 | time.sleep(4) 22 | 23 | def update_callback(response, error): 24 | tornado.ioloop.IOLoop.instance().stop() 25 | logging.info(response) 26 | assert len(response) == 1 27 | test_shunt.register_called('update') 28 | 29 | masterdb.test_stats.update({"_id" : TEST_TIMESTAMP}, {'$inc' : {'test_count' : 1}}, upsert=True, callback=update_callback) 30 | 31 | tornado.ioloop.IOLoop.instance().start() 32 | test_shunt.assert_called('update') 33 | 34 | # wait for the insert to get to the slave 35 | time.sleep(2.5) 36 | 37 | def query_callback(response, error): 38 | tornado.ioloop.IOLoop.instance().stop() 39 | logging.info(response) 40 | logging.info(error) 41 | assert error is None 42 | assert isinstance(response, dict) 43 | assert response['_id'] == TEST_TIMESTAMP 44 | assert response['test_count'] == 1 45 | test_shunt.register_called('retrieved') 46 | 47 | slavedb.test_stats.find_one({"_id" : TEST_TIMESTAMP}, callback=query_callback) 48 | tornado.ioloop.IOLoop.instance().start() 49 | test_shunt.assert_called('retrieved') 50 | except: 51 | tornado.ioloop.IOLoop.instance().stop() 52 | raise 53 | -------------------------------------------------------------------------------- /test/testgtk2/test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import base64 5 | import pygtk 6 | pygtk.require('2.0') 7 | import gtk 8 | import asyncmongo 9 | 10 | database= {'host' : '127.0.0.1', 'port' : 27018, 'dbname' : 'testdb', 'maxconnections':5} 11 | 12 | class TestApp(object): 13 | def __init__(self): 14 | self.__win = gtk.Window() 15 | self.__win.set_title("AsyncMongo test") 16 | box = gtk.VBox() 17 | self.__win.add(box) 18 | 19 | self.message = gtk.Label('') 20 | box.pack_start(self.message) 21 | 22 | btn = gtk.Button(label="Test Insert") 23 | box.pack_start(btn) 24 | btn.connect('clicked', self._on_insert_clicked) 25 | 26 | btn = gtk.Button(label="Test Query") 27 | box.pack_start(btn) 28 | btn.connect('clicked', self._on_query_clicked) 29 | 30 | self._db = asyncmongo.Client(pool_id='test_pool', backend="glib2", **database) 31 | 32 | def _on_query_clicked(self, obj): 33 | self._db.test.find({}, callback=self._on_query_response) 34 | 35 | def _on_query_response(self, data, error): 36 | if error: 37 | self.message.set_text(error) 38 | 39 | self.message.set_text('Query OK, %d objects found' % len(data)) 40 | 41 | def _on_insert_clicked(self, obj): 42 | rand = base64.b64encode(os.urandom(32)) 43 | try: 44 | self._db.test.insert({ 'blah': rand }, callback=self._on_insertion) 45 | except Exception, e: 46 | print e 47 | 48 | def _on_insertion(self, data, error): 49 | if error: 50 | self.message.set_text(error) 51 | 52 | self.message.set_text("Insert OK") 53 | 54 | def show(self): 55 | self.__win.show_all() 56 | 57 | if __name__ == "__main__": 58 | app = TestApp() 59 | app.show() 60 | gtk.main() 61 | -------------------------------------------------------------------------------- /test/testgtk3/test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import base64 5 | import asyncmongo 6 | from gi.repository import Gtk 7 | 8 | database= {'host' : '127.0.0.1', 'port' : 27018, 'dbname' : 'testdb', 'maxconnections':5} 9 | 10 | class TestApp(object): 11 | def __init__(self): 12 | self.__win = Gtk.Window() 13 | self.__win.set_title("AsyncMongo test") 14 | box = Gtk.VBox() 15 | self.__win.add(box) 16 | 17 | self.message = Gtk.Label('') 18 | box.pack_start(self.message, 0, 1, 1) 19 | 20 | btn = Gtk.Button(label="Test Insert") 21 | box.pack_start(btn, 0, 1, 1) 22 | btn.connect('clicked', self._on_insert_clicked) 23 | 24 | btn = Gtk.Button(label="Test Query") 25 | box.pack_start(btn, 0, 1, 1) 26 | btn.connect('clicked', self._on_query_clicked) 27 | 28 | self._db = asyncmongo.Client(pool_id='test_pool', backend="glib3", **database) 29 | 30 | def _on_query_clicked(self, obj): 31 | self._db.test.find({}, callback=self._on_query_response) 32 | 33 | def _on_query_response(self, data, error): 34 | if error: 35 | self.message.set_text(error) 36 | 37 | self.message.set_text('Query OK, %d objects found' % len(data)) 38 | 39 | def _on_insert_clicked(self, obj): 40 | rand = base64.b64encode(os.urandom(32)) 41 | try: 42 | self._db.test.insert({ 'blah': rand }, callback=self._on_insertion) 43 | except Exception, e: 44 | print e 45 | 46 | def _on_insertion(self, data, error): 47 | if error: 48 | self.message.set_text(error) 49 | 50 | self.message.set_text("Insert OK") 51 | 52 | def show(self): 53 | self.__win.show_all() 54 | 55 | if __name__ == "__main__": 56 | app = TestApp() 57 | app.show() 58 | Gtk.main() 59 | --------------------------------------------------------------------------------