├── .gitignore ├── .travis.yml ├── COPYING ├── MANIFEST.in ├── Makefile ├── README.md ├── config-replicaset.js ├── docs ├── Makefile └── source │ ├── api │ ├── collection.rst │ ├── database.rst │ ├── errors.rst │ ├── index.rst │ └── orm.rst │ ├── conf.py │ ├── index.rst │ ├── installation.rst │ └── tutorial.rst ├── mongotor ├── __init__.py ├── client.py ├── connection.py ├── cursor.py ├── database.py ├── errors.py ├── helpers.py ├── message.py ├── node.py ├── orm │ ├── __init__.py │ ├── collection.py │ ├── field.py │ ├── manager.py │ └── signal.py └── pool.py ├── requirements-dev.txt ├── requirements.txt ├── setup.cfg ├── setup.py ├── tests ├── __init__.py ├── orm │ ├── __init__.py │ ├── test_collection.py │ ├── test_manager.py │ └── test_signal.py ├── test_client.py ├── test_connection.py ├── test_cursor.py ├── test_database.py ├── test_node.py ├── test_pool.py ├── test_replicaset.py └── util.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[co] 2 | 3 | # Packages 4 | *.egg 5 | *.egg-info 6 | dist 7 | build 8 | eggs 9 | parts 10 | bin 11 | var 12 | sdist 13 | develop-eggs 14 | .installed.cfg 15 | 16 | # Installer logs 17 | pip-log.txt 18 | 19 | # Unit test / coverage reports 20 | .coverage 21 | .tox 22 | 23 | #Translations 24 | *.mo 25 | 26 | data 27 | 28 | #Mr Developer 29 | .mr.developer.cfg 30 | 31 | #PyCharm 32 | .idea/ -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.7" 4 | 5 | script: 6 | - pip install -r requirements-dev.txt 7 | - make mongo-start 8 | - make mongo-config 9 | - python setup.py build 10 | - make test 11 | 12 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | Copyright (C) <2010-2012> 
Marcel Nicolay 2 | 3 | GNU LESSER GENERAL PUBLIC LICENSE 4 | Version 3, 29 June 2007 5 | 6 | Copyright (C) 2007 Free Software Foundation, Inc. 7 | Everyone is permitted to copy and distribute verbatim copies 8 | of this license document, but changing it is not allowed. 9 | 10 | 11 | This version of the GNU Lesser General Public License incorporates 12 | the terms and conditions of version 3 of the GNU General Public 13 | License, supplemented by the additional permissions listed below. 14 | 15 | 0. Additional Definitions. 16 | 17 | As used herein, "this License" refers to version 3 of the GNU Lesser 18 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 19 | General Public License. 20 | 21 | "The Library" refers to a covered work governed by this License, 22 | other than an Application or a Combined Work as defined below. 23 | 24 | An "Application" is any work that makes use of an interface provided 25 | by the Library, but which is not otherwise based on the Library. 26 | Defining a subclass of a class defined by the Library is deemed a mode 27 | of using an interface provided by the Library. 28 | 29 | A "Combined Work" is a work produced by combining or linking an 30 | Application with the Library. The particular version of the Library 31 | with which the Combined Work was made is also called the "Linked 32 | Version". 33 | 34 | The "Minimal Corresponding Source" for a Combined Work means the 35 | Corresponding Source for the Combined Work, excluding any source code 36 | for portions of the Combined Work that, considered in isolation, are 37 | based on the Application, and not on the Linked Version. 38 | 39 | The "Corresponding Application Code" for a Combined Work means the 40 | object code and/or source code for the Application, including any data 41 | and utility programs needed for reproducing the Combined Work from the 42 | Application, but excluding the System Libraries of the Combined Work. 43 | 44 | 1. 
Exception to Section 3 of the GNU GPL. 45 | 46 | You may convey a covered work under sections 3 and 4 of this License 47 | without being bound by section 3 of the GNU GPL. 48 | 49 | 2. Conveying Modified Versions. 50 | 51 | If you modify a copy of the Library, and, in your modifications, a 52 | facility refers to a function or data to be supplied by an Application 53 | that uses the facility (other than as an argument passed when the 54 | facility is invoked), then you may convey a copy of the modified 55 | version: 56 | 57 | a) under this License, provided that you make a good faith effort to 58 | ensure that, in the event an Application does not supply the 59 | function or data, the facility still operates, and performs 60 | whatever part of its purpose remains meaningful, or 61 | 62 | b) under the GNU GPL, with none of the additional permissions of 63 | this License applicable to that copy. 64 | 65 | 3. Object Code Incorporating Material from Library Header Files. 66 | 67 | The object code form of an Application may incorporate material from 68 | a header file that is part of the Library. You may convey such object 69 | code under terms of your choice, provided that, if the incorporated 70 | material is not limited to numerical parameters, data structure 71 | layouts and accessors, or small macros, inline functions and templates 72 | (ten or fewer lines in length), you do both of the following: 73 | 74 | a) Give prominent notice with each copy of the object code that the 75 | Library is used in it and that the Library and its use are 76 | covered by this License. 77 | 78 | b) Accompany the object code with a copy of the GNU GPL and this license 79 | document. 80 | 81 | 4. Combined Works. 
82 | 83 | You may convey a Combined Work under terms of your choice that, 84 | taken together, effectively do not restrict modification of the 85 | portions of the Library contained in the Combined Work and reverse 86 | engineering for debugging such modifications, if you also do each of 87 | the following: 88 | 89 | a) Give prominent notice with each copy of the Combined Work that 90 | the Library is used in it and that the Library and its use are 91 | covered by this License. 92 | 93 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 94 | document. 95 | 96 | c) For a Combined Work that displays copyright notices during 97 | execution, include the copyright notice for the Library among 98 | these notices, as well as a reference directing the user to the 99 | copies of the GNU GPL and this license document. 100 | 101 | d) Do one of the following: 102 | 103 | 0) Convey the Minimal Corresponding Source under the terms of this 104 | License, and the Corresponding Application Code in a form 105 | suitable for, and under terms that permit, the user to 106 | recombine or relink the Application with a modified version of 107 | the Linked Version to produce a modified Combined Work, in the 108 | manner specified by section 6 of the GNU GPL for conveying 109 | Corresponding Source. 110 | 111 | 1) Use a suitable shared library mechanism for linking with the 112 | Library. A suitable mechanism is one that (a) uses at run time 113 | a copy of the Library already present on the user's computer 114 | system, and (b) will operate properly with a modified version 115 | of the Library that is interface-compatible with the Linked 116 | Version. 
117 | 118 | e) Provide Installation Information, but only if you would otherwise 119 | be required to provide such information under section 6 of the 120 | GNU GPL, and only to the extent that such information is 121 | necessary to install and execute a modified version of the 122 | Combined Work produced by recombining or relinking the 123 | Application with a modified version of the Linked Version. (If 124 | you use option 4d0, the Installation Information must accompany 125 | the Minimal Corresponding Source and Corresponding Application 126 | Code. If you use option 4d1, you must provide the Installation 127 | Information in the manner specified by section 6 of the GNU GPL 128 | for conveying Corresponding Source.) 129 | 130 | 5. Combined Libraries. 131 | 132 | You may place library facilities that are a work based on the 133 | Library side by side in a single library together with other library 134 | facilities that are not Applications and are not covered by this 135 | License, and convey such a combined library under terms of your 136 | choice, if you do both of the following: 137 | 138 | a) Accompany the combined library with a copy of the same work based 139 | on the Library, uncombined with any other library facilities, 140 | conveyed under the terms of this License. 141 | 142 | b) Give prominent notice with the combined library that part of it 143 | is a work based on the Library, and explaining where to find the 144 | accompanying uncombined form of the same work. 145 | 146 | 6. Revised Versions of the GNU Lesser General Public License. 147 | 148 | The Free Software Foundation may publish revised and/or new versions 149 | of the GNU Lesser General Public License from time to time. Such new 150 | versions will be similar in spirit to the present version, but may 151 | differ in detail to address new problems or concerns. 152 | 153 | Each version is given a distinguishing version number. 
If the 154 | Library as you received it specifies that a certain numbered version 155 | of the GNU Lesser General Public License "or any later version" 156 | applies to it, you have the option of following the terms and 157 | conditions either of that published version or of any later version 158 | published by the Free Software Foundation. If the Library as you 159 | received it does not specify a version number of the GNU Lesser 160 | General Public License, you may choose any version of the GNU Lesser 161 | General Public License ever published by the Free Software Foundation. 162 | 163 | If the Library as you received it specifies that a proxy can decide 164 | whether future versions of the GNU Lesser General Public License shall 165 | apply, that proxy's public statement of acceptance of any version is 166 | permanent authorization for you to choose that version for the 167 | Library. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include mongotor *.py 2 | recursive-include mongotor/orm *.py 3 | include README.md 4 | include requirements.txt -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .SILENT: 2 | 3 | all: install_deps test 4 | 5 | filename=mongotor-`python -c 'import mongotor;print mongotor.version'`.tar.gz 6 | 7 | export PYTHONPATH:= ${PWD} 8 | 9 | MONGOD=mongod 10 | MONGO_DATA=`pwd`/data 11 | 12 | mongo-start-node1: 13 | ${MONGOD} --port=27027 --dbpath=${MONGO_DATA}/db/node1 --replSet=mongotor --logpath=${MONGO_DATA}/log/node1.log --fork --smallfiles --oplogSize 30 --nojournal 14 | 15 | mongo-start-node2: 16 | ${MONGOD} --port=27028 --dbpath=${MONGO_DATA}/db/node2 --replSet=mongotor --logpath=${MONGO_DATA}/log/node2.log --fork --smallfiles --oplogSize 30 --nojournal 17 | 18 | 
mongo-start-arbiter: 19 | ${MONGOD} --port=27029 --dbpath=${MONGO_DATA}/db/arbiter --replSet=mongotor --logpath=${MONGO_DATA}/log/arbiter.log --fork --smallfiles --oplogSize 30 --nojournal 20 | 21 | mongo-restart: mongo-kill mongo-start 22 | 23 | mongo-start: 24 | mkdir -p ${MONGO_DATA}/db/node1 ${MONGO_DATA}/db/node2 ${MONGO_DATA}/db/arbiter ${MONGO_DATA}/log 25 | 26 | echo "starting mongo instance" 27 | make mongo-start-node1 28 | make mongo-start-node2 29 | make mongo-start-arbiter 30 | echo 'Waiting 10s for `mongod`s to start' 31 | sleep 10 32 | 33 | mongo-kill-node1: 34 | ps -eo pid,args | grep 27027 | grep ${MONGO_DATA} | grep -v grep | awk '{print $$1}' | xargs kill 2> /dev/null | true 35 | 36 | mongo-kill-node2: 37 | ps -eo pid,args | grep 27028 | grep ${MONGO_DATA} | grep -v grep | awk '{print $$1}' | xargs kill 2> /dev/null | true 38 | 39 | mongo-kill-arbiter: 40 | ps -eo pid,args | grep 27029 | grep ${MONGO_DATA} | grep -v grep | awk '{print $$1}' | xargs kill 2> /dev/null | true 41 | 42 | mongo-kill: 43 | echo "killing mongo instance" 44 | make mongo-kill-node1 45 | make mongo-kill-node2 46 | make mongo-kill-arbiter 47 | echo 'Waiting 1s for `mongod`s to stop' 48 | sleep 1 49 | 50 | mongo-config: 51 | mongo localhost:27027 < config-replicaset.js 52 | echo 'Waiting 40s to let replicaset elect a primary' 53 | sleep 40 54 | 55 | install_deps: 56 | pip install -r requirements-dev.txt 57 | 58 | test: clean 59 | nosetests 60 | 61 | clean: 62 | echo "Cleaning up build and *.pyc files..." 63 | find . -name '*.pyc' -exec rm -rf {} \; 64 | 65 | release: clean test publish 66 | printf "Exporting to $(filename)... " 67 | tar czf $(filename) mongotor setup.py README.md 68 | echo "DONE!" 69 | 70 | publish: 71 | python setup.py sdist register upload 72 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # What is MongoTor ? 
2 | 3 | (MONGOdb + TORnado) is an asynchronous toolkit for working with ``mongodb`` inside a ``tornado`` app. Mongotor has a pure implementation of python + tornado and only depends on tornado and bson (provided by pymongo) 4 | 5 | [![Build Status](https://travis-ci.org/marcelnicolay/mongotor.svg?branch=master)](https://travis-ci.org/marcelnicolay/mongotor) 6 | 7 | ## Features 8 | 9 | MongoTor is still an alpha project, but already implements the following features: 10 | 11 | * Support for ``replica sets`` 12 | * Automatic ``reconnection`` 13 | * Connection ``pooling`` 14 | * Support for running database commands (``count``, ``sum``, ``mapreduce`` etc...) 15 | * ``ORM``-like mapping of documents and fields 16 | * ``Signals`` for pre_save, post_save, pre_remove, post_remove, pre_update and post_update 17 | * 100% code coverage by tests 18 | 19 | The next steps are to provide support for: 20 | 21 | * sharding 22 | * authentication 23 | * nearest preference in replica sets 24 | * gridfs 25 | * all python versions (2.5, 2.6, 2.7, 3.2 and PyPy), only python 2.7 is tested now 26 | 27 | ## Documentation 28 | 29 | Visit our online [documentation](http://mongotor.readthedocs.org/) for more examples 30 | 31 | ## Why not pymongo ? 32 | 33 | [PyMongo](http://api.mongodb.org/python/current/) is the recommended way to work with MongoDB in python, but it isn't asynchronous and doesn't run inside tornado's ioloop. If you use pymongo you won't take advantage of tornado. 34 | 35 | ## Why not motor ? 36 | 37 | [Motor](http://emptysquare.net/motor/) wraps PyMongo and makes it async with greenlet. It is a great project, but it uses greenlet. If you can use greenlets, why not use gevent instead of tornado? PyMongo already works with gevent and you don't need to think about writing all of your code with callbacks. 
My point is, if you are using a very powerful non-blocking web server with pure python code, you'll probably want to work with a pure tornado driver for accessing mongo, obviously, since this module has full support for mongodb features, like pymongo. 38 | 39 | ## Why not asyncmongo ? 40 | 41 | [AsyncMongo](https://github.com/bitly/asyncmongo) is an asynchronous library for accessing mongodb with tornado.ioloop, but it doesn't implement replica sets and other mongodb features. 42 | 43 | Besides, this project is not moving very well, or rather, very fast. There are a lot of issues and pull requests that aren't being looked at. 44 | 45 | I am very thankful to asyncmongo, I worked with it in some projects and it has served as inspiration, but now, I am very excited to write my own library, more flexible, fast, secure and one that will move faster. 46 | 47 | 48 | ## Installing 49 | 50 | ```bash 51 | pip install mongotor 52 | ``` 53 | 54 | ## Simple usage 55 | 56 | ```python 57 | import tornado.web 58 | from tornado import gen 59 | from mongotor.database import Database 60 | from bson import ObjectId 61 | 62 | class Handler(tornado.web.RequestHandler): 63 | 64 | def initialize(self): 65 | self.db = Database.init('localhost:27017', 'mongotor_test') 66 | 67 | @tornado.web.asynchronous 68 | @gen.engine 69 | def get(self): 70 | user = {'_id': ObjectId(), 'name': 'User Name'} 71 | yield gen.Task(self.db.user.insert, user) 72 | 73 | yield gen.Task(self.db.user.update, user['_id'], {"$set": {'name': 'New User Name'}}) 74 | 75 | user_found = yield gen.Task(self.db.user.find_one, user['_id']) 76 | assert user_found['name'] == 'New User Name' 77 | 78 | yield gen.Task(self.db.user.remove, user['_id']) 79 | ``` 80 | 81 | ## Support to ReplicaSet 82 | 83 | ```python 84 | import tornado.web 85 | from tornado import gen 86 | from mongotor.database import Database 87 | from mongotor.node import ReadPreference 88 | from bson import ObjectId 89 | import time 90 | 91 | 92 | class 
Handler(tornado.web.RequestHandler): 93 | 94 | def initialize(self): 95 | # configuring a replica set 96 | self.db = db = Database.init(["localhost:27027", "localhost:27028"], dbname='mongotor_test', 97 | read_preference=ReadPreference.SECONDARY_PREFERRED) 98 | 99 | @tornado.web.asynchronous 100 | @gen.engine 101 | def get(self): 102 | user = {'_id': ObjectId()} 103 | 104 | # write on primary 105 | yield gen.Task(self.db.user.insert, user) 106 | 107 | # wait for replication 108 | time.sleep(2) 109 | 110 | # read from secondary 111 | user_found = yield gen.Task(self.db.user.find_one, user['_id']) 112 | assert user_found == user 113 | ``` 114 | 115 | ## Using ORM 116 | 117 | ```python 118 | from mongotor.orm import collection, field 119 | from mongotor.database import Database 120 | 121 | from datetime import datetime 122 | import tornado.web 123 | from tornado import gen 124 | 125 | # A connection to the MongoDB database needs to be 126 | # established before performing operations 127 | Database.init(['localhost:27017','localhost:27018'], 'mongotor_test') 128 | 129 | class User(collection.Collection): 130 | __collection__ = "user" 131 | 132 | _id = field.ObjectIdField() 133 | name = field.StringField() 134 | active = field.BooleanField() 135 | created = field.DateTimeField() 136 | 137 | class Handler(tornado.web.RequestHandler): 138 | 139 | @tornado.web.asynchronous 140 | @gen.engine 141 | def get(self): 142 | user = User() 143 | user.name = "User name" 144 | user.active = True 145 | user.created = datetime.now() 146 | 147 | yield gen.Task(user.save) 148 | 149 | # update date 150 | user.name = "New name" 151 | yield gen.Task(user.update) 152 | 153 | # find one object 154 | user_found = yield gen.Task(User.objects.find_one, user._id) 155 | 156 | # find many objects 157 | new_user = User() 158 | new_user.name = "new user name" 159 | new_user.active = True 160 | new_user.created = datetime.now() 161 | 162 | users_actives = yield gen.Task(User.objects.find, 
{'active': True}) 163 | 164 | users_actives[0].active = False 165 | yield gen.Task(users_actives[0].save) 166 | 167 | # remove object 168 | yield gen.Task(user_found.remove) 169 | ``` 170 | 171 | ## Contributing 172 | 173 | Write tests for your new feature and send a pull request. 174 | 175 | To run the mongotor tests, install mongodb and do: 176 | 177 | ```bash 178 | # create a new virtualenv 179 | mkvirtualenv mongotor 180 | 181 | # install dev requirements 182 | pip install -r requirements-dev.txt 183 | 184 | # start mongo 185 | make mongo-start 186 | 187 | # configure replicaset 188 | make mongo-config 189 | 190 | # run tests 191 | make test 192 | ``` 193 | 194 | ## Issues 195 | 196 | Please report any issues via [GitHub issues](https://github.com/marcelnicolay/mongotor/issues) 197 | -------------------------------------------------------------------------------- /config-replicaset.js: -------------------------------------------------------------------------------- 1 | // run only on the primary 2 | var config = { 3 | '_id': 'mongotor', 4 | 'members': [ 5 | {'_id': 0, 'host': 'localhost:27029', arbiterOnly: true}, // mongo-arbiter 6 | {'_id': 1, 'host': 'localhost:27027', 'priority': 2}, // mongo-01 7 | {'_id': 2, 'host': 'localhost:27028'} // mongo-02 8 | ] 9 | }; 10 | rs.initiate(config) -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # Internal variables. 
11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 16 | 17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 18 | 19 | help: 20 | @echo "Please use \`make ' where is one of" 21 | @echo " html to make standalone HTML files" 22 | @echo " dirhtml to make HTML files named index.html in directories" 23 | @echo " singlehtml to make a single large HTML file" 24 | @echo " pickle to make pickle files" 25 | @echo " json to make JSON files" 26 | @echo " htmlhelp to make HTML files and a HTML help project" 27 | @echo " qthelp to make HTML files and a qthelp project" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 31 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 32 | @echo " text to make text files" 33 | @echo " man to make manual pages" 34 | @echo " texinfo to make Texinfo files" 35 | @echo " info to make Texinfo files and run them through makeinfo" 36 | @echo " gettext to make PO message catalogs" 37 | @echo " changes to make an overview of all changed/added/deprecated items" 38 | @echo " linkcheck to check all external links for integrity" 39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 40 | 41 | clean: 42 | -rm -rf $(BUILDDIR)/* 43 | 44 | html: 45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 48 | 49 | dirhtml: 50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 51 | @echo 52 | @echo "Build finished. 
The HTML pages are in $(BUILDDIR)/dirhtml." 53 | 54 | singlehtml: 55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 56 | @echo 57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 58 | 59 | pickle: 60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 61 | @echo 62 | @echo "Build finished; now you can process the pickle files." 63 | 64 | json: 65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 66 | @echo 67 | @echo "Build finished; now you can process the JSON files." 68 | 69 | htmlhelp: 70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 71 | @echo 72 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 73 | ".hhp project file in $(BUILDDIR)/htmlhelp." 74 | 75 | qthelp: 76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 77 | @echo 78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/MongoTor.qhcp" 81 | @echo "To view the help file:" 82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/MongoTor.qhc" 83 | 84 | devhelp: 85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 86 | @echo 87 | @echo "Build finished." 88 | @echo "To view the help file:" 89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/MongoTor" 90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/MongoTor" 91 | @echo "# devhelp" 92 | 93 | epub: 94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 95 | @echo 96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 97 | 98 | latex: 99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 100 | @echo 101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 103 | "(use \`make latexpdf' here to do that automatically)." 
104 | 105 | latexpdf: 106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 107 | @echo "Running LaTeX files through pdflatex..." 108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 110 | 111 | text: 112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 113 | @echo 114 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 115 | 116 | man: 117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 118 | @echo 119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 120 | 121 | texinfo: 122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 123 | @echo 124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 125 | @echo "Run \`make' in that directory to run these through makeinfo" \ 126 | "(use \`make info' here to do that automatically)." 127 | 128 | info: 129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 130 | @echo "Running Texinfo files through makeinfo..." 131 | make -C $(BUILDDIR)/texinfo info 132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 133 | 134 | gettext: 135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 136 | @echo 137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 138 | 139 | changes: 140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 141 | @echo 142 | @echo "The overview file is in $(BUILDDIR)/changes." 143 | 144 | linkcheck: 145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 146 | @echo 147 | @echo "Link check complete; look for any errors in the above output " \ 148 | "or in $(BUILDDIR)/linkcheck/output.txt." 149 | 150 | doctest: 151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 152 | @echo "Testing of doctests in the sources finished, look at the " \ 153 | "results in $(BUILDDIR)/doctest/output.txt." 
154 | -------------------------------------------------------------------------------- /docs/source/api/collection.rst: -------------------------------------------------------------------------------- 1 | :mod:`collection` -- A mongo collection 2 | ============================================ 3 | 4 | .. automodule:: mongotor.client 5 | :synopsis: a mongo collection 6 | 7 | .. autoclass:: mongotor.client.Client 8 | 9 | .. automethod:: insert 10 | .. automethod:: remove 11 | .. automethod:: update 12 | .. automethod:: find_one 13 | .. automethod:: find 14 | .. automethod:: count 15 | .. automethod:: distinct 16 | .. automethod:: aggregate 17 | .. automethod:: group 18 | 19 | -------------------------------------------------------------------------------- /docs/source/api/database.rst: -------------------------------------------------------------------------------- 1 | :mod:`database` -- Database level operations 2 | ============================================ 3 | 4 | .. automodule:: mongotor.database 5 | :synopsis: Database level operations 6 | 7 | .. autoclass:: mongotor.database.Database 8 | 9 | .. automethod:: connect 10 | .. automethod:: disconnect 11 | .. automethod:: command 12 | -------------------------------------------------------------------------------- /docs/source/api/errors.rst: -------------------------------------------------------------------------------- 1 | :mod:`errors` -- Mongotor errors 2 | ============================================ 3 | 4 | .. automodule:: mongotor.errors 5 | :synopsis: Mongotor errors 6 | 7 | .. autoclass:: mongotor.errors.Error 8 | .. autoclass:: mongotor.errors.InterfaceError 9 | .. autoclass:: mongotor.errors.TooManyConnections 10 | .. autoclass:: mongotor.errors.InvalidOperationError 11 | .. 
autoclass:: mongotor.errors.IntegrityError -------------------------------------------------------------------------------- /docs/source/api/index.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | =========================================== 3 | 4 | .. automodule:: mongotor 5 | :synopsis: An asynchronous driver and toolkit for accessing MongoDB with Tornado 6 | 7 | .. autodata:: version 8 | .. data:: Database 9 | 10 | Alias for :class:`mongotor.database.Database`. 11 | 12 | Sub-modules: 13 | 14 | .. toctree:: 15 | :maxdepth: 2 16 | 17 | connection 18 | database 19 | collection 20 | orm 21 | errors 22 | message 23 | pool 24 | replica_set 25 | -------------------------------------------------------------------------------- /docs/source/api/orm.rst: -------------------------------------------------------------------------------- 1 | :mod:`orm` -- Map a mongo collection into a python class 2 | ============================================ 3 | 4 | .. automodule:: mongotor.orm.collection 5 | :synopsis: map a mongo collection to a python class 6 | 7 | .. autoclass:: mongotor.orm.collection.Collection 8 | 9 | .. automethod:: save 10 | .. automethod:: remove 11 | .. automethod:: update 12 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # MongoTor documentation build configuration file, created by 4 | # sphinx-quickstart on Sun Sep 16 22:34:17 2012. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 
13 | 14 | import sys, os 15 | sys.path.append(os.path.abspath('../..')) 16 | 17 | import mongotor 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | #sys.path.insert(0, os.path.abspath('.')) 22 | 23 | # -- General configuration ----------------------------------------------------- 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | #needs_sphinx = '1.0' 27 | 28 | # Add any Sphinx extension module names here, as strings. They can be extensions 29 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 30 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage'] 31 | 32 | # Add any paths that contain templates here, relative to this directory. 33 | templates_path = ['_templates'] 34 | 35 | # The suffix of source filenames. 36 | source_suffix = '.rst' 37 | 38 | # The encoding of source files. 39 | #source_encoding = 'utf-8-sig' 40 | 41 | # The master toctree document. 42 | master_doc = 'index' 43 | 44 | # General information about the project. 45 | project = u'MongoTor' 46 | copyright = u'2012, Marcel Nicolay' 47 | 48 | # The version info for the project you're documenting, acts as replacement for 49 | # |version| and |release|, also used in various other places throughout the 50 | # built documents. 51 | # 52 | # The short X.Y version. 53 | version = mongotor.version 54 | # The full version, including alpha/beta/rc tags. 55 | release = mongotor.version 56 | 57 | # The language for content autogenerated by Sphinx. Refer to documentation 58 | # for a list of supported languages. 
59 | #language = None 60 | 61 | # There are two options for replacing |today|: either, you set today to some 62 | # non-false value, then it is used: 63 | #today = '' 64 | # Else, today_fmt is used as the format for a strftime call. 65 | #today_fmt = '%B %d, %Y' 66 | 67 | # List of patterns, relative to source directory, that match files and 68 | # directories to ignore when looking for source files. 69 | exclude_patterns = [] 70 | 71 | # The reST default role (used for this markup: `text`) to use for all documents. 72 | #default_role = None 73 | 74 | # If true, '()' will be appended to :func: etc. cross-reference text. 75 | #add_function_parentheses = True 76 | 77 | # If true, the current module name will be prepended to all description 78 | # unit titles (such as .. function::). 79 | #add_module_names = True 80 | 81 | # If true, sectionauthor and moduleauthor directives will be shown in the 82 | # output. They are ignored by default. 83 | #show_authors = False 84 | 85 | # The name of the Pygments (syntax highlighting) style to use. 86 | pygments_style = 'colorful' 87 | 88 | # A list of ignored prefixes for module index sorting. 89 | #modindex_common_prefix = [] 90 | 91 | 92 | # -- Options for HTML output --------------------------------------------------- 93 | 94 | # The theme to use for HTML and HTML Help pages. See the documentation for 95 | # a list of builtin themes. 96 | html_theme = 'nature' 97 | 98 | # Theme options are theme-specific and customize the look and feel of a theme 99 | # further. For a list of options available for each theme, see the 100 | # documentation. 101 | #html_theme_options = {} 102 | 103 | # Add any paths that contain custom themes here, relative to this directory. 104 | #html_theme_path = [] 105 | 106 | # The name for this set of Sphinx documents. If None, it defaults to 107 | # " v documentation". 108 | #html_title = None 109 | 110 | # A shorter title for the navigation bar. Default is the same as html_title. 
111 | #html_short_title = None 112 | 113 | # The name of an image file (relative to this directory) to place at the top 114 | # of the sidebar. 115 | #html_logo = None 116 | 117 | # The name of an image file (within the static path) to use as favicon of the 118 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 119 | # pixels large. 120 | #html_favicon = None 121 | 122 | # Add any paths that contain custom static files (such as style sheets) here, 123 | # relative to this directory. They are copied after the builtin static files, 124 | # so a file named "default.css" will overwrite the builtin "default.css". 125 | html_static_path = ['_static'] 126 | 127 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 128 | # using the given strftime format. 129 | #html_last_updated_fmt = '%b %d, %Y' 130 | 131 | # If true, SmartyPants will be used to convert quotes and dashes to 132 | # typographically correct entities. 133 | #html_use_smartypants = True 134 | 135 | # Custom sidebar templates, maps document names to template names. 136 | #html_sidebars = {} 137 | 138 | # Additional templates that should be rendered to pages, maps page names to 139 | # template names. 140 | #html_additional_pages = {} 141 | 142 | # If false, no module index is generated. 143 | #html_domain_indices = True 144 | 145 | # If false, no index is generated. 146 | #html_use_index = True 147 | 148 | # If true, the index is split into individual pages for each letter. 149 | #html_split_index = False 150 | 151 | # If true, links to the reST sources are added to the pages. 152 | #html_show_sourcelink = True 153 | 154 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 155 | #html_show_sphinx = True 156 | 157 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
158 | #html_show_copyright = True 159 | 160 | # If true, an OpenSearch description file will be output, and all pages will 161 | # contain a tag referring to it. The value of this option must be the 162 | # base URL from which the finished HTML is served. 163 | #html_use_opensearch = '' 164 | 165 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 166 | #html_file_suffix = None 167 | 168 | # Output file base name for HTML help builder. 169 | htmlhelp_basename = 'MongoTordoc' 170 | 171 | 172 | # -- Options for LaTeX output -------------------------------------------------- 173 | 174 | latex_elements = { 175 | # The paper size ('letterpaper' or 'a4paper'). 176 | #'papersize': 'letterpaper', 177 | 178 | # The font size ('10pt', '11pt' or '12pt'). 179 | #'pointsize': '10pt', 180 | 181 | # Additional stuff for the LaTeX preamble. 182 | #'preamble': '', 183 | } 184 | 185 | # Grouping the document tree into LaTeX files. List of tuples 186 | # (source start file, target name, title, author, documentclass [howto/manual]). 187 | latex_documents = [ 188 | ('index', 'MongoTor.tex', u'MongoTor Documentation', 189 | u'Marcel Nicolat', 'manual'), 190 | ] 191 | 192 | # The name of an image file (relative to this directory) to place at the top of 193 | # the title page. 194 | #latex_logo = None 195 | 196 | # For "manual" documents, if this is true, then toplevel headings are parts, 197 | # not chapters. 198 | #latex_use_parts = False 199 | 200 | # If true, show page references after internal links. 201 | #latex_show_pagerefs = False 202 | 203 | # If true, show URL addresses after external links. 204 | #latex_show_urls = False 205 | 206 | # Documents to append as an appendix to all manuals. 207 | #latex_appendices = [] 208 | 209 | # If false, no module index is generated. 210 | #latex_domain_indices = True 211 | 212 | 213 | # -- Options for manual page output -------------------------------------------- 214 | 215 | # One entry per manual page. 
List of tuples 216 | # (source start file, name, description, authors, manual section). 217 | man_pages = [ 218 | ('index', 'mongotor', u'MongoTor Documentation', 219 | [u'Marcel Nicolat'], 1) 220 | ] 221 | 222 | # If true, show URL addresses after external links. 223 | #man_show_urls = False 224 | 225 | 226 | # -- Options for Texinfo output ------------------------------------------------ 227 | 228 | # Grouping the document tree into Texinfo files. List of tuples 229 | # (source start file, target name, title, author, 230 | # dir menu entry, description, category) 231 | texinfo_documents = [ 232 | ('index', 'MongoTor', u'MongoTor Documentation', 233 | u'Marcel Nicolat', 'MongoTor', 'One line description of project.', 234 | 'Miscellaneous'), 235 | ] 236 | 237 | # Documents to append as an appendix to all manuals. 238 | #texinfo_appendices = [] 239 | 240 | # If false, no module index is generated. 241 | #texinfo_domain_indices = True 242 | 243 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 244 | #texinfo_show_urls = 'footnote' 245 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | ========================================== 2 | Welcome to Mongotor's documentation! 3 | ========================================== 4 | 5 | (MONGO + TORnado) is an asynchronous toolkit for accessing mongo with tornado. 6 | 7 | Features 8 | ======== 9 | 10 | * ORM like to map documents and fields :py:mod:`~mongotor.orm` 11 | * Advanced connection management (replica sets, slave okay) 12 | * Automatic reconnection 13 | * Connection pooling 14 | * Support for running database commands (find, find_one, count, sum, mapreduce etc...) 15 | * Signals for pre_save, post_save, pre_remove, post_remove, pre_update and post_update 16 | * 100% of code coverage by test 17 | 18 | Contents: 19 | ========= 20 | 21 | .. 
toctree:: 22 | :maxdepth: 2 23 | 24 | installation 25 | tutorial 26 | api/index 27 | 28 | 29 | Contributing to the project 30 | =========================== 31 | 32 | `List of contributors `_ 33 | 34 | Source Code 35 | ----------- 36 | 37 | The source is available on `GitHub `_ and contributions are welcome. 38 | 39 | Issues 40 | ------ 41 | 42 | Please report any issues via `github issues `_ 43 | 44 | 45 | Indices and tables 46 | ================== 47 | 48 | * :ref:`genindex` 49 | * :ref:`modindex` 50 | * :ref:`search` 51 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | 5 | 6 | Supported Installation Methods 7 | ------------------------------- 8 | 9 | MongoTor supports installation using standard Python "distutils" or 10 | "setuptools" methodologies. An overview of potential setups is as follows: 11 | 12 | Install via easy_install or pip 13 | ------------------------------- 14 | 15 | When ``easy_install`` or ``pip`` is available, the distribution can be 16 | downloaded from Pypi and installed in one step:: 17 | 18 | easy_install mongotor 19 | 20 | Or with pip:: 21 | 22 | pip install mongotor 23 | 24 | This command will download the latest version of MongoTor from the `Python 25 | Cheese Shop `_ and install it to your system. 26 | 27 | Installing using setup.py 28 | ---------------------------------- 29 | 30 | Otherwise, you can install from the distribution using the ``setup.py`` script:: 31 | 32 | python setup.py install 33 | 34 | Checking the Installed MongoTor Version 35 | --------------------------------------------- 36 | 37 | The version of MongoTor installed can be checked from your 38 | Python prompt like this: 39 | 40 | .. 
sourcecode:: python 41 | 42 | >>> import mongotor 43 | >>> mongotor.version # doctest: +SKIP 44 | 45 | Requirements 46 | ------------ 47 | 48 | The following three python libraries are required. 49 | 50 | * `pymongo `_ version 1.9+ for bson library 51 | * `tornado `_ 52 | 53 | .. note:: 54 | The above requirements are automatically managed when installed using 55 | any of the supported installation methods -------------------------------------------------------------------------------- /docs/source/tutorial.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Tutorial 3 | ======== 4 | 5 | This tutorial is meant to introduce you to the basic concepts of using 6 | MongoTor using an example application. The example application is a 7 | simple user database where people could fill in their information and 8 | register themselves. 9 | 10 | 11 | Getting started 12 | =============== 13 | 14 | * Ensure that an instance of MongoDB is running in an accessible 15 | location. This tutorial assumes that such an instance is running on the 16 | localhost. 17 | 18 | 19 | Defining our Collection 20 | ======================= 21 | 22 | A `MongoDB Collection `_ 23 | is the rough equivalent of a table in a relational database. Though 24 | MongoDB collections are schemaless documents in them usually have a 25 | similar structure. This "similar structure" could be defined as a 26 | :class:`~mongotor.orm.collection.Collection`. 
27 | 28 | In this example application we define the structure of Users collection 29 | with the required :py:mod:`~mongotor.orm.field` (s) :: 30 | 31 | 32 | class User(Collection): 33 | __collection__ = "user" 34 | 35 | _id = ObjectIdField() 36 | name = StringField() 37 | active = BooleanField() 38 | created = DateTimeField() 39 | 40 | 41 | Connecting to the Database 42 | ========================== 43 | 44 | A connection to the MongoDB database needs to be established before 45 | MongoTor can manage collections or do any other operations. A 46 | connection is established using a :class:`~mongotor.database.Database` 47 | object :: 48 | 49 | from mongotor.database import Database 50 | Database.connect('localhost:27017', 'test_db') 51 | 52 | 53 | Creating a new document 54 | ======================= 55 | 56 | A new document can be created in the collection by creating an instance of 57 | the Collection, assigning values to the fields and then calling the save 58 | method :: 59 | 60 | new_user = User() 61 | new_user.name = "New user" 62 | new_user.active = True 63 | new_user.save() 64 | 65 | 66 | A new instance would also be created from a dictionary (for example from a 67 | Form handler in your web application):: 68 | 69 | >>> new_user = User.create({'name': 'Some user name'}) 70 | >>> new_user.name 71 | u'Some user name' 72 | >>> new_user.save() 73 | 74 | 75 | Using ORM in a TornadoHandler 76 | ===================================== 77 | 78 | :: 79 | 80 | from mongotor.orm import Collection 81 | from mongotor.orm.field import StringField, ObjectIdField, BooleanField, DateTimeField 82 | from mongotor.database import Database 83 | 84 | from datetime import datetime 85 | import tornado.web 86 | from tornado import gen 87 | 88 | # A connection to the MongoDB database needs to be established before perform operations 89 | # A connection is stabilished using a Databse object 90 | Database.connect(['localhost:27017'], 'asyncmongo_test') 91 | 92 | class User(Collection): 93 | 
94 | __collection__ = "user" 95 | 96 | _id = ObjectIdField() 97 | name = StringField() 98 | active = BooleanField() 99 | created = DateTimeField() 100 | 101 | class Handler(tornado.web.RequestHandler): 102 | 103 | @tornado.web.asynchronous 104 | @gen.engine 105 | def get(self): 106 | user = User() 107 | user.name = "User name" 108 | user.active = True 109 | user.created = datetime.now() 110 | 111 | yield gen.Task(user.save) 112 | 113 | # update date 114 | user.name = "New name" 115 | yield gen.Task(user.update) 116 | 117 | # find one object 118 | user_found = yield gen.Task(User.objects.find_one, user._id) 119 | 120 | # find many objects 121 | new_user = User() 122 | new_user.name = "new user name" 123 | new_user.user.active = True 124 | new_user.created = datetime.now() 125 | 126 | users_actives = yield gen.Task(User.objects.find, {'active': True}) 127 | 128 | users_actives[0].active = False 129 | yield gen.Task(users_actives[0].save) 130 | 131 | # remove object 132 | yield gen.Task(user_found.remove) 133 | 134 | 135 | Using Client in a TornadoHandler 136 | ================================= 137 | 138 | MongoTor supports :py:class:`~mongotor.client.Client` for direct access to mongo, without orm layer 139 | 140 | :: 141 | 142 | from mongotor.database import Database 143 | from bson import ObjectId 144 | from tornado import gen, web 145 | 146 | class Handler(web.RequestHandler): 147 | 148 | def initialize(self): 149 | self.db = Database.connect(['localhost:27017'], 'asyncmongo_test') 150 | 151 | @web.asynchronous 152 | @gen.engine 153 | def get(self): 154 | user = {'_id': ObjectId, 'name': 'User Name'} 155 | yield gen.Task(self.db.user.insert, user) 156 | 157 | yield gen.Task(self.db.user.update, user['_id'], {"$set": {'name': 'New User Name'}}) 158 | 159 | user_found = yield gen.Task(self.db.user.find_one, user['_id']) 160 | assert user_found['name'] == 'New User Name' 161 | 162 | yield gen.Task(self.db.user.remove, user['_id']) 163 | 164 | Using Signals 165 | 
============= 166 | 167 | MongoTor supports :py:mod:`~mongotor.orm.signals` for pre_save, post_save, 168 | pre_remove, post_remove, pre_update, post_update to which receivers could bind to. 169 | 170 | :: 171 | 172 | from mongotor.orm import collection, field 173 | from mongotor.orm.signal import pre_save, receiver 174 | from mongotor.database import Databas 175 | from bson import ObjectId 176 | 177 | import tornado.web 178 | from tornado import gen 179 | 180 | class User(collection.Collection): 181 | __collection__ = "user" 182 | 183 | _id = field.ObjectIdField() 184 | name = field.StringField() 185 | active = field.BooleanField() 186 | created = field.DateTimeField() 187 | 188 | @receiver(pre_save, User) 189 | def set_object_id(sender, instance): 190 | if not instance._id: 191 | instance._id = ObjectId() 192 | 193 | 194 | class Handler(tornado.web.RequestHandler): 195 | 196 | @tornado.web.asynchronous 197 | @gen.engine 198 | def get(self): 199 | user = User() 200 | user.name = "User name" 201 | user.active = True 202 | user.created = datetime.now() 203 | 204 | yield gen.Task(user.save) -------------------------------------------------------------------------------- /mongotor/__init__.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # 3 | # Copyright (C) <2012> Marcel Nicolay 4 | # 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU Lesser General Public License as 7 | # published by the Free Software Foundation, either version 3 of the 8 | # License, or (at your option) any later version. 9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU Lesser General Public License for more details. 
class Client(object):
    """Asynchronous facade over a single MongoDB collection.

    Wraps the low-level wire-protocol builders from
    :mod:`mongotor.message` and dispatches them on a connection obtained
    from the owning :class:`~mongotor.database.Database`.
    """

    def __init__(self, database, collection):
        # Keep both the bare collection name and the fully qualified
        # "db.collection" namespace used on the wire.
        self._database = database
        self._collection = collection
        self._collection_name = database.get_collection_name(collection)

    @gen.engine
    def insert(self, doc_or_docs, safe=True, check_keys=True, callback=None):
        """Insert a document

        :Parameters:
        - `doc_or_docs`: a document or list of documents to be
          inserted
        - `safe` (optional): check that the insert succeeded?
        - `check_keys` (optional): check if keys start with '$' or
          contain '.', raising :class:`~pymongo.errors.InvalidName`
          in either case
        - `callback` (optional): called with ``(response, error)`` when
          the insert is finished
        """
        if isinstance(doc_or_docs, dict):
            doc_or_docs = [doc_or_docs]

        assert isinstance(doc_or_docs, list)

        message_insert = message.insert(self._collection_name, doc_or_docs,
            check_keys, safe, {})

        log.debug("mongo: db.{0}.insert({1})".format(self._collection_name, doc_or_docs))

        # writes always go to the primary node
        node = yield gen.Task(self._database.get_node, ReadPreference.PRIMARY)
        connection = yield gen.Task(node.connection)

        response, error = yield gen.Task(connection.send_message,
            message_insert, safe)

        if callback:
            callback((response, error))

    @gen.engine
    def remove(self, spec_or_id=None, safe=True, callback=None):
        """remove a document

        :Parameters:
        - `spec_or_id`: a query, a document id, or ``None`` to remove
          every document in the collection
        - `safe` (optional): safe remove operation
        - `callback` (optional): called with ``(response, error)`` when
          the remove is finished
        """
        # BUG FIX: the default used to be the mutable literal ``{}``,
        # shared across all calls. Normalize ``None`` to an empty query
        # and a bare id to an "_id" query.
        if spec_or_id is None:
            spec_or_id = {}
        elif not isinstance(spec_or_id, dict):
            spec_or_id = {"_id": spec_or_id}

        assert isinstance(spec_or_id, dict)

        message_delete = message.delete(self._collection_name, spec_or_id,
            safe, {})

        log.debug("mongo: db.{0}.remove({1})".format(self._collection_name, spec_or_id))
        node = yield gen.Task(self._database.get_node, ReadPreference.PRIMARY)
        connection = yield gen.Task(node.connection)

        response, error = yield gen.Task(connection.send_message,
            message_delete, safe)

        if callback:
            callback((response, error))

    @gen.engine
    def update(self, spec, document, upsert=False, safe=True,
        multi=False, callback=None):
        """Update a document(s) in this collection.

        :Parameters:
        - `spec`: a ``dict`` or :class:`~bson.son.SON` instance
          specifying elements which must be present for a document
          to be updated
        - `document`: a ``dict`` or :class:`~bson.son.SON`
          instance specifying the document to be used for the update
          or (in the case of an upsert) insert - see docs on MongoDB
          `update modifiers`_
        - `upsert` (optional): perform an upsert if ``True``
        - `safe` (optional): check that the update succeeded?
        - `multi` (optional): update all documents that match
          `spec`, rather than just the first matching document. The
          default value for `multi` is currently ``False``, but this
          might eventually change to ``True``. It is recommended
          that you specify this argument explicitly for all update
          operations in order to prepare your code for that change.
        - `callback` (optional): called with ``(response, error)`` when
          the update is finished
        """
        assert isinstance(spec, dict), "spec must be an instance of dict"
        assert isinstance(document, dict), "document must be an instance of dict"
        assert isinstance(upsert, bool), "upsert must be an instance of bool"
        assert isinstance(safe, bool), "safe must be an instance of bool"

        message_update = message.update(self._collection_name, upsert,
            multi, spec, document, safe, {})

        log.debug("mongo: db.{0}.update({1}, {2}, {3}, {4})".format(
            self._collection_name, spec, document, upsert, multi))

        node = yield gen.Task(self._database.get_node, ReadPreference.PRIMARY)
        connection = yield gen.Task(node.connection)

        response, error = yield gen.Task(connection.send_message,
            message_update, safe)

        # BUG FIX: callback defaults to None but was invoked
        # unconditionally, raising TypeError for fire-and-forget
        # updates. Guard it like insert()/remove() do.
        if callback:
            callback((response, error))

    @gen.engine
    def find_one(self, spec_or_id=None, **kwargs):
        """Get a single document from the database.

        All arguments to :meth:`find` are also valid arguments for
        :meth:`find_one`, although any `limit` argument will be
        ignored. Returns a single document, or ``None`` if no matching
        document is found.

        :Parameters:

        - `spec_or_id` (optional): a dictionary specifying
          the query to be performed OR any other type to be used as
          the value for a query for ``"_id"``.

        - `**kwargs` (optional): any additional keyword arguments
          are the same as the arguments to :meth:`find`.
        """
        if spec_or_id is not None and not isinstance(spec_or_id, dict):
            spec_or_id = {"_id": spec_or_id}

        # limit=-1 makes the cursor deliver a single document (or an
        # empty result) instead of a list; the callback travels in kwargs.
        self.find(spec_or_id, limit=-1, **kwargs)

    def find(self, *args, **kwargs):
        """Query the database.

        The `spec` argument is a prototype document that all results
        must match. For example:

        :Parameters:
        - `spec` (optional): a SON object specifying elements which
          must be present for a document to be included in the
          result set
        - `fields` (optional): a list of field names that should be
          returned in the result set ("_id" will always be
          included), or a dict specifying the fields to return
        - `skip` (optional): the number of documents to omit (from
          the start of the result set) when returning the results
        - `limit` (optional): the maximum number of results to
          return
        - `timeout` (optional): if True, any returned cursor will be
          subject to the normal timeout behavior of the mongod
          process. Otherwise, the returned cursor will never timeout
          at the server. Care should be taken to ensure that cursors
          with timeout turned off are properly closed.
        - `snapshot` (optional): if True, snapshot mode will be used
          for this query. Snapshot mode assures no duplicates are
          returned, or objects missed, which were present at both
          the start and end of the query's execution.
        - `tailable` (optional): the result of this find call will
          be a tailable cursor - tailable cursors aren't closed when
          the last data is retrieved but are kept open and the
          cursors location marks the final document's position. if
          more data is received iteration of the cursor will
          continue from the last document received.
        - `sort` (optional): a list of (key, direction) pairs
          specifying the sort order for this query. See
          :meth:`~pymongo.cursor.Cursor.sort` for details.
        - `max_scan` (optional): limit the number of documents
          examined when performing the query
        - `read_preferences` (optional): The read preference for
          this query.
        """
        log.debug("mongo: db.{0}.find({spec}).limit({limit}).sort({sort})".format(
            self._collection_name,
            spec=args[0] if args else {},
            sort=kwargs.get('sort', {}),
            limit=kwargs.get('limit', '')
        ))
        cursor = Cursor(self._database, self._collection, *args, **kwargs)

        # With a callback the query is executed immediately; without one
        # the lazily-evaluated cursor is handed back to the caller.
        if 'callback' in kwargs:
            cursor.find(callback=kwargs['callback'])
        else:
            return cursor

    def distinct(self, key, callback):
        """Get a list of distinct values for `key` among all documents
        in this collection.

        Raises :class:`TypeError` if `key` is not an instance of
        :class:`basestring` (:class:`str` in python 3).

        To get the distinct values for a key in the result set of a
        query use :meth:`~mongotor.cursor.Cursor.distinct`.

        :Parameters:
        - `key`: name of key for which we want to get the distinct values
        """
        self.find().distinct(key, callback=callback)

    def count(self, callback):
        """Get the size of the results among all documents.

        Returns the number of documents in the results set
        """
        self.find().count(callback=callback)

    @gen.engine
    def aggregate(self, pipeline, read_preference=None, callback=None):
        """Perform an aggregation using the aggregation framework on this
        collection.

        :Parameters:
        - `pipeline`: a single command or list of aggregation commands
        - `read_preference`
        - `callback` (optional): called with the command response

        .. note:: Requires server version **>= 2.1.0**

        .. _aggregate command:
            http://docs.mongodb.org/manual/applications/aggregation
        """
        if not isinstance(pipeline, (dict, list, tuple)):
            raise TypeError("pipeline must be a dict, list or tuple")

        if isinstance(pipeline, dict):
            pipeline = [pipeline]

        response, error = yield gen.Task(self._database.command, "aggregate",
            self._collection, pipeline=pipeline,
            read_preference=read_preference)

        # BUG FIX: guard the optional callback (was called unconditionally).
        if callback:
            callback(response)

    @gen.engine
    def group(self, key, condition, initial, reduce, finalize=None,
        read_preference=None, callback=None):
        """Perform a query similar to an SQL *group by* operation.

        Returns an array of grouped items.

        The `key` parameter can be:

        - ``None`` to use the entire document as a key.
        - A :class:`list` of keys (each a :class:`basestring`
          (:class:`str` in python 3)) to group by.
        - A :class:`basestring` (:class:`str` in python 3), or
          :class:`~bson.code.Code` instance containing a JavaScript
          function to be applied to each document, returning the key
          to group by.

        :Parameters:
        - `key`: fields to group by (see above description)
        - `condition`: specification of rows to be
          considered (as a :meth:`find` query specification)
        - `initial`: initial value of the aggregation counter object
        - `reduce`: aggregation function as a JavaScript string
        - `finalize`: function to be called on each object in output list.
        - `callback` (optional): called with the command response
        """
        group = {}
        # A string key is treated as a JavaScript key function ($keyf);
        # a list of field names becomes a plain "key" projection.
        if isinstance(key, basestring):
            group["$keyf"] = Code(key)
        elif key is not None:
            group = {"key": helpers._fields_list_to_dict(key)}

        group["ns"] = self._collection
        group["$reduce"] = Code(reduce)
        group["cond"] = condition
        group["initial"] = initial
        if finalize is not None:
            group["finalize"] = Code(finalize)

        response, error = yield gen.Task(self._database.command, "group",
            group,
            read_preference=read_preference)

        # BUG FIX: guard the optional callback (was called unconditionally).
        if callback:
            callback(response)
17 | from __future__ import with_statement 18 | from tornado import iostream 19 | from tornado import stack_context 20 | from mongotor.errors import InterfaceError, IntegrityError, \ 21 | ProgrammingError, DatabaseError 22 | from mongotor import helpers 23 | import socket 24 | import logging 25 | import struct 26 | import contextlib 27 | 28 | logger = logging.getLogger(__name__) 29 | 30 | 31 | class Connection(object): 32 | 33 | def __init__(self, host, port, pool=None, autoreconnect=True, timeout=5): 34 | self._host = host 35 | self._port = port 36 | self._pool = pool 37 | self._autoreconnect = autoreconnect 38 | self._timeout = timeout 39 | self._connected = False 40 | self._callback = None 41 | 42 | self._connect() 43 | 44 | logger.debug('{0} created'.format(self)) 45 | 46 | def _connect(self): 47 | self.usage = 0 48 | try: 49 | socket.timeout(self._timeout) 50 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) 51 | s.connect((self._host, self._port)) 52 | 53 | self._stream = iostream.IOStream(s) 54 | self._stream.set_close_callback(self._socket_close) 55 | 56 | self._connected = True 57 | except socket.error, error: 58 | raise InterfaceError(error) 59 | 60 | def __repr__(self): 61 | return "Connection {0} ::: ".format(id(self)) 62 | 63 | def _parse_header(self, header): 64 | #logger.debug('got data %r' % header) 65 | length = int(struct.unpack(" 3 | # Copyright (C) <2012> Marcel Nicolay 4 | # 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU Lesser General Public License as 7 | # published by the Free Software Foundation, either version 3 of the 8 | # License, or (at your option) any later version. 9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU Lesser General Public License for more details. 
14 | # 15 | # You should have received a copy of the GNU Lesser General Public License 16 | # along with this program. If not, see . 17 | import logging 18 | from tornado import gen 19 | from bson import SON 20 | from mongotor import message 21 | from mongotor import helpers 22 | 23 | _QUERY_OPTIONS = { 24 | "tailable_cursor": 2, 25 | "slave_okay": 4, 26 | "oplog_replay": 8, 27 | "no_timeout": 16} 28 | 29 | DESCENDING = -1 30 | ASCENDING = 1 31 | 32 | logger = logging.getLogger(__name__) 33 | 34 | 35 | class Cursor(object): 36 | """A cursor / iterator over Mongo query results. 37 | """ 38 | 39 | def __init__(self, database, collection, spec_or_id=None, fields=None, snapshot=False, 40 | tailable=False, max_scan=None, is_command=False, explain=False, hint=None, 41 | skip=0, limit=0, sort=None, connection=None, 42 | read_preference=None, timeout=True, slave_okay=True, **kw): 43 | 44 | if spec_or_id is not None and not isinstance(spec_or_id, dict): 45 | spec_or_id = {"_id": spec_or_id} 46 | 47 | self._spec = spec_or_id or {} 48 | 49 | if fields is not None: 50 | if not fields: 51 | fields = {"_id": 1} 52 | if not isinstance(fields, dict): 53 | fields = helpers._fields_list_to_dict(fields) 54 | 55 | self._fields = fields 56 | self._snapshot = snapshot 57 | self._tailable = tailable 58 | self._max_scan = max_scan 59 | self._hint = hint 60 | self._database = database 61 | self._collection = collection 62 | self._collection_name = database.get_collection_name(collection) 63 | self._timeout = timeout 64 | self._is_command = is_command 65 | self._explain = explain 66 | self._slave_okay = slave_okay 67 | self._read_preference = read_preference 68 | self._connection = connection 69 | self._ordering = sort 70 | self._skip = skip 71 | self._limit = limit 72 | 73 | @gen.engine 74 | def find(self, callback=None): 75 | message_query = message.query(self._query_options(), self._collection_name, 76 | self._skip, self._limit, self._query_spec(), self._fields) 77 | 78 | if not 
self._connection: 79 | node = yield gen.Task(self._database.get_node, self._read_preference) 80 | connection = yield gen.Task(node.connection) 81 | else: 82 | connection = self._connection 83 | 84 | response, _ = yield gen.Task(connection.send_message_with_response, message_query) 85 | response = helpers._unpack_response(response) 86 | 87 | # close cursor 88 | if response and response.get('cursor_id'): 89 | cursor_id = response['cursor_id'] 90 | 91 | connection.send_message(message.kill_cursors([cursor_id]), callback=None) 92 | 93 | if self._limit == -1 and len(response['data']) == 1: 94 | callback((response['data'][0], None)) 95 | else: 96 | callback((response['data'], None)) 97 | 98 | @gen.engine 99 | def count(self, callback): 100 | """Get the size of the results set for this query. 101 | 102 | Returns the number of documents in the results set for this query. Does 103 | """ 104 | command = {"query": self._spec} 105 | 106 | response, error = yield gen.Task(self._database.command, 107 | 'count', self._collection, **command) 108 | 109 | total = 0 110 | if response and len(response) > 0 and 'n' in response: 111 | total = int(response['n']) 112 | 113 | callback(total) 114 | 115 | @gen.engine 116 | def distinct(self, key, callback): 117 | """Get a list of distinct values for `key` among all documents 118 | in the result set of this query. 
    def _query_spec(self):
        """Get the spec to use for a query.

        Wraps the raw spec as ``{"$query": ...}`` — unless this cursor
        represents a command, or the spec is already wrapped — and then
        attaches the query modifiers configured on this cursor
        ($orderby, $explain, $hint, $snapshot, $maxScan).
        """
        spec = self._spec
        if not self._is_command and "$query" not in self._spec:
            # SON preserves key order; the wire protocol requires $query
            # to come before the modifier keys added below.
            spec = SON({"$query": self._spec})
        if self._ordering:
            spec["$orderby"] = self._ordering
        if self._explain:
            spec["$explain"] = True
        if self._hint:
            spec["$hint"] = self._hint
        if self._snapshot:
            spec["$snapshot"] = True
        if self._max_scan:
            spec["$maxScan"] = self._max_scan
        return spec
9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU Lesser General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU Lesser General Public License 16 | # along with this program. If not, see . 17 | 18 | from functools import partial, wraps 19 | from datetime import timedelta 20 | from tornado import gen 21 | from tornado.ioloop import IOLoop 22 | from bson import SON 23 | from mongotor.node import Node, ReadPreference 24 | from mongotor.errors import DatabaseError 25 | from mongotor.client import Client 26 | import warnings 27 | 28 | 29 | def initialized(fn): 30 | @wraps(fn) 31 | def wrapped(self, *args, **kwargs): 32 | if not hasattr(self, '_initialized'): 33 | raise DatabaseError("you must be initialize database before perform this action") 34 | 35 | return fn(self, *args, **kwargs) 36 | 37 | return wrapped 38 | 39 | 40 | class Database(object): 41 | """Database object 42 | """ 43 | _instance = None 44 | 45 | def __new__(cls): 46 | if not cls._instance: 47 | cls._instance = super(Database, cls).__new__(cls) 48 | 49 | return cls._instance 50 | 51 | @classmethod 52 | def init(cls, addresses, dbname, read_preference=None, **kwargs): 53 | """initialize the database 54 | 55 | >>> Database.init(['localhost:27017', 'localhost:27018'], 'test', maxconnections=100) 56 | >>> db = Database() 57 | >>> db.collection.insert({...}, callback=...) 58 | 59 | :Parameters: 60 | - `addresses` : addresses can be a list or a simple string, host:port 61 | - `dbname` : mongo database name 62 | - `read_preference` (optional): The read preference for 63 | this query. 64 | - `maxconnections` (optional): maximum open connections for pool. 0 for unlimited 65 | - `maxusage` (optional): number of requests allowed on a connection 66 | before it is closed. 
0 for unlimited 67 | - `autoreconnect`: autoreconnect to database. default is True 68 | """ 69 | if cls._instance and hasattr(cls._instance, '_initialized') and cls._instance._initialized: 70 | return cls._instance 71 | 72 | database = Database() 73 | database._init(addresses, dbname, read_preference, **kwargs) 74 | 75 | return database 76 | 77 | def _init(self, addresses, dbname, read_preference=None, **kwargs): 78 | self._addresses = self._parse_addresses(addresses) 79 | self._dbname = dbname 80 | self._read_preference = read_preference or ReadPreference.PRIMARY 81 | self._nodes = [] 82 | self._pool_kwargs = kwargs 83 | self._initialized = True 84 | self._connected = False 85 | self._connect_callbacks = [] 86 | 87 | for host, port in self._addresses: 88 | node = Node(host, port, self, self._pool_kwargs) 89 | self._nodes.append(node) 90 | 91 | def _connect(self, callback): 92 | """Connect to database 93 | connect all mongodb nodes, configuring states and preferences 94 | - `callback`: (optional) method that will be called when the database is connected 95 | """ 96 | assert not self._connected 97 | self._connect_callbacks.append(callback) 98 | if len(self._connect_callbacks) == 1: # if another _connect is not in progress 99 | self._config_nodes(callback=self._on_config_node) 100 | 101 | def _config_nodes(self, callback=None): 102 | for node in self._nodes: 103 | node.config(callback) 104 | 105 | IOLoop.instance().add_timeout(timedelta(seconds=30), self._config_nodes) 106 | 107 | def _on_config_node(self): 108 | for node in self._nodes: 109 | if not node.initialized: 110 | return 111 | 112 | self._connected = True 113 | for callback in self._connect_callbacks: 114 | IOLoop.instance().add_callback(callback) 115 | self._connect_callbacks = [] 116 | 117 | @property 118 | def dbname(self): 119 | return self._dbname 120 | 121 | @initialized 122 | def get_collection_name(self, collection): 123 | return u'%s.%s' % (self.dbname, collection) 124 | 125 | def 
_parse_addresses(self, addresses): 126 | if isinstance(addresses, (str, unicode)): 127 | addresses = [addresses] 128 | 129 | assert isinstance(addresses, list) 130 | 131 | parsed_addresses = [] 132 | for address in addresses: 133 | host, port = address.split(":") 134 | parsed_addresses.append((host, int(port))) 135 | 136 | return parsed_addresses 137 | 138 | @classmethod 139 | def connect(cls, *args, **kwargs): 140 | """connect database 141 | 142 | this method is deprecated, use :class:`~mongotor.database.Database.init` to initiate a new database 143 | """ 144 | warnings.warn("deprecated", DeprecationWarning) 145 | 146 | return cls.init(*args, **kwargs) 147 | 148 | @classmethod 149 | def disconnect(cls): 150 | """Disconnect to database 151 | 152 | >>> Database.disconnect() 153 | 154 | """ 155 | if not cls._instance or not hasattr(cls._instance, '_initialized'): 156 | raise ValueError("Database isn't initialized") 157 | 158 | for node in cls._instance._nodes: 159 | node.disconnect() 160 | 161 | cls._instance = None 162 | 163 | @gen.engine 164 | @initialized 165 | def send_message(self, message, read_preference=None, 166 | with_response=True, callback=None): 167 | node = yield gen.Task(self.get_node, read_preference) 168 | 169 | connection = yield gen.Task(node.connection) 170 | 171 | if with_response: 172 | connection.send_message_with_response(message, callback=callback) 173 | else: 174 | connection.send_message(message, callback=callback) 175 | 176 | @gen.engine 177 | @initialized 178 | def get_node(self, read_preference=None, callback=None): 179 | assert callback 180 | 181 | # check if database is connected 182 | if not self._connected: 183 | # connect database 184 | yield gen.Task(self._connect) 185 | 186 | if read_preference is None: 187 | read_preference = self._read_preference 188 | 189 | node = ReadPreference.select_node(self._nodes, read_preference) 190 | if not node: 191 | raise DatabaseError('could not find an available node') 192 | 193 | callback(node) 
194 | 195 | @initialized 196 | def command(self, command, value=1, read_preference=None, 197 | callback=None, check=True, allowable_errors=[], **kwargs): 198 | """Issue a MongoDB command. 199 | 200 | Send command `command` to the database and return the 201 | response. If `command` is an instance of :class:`basestring` 202 | then the command {`command`: `value`} will be sent. Otherwise, 203 | `command` must be an instance of :class:`dict` and will be 204 | sent as is. 205 | 206 | Any additional keyword arguments will be added to the final 207 | command document before it is sent. 208 | 209 | For example, a command like ``{buildinfo: 1}`` can be sent 210 | using: 211 | 212 | >>> db.command("buildinfo") 213 | 214 | For a command where the value matters, like ``{collstats: 215 | collection_name}`` we can do: 216 | 217 | >>> db.command("collstats", collection_name) 218 | 219 | For commands that take additional arguments we can use 220 | kwargs. So ``{filemd5: object_id, root: file_root}`` becomes: 221 | 222 | >>> db.command("filemd5", object_id, root=file_root) 223 | 224 | :Parameters: 225 | - `command`: document representing the command to be issued, 226 | or the name of the command (for simple commands only). 227 | 228 | .. note:: the order of keys in the `command` document is 229 | significant (the "verb" must come first), so commands 230 | which require multiple keys (e.g. `findandmodify`) 231 | should use an instance of :class:`~bson.son.SON` or 232 | a string and kwargs instead of a Python `dict`. 
233 | 234 | - `value` (optional): value to use for the command verb when 235 | `command` is passed as a string 236 | - `**kwargs` (optional): additional keyword arguments will 237 | be added to the command document before it is sent 238 | 239 | """ 240 | if isinstance(command, basestring): 241 | command = SON([(command, value)]) 242 | 243 | command.update(kwargs) 244 | 245 | if read_preference is None: 246 | read_preference = self._read_preference 247 | 248 | self._command(command, read_preference=read_preference, callback=callback) 249 | 250 | def _command(self, command, read_preference=None, 251 | connection=None, callback=None): 252 | 253 | if read_preference is None: 254 | read_preference = self._read_preference 255 | 256 | client = Client(self, '$cmd') 257 | 258 | client.find_one(command, is_command=True, connection=connection, 259 | read_preference=read_preference, callback=callback) 260 | 261 | def __getattr__(self, name): 262 | """Get a client collection by name. 263 | 264 | :Parameters: 265 | - `name`: the name of the collection 266 | """ 267 | return Client(self, name) 268 | -------------------------------------------------------------------------------- /mongotor/errors.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # 3 | # Copyright (C) <2012> Marcel Nicolay 4 | # 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU Lesser General Public License as 7 | # published by the Free Software Foundation, either version 3 of the 8 | # License, or (at your option) any later version. 9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU Lesser General Public License for more details. 
class Error(StandardError):
    """Base class for all mongotor exceptions."""


class InterfaceError(Error):
    """Raised when a connection to the database cannot be made or is lost."""


class TooManyConnections(InterfaceError):
    """Raised when a pool is busy."""


class InvalidOperationError(Error):
    """Raised when a client attempts to perform an invalid operation."""


class DatabaseError(Error):
    """Raised when a database operation fails.

    :Parameters:
      - `error`: error message
      - `code` (optional): server-side error code
    """

    def __init__(self, error, code=None):
        self.code = code
        Error.__init__(self, error)


class IntegrityError(DatabaseError):
    """Raised when a safe insert or update fails due to a duplicate key error.
    """
    def __init__(self, msg, code=None):
        # BUG FIX: the base Error was never initialized, so str(exc) and
        # exc.args were always empty; delegate to DatabaseError (which also
        # sets self.code).
        DatabaseError.__init__(self, msg, code)
        self.msg = msg


class ProgrammingError(DatabaseError):
    """Raised for programming errors detected by the driver."""


class TimeoutError(DatabaseError):
    """Raised when a database operation times out."""
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import bson 16 | import struct 17 | from mongotor.errors import (DatabaseError, 18 | InterfaceError, TimeoutError) 19 | 20 | 21 | def _unpack_response(response, cursor_id=None, as_class=dict, tz_aware=False): 22 | """Unpack a response from the database. 23 | 24 | Check the response for errors and unpack, returning a dictionary 25 | containing the response data. 26 | 27 | :Parameters: 28 | - `response`: byte string as returned from the database 29 | - `cursor_id` (optional): cursor_id we sent to get this response - 30 | used for raising an informative exception when we get cursor id not 31 | valid at server response 32 | - `as_class` (optional): class to use for resulting documents 33 | """ 34 | response_flag = struct.unpack("`_ to be sent to 17 | MongoDB. 18 | 19 | .. note:: This module is for internal use and is generally not needed by 20 | application developers. 21 | """ 22 | 23 | import random 24 | import struct 25 | 26 | import bson 27 | from bson.son import SON 28 | from mongotor.errors import InvalidOperationError 29 | 30 | 31 | __ZERO = "\x00\x00\x00\x00" 32 | 33 | 34 | def __last_error(args): 35 | """Data to send to do a lastError. 36 | """ 37 | cmd = SON([("getlasterror", 1)]) 38 | cmd.update(args) 39 | return query(0, "admin.$cmd", 0, -1, cmd) 40 | 41 | 42 | def __pack_message(operation, data): 43 | """Takes message data and adds a message header based on the operation. 44 | 45 | Returns the resultant message string. 46 | """ 47 | request_id = random.randint(-2 ** 31 - 1, 2 ** 31) 48 | message = struct.pack(" 3 | # Copyright (C) <2012> Marcel Nicolay 4 | # 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU Lesser General Public License as 7 | # published by the Free Software Foundation, either version 3 of the 8 | # License, or (at your option) any later version. 
9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU Lesser General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU Lesser General Public License 16 | # along with this program. If not, see . 17 | 18 | import logging 19 | import random 20 | from tornado import gen 21 | from bson import SON 22 | from mongotor.pool import ConnectionPool 23 | from mongotor.connection import Connection 24 | from mongotor.errors import InterfaceError, TooManyConnections 25 | 26 | logger = logging.getLogger(__name__) 27 | 28 | 29 | class Node(object): 30 | """Node of database cluster 31 | """ 32 | 33 | def __init__(self, host, port, database, pool_kargs=None): 34 | if not pool_kargs: 35 | pool_kargs = {} 36 | 37 | assert isinstance(host, (str, unicode)) 38 | assert isinstance(port, int) 39 | 40 | self.host = host 41 | self.port = port 42 | self.database = database 43 | self.pool_kargs = pool_kargs 44 | 45 | self.is_primary = False 46 | self.is_secondary = False 47 | self.available = False 48 | self.initialized = False 49 | 50 | self.pool = ConnectionPool(self.host, self.port, self.database.dbname, 51 | **self.pool_kargs) 52 | 53 | @gen.engine 54 | def config(self, callback=None): 55 | ismaster = SON([('ismaster', 1)]) 56 | 57 | response = None 58 | try: 59 | try: 60 | connection = yield gen.Task(self.connection) 61 | except TooManyConnections: 62 | # create a connection on the fly if pool is full 63 | connection = Connection(host=self.host, port=self.port) 64 | response, error = yield gen.Task(self.database._command, ismaster, 65 | connection=connection) 66 | if not connection._pool: # if connection is created on the fly 67 | connection.close() 68 | except InterfaceError, ie: 69 | logger.error('oops, database node {host}:{port} is unavailable: {error}' 70 | 
class ReadPreference(object):
    """Defines the read preferences supported by mongotor.

    * `PRIMARY`: Queries are sent to the primary of the replica set.
    * `PRIMARY_PREFERRED`: Queries are sent to the primary if available,
      otherwise a secondary.
    * `SECONDARY`: Queries are distributed among secondaries. An error
      is raised if no secondaries are available.
    * `SECONDARY_PREFERRED`: Queries are distributed among secondaries,
      or the primary if no secondary is available.
    * TODO: `NEAREST`: Queries are distributed among all members.
    """

    PRIMARY = 0
    PRIMARY_PREFERRED = 1
    SECONDARY = 2
    SECONDARY_ONLY = 2
    SECONDARY_PREFERRED = 3
    #NEAREST = 4

    @classmethod
    def select_primary_node(cls, nodes):
        """Return the first node that is both available and primary."""
        for candidate in nodes:
            if candidate.available and candidate.is_primary:
                return candidate
        return None

    @classmethod
    def select_random_node(cls, nodes, secondary_only):
        """Pick one available node at random.

        When `secondary_only` is true the primary is excluded from the
        draw. Returns None when no node qualifies.
        """
        eligible = [candidate for candidate in nodes
                    if candidate.available
                    and not (secondary_only and candidate.is_primary)]

        if not eligible:
            return None

        return random.choice(eligible)

    @classmethod
    def select_node(cls, nodes, mode=None):
        """Choose a node from `nodes` honouring the read preference `mode`.

        Defaults to PRIMARY. Returns None when no suitable node exists.
        """
        if mode is None:
            mode = cls.PRIMARY

        if mode == cls.PRIMARY:
            return cls.select_primary_node(nodes)

        if mode == cls.PRIMARY_PREFERRED:
            # node objects are always truthy, so `or` matches the original
            # `if primary_node:` fallback exactly
            return (cls.select_primary_node(nodes)
                    or cls.select_node(nodes, cls.SECONDARY))

        if mode == cls.SECONDARY:
            return cls.select_random_node(nodes, secondary_only=True)

        if mode == cls.SECONDARY_PREFERRED:
            return (cls.select_random_node(nodes, secondary_only=True)
                    or cls.select_primary_node(nodes))

        return None
Nicolay 4 | # 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU Lesser General Public License as 7 | # published by the Free Software Foundation, either version 3 of the 8 | # License, or (at your option) any later version. 9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU Lesser General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU Lesser General Public License 16 | # along with this program. If not, see . 17 | 18 | import logging 19 | from tornado import gen 20 | from mongotor.client import Client 21 | from mongotor.orm.field import Field 22 | from mongotor.orm.signal import (pre_save, post_save, 23 | pre_remove, post_remove, pre_update, post_update) 24 | from mongotor.orm.manager import Manager 25 | from mongotor.database import Database 26 | 27 | 28 | logger = logging.getLogger(__name__) 29 | __lazy_classes__ = {} 30 | 31 | 32 | class CollectionMetaClass(type): 33 | 34 | def __new__(cls, name, bases, attrs): 35 | global __lazy_classes__ 36 | 37 | # Add the document's fields to the _data 38 | for attr_name, attr_value in attrs.iteritems(): 39 | if hasattr(attr_value, "__class__") and\ 40 | issubclass(attr_value.__class__, Field) and\ 41 | attr_value.name is None: 42 | 43 | attr_value.name = attr_name 44 | 45 | new_class = super(CollectionMetaClass, cls).__new__(cls, name, 46 | bases, attrs) 47 | 48 | __lazy_classes__[name] = new_class 49 | 50 | new_class.objects = Manager(collection=new_class) 51 | 52 | return new_class 53 | 54 | 55 | class Collection(object): 56 | """Collection is the base class 57 | 58 | This class map a mongo collection into a python class. 59 | You only need to write a class and starts to use the orm advantages. 
60 | 61 | For example, a simple users collection can be mapping 62 | using: 63 | 64 | >>> from mongotor.orm import collection, field 65 | >>> class Users(collection.Collection): 66 | >>> __collection__ = 'users' 67 | >>> name = field.StringField() 68 | """ 69 | __metaclass__ = CollectionMetaClass 70 | 71 | def __new__(cls, class_name=None, *args, **kwargs): 72 | if class_name: 73 | global __lazy_classes__ 74 | return __lazy_classes__.get(class_name) 75 | 76 | return super(Collection, cls).__new__(cls, *args, **kwargs) 77 | 78 | def __init__(self): 79 | self._data = {} 80 | self._dirty = set() 81 | 82 | def as_dict(self, fields=None): 83 | items = {} 84 | fields = fields or [] 85 | iteritems = self.__class__.__dict__.iteritems() 86 | if fields: 87 | iteritems = ((k,v) for k,v in iteritems if k in fields) 88 | 89 | for attr_name, attr_type in iteritems: 90 | if isinstance(attr_type, Field): 91 | attr_value = getattr(self, attr_name) 92 | if attr_value is not None: 93 | items[attr_name] = attr_value 94 | 95 | # Gets the fields from base classes when fields are None so we only 96 | # get the dirty fields when we only want to update those. 
97 | if not fields: 98 | for cls in self.__class__.__mro__: 99 | for attr_name, attr_type in cls.__dict__.iteritems(): 100 | if isinstance(attr_type, Field): 101 | attr_value = getattr(self, attr_name) 102 | if attr_value is not None: 103 | items[attr_name] = attr_value 104 | 105 | return items 106 | 107 | @property 108 | def dirty_fields(self): 109 | return list(self._dirty) 110 | 111 | def clean_fields(self): 112 | self._dirty = set() 113 | 114 | @classmethod 115 | def create(cls, dictionary, cleaned=False): 116 | """Create a new instance of collection from a dictionary 117 | 118 | For example, creating a new instance from a mapped collection 119 | Users: 120 | 121 | >>> user = Users.create({'name': 'should be name'}) 122 | >>> assert user.name == 'should be name' 123 | """ 124 | instance = cls() 125 | for (key, value) in dictionary.iteritems(): 126 | try: 127 | setattr(instance, str(key), value) 128 | except TypeError, e: 129 | logger.warn(e) 130 | 131 | if cleaned: 132 | instance.clean_fields() 133 | 134 | return instance 135 | 136 | @gen.engine 137 | def save(self, safe=True, check_keys=True, callback=None): 138 | """Save a document 139 | 140 | >>> user = Users() 141 | >>> user.name = 'should be name' 142 | >>> user.save() 143 | 144 | :Parameters: 145 | - `safe` (optional): safe insert operation 146 | - `check_keys` (optional): check if keys start with '$' or 147 | contain '.', raising :class:`~pymongo.errors.InvalidName` 148 | in either case 149 | - `callback` : method which will be called when save is finished 150 | """ 151 | pre_save.send(instance=self) 152 | 153 | client = Client(Database(), self.__collection__) 154 | response, error = yield gen.Task(client.insert, self.as_dict(), 155 | safe=safe, check_keys=check_keys) 156 | 157 | self.clean_fields() 158 | 159 | post_save.send(instance=self) 160 | 161 | if callback: 162 | callback((response, error)) 163 | 164 | @gen.engine 165 | def remove(self, safe=True, callback=None): 166 | """Remove a document 167 | 
168 | :Parameters: 169 | - `safe` (optional): safe remove operation 170 | - `callback` : method which will be called when remove is finished 171 | """ 172 | pre_remove.send(instance=self) 173 | 174 | client = Client(Database(), self.__collection__) 175 | response, error = yield gen.Task(client.remove, self._id, safe=safe) 176 | 177 | post_remove.send(instance=self) 178 | 179 | if callback: 180 | callback((response, error)) 181 | 182 | @gen.engine 183 | def update(self, document=None, upsert=False, safe=True, multi=False, 184 | callback=None, force=False): 185 | """Update a document 186 | 187 | :Parameters: 188 | - `safe` (optional): safe update operation 189 | - `callback` : method which will be called when update is finished 190 | - `force`: if True will overide full document 191 | """ 192 | if not document and not self.dirty_fields: 193 | callback(tuple()) 194 | return 195 | 196 | pre_update.send(instance=self) 197 | 198 | if not document: 199 | if force: 200 | document = self.as_dict() 201 | else: 202 | document = {"$set": self.as_dict(self.dirty_fields)} 203 | 204 | client = Client(Database(), self.__collection__) 205 | spec = {'_id': self._id} 206 | 207 | response, error = yield gen.Task(client.update, spec, document, 208 | upsert=upsert, safe=safe, multi=multi) 209 | 210 | self.clean_fields() 211 | 212 | post_update.send(instance=self) 213 | 214 | if callback: 215 | callback((response, error)) 216 | -------------------------------------------------------------------------------- /mongotor/orm/field.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # 3 | # Copyright (C) <2012> Marcel Nicolay 4 | # 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU Lesser General Public License as 7 | # published by the Free Software Foundation, either version 3 of the 8 | # License, or (at your option) any later version. 
class Field(object):
    """Base descriptor for mapped collection attributes.

    Values live in the owner instance's ``_data`` dict; the names of
    modified attributes are tracked in the instance's ``_dirty`` set.

    :Parameters:
      - `default`: value (or callable producing one) returned when unset
      - `name`: attribute name (filled in by the collection metaclass
        when omitted)
      - `field_type`: type the stored value is validated/coerced to
    """

    def __init__(self, default=None, name=None, field_type=None):
        # field_type and name must be set before _validate() runs, since
        # _validate() uses both for coercion and error reporting
        self.field_type = field_type
        self.name = name
        self.default = self._validate(default)

    def __get__(self, instance, owner):
        # accessed on the class itself -> return the descriptor
        if not instance:
            return self

        value = instance._data.get(self.name)
        if value is None:
            # callable defaults (e.g. datetime.now) are evaluated per access
            return self.default() if callable(self.default) else self.default

        return value

    def __set__(self, instance, value):
        value = self._validate(value)
        # only mark dirty when the value actually changes
        if self.name not in instance._data or instance._data[self.name] != value:
            instance._dirty.add(self.name)
        instance._data[self.name] = value

    def _validate(self, value):
        """Coerce *value* to `field_type`; raise TypeError when impossible.

        None is passed through unchanged.
        """
        if value is not None and not isinstance(value, self.field_type):
            try:
                value = self.field_type(value)
            except (TypeError, ValueError):
                # the two identical handlers were merged; both coercion
                # failures are normalized to a single TypeError
                raise TypeError("type of %s must be %s" % (self.name,
                                                           self.field_type))
        return value
None: 71 | raise(TypeError("Value did not match regex")) 72 | 73 | return value 74 | 75 | 76 | class UrlField(StringField): 77 | 78 | REGEX = re.compile( 79 | r'^https?://' 80 | r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' 81 | r'localhost|' 82 | r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' 83 | r'(?::\d+)?' 84 | r'(?:/?|[/?]\S+)$', re.IGNORECASE 85 | ) 86 | 87 | def __init__(self, *args, **kwargs): 88 | super(UrlField, self).__init__(self.REGEX, *args, **kwargs) 89 | 90 | 91 | class EmailField(StringField): 92 | 93 | REGEX = re.compile( 94 | # dot-atom 95 | r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" 96 | # quoted-string 97 | r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016' 98 | r'-\177])*"' 99 | # domain 100 | r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', 101 | re.IGNORECASE 102 | ) 103 | 104 | def __init__(self, *args, **kwargs): 105 | super(EmailField, self).__init__(self.REGEX, *args, **kwargs) 106 | 107 | 108 | class NumberField(Field): 109 | 110 | def __init__(self, field_type, min_value=None, max_value=None, 111 | *args, **kwargs): 112 | self.min_value = min_value 113 | self.max_value = max_value 114 | super(NumberField, self).__init__(field_type=field_type, 115 | *args, **kwargs) 116 | 117 | def _validate(self, value): 118 | value = super(NumberField, self)._validate(value) 119 | if self.min_value is not None and value < self.min_value: 120 | raise(TypeError("Value can not be less than %s" % (self.min_value))) 121 | 122 | if self.max_value is not None and value > self.max_value: 123 | raise(TypeError("Value can not be more than %s" & (self.max_value))) 124 | 125 | return value 126 | 127 | 128 | class IntegerField(NumberField): 129 | 130 | def __init__(self, *args, **kwargs): 131 | super(IntegerField, self).__init__(int, *args, **kwargs) 132 | 133 | 134 | class LongField(NumberField): 135 | 136 | def __init__(self, *args, **kwargs): 137 | super(LongField, self).__init__(long, 
*args, **kwargs) 138 | 139 | 140 | class FloatField(NumberField): 141 | 142 | def __init__(self, *args, **kwargs): 143 | super(FloatField, self).__init__(float, *args, **kwargs) 144 | 145 | 146 | class DecimalField(NumberField): 147 | 148 | def __init__(self, *args, **kwargs): 149 | super(DecimalField, self).__init__(decimal.Decimal, *args, **kwargs) 150 | 151 | 152 | class DateTimeField(Field): 153 | 154 | def __init__(self, *args, **kwargs): 155 | super(DateTimeField, self).__init__(field_type=datetime, 156 | *args, **kwargs) 157 | 158 | 159 | class BooleanField(Field): 160 | 161 | def __init__(self, *args, **kwargs): 162 | super(BooleanField, self).__init__(field_type=bool, *args, **kwargs) 163 | 164 | 165 | class ListField(Field): 166 | 167 | def __init__(self, *args, **kwargs): 168 | super(ListField, self).__init__(field_type=list, *args, **kwargs) 169 | 170 | 171 | class ObjectField(Field): 172 | 173 | def __init__(self, *args, **kwargs): 174 | super(ObjectField, self).__init__(field_type=dict, *args, **kwargs) 175 | 176 | 177 | class ObjectIdField(Field): 178 | 179 | def __init__(self, *args, **kwargs): 180 | super(ObjectIdField, self).__init__(field_type=ObjectId, 181 | *args, **kwargs) 182 | 183 | 184 | class UuidField(Field): 185 | 186 | def __init__(self, *args, **kwargs): 187 | super(UuidField, self).__init__(field_type=uuid.UUID, *args, **kwargs) 188 | 189 | 190 | class Md5Field(Field): 191 | 192 | length = 32 193 | 194 | def __init__(self, *args, **kwargs): 195 | super(Md5Field, self).__init__(field_type=unicode, *args, **kwargs) 196 | 197 | def _validate(self, value): 198 | value = super(Md5Field, self)._validate(value) 199 | if len(value) is not self.length: 200 | raise(TypeError("Md5 dose not have the correct length")) 201 | 202 | try: 203 | int(value, 16) 204 | except: 205 | raise(TypeError("The Md5 hash should be a 16byte hash value")) 206 | 207 | return value 208 | 209 | 210 | class Sha1Field(Field): 211 | 212 | length = 40 213 | 214 | def 
__init__(self, *args, **kwargs): 215 | super(Sha1Field, self).__init__(field_type=unicode, *args, **kwargs) 216 | 217 | def _validate(self, value): 218 | value = super(Sha1Field, self)._validate(value) 219 | if len(value) is not self.length: 220 | raise(TypeError("Sha1 dose not have the correct length")) 221 | 222 | try: 223 | int(value, 20) 224 | except: 225 | raise(TypeError("The Sha1 hash should be a 20byte hash value")) 226 | 227 | return value 228 | -------------------------------------------------------------------------------- /mongotor/orm/manager.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # 3 | # Copyright (C) <2012> Marcel Nicolay 4 | # 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU Lesser General Public License as 7 | # published by the Free Software Foundation, either version 3 of the 8 | # License, or (at your option) any later version. 9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU Lesser General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU Lesser General Public License 16 | # along with this program. If not, see . 
from bson.son import SON
from tornado import gen
from mongotor.database import Database
from mongotor.client import Client


class Manager(object):
    """Per-collection query API, exposed as ``MyCollection.objects``.

    Wraps :class:`~mongotor.client.Client` and raw database commands, and
    maps result documents back to instances of ``self.collection``.
    """

    def __init__(self, collection):
        # the Collection subclass this manager serves
        self.collection = collection

    def _client(self):
        # Single place to build a Client bound to the default database and
        # this manager's collection.
        return Client(Database(), self.collection.__collection__)

    @gen.engine
    def find_one(self, query, callback):
        """Fetch at most one document matching ``query``.

        ``callback`` receives a collection instance, or ``None`` when no
        document matched (or an error occurred).
        """
        result, error = yield gen.Task(self._client().find_one, query)

        instance = None
        if result:
            # cleaned=True: loading from the DB must not mark fields dirty
            instance = self.collection.create(result, cleaned=True)

        callback(instance)

    @gen.engine
    def find(self, query, callback, **kw):
        """Fetch every document matching ``query``.

        ``callback`` receives a (possibly empty) list of collection
        instances; extra keyword args are forwarded to the client cursor.
        """
        result, error = yield gen.Task(self._client().find, query, **kw)

        items = []
        if result:
            for item in result:
                items.append(self.collection.create(item, cleaned=True))

        callback(items)

    def count(self, query=None, callback=None):
        """Count documents matching ``query`` (all documents when None)."""
        self._client().find(query).count(callback=callback)

    @gen.engine
    def distinct(self, key, callback, query=None):
        """Distinct values of ``key`` over documents matching ``query``."""
        # NOTE: @gen.engine kept for its stack-context behavior even though
        # the body has no yield, matching the original semantics.
        self._client().find(query).distinct(key, callback=callback)

    @gen.engine
    def geo_near(self, near, max_distance=None, num=None, spherical=None,
                 unique_docs=None, query=None, callback=None, **kw):
        """Run the ``geoNear`` command against this collection.

        ``callback`` receives a list of collection instances built from the
        ``obj`` member of each geoNear result.
        """
        command = SON({"geoNear": self.collection.__collection__})

        # idiom fix: `is not None` instead of `!= None` comparisons
        if near is not None:
            command.update({'near': near})

        if query is not None:
            command.update({'query': query})

        if num is not None:
            command.update({'num': num})

        if max_distance is not None:
            command.update({'maxDistance': max_distance})

        if unique_docs is not None:
            command.update({'uniqueDocs': unique_docs})

        if spherical is not None:
            command.update({'spherical': spherical})

        result, error = yield gen.Task(Database().command, command)
        items = []

        if result and result['ok']:
            for item in result['results']:
                items.append(self.collection.create(item['obj'], cleaned=True))

        if callback:
            callback(items)

    @gen.engine
    def map_reduce(self, map_, reduce_, callback, query=None, out=None):
        """Run a mapReduce command.

        - `out`: MongoDB output specification; defaults to inline output.

        ``callback`` receives the inline results (or the output collection
        name for non-inline ``out``), or ``None`` on failure.
        """
        command = SON({'mapreduce': self.collection.__collection__})

        command.update({
            'map': map_,
            'reduce': reduce_,
        })

        if query is not None:
            command.update({'query': query})
        if out is None:
            command.update({'out': {'inline': 1}})
        else:
            # BUG FIX: a caller-supplied `out` used to be ignored entirely,
            # and MongoDB rejects mapReduce without an `out` specification.
            command.update({'out': out})

        result, error = yield gen.Task(Database().command, command)
        if not result or int(result['ok']) != 1:
            callback(None)
            return

        # inline output carries 'results'; collection output carries 'result'
        callback(result.get('results', result.get('result')))

    @gen.engine
    def truncate(self, callback=None):
        """Remove every document from the collection."""
        yield gen.Task(self._client().remove, {})

        if callback:
            callback()


# ---------------------------------------------------------------------------
# mongotor/orm/signal.py
# ---------------------------------------------------------------------------
# coding: utf-8
#
# Copyright (C) <2012> Marcel Nicolay <marcel.nicolay@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Signal(object):
    """Minimal synchronous pub/sub: handlers are registered per sender class
    and fired for any instance of that class (or a subclass)."""

    def __init__(self):
        # list of (sender_class, handler) pairs, in registration order
        self.receivers = []

    def connect(self, sender, handler):
        """Register *handler* to run for instances of *sender*."""
        self.receivers.append((sender, handler))

    def disconnect(self, sender, handler):
        """Remove a previously connected (sender, handler) pair."""
        self.receivers.remove((sender, handler))

    def send(self, instance):
        """Invoke, in order, every handler whose sender matches *instance*."""
        for sender_cls, handler_fn in self.receivers:
            if not isinstance(instance, sender_cls):
                continue
            handler_fn(sender_cls, instance)


def receiver(signal, sender):
    """Decorator form of ``signal.connect(sender, handler)``.

    The decorated handler is returned unchanged so it can still be called
    (or disconnected) directly.
    """

    def _bind(handler):
        signal.connect(sender, handler)
        return handler

    return _bind


# module-level signals fired around collection persistence operations
pre_save = Signal()
post_save = Signal()

pre_remove = Signal()
post_remove = Signal()

pre_update = Signal()
post_update = Signal()


# ---------------------------------------------------------------------------
# mongotor/pool.py
# ---------------------------------------------------------------------------
# coding: utf-8
#
# Copyright (C) <2012> Marcel Nicolay <marcel.nicolay@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from datetime import timedelta
from threading import Condition
from tornado.ioloop import IOLoop
from functools import partial
from mongotor.connection import Connection
from mongotor.errors import TooManyConnections

log = logging.getLogger(__name__)


class ConnectionPool(object):
    """Connection Pool

    :Parameters:
      - `maxconnections` (optional): maximum open connections for this pool. 0 for unlimited
      - `maxusage` (optional): number of requests allowed on a connection before it is closed. 0 for unlimited
      - `dbname`: mongo database name
      - `autoreconnect`: autoreconnect on database

    """
    def __init__(self, host, port, dbname, maxconnections=0, maxusage=0,
            autoreconnect=True):

        assert isinstance(host, (str, unicode))
        assert isinstance(port, int)
        assert isinstance(maxconnections, int)
        assert isinstance(maxusage, int)
        # NOTE(review): dbname is validated here but never stored or used by
        # the pool itself — confirm whether callers rely on it.
        assert isinstance(dbname, (str, unicode))
        assert isinstance(autoreconnect, bool)

        self._host = host
        self._port = port
        self._maxconnections = maxconnections
        self._maxusage = maxusage
        self._autoreconnect = autoreconnect
        # incremented when a connection is handed out in connection();
        # decremented in release() and close()
        self._connections = 0
        self._idle_connections = []
        # guards _idle_connections / _connections across threads
        self._condition = Condition()

        # pre-open `maxconnections` connections; with maxconnections == 0
        # (unlimited) the pool starts empty and grows on demand
        for i in range(self._maxconnections):
            conn = self._create_connection()
            self._idle_connections.append(conn)

    def __repr__(self):
        return "ConnectionPool {0}:{1}:{2} using:{3}, idle:{4} :::: "\
            .format(id(self), self._host, self._port, self._connections, len(self._idle_connections))

    def _create_connection(self):
        # the Connection keeps a reference to this pool so it can be
        # release()d back when its work completes
        log.debug('{0} creating new connection'.format(self))
        return Connection(host=self._host, port=self._port, pool=self,
            autoreconnect=self._autoreconnect)

    def connection(self, callback=None, retries=0):
        """Get a connection from pool

        :Parameters:
          - `callback` : method which will be called when connection is ready

        """
        self._condition.acquire()
        try:
            try:
                # EAFP: take the oldest idle connection if one exists
                conn = self._idle_connections.pop(0)
            except IndexError:
                if self._maxconnections and self._connections >= self._maxconnections:
                    # pool exhausted: instead of blocking the IOLoop thread,
                    # reschedule this request shortly; give up after 10 tries
                    if retries > 10:
                        raise TooManyConnections()

                    log.warn('{0} too many connections, retries {1}'.format(self, retries))
                    retry_connection = partial(self.connection, retries=(retries + 1), callback=callback)
                    IOLoop.instance().add_timeout(timedelta(microseconds=300), retry_connection)

                    # the scheduled retry now owns this request
                    return

                conn = self._create_connection()

            # counts both reused and freshly created checkouts
            self._connections += 1

        finally:
            self._condition.release()

        log.debug('{0} {1} connection retrieved'.format(self, conn))
        callback(conn)

    def release(self, conn):
        """Return `conn` to the idle pool, or discard it when worn out."""
        if self._maxusage and conn.usage > self._maxusage:
            if not conn.closed():
                # connection served more than maxusage requests: close it so
                # a fresh one is created on the next checkout
                # NOTE(review): this path mutates _connections without
                # holding _condition — confirm thread-safety is acceptable.
                log.debug('{0} {1} connection max usage expired, renewing...'.format(self, conn))
                self._connections -= 1
                conn.close()
            return

        self._condition.acquire()

        if conn in self._idle_connections:
            # already back in the pool (e.g. released again on socket close)
            log.debug('{0} {1} called by socket close'.format(self, conn))
            self._condition.release()
            return

        try:
            self._idle_connections.append(conn)
            self._condition.notify()
        finally:
            self._connections -= 1
            self._condition.release()

        log.debug('{0} {1} release connection'.format(self, conn))

    def close(self):
        """Close all connections in the pool."""
        log.debug('{0} closing...'.format(self))
        self._condition.acquire()
        try:
            while self._idle_connections:  # close all idle connections
                con = self._idle_connections.pop(0)
                try:
                    con.close()
                except Exception:
                    # best-effort shutdown: a failing close must not keep
                    # the remaining connections open
                    pass
                # NOTE(review): decrementing per idle connection here differs
                # from connection()/release(), which count only handed-out
                # connections — confirm the intended accounting.
                self._connections -= 1
            self._condition.notifyAll()
        finally:
            self._condition.release()
-------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | -r requirements.txt 2 | 3 | coverage==3.5.2 4 | nose==1.1.2 5 | fudge==1.0.3 6 | sure==1.0.6 7 | spec==0.9.7 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pymongo>=2.3 2 | tornado>=2.2 -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [egg_info] 2 | tag_build = 3 | tag_date = 0 4 | tag_svn_revision = 0 5 | 6 | [nosetests] 7 | tests=tests/ 8 | nocapture=1 9 | verbosity=2 10 | with-coverage=1 11 | cover-erase=1 12 | cover-package=mongotor 13 | with-specplugin=1 14 | with-xunit=1 15 | xunit-file=build/nosetests.xml -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 -*- 2 | from setuptools import setup 3 | from mongotor import version 4 | import os 5 | 6 | 7 | def get_packages(): 8 | # setuptools can't do the job :( 9 | packages = [] 10 | for root, dirnames, filenames in os.walk('mongotor'): 11 | if '__init__.py' in filenames: 12 | packages.append(".".join(os.path.split(root)).strip(".")) 13 | 14 | return packages 15 | 16 | setup( 17 | name = 'mongotor', 18 | version = version, 19 | description = "(MongoDB + Tornado) is an asynchronous driver and toolkit for working with MongoDB inside a Tornado app", 20 | long_description = open("README.md").read(), 21 | keywords = ['mongo','tornado'], 22 | author = 'Marcel Nicolay', 23 | author_email = 'marcel.nicolay@gmail.com', 24 | url = 'http://marcelnicolay.github.com/mongotor/', 25 | license = 'OSI', 26 | classifiers = ['Development Status 
:: 4 - Beta', 27 | 'Intended Audience :: Developers', 28 | 'License :: OSI Approved', 29 | 'Natural Language :: English', 30 | 'Natural Language :: Portuguese (Brazilian)', 31 | 'Operating System :: POSIX :: Linux', 32 | 'Programming Language :: Python :: 2.7', 33 | 'Topic :: Software Development :: Libraries :: Application Frameworks', 34 | ], 35 | install_requires = open("requirements.txt").read().split("\n"), 36 | packages = get_packages(), 37 | test_suite="nose.collector" 38 | ) -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/marcelnicolay/mongotor/9baa0a871d196e037e7f20ec43c82b99230cab7e/tests/__init__.py -------------------------------------------------------------------------------- /tests/orm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/marcelnicolay/mongotor/9baa0a871d196e037e7f20ec43c82b99230cab7e/tests/orm/__init__.py -------------------------------------------------------------------------------- /tests/orm/test_collection.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from tornado.ioloop import IOLoop 3 | from tornado import testing 4 | from mongotor.database import Database 5 | from mongotor.orm.collection import Collection 6 | from mongotor.orm.manager import Manager 7 | from mongotor.orm.field import (ObjectIdField, StringField, DateTimeField, 8 | IntegerField, BooleanField, FloatField, ListField, ObjectField, 9 | LongField, DecimalField, UrlField, UuidField, EmailField, Md5Field, 10 | Sha1Field) 11 | from mongotor.errors import DatabaseError 12 | from bson.objectid import ObjectId 13 | import sure 14 | import uuid 15 | import hashlib 16 | 17 | 18 | class CollectionTestCase(testing.AsyncTestCase): 19 | 20 | def get_new_ioloop(self): 21 | 
return IOLoop.instance() 22 | 23 | def setUp(self): 24 | super(CollectionTestCase, self).setUp() 25 | Database.init(["localhost:27027", "localhost:27028"], dbname='test') 26 | 27 | def tearDown(self): 28 | super(CollectionTestCase, self).tearDown() 29 | Database.disconnect() 30 | 31 | def test_save_a_new_document(self): 32 | """[CollectionTestCase] - Save a new document using collection schema""" 33 | class CollectionTest(Collection): 34 | __collection__ = "collection_test" 35 | _id = ObjectIdField() 36 | string_attr = StringField() 37 | 38 | doc_test = CollectionTest() 39 | doc_test._id = ObjectId() 40 | doc_test.string_attr = "should be string value" 41 | 42 | doc_test.save(callback=self.stop) 43 | response, error = self.wait() 44 | 45 | response['ok'].should.be.equal(1.0) 46 | error.should.be.none 47 | 48 | def test_remove_a_document(self): 49 | """[CollectionTestCase] - Remove a document""" 50 | class CollectionTest(Collection): 51 | __collection__ = "collection_test" 52 | _id = ObjectIdField() 53 | string_attr = StringField() 54 | 55 | doc_test = CollectionTest() 56 | doc_test._id = ObjectId() 57 | doc_test.string_attr = "should be string value" 58 | 59 | doc_test.save(callback=self.stop) 60 | self.wait() 61 | 62 | doc_test.remove(callback=self.stop) 63 | response, error = self.wait() 64 | 65 | response['ok'].should.be.equal(1.0) 66 | error.should.be.none 67 | 68 | def test_update_a_document(self): 69 | """[CollectionTestCase] - Update a document""" 70 | class CollectionTest(Collection): 71 | __collection__ = "collection_test" 72 | _id = ObjectIdField() 73 | string_attr = StringField() 74 | 75 | doc_test = CollectionTest() 76 | doc_test._id = ObjectId() 77 | doc_test.string_attr = "should be string value" 78 | 79 | doc_test.save(callback=self.stop) 80 | self.wait() 81 | 82 | doc_test.string_attr = "should be new string value" 83 | doc_test.update(callback=self.stop) 84 | response, error = self.wait() 85 | 86 | response['ok'].should.be.equal(1.0) 87 | 
error.should.be.none 88 | 89 | def test_can_create_collection_from_dictionary(self): 90 | """[CollectionTestCase] - Create a document from dictionary """ 91 | object_id = ObjectId() 92 | object_dict = { 93 | 'string_attr': 'string_attr', 94 | 'integer_attr': 1, 95 | 'bool_attr': True, 96 | 'float_attr': 1.0, 97 | 'list_attr': [1, 2, 3], 98 | 'object_attr': {'chave': 'valor'}, 99 | 'object_id_attr': object_id, 100 | } 101 | 102 | class CollectionTest(Collection): 103 | string_attr = StringField() 104 | integer_attr = IntegerField() 105 | bool_attr = BooleanField() 106 | float_attr = FloatField() 107 | list_attr = ListField() 108 | object_attr = ObjectField() 109 | object_id_attr = ObjectIdField() 110 | unknow_object = StringField() 111 | 112 | object_instance = CollectionTest.create(object_dict) 113 | 114 | object_instance.string_attr.should.be.equal('string_attr') 115 | object_instance.integer_attr.should.be.equal(1) 116 | object_instance.bool_attr.should.be.ok 117 | object_instance.float_attr.should.be.equal(1.0) 118 | object_instance.list_attr.should.be.equal([1, 2, 3]) 119 | object_instance.object_attr.should.be.equal({'chave': 'valor'}) 120 | object_instance.object_id_attr.should.be.equal(object_id) 121 | 122 | def test_create_attribute_if_model_does_not_contains_field(self): 123 | """[CollectionTestCase] - Create attribute if model does not contains field""" 124 | class CollectionTest(Collection): 125 | string_attr = StringField() 126 | 127 | object_dict = { 128 | 'string_attr': 'string_attr', 129 | 'integer_attr': 1 130 | } 131 | 132 | object_instance = CollectionTest.create(object_dict) 133 | object_instance.string_attr.should.be.equal('string_attr') 134 | object_instance.integer_attr.should.be.equal(1) 135 | 136 | def test_ignore_attribute_with_different_field_type(self): 137 | """[CollectionTestCase] - Ignore attributes with different field type""" 138 | class CollectionTest(Collection): 139 | string_attr = DateTimeField() 140 | 141 | object_dict = { 142 | 
'string_attr': 'duvido' 143 | } 144 | 145 | object_instance = CollectionTest.create(object_dict) 146 | object_instance.string_attr.should.be.none 147 | 148 | def test_can_set_manager_object_in_collection(self): 149 | """[CollectionTestCase] - Can set manager object in collection""" 150 | class CollectionTest(Collection): 151 | should_be_value = StringField() 152 | 153 | CollectionTest.objects.should.be.a('mongotor.orm.manager.Manager') 154 | 155 | def test_can_be_load_lazy_class(self): 156 | """[CollectionTestCase] - Can be load lazy collection""" 157 | class CollectionTest(Collection): 158 | pass 159 | 160 | issubclass(Collection("CollectionTest"), CollectionTest).should.be.ok 161 | 162 | def test_can_be_load_child_lazy_class(self): 163 | """[CollectionTestCase] - Can be load lazy child collection""" 164 | class CollectionTest(Collection): 165 | pass 166 | 167 | class ChildCollectionTest(CollectionTest): 168 | pass 169 | 170 | issubclass(Collection("ChildCollectionTest"),\ 171 | ChildCollectionTest).should.be.ok 172 | 173 | def test_raises_erro_when_use_collection_with_not_initialized_database(self): 174 | """[CollectionTestCase] - Raises DatabaseError when use collection with a not initialized database""" 175 | 176 | class CollectionTest(Collection): 177 | __collection__ = 'collection_test' 178 | 179 | Database.disconnect() 180 | CollectionTest().save.when.called_with(callback=None) \ 181 | .throw(DatabaseError, 'you must be initialize database before perform this action') 182 | 183 | Database.init(["localhost:27027", "localhost:27028"], dbname='test') 184 | 185 | def test_update_tracks_changed_attrs(self): 186 | """[CollectionTestCase] - Update a document and track dirty fields""" 187 | class CollectionTest(Collection): 188 | __collection__ = "collection_test" 189 | _id = ObjectIdField() 190 | string_attr = StringField() 191 | 192 | doc_test = CollectionTest() 193 | doc_test._id = ObjectId() 194 | doc_test.string_attr = "should be string value" 195 | 196 | 
doc_test.save(callback=self.stop) 197 | self.wait() 198 | 199 | doc_test.string_attr = "should be new string value" 200 | "string_attr".should.be.within(doc_test.dirty_fields) 201 | "_id".shouldnot.be.within(doc_test.dirty_fields) 202 | 203 | def test_update_tracks_changed_object_attrs(self): 204 | """[CollectionTestCase] - Update a document and track dirty object fields""" 205 | class CollectionTest(Collection): 206 | __collection__ = "collection_test" 207 | _id = ObjectIdField() 208 | string_attr = StringField() 209 | object_field = ObjectField() 210 | 211 | doc_test = CollectionTest() 212 | doc_test._id = ObjectId() 213 | doc_test.string_attr = "should be string value" 214 | 215 | doc_test.save(callback=self.stop) 216 | self.wait() 217 | 218 | doc_test.object_field = {'name': 'should be a new'} 219 | "object_field".should.be.within(doc_test.dirty_fields) 220 | "_id".shouldnot.be.within(doc_test.dirty_fields) 221 | 222 | def test_load_obj_does_not_set_dirty_keys(self): 223 | """[CollectionTestCase] - Check if freshly loaded document has no dirty fields""" 224 | class CollectionTest(Collection): 225 | __collection__ = "collection_test" 226 | _id = ObjectIdField() 227 | string_attr = StringField() 228 | 229 | doc_test = CollectionTest() 230 | doc_test._id = ObjectId() 231 | doc_test.string_attr = "should be string value" 232 | 233 | doc_test.save(callback=self.stop) 234 | self.wait() 235 | CollectionTest.objects.find_one(query=doc_test._id, callback=self.stop) 236 | db_doc_test = self.wait() 237 | db_doc_test.dirty_fields.should.be.empty 238 | 239 | def test_force_update(self): 240 | class CollectionTest(Collection): 241 | __collection__ = "collection_test" 242 | _id = ObjectIdField() 243 | string_attr = StringField() 244 | 245 | doc_test = CollectionTest() 246 | doc_test._id = ObjectId() 247 | doc_test.string_attr = "should be string value" 248 | 249 | doc_test.save(callback=self.stop) 250 | self.wait() 251 | 252 | doc_test.string_attr = "changed" 253 | 254 | 
doc_test.update(callback=self.stop, force=True) 255 | self.wait() 256 | 257 | CollectionTest.objects.find_one(query=doc_test._id, callback=self.stop) 258 | db_doc_test = self.wait() 259 | 260 | db_doc_test.string_attr.should.be.equal("changed") 261 | 262 | def test_empty_callback(self): 263 | class CollectionTest(Collection): 264 | __collection__ = "collection_test" 265 | _id = ObjectIdField() 266 | string_attr = StringField() 267 | 268 | doc_test = CollectionTest() 269 | doc_test._id = ObjectId() 270 | doc_test.string_attr = "should be string value" 271 | 272 | doc_test.save(callback=self.stop) 273 | db_doc_test = self.wait() 274 | 275 | doc_test.update(callback=self.stop) 276 | db_doc_test = self.wait() 277 | 278 | db_doc_test.should.be(tuple()) 279 | 280 | def test_get_fields_from_base_classes(self): 281 | class CollectionTest(Collection): 282 | __collection__ = "collection_test" 283 | _id = ObjectIdField(default=ObjectId()) 284 | base_url_field = UrlField(default="https://www.test.com") 285 | base_decimal_field = DecimalField(default=2.1) 286 | base_md5_field = Md5Field(default=hashlib.md5("test").hexdigest()) 287 | 288 | class ChildCollectionTest(CollectionTest): 289 | child_uuid_field = UuidField(default=uuid.uuid4()) 290 | child_email_field = EmailField(default="test@test.com") 291 | child_sha1_field = Sha1Field(default=hashlib.sha1("test").hexdigest()) 292 | 293 | class SecondChildCollectionTest(ChildCollectionTest): 294 | second_child_long_field = LongField(default=1000) 295 | 296 | test_instance = SecondChildCollectionTest() 297 | test_dict = test_instance.as_dict() 298 | test_dict.should.be.length_of(8) 299 | -------------------------------------------------------------------------------- /tests/orm/test_manager.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from tornado.ioloop import IOLoop 3 | from tornado import testing 4 | from bson import ObjectId 5 | from mongotor.orm.field import 
(ObjectIdField, StringField) 6 | from mongotor.orm.collection import Collection 7 | from mongotor.database import Database 8 | from tests.util import unittest 9 | 10 | 11 | class CollectionTest(Collection): 12 | __collection__ = "collection_test" 13 | 14 | _id = ObjectIdField() 15 | string_attr = StringField() 16 | 17 | 18 | class ManagerTestCase(testing.AsyncTestCase, unittest.TestCase): 19 | 20 | def setUp(self): 21 | super(ManagerTestCase, self).setUp() 22 | Database.init(["localhost:27027", "localhost:27028"], dbname='mongotor_test') 23 | 24 | def tearDown(self): 25 | super(ManagerTestCase, self).tearDown() 26 | CollectionTest.objects.truncate(callback=self.stop) 27 | self.wait() 28 | Database.disconnect() 29 | 30 | def get_new_ioloop(self): 31 | return IOLoop.instance() 32 | 33 | def test_find_one(self): 34 | """[ManagerTestCase] - Find one""" 35 | collection_test = CollectionTest() 36 | collection_test._id = ObjectId() 37 | collection_test.string_attr = "string value" 38 | collection_test.save(callback=self.stop) 39 | self.wait() 40 | 41 | other_collection_test = CollectionTest() 42 | other_collection_test._id = ObjectId() 43 | other_collection_test.string_attr = "string value" 44 | other_collection_test.save(callback=self.stop) 45 | self.wait() 46 | 47 | CollectionTest.objects.find_one({'string_attr': "string value"}, 48 | callback=self.stop) 49 | collections_found = self.wait() 50 | 51 | collections_found._id.should.be.within([collection_test._id, 52 | other_collection_test._id]) 53 | 54 | def test_find_one_not_found(self): 55 | """[ManagerTestCase] - Find one when not found""" 56 | CollectionTest.objects.find_one({'string_attr': "string value"}, 57 | callback=self.stop) 58 | collections_found = self.wait() 59 | 60 | collections_found.should.be.none 61 | 62 | def test_find(self): 63 | """[ManagerTestCase] - Find documents""" 64 | collection_test = CollectionTest() 65 | collection_test._id = ObjectId() 66 | collection_test.string_attr = "string value" 67 | 
collection_test.save(callback=self.stop) 68 | self.wait() 69 | 70 | other_collection_test = CollectionTest() 71 | other_collection_test._id = ObjectId() 72 | other_collection_test.string_attr = "other string value" 73 | other_collection_test.save(callback=self.stop) 74 | self.wait() 75 | 76 | CollectionTest.objects.find({'string_attr': "string value"}, 77 | callback=self.stop) 78 | collections_found = self.wait() 79 | 80 | collections_found.should.have.length_of(1) 81 | collections_found[0]._id.should.be.equal(collection_test._id) 82 | 83 | def test_find_not_found(self): 84 | """[ManagerTestCase] - Find documents when not found""" 85 | CollectionTest.objects.find({'string_attr': "string value diff"}, 86 | callback=self.stop) 87 | collections_found = self.wait() 88 | 89 | collections_found.should.have.length_of(0) 90 | 91 | def test_count(self): 92 | """[ManagerTestCase] - Count document in collection""" 93 | collection_test = CollectionTest() 94 | collection_test._id = ObjectId() 95 | collection_test.string_attr = "string value" 96 | collection_test.save(callback=self.stop) 97 | self.wait() 98 | 99 | CollectionTest.objects.count(callback=self.stop) 100 | count = self.wait() 101 | 102 | count.should.be.equal(1) 103 | 104 | def test_count_not_found(self): 105 | """[ManagerTestCase] - Count document when not found""" 106 | CollectionTest.objects.count(callback=self.stop) 107 | count = self.wait() 108 | 109 | count.should.be.equal(0) 110 | 111 | def test_find_distinct_values_with_distinct_command(self): 112 | """[ManagerTestCase] - Find distinct values with distinct command""" 113 | collection_test = CollectionTest() 114 | collection_test._id = ObjectId() 115 | collection_test.string_attr = "Value A" 116 | collection_test.save(callback=self.stop) 117 | self.wait() 118 | 119 | collection_test = CollectionTest() 120 | collection_test._id = ObjectId() 121 | collection_test.string_attr = "Value B" 122 | collection_test.save(callback=self.stop) 123 | self.wait() 124 | 125 | 
collection_test = CollectionTest() 126 | collection_test._id = ObjectId() 127 | collection_test.string_attr = "Value A" 128 | collection_test.save(callback=self.stop) 129 | self.wait() 130 | 131 | collection_test = CollectionTest() 132 | collection_test._id = ObjectId() 133 | collection_test.string_attr = "Value C" 134 | collection_test.save(callback=self.stop) 135 | self.wait() 136 | 137 | CollectionTest.objects.distinct(key='string_attr', callback=self.stop) 138 | distinct_values = self.wait() 139 | 140 | self.assertEqual(3, len(distinct_values)) 141 | self.assertIn("Value A", distinct_values) 142 | self.assertIn("Value B", distinct_values) 143 | self.assertIn("Value C", distinct_values) 144 | 145 | def test_find_distinct_values_with_distinct_command_excluding_some_values(self): 146 | """[ManagerTestCase] - Find distinct values with distinct command excluding some value""" 147 | collection_test = CollectionTest() 148 | collection_test._id = ObjectId() 149 | collection_test.string_attr = "Value A" 150 | collection_test.save(callback=self.stop) 151 | self.wait() 152 | 153 | collection_test = CollectionTest() 154 | collection_test._id = ObjectId() 155 | collection_test.string_attr = "Value B" 156 | collection_test.save(callback=self.stop) 157 | self.wait() 158 | 159 | collection_test = CollectionTest() 160 | collection_test._id = ObjectId() 161 | collection_test.string_attr = "Value A" 162 | collection_test.save(callback=self.stop) 163 | self.wait() 164 | 165 | collection_test = CollectionTest() 166 | collection_test._id = ObjectId() 167 | collection_test.string_attr = "Value C" 168 | collection_test.save(callback=self.stop) 169 | self.wait() 170 | 171 | query = { 172 | 'string_attr': { 173 | '$ne': 'Value A' 174 | } 175 | } 176 | CollectionTest.objects.distinct(key='string_attr', query=query, 177 | callback=self.stop) 178 | distinct_values = self.wait() 179 | 180 | self.assertEqual(2, len(distinct_values)) 181 | self.assertIn("Value B", distinct_values) 182 | 
self.assertIn("Value C", distinct_values) 183 | 184 | def test_execute_simple_mapreduce_return_results_inline(self): 185 | """[ManagerTestCase] - Find exceute simple mapreduce return inline results""" 186 | collections = [ 187 | CollectionTest.create({'_id': ObjectId(), 'string_attr': 'Value A'}), 188 | CollectionTest.create({'_id': ObjectId(), 'string_attr': 'Value B'}), 189 | CollectionTest.create({'_id': ObjectId(), 'string_attr': 'Value A'}), 190 | CollectionTest.create({'_id': ObjectId(), 'string_attr': 'Value C'}), 191 | CollectionTest.create({'_id': ObjectId(), 'string_attr': 'Value D'}), 192 | CollectionTest.create({'_id': ObjectId(), 'string_attr': 'Value E'}), 193 | ] 194 | for coll in collections: 195 | coll.save(callback=self.stop) 196 | self.wait() 197 | 198 | query = { 199 | 'string_attr': {'$ne': 'Value E'}, 200 | } 201 | 202 | map_ = """ 203 | function m() { 204 | emit(this.string_attr, 1); 205 | } 206 | """ 207 | 208 | reduce_ = """ 209 | function r(key, values) { 210 | var total = 0; 211 | for (var i = 0; i < values.length; i++) { 212 | total += values[i]; 213 | } 214 | return total; 215 | } 216 | """ 217 | 218 | CollectionTest.objects.map_reduce(map_, reduce_, query=query, 219 | callback=self.stop) 220 | results = self.wait() 221 | 222 | self.assertEquals(4, len(results)) 223 | self.assertEquals({u'_id': u'Value A', u'value': 2.0}, results[0]) 224 | self.assertEquals({u'_id': u'Value B', u'value': 1.0}, results[1]) 225 | self.assertEquals({u'_id': u'Value C', u'value': 1.0}, results[2]) 226 | self.assertEquals({u'_id': u'Value D', u'value': 1.0}, results[3]) -------------------------------------------------------------------------------- /tests/orm/test_signal.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | from tornado import testing 3 | from tornado.ioloop import IOLoop 4 | from mongotor.orm import signal 5 | from mongotor.database import Database 6 | from 
mongotor.orm.collection import Collection 7 | from mongotor.orm.field import StringField, ObjectId, ObjectIdField 8 | from tests.util import unittest 9 | 10 | 11 | class SignalTestCase(testing.AsyncTestCase, unittest.TestCase): 12 | 13 | def get_new_ioloop(self): 14 | return IOLoop.instance() 15 | 16 | def setUp(self): 17 | super(SignalTestCase, self).setUp() 18 | SignalTestCase.signal_triggered = False 19 | Database.init(["localhost:27027", "localhost:27028"], dbname='mongotor_test') 20 | 21 | def tearDown(self): 22 | super(SignalTestCase, self).tearDown() 23 | 24 | class CollectionTest(Collection): 25 | __collection__ = "collection_test" 26 | 27 | CollectionTest.objects.truncate(callback=self.stop) 28 | self.wait() 29 | Database.disconnect() 30 | 31 | def test_save_sends_pre_save_signal_correctly_and_I_can_handle_the_collection_instance(self): 32 | 33 | class CollectionTest(Collection): 34 | __collection__ = "collection_test" 35 | 36 | _id = ObjectIdField() 37 | string_attr = StringField() 38 | 39 | @signal.receiver(signal.pre_save, CollectionTest) 40 | def collection_pre_save_handler(sender, instance): 41 | instance.string_attr += " updated" 42 | SignalTestCase.signal_triggered = True 43 | 44 | collection_test = CollectionTest() 45 | collection_test._id = ObjectId() 46 | collection_test.string_attr = "should be string value" 47 | collection_test.save(callback=self.stop) 48 | 49 | self.wait() 50 | self.assertTrue(SignalTestCase.signal_triggered) 51 | 52 | CollectionTest.objects.find_one(collection_test._id, callback=self.stop) 53 | collection_found = self.wait() 54 | self.assertEquals("should be string value updated", collection_found.string_attr) 55 | 56 | def test_save_sends_post_save_signal_correctly_and_I_can_handle_the_collection_instance(self): 57 | 58 | class CollectionTest(Collection): 59 | __collection__ = "collection_test" 60 | 61 | _id = ObjectIdField() 62 | string_attr = StringField() 63 | 64 | @signal.receiver(signal.post_save, CollectionTest) 65 | 
def collection_post_save_handler(sender, instance): 66 | CollectionTest.objects.find_one(collection_test._id, callback=self.stop) 67 | collection_found = self.wait() 68 | self.assertEquals(instance.string_attr, collection_found.string_attr) 69 | SignalTestCase.signal_triggered = True 70 | 71 | collection_test = CollectionTest() 72 | collection_test._id = ObjectId() 73 | collection_test.string_attr = "should be string value" 74 | collection_test.save(callback=self.stop) 75 | 76 | self.wait() 77 | self.assertTrue(SignalTestCase.signal_triggered) 78 | 79 | def test_remove_sends_pre_remove_signal_correctly_and_I_can_handle_the_collection_instance_before_it_dies(self): 80 | 81 | class CollectionTest(Collection): 82 | __collection__ = "collection_test" 83 | 84 | _id = ObjectIdField() 85 | string_attr = StringField() 86 | 87 | collection_test = CollectionTest() 88 | collection_test._id = ObjectId() 89 | collection_test.string_attr = "should be string value" 90 | collection_test.save() 91 | 92 | @signal.receiver(signal.pre_remove, CollectionTest) 93 | def collection_pre_remove_handler(sender, instance): 94 | SignalTestCase.instance_copy = deepcopy(instance) 95 | SignalTestCase.signal_triggered = True 96 | 97 | collection_test.remove(callback=self.stop) 98 | 99 | self.wait() 100 | self.assertTrue(SignalTestCase.signal_triggered) 101 | self.assertEquals("should be string value", SignalTestCase.instance_copy.string_attr) 102 | 103 | def test_remove_sends_post_remove_signal_correctly_and_instance_does_not_exists_anymore(self): 104 | 105 | class CollectionTest(Collection): 106 | __collection__ = "collection_test" 107 | 108 | _id = ObjectIdField() 109 | string_attr = StringField() 110 | 111 | collection_test = CollectionTest() 112 | collection_test._id = ObjectId() 113 | collection_test.string_attr = "should be string value" 114 | collection_test.save(callback=self.stop) 115 | self.wait() 116 | 117 | @signal.receiver(signal.post_remove, CollectionTest) 118 | def 
collection_post_remove_handler(sender, instance): 119 | CollectionTest.objects.find_one(collection_test._id, callback=self.stop) 120 | collection_found = self.wait() 121 | self.assertIsNone(collection_found) 122 | SignalTestCase.signal_triggered = True 123 | 124 | collection_test.remove(callback=self.stop) 125 | 126 | self.wait() 127 | self.assertTrue(SignalTestCase.signal_triggered) 128 | 129 | def test_update_sends_pre_update_signal_correctly(self): 130 | 131 | class CollectionTest(Collection): 132 | __collection__ = "collection_test" 133 | 134 | _id = ObjectIdField() 135 | string_attr = StringField() 136 | 137 | collection_test = CollectionTest() 138 | collection_test._id = ObjectId() 139 | collection_test.string_attr = "should be string value" 140 | collection_test.save(callback=self.stop) 141 | 142 | self.wait() 143 | 144 | @signal.receiver(signal.pre_update, CollectionTest) 145 | def collection_pre_update_handler(sender, instance): 146 | instance.string_attr += ' updated' 147 | SignalTestCase.signal_triggered = True 148 | 149 | collection_test.string_attr = "should be another string value" 150 | collection_test.update(callback=self.stop) 151 | self.wait() 152 | 153 | CollectionTest.objects.find_one(collection_test._id, callback=self.stop) 154 | 155 | collection_found = self.wait() 156 | self.assertEquals("should be another string value updated", collection_found.string_attr) 157 | self.assertTrue(SignalTestCase.signal_triggered) 158 | 159 | def test_update_sends_post_update_signal_correctly(self): 160 | 161 | class CollectionTest(Collection): 162 | __collection__ = "collection_test" 163 | 164 | _id = ObjectIdField() 165 | string_attr = StringField() 166 | 167 | collection_test = CollectionTest() 168 | collection_test._id = ObjectId() 169 | collection_test.string_attr = "should be string value" 170 | collection_test.save(callback=self.stop) 171 | self.wait() 172 | 173 | @signal.receiver(signal.post_update, CollectionTest) 174 | def 
collection_post_update_handler(sender, instance): 175 | self.assertEquals(collection_test.string_attr, instance.string_attr) 176 | SignalTestCase.signal_triggered = True 177 | 178 | collection_test.string_attr = "should be another string value" 179 | collection_test.update(callback=self.stop) 180 | self.wait() 181 | 182 | self.assertEquals("should be another string value", collection_test.string_attr) 183 | self.assertTrue(SignalTestCase.signal_triggered) 184 | -------------------------------------------------------------------------------- /tests/test_client.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from tornado.ioloop import IOLoop 3 | from tornado import testing 4 | from mongotor.database import Database 5 | from bson import ObjectId 6 | from datetime import datetime 7 | import sure 8 | 9 | 10 | class ClientTestCase(testing.AsyncTestCase): 11 | 12 | def get_new_ioloop(self): 13 | return IOLoop.instance() 14 | 15 | def tearDown(self): 16 | super(ClientTestCase, self).tearDown() 17 | Database().collection_test.remove({}) 18 | Database._instance = None 19 | 20 | def test_insert_a_single_document(self): 21 | """[ClientTestCase] - insert a single document with client""" 22 | 23 | db = Database.init(["localhost:27027", "localhost:27028"], 24 | dbname='test') 25 | 26 | document = {'_id': ObjectId(), 'name': 'shouldbename'} 27 | 28 | db.collection_test.insert(document, callback=self.stop) 29 | response, error = self.wait() 30 | 31 | response['ok'].should.be.equal(1.0) 32 | error.should.be.none 33 | 34 | def test_insert_a_document_list(self): 35 | """[ClientTestCase] - insert a list of document with client""" 36 | 37 | db = Database.init(["localhost:27027", "localhost:27028"], 38 | dbname='test') 39 | 40 | documents = [{'_id': ObjectId(), 'name': 'shouldbename'}, 41 | {'_id': ObjectId(), 'name': 'shouldbename2'}] 42 | 43 | db.collection_test.insert(documents, callback=self.stop) 44 | response, error = 
self.wait() 45 | 46 | response['ok'].should.be.equal(1.0) 47 | error.should.be.none 48 | 49 | def test_remove_document_by_id(self): 50 | """[ClientTestCase] - remove a document by id""" 51 | db = Database.init(["localhost:27027", "localhost:27028"], 52 | dbname='test') 53 | 54 | documents = [{'_id': ObjectId(), 'name': 'shouldbename'}, 55 | {'_id': ObjectId(), 'name': 'shouldbename2'}] 56 | 57 | db.collection_test.insert(documents, callback=self.stop) 58 | response, error = self.wait() 59 | 60 | db.collection_test.remove(documents[0]['_id'], callback=self.stop) 61 | response, error = self.wait() 62 | 63 | response['ok'].should.be.equal(1.0) 64 | error.should.be.none 65 | 66 | def test_remove_document_by_spec(self): 67 | """[ClientTestCase] - remove a document by spec""" 68 | db = Database.init(["localhost:27027", "localhost:27028"], 69 | dbname='test') 70 | 71 | documents = [{'_id': ObjectId(), 'name': 'shouldbename'}, 72 | {'_id': ObjectId(), 'name': 'shouldbename2'}] 73 | 74 | db.collection_test.insert(documents, callback=self.stop) 75 | response, error = self.wait() 76 | 77 | db.collection_test.remove({'name': 'shouldbename'}, callback=self.stop) 78 | response, error = self.wait() 79 | 80 | response['ok'].should.be.equal(1.0) 81 | error.should.be.none 82 | 83 | def test_update_document(self): 84 | """[ClientTestCase] - update a document""" 85 | db = Database.init(["localhost:27027", "localhost:27028"], 86 | dbname='test') 87 | 88 | documents = [{'_id': ObjectId(), 'name': 'shouldbename'}, 89 | {'_id': ObjectId(), 'name': 'shouldbename2'}] 90 | 91 | db.collection_test.insert(documents, callback=self.stop) 92 | response, error = self.wait() 93 | 94 | db.collection_test.update(documents[0], {'$set': {'name': 95 | 'should be a new name'}}, callback=self.stop) 96 | response, error = self.wait() 97 | 98 | response['ok'].should.be.equal(1.0) 99 | error.should.be.none 100 | 101 | def test_find_document(self): 102 | """[ClientTestCase] - find a document""" 103 | db = 
Database.init(["localhost:27027", "localhost:27028"], 104 | dbname='test') 105 | 106 | documents = [{'_id': ObjectId(), 'someflag': 1}, 107 | {'_id': ObjectId(), 'someflag': 1}, 108 | {'_id': ObjectId(), 'someflag': 2}] 109 | 110 | db.collection_test.insert(documents, callback=self.stop) 111 | response, error = self.wait() 112 | 113 | db.collection_test.find({'someflag': 1}, callback=self.stop) 114 | response, error = self.wait() 115 | 116 | response[0]['_id'].should.be(documents[0]['_id']) 117 | response[1]['_id'].should.be(documents[1]['_id']) 118 | error.should.be.none 119 | 120 | def test_find_one_document(self): 121 | """[ClientTestCase] - find one document""" 122 | db = Database.init(["localhost:27027", "localhost:27028"], 123 | dbname='test') 124 | 125 | documents = [{'_id': ObjectId(), 'param': 'shouldbeparam'}, 126 | {'_id': ObjectId(), 'param': 'shouldbeparam1'}, 127 | {'_id': ObjectId(), 'param': 'shouldbeparam2'}] 128 | 129 | db.collection_test.insert(documents, callback=self.stop) 130 | response, error = self.wait() 131 | 132 | db.collection_test.find_one({'param': 'shouldbeparam1'}, 133 | callback=self.stop) 134 | response, error = self.wait() 135 | 136 | response['_id'].should.be(documents[1]['_id']) 137 | error.should.be.none 138 | 139 | def test_find_one_document_by_id(self): 140 | """[ClientTestCase] - find one document by id""" 141 | db = Database.init(["localhost:27027", "localhost:27028"], 142 | dbname='test') 143 | 144 | documents = [{'_id': ObjectId(), 'param': 'shouldbeparam'}, 145 | {'_id': ObjectId(), 'param': 'shouldbeparam1'}, 146 | {'_id': ObjectId(), 'param': 'shouldbeparam2'}] 147 | 148 | db.collection_test.insert(documents, callback=self.stop) 149 | response, error = self.wait() 150 | 151 | db.collection_test.find_one(documents[2]['_id'], 152 | callback=self.stop) 153 | response, error = self.wait() 154 | 155 | response['_id'].should.be(documents[2]['_id']) 156 | error.should.be.none 157 | 158 | def 
test_count_documents_in_find(self): 159 | """[ClientTestCase] - counting documents in query""" 160 | db = Database.init(["localhost:27027", "localhost:27028"], 161 | dbname='test') 162 | 163 | documents = [{'_id': ObjectId(), 'param': 'shouldbeparam1'}, 164 | {'_id': ObjectId(), 'param': 'shouldbeparam1'}, 165 | {'_id': ObjectId(), 'param': 'shouldbeparam2'}] 166 | 167 | db.collection_test.insert(documents, callback=self.stop) 168 | response, error = self.wait() 169 | 170 | db.collection_test.find({"param": 'shouldbeparam1'}).count(callback=self.stop) 171 | total = self.wait() 172 | 173 | total.should.be.equal(2) 174 | 175 | def test_count_all_documents(self): 176 | """[ClientTestCase] - counting among all documents""" 177 | db = Database.init(["localhost:27027", "localhost:27028"], 178 | dbname='test') 179 | 180 | documents = [{'_id': ObjectId(), 'param': 'shouldbeparam1'}, 181 | {'_id': ObjectId(), 'param': 'shouldbeparam1'}, 182 | {'_id': ObjectId(), 'param': 'shouldbeparam2'}] 183 | 184 | db.collection_test.insert(documents, callback=self.stop) 185 | response, error = self.wait() 186 | 187 | db.collection_test.count(callback=self.stop) 188 | total = self.wait() 189 | 190 | total.should.be.equal(3) 191 | 192 | def test_distinct_documents_in_find(self): 193 | """[ClientTestCase] - distinct documents in query""" 194 | db = Database.init(["localhost:27027", "localhost:27028"], 195 | dbname='test') 196 | 197 | documents = [{'_id': ObjectId(), 'param': 'shouldbeparam1', 'uuid': 100}, 198 | {'_id': ObjectId(), 'param': 'shouldbeparam1', 'uuid': 100}, 199 | {'_id': ObjectId(), 'param': 'shouldbeparam2', 'uuid': 200}] 200 | 201 | db.collection_test.insert(documents, callback=self.stop) 202 | response, error = self.wait() 203 | 204 | db.collection_test.find({"param": 'shouldbeparam1'}).distinct('uuid', callback=self.stop) 205 | distincts = self.wait() 206 | 207 | distincts.should.have.length_of(1) 208 | distincts[0].should.be.equal(100) 209 | 210 | def 
test_distinct_all_documents(self): 211 | """[ClientTestCase] - distinct among all documents""" 212 | db = Database.init(["localhost:27027", "localhost:27028"], 213 | dbname='test') 214 | 215 | documents = [{'_id': ObjectId(), 'param': 'shouldbeparam1', 'uuid': 100}, 216 | {'_id': ObjectId(), 'param': 'shouldbeparam1', 'uuid': 100}, 217 | {'_id': ObjectId(), 'param': 'shouldbeparam2', 'uuid': 200}] 218 | 219 | db.collection_test.insert(documents, callback=self.stop) 220 | response, error = self.wait() 221 | 222 | db.collection_test.distinct('uuid', callback=self.stop) 223 | distincts = self.wait() 224 | 225 | distincts.should.have.length_of(2) 226 | distincts[0].should.be.equal(100) 227 | distincts[1].should.be.equal(200) 228 | 229 | def test_aggregate_collection(self): 230 | """[ClientTestCase] - aggregate command""" 231 | db = Database.init(["localhost:27027", "localhost:27028"], 232 | dbname='test') 233 | 234 | documents = [{ 235 | "title": "this is my title", 236 | "author": "bob", 237 | "posted": datetime.now(), 238 | "pageViews": 5, 239 | "tags": ["good", "fun"], 240 | }, { 241 | "title": "this is my title", 242 | "author": "joe", 243 | "posted": datetime.now(), 244 | "pageViews": 5, 245 | "tags": ["good"], 246 | }] 247 | 248 | db.articles.insert(documents, callback=self.stop) 249 | response, error = self.wait() 250 | 251 | try: 252 | pipeline = { 253 | "$project": { 254 | "author": 1, 255 | "tags": 1, 256 | } 257 | }, { 258 | "$unwind": "$tags" 259 | }, { 260 | "$group": { 261 | "_id": {"tags": "$tags"}, 262 | "authors": {"$addToSet": "$author"} 263 | } 264 | } 265 | db.articles.aggregate(pipeline, callback=self.stop) 266 | 267 | response = self.wait() 268 | 269 | response['result'][0]['_id'].should.be.equal({u'tags': u'fun'}) 270 | response['result'][0]['authors'].should.be.equal([u'bob']) 271 | 272 | response['result'][1]['_id'].should.be.equal({u'tags': u'good'}) 273 | response['result'][1]['authors'].should.be.equal([u'joe', u'bob']) 274 | finally: 275 | 
db.articles.remove({}, callback=self.stop) 276 | self.wait() 277 | 278 | def test_group(self): 279 | """[ClientTestCase] - group command""" 280 | db = Database.init(["localhost:27027", "localhost:27028"], 281 | dbname='test') 282 | group = { 283 | 'key': None, 284 | 'condition': {'author': 'joe'}, 285 | 'initial': {'csum': 0}, 286 | 'reduce': 'function(obj,prev){prev.csum+=obj.pageViews;}' 287 | } 288 | 289 | documents = [{ 290 | "title": "this is my title", 291 | "author": "bob", 292 | "posted": datetime.now(), 293 | "pageViews": 5, 294 | "tags": ["good", "fun"], 295 | }, { 296 | "title": "this is my title", 297 | "author": "joe", 298 | "posted": datetime.now(), 299 | "pageViews": 6, 300 | "tags": ["good"], 301 | }, { 302 | "title": "this is my title", 303 | "author": "joe", 304 | "posted": datetime.now(), 305 | "pageViews": 10, 306 | "tags": ["good"], 307 | }] 308 | db.articles.insert(documents, callback=self.stop) 309 | response, error = self.wait() 310 | 311 | try: 312 | db.articles.group(callback=self.stop, **group) 313 | result = self.wait() 314 | result['retval'][0]['csum'].should.be.equal(16) 315 | finally: 316 | db.articles.remove({}, callback=self.stop) 317 | self.wait() 318 | 319 | def test_insert_and_find_with_elemmatch(self): 320 | documents = [{ 321 | '_id': ObjectId(), 322 | 'name': 'should be name 1', 323 | 'comment': [{'author': 'joe'}, {'author': 'ana'}] 324 | }, { 325 | '_id': ObjectId(), 326 | 'name': 'should be name 2', 327 | 'comment': [{'author': 'ana'}] 328 | }] 329 | 330 | db = Database.init(["localhost:27027", "localhost:27028"], 331 | dbname='test') 332 | db.articles.insert(documents, callback=self.stop) 333 | self.wait() 334 | 335 | db.articles.find({'comment.author': 'joe'}, ('comment.$.author', ), limit=-1, callback=self.stop) 336 | 337 | result, _ = self.wait() 338 | 339 | keys = result.keys() 340 | keys.sort() 341 | 342 | keys.should.be.equal(['_id', 'comment']) 343 | 344 | str(result['_id']).should.be.equal(str(documents[0]['_id'])) 
345 | result['comment'].should.have.length_of(1) 346 | result['comment'][0]['author'].should.be.equal('joe') 347 | _.should.be.none 348 | -------------------------------------------------------------------------------- /tests/test_connection.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import with_statement 3 | from tornado.ioloop import IOLoop 4 | from tornado import testing 5 | from mongotor.connection import Connection 6 | from mongotor.errors import InterfaceError, DatabaseError, IntegrityError 7 | from bson import ObjectId 8 | from mongotor import message 9 | from mongotor import helpers 10 | 11 | import sure 12 | import fudge 13 | 14 | 15 | class ConnectionTestCase(testing.AsyncTestCase): 16 | 17 | def get_new_ioloop(self): 18 | return IOLoop.instance() 19 | 20 | def setUp(self): 21 | super(ConnectionTestCase, self).setUp() 22 | self.conn = Connection(host="localhost", port=27027) 23 | 24 | def tearDown(self): 25 | super(ConnectionTestCase, self).tearDown() 26 | self.conn.close() 27 | 28 | def test_not_connect_to_mongo_raises_error(self): 29 | """[ConnectionTestCase] - Raises error when can't connect to mongo""" 30 | 31 | Connection.when.called_with(host="localhost", port=27000) \ 32 | .should.throw(InterfaceError, "Connection refused") 33 | 34 | def test_connect_to_mongo(self): 35 | """[ConnectionTestCase] - Can stabilish connection to mongo""" 36 | 37 | self.conn._connected.should.be.ok 38 | 39 | def test_send_test_message_to_mongo(self): 40 | """[ConnectionTestCase] - Send message to test driver connection""" 41 | 42 | object_id = ObjectId() 43 | message_test = message.query(0, 'mongotor_test.$cmd', 0, 1, 44 | {'driverOIDTest': object_id}) 45 | 46 | self.conn.send_message_with_response(message_test, callback=self.stop) 47 | response, _ = self.wait() 48 | 49 | response = helpers._unpack_response(response) 50 | result = response['data'][0] 51 | 52 | 
result['oid'].should.be(object_id) 53 | result['ok'].should.be(1.0) 54 | result['str'].should.be(str(object_id)) 55 | 56 | def test_close_connection_to_mongo(self): 57 | """[ConnectionTestCase] - Can close connection to mongo""" 58 | 59 | self.conn.close() 60 | 61 | self.conn._connected.should_not.be.ok 62 | self.conn._stream.closed().should.be.ok 63 | 64 | def test_return_integrity_error_when_mongo_return_err(self): 65 | """[ConnectionTestCase] - Returns IntegrityError when mongo return a message with err""" 66 | 67 | object_id = ObjectId() 68 | message_insert = message.insert('mongotor_test.articles', [{'_id': object_id}], 69 | False, True, {}) 70 | 71 | self.conn.send_message(message_insert, True, callback=self.stop) 72 | self.wait() 73 | 74 | self.conn.send_message(message_insert, True, callback=self.stop) 75 | self.wait.when.called_with().throw(IntegrityError) 76 | 77 | @fudge.patch('mongotor.connection.helpers') 78 | def test_raises_error_when_cant_unpack_response(self, fake_helpers): 79 | """[ConnectionTestCase] - Returns DatabaseError when can't unpack response from mongo""" 80 | 81 | fake_helpers.provides('_unpack_response') \ 82 | .raises(DatabaseError('database error')) 83 | 84 | object_id = ObjectId() 85 | message_test = message.query(0, 'mongotor_test.$cmd', 0, 1, 86 | {'driverOIDTest': object_id}) 87 | 88 | self.conn.send_message(message_test, with_last_error=True, callback=self.stop) 89 | 90 | self.wait.when.called_with().throw(DatabaseError, 'database error') 91 | 92 | def test_reconnect_when_connection_was_lost(self): 93 | """[ConnectionTestCase] - Reconnect to mongo when connection was lost""" 94 | 95 | self.conn.close() 96 | self.conn._callback = self.stop 97 | self.wait() 98 | 99 | self.test_send_test_message_to_mongo() 100 | 101 | def test_raises_interface_error_when_cant_reconnect(self): 102 | """[ConnectionTestCase] - Raises InterfaceError when connection was lost and autoreconnect is False""" 103 | 104 | self.conn = 
Connection(host="localhost", port=27027, autoreconnect=False) 105 | 106 | self.conn.close() 107 | 108 | self.conn.send_message.when.called_with('shouldBeMessage', 109 | callback=None).should.throw(InterfaceError, "connection is closed") 110 | 111 | def test_raises_error_when_stream_reaise_ioerror(self): 112 | """[ConnectionTestCase] - Raises IOError when stream throw error""" 113 | fake_stream = fudge.Fake() 114 | fake_stream.expects('write').raises(IOError()) 115 | 116 | with fudge.patched_context(self.conn, '_stream', fake_stream): 117 | 118 | self.conn.send_message.when.called_with((0, ''), callback=None) \ 119 | .throw(IOError) 120 | -------------------------------------------------------------------------------- /tests/test_cursor.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from tornado.ioloop import IOLoop 3 | from tornado import testing 4 | from bson.objectid import ObjectId 5 | from mongotor import message 6 | from mongotor.cursor import Cursor, DESCENDING, ASCENDING 7 | from mongotor.database import Database 8 | from mongotor.node import ReadPreference 9 | import sure 10 | 11 | 12 | class CursorTestCase(testing.AsyncTestCase): 13 | 14 | def get_new_ioloop(self): 15 | return IOLoop.instance() 16 | 17 | def setUp(self): 18 | super(CursorTestCase, self).setUp() 19 | Database.init(["localhost:27027", "localhost:27028"], dbname='mongotor_test') 20 | 21 | def tearDown(self): 22 | super(CursorTestCase, self).tearDown() 23 | 24 | # delete all documents 25 | message_delete = message.delete('mongotor_test.cursor_test', 26 | {}, True, {}) 27 | 28 | Database().get_node(ReadPreference.PRIMARY, callback=self.stop) 29 | node = self.wait() 30 | node.connection(self.stop) 31 | connection = self.wait() 32 | 33 | connection.send_message(message_delete, with_last_error=True, callback=self.stop) 34 | self.wait() 35 | 36 | Database.disconnect() 37 | 38 | def _insert_document(self, document): 39 | message_insert = 
message.insert('mongotor_test.cursor_test', [document], 40 | True, True, {}) 41 | 42 | Database().get_node(ReadPreference.PRIMARY, callback=self.stop) 43 | node = self.wait() 44 | node.connection(self.stop) 45 | connection = self.wait() 46 | 47 | connection.send_message(message_insert, with_last_error=True, callback=self.stop) 48 | self.wait() 49 | 50 | def test_find_document_whitout_spec(self): 51 | """[CursorTestCase] - Find one document without spec""" 52 | 53 | document = {'_id': ObjectId(), 'name': 'should be name'} 54 | self._insert_document(document) 55 | 56 | cursor = Cursor(database=Database(), collection='cursor_test', limit=-1) 57 | cursor.find(callback=self.stop) 58 | 59 | result, error = self.wait() 60 | 61 | result['_id'].should.be.equal(document['_id']) 62 | result['name'].should.be.equal(document['name']) 63 | error.should.be.none 64 | 65 | def test_find_documents_with_limit(self): 66 | """[CursorTestCase] - Find documents with limit""" 67 | 68 | document1 = {'_id': ObjectId(), 'name': 'should be name 1'} 69 | self._insert_document(document1) 70 | 71 | document2 = {'_id': ObjectId(), 'name': 'should be name 2'} 72 | self._insert_document(document2) 73 | 74 | document3 = {'_id': ObjectId(), 'name': 'should be name 3'} 75 | self._insert_document(document3) 76 | 77 | cursor = Cursor(database=Database(), collection='cursor_test', limit=2) 78 | cursor.find(callback=self.stop) 79 | 80 | result, error = self.wait() 81 | 82 | result.should.have.length_of(2) 83 | str(result[0]['_id']).should.be.equal(str(document1['_id'])) 84 | str(result[1]['_id']).should.be.equal(str(document2['_id'])) 85 | error.should.be.none 86 | 87 | def test_find_documents_with_spec(self): 88 | """[CursorTestCase] - Find documents with spec""" 89 | 90 | document1 = {'_id': ObjectId(), 'name': 'should be name 1', 'flag': 1} 91 | self._insert_document(document1) 92 | 93 | document2 = {'_id': ObjectId(), 'name': 'should be name 2', 'flag': 2} 94 | self._insert_document(document2) 95 | 96 
| document3 = {'_id': ObjectId(), 'name': 'should be name 3', 'flag': 1} 97 | self._insert_document(document3) 98 | 99 | cursor = Cursor(Database(), 'cursor_test', {'flag': 1}, limit=2) 100 | cursor.find(callback=self.stop) 101 | 102 | result, error = self.wait() 103 | 104 | result.should.have.length_of(2) 105 | str(result[0]['_id']).should.be.equal(str(document1['_id'])) 106 | str(result[1]['_id']).should.be.equal(str(document3['_id'])) 107 | error.should.be.none 108 | 109 | def test_find_documents_ordering_descending_by_field(self): 110 | """[CursorTestCase] - Find documents order descending by field""" 111 | 112 | document1 = {'_id': ObjectId(), 'name': 'should be name 1', 'size': 1} 113 | self._insert_document(document1) 114 | 115 | document2 = {'_id': ObjectId(), 'name': 'should be name 2', 'size': 2} 116 | self._insert_document(document2) 117 | 118 | document3 = {'_id': ObjectId(), 'name': 'should be name 3', 'size': 3} 119 | self._insert_document(document3) 120 | 121 | cursor = Cursor(database=Database(), collection='cursor_test', 122 | limit=2, sort={'size': DESCENDING}) 123 | cursor.find(callback=self.stop) 124 | 125 | result, error = self.wait() 126 | 127 | result.should.have.length_of(2) 128 | str(result[0]['_id']).should.be.equal(str(document3['_id'])) 129 | str(result[1]['_id']).should.be.equal(str(document2['_id'])) 130 | error.should.be.none 131 | 132 | def test_find_documents_ordering_ascending_by_field(self): 133 | """[CursorTestCase] - Find documents order ascending by field""" 134 | 135 | document1 = {'_id': ObjectId(), 'name': 'should be name 1', 'size': 1} 136 | self._insert_document(document1) 137 | 138 | document2 = {'_id': ObjectId(), 'name': 'should be name 2', 'size': 2} 139 | self._insert_document(document2) 140 | 141 | document3 = {'_id': ObjectId(), 'name': 'should be name 3', 'size': 3} 142 | self._insert_document(document3) 143 | 144 | cursor = Cursor(database=Database(), collection='cursor_test', 145 | limit=2, sort={'size': 
ASCENDING}) 146 | cursor.find(callback=self.stop) 147 | 148 | result, error = self.wait() 149 | 150 | result.should.have.length_of(2) 151 | str(result[0]['_id']).should.be.equal(str(document1['_id'])) 152 | str(result[1]['_id']).should.be.equal(str(document2['_id'])) 153 | error.should.be.none 154 | 155 | def test_find_document_by_id(self): 156 | """[CursorTestCase] - Find document by id""" 157 | 158 | document1 = {'_id': ObjectId(), 'name': 'should be name 1', 'size': 1} 159 | self._insert_document(document1) 160 | 161 | document2 = {'_id': ObjectId(), 'name': 'should be name 2', 'size': 2} 162 | self._insert_document(document2) 163 | 164 | document3 = {'_id': ObjectId(), 'name': 'should be name 3', 'size': 3} 165 | self._insert_document(document3) 166 | 167 | cursor = Cursor(Database(), 'cursor_test', document2['_id'], limit=-1) 168 | cursor.find(callback=self.stop) 169 | 170 | result, error = self.wait() 171 | 172 | str(result['_id']).should.be.equal(str(document2['_id'])) 173 | error.should.be.none 174 | 175 | def test_find_returning_fields(self): 176 | """[CursorTestCase] - Find and return only selectd fields""" 177 | 178 | document1 = {'_id': ObjectId(), 'name': 'should be name 1', 179 | 'comment': [{'author': 'joe'}, {'author': 'ana'}]} 180 | self._insert_document(document1) 181 | 182 | document2 = {'_id': ObjectId(), 'name': 'should be name 2', 183 | 'comment': [{'author': 'ana'}]} 184 | self._insert_document(document2) 185 | 186 | document3 = {'_id': ObjectId(), 'name': 'should be name 3', 187 | 'comment': [{'author': 'june'}]} 188 | self._insert_document(document3) 189 | 190 | cursor = Cursor(Database(), 'cursor_test', {'comment.author': 'joe'}, 191 | ('comment.$.author', ), limit=-1) 192 | cursor.find(callback=self.stop) 193 | 194 | result, _ = self.wait() 195 | 196 | keys = result.keys() 197 | keys.sort() 198 | 199 | keys.should.be.equal(['_id', 'comment']) 200 | 201 | str(result['_id']).should.be.equal(str(document1['_id'])) 202 | 
result['comment'].should.have.length_of(1) 203 | result['comment'][0]['author'].should.be.equal('joe') 204 | _.should.be.none 205 | -------------------------------------------------------------------------------- /tests/test_database.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from tornado.ioloop import IOLoop 3 | from tornado import testing 4 | from mongotor.database import Database 5 | from mongotor.errors import DatabaseError 6 | from mongotor import message 7 | from mongotor import helpers 8 | from bson.objectid import ObjectId 9 | import sure 10 | import fudge 11 | 12 | 13 | class DatabaseTestCase(testing.AsyncTestCase): 14 | 15 | def get_new_ioloop(self): 16 | return IOLoop.instance() 17 | 18 | def tearDown(self): 19 | super(DatabaseTestCase, self).tearDown() 20 | Database._instance = None 21 | 22 | def test_create_singleton_database_connection_using_connect_method(self): 23 | """[DatabaseTestCase] - Create a singleton database connection using connect method""" 24 | database = Database.connect("localhost:27027", dbname='test') 25 | 26 | database.should.be.equal(Database()) 27 | 28 | def test_create_singleton_database_connection(self): 29 | """[DatabaseTestCase] - Create a singleton database connection""" 30 | database = Database.init("localhost:27027", dbname='test') 31 | 32 | database.should.be.equal(Database()) 33 | 34 | def test_not_raise_when_database_was_initiated(self): 35 | """[DatabaseTestCase] - Not raises ValueError when connect to inititated database""" 36 | 37 | database1 = Database.init("localhost:27027", dbname='test') 38 | database2 = Database.init("localhost:27027", dbname='test') 39 | 40 | database1.should.be.equal(database2) 41 | 42 | def test_send_test_message(self): 43 | """[DatabaseTestCase] - Send a test message to database""" 44 | 45 | Database.init(["localhost:27027", "localhost:27028"], dbname='test') 46 | 47 | object_id = ObjectId() 48 | message_test = message.query(0, 
'mongotor_test.$cmd', 0, 1, 49 | {'driverOIDTest': object_id}) 50 | 51 | Database().send_message(message_test, callback=self.stop) 52 | response, _ = self.wait() 53 | response = helpers._unpack_response(response) 54 | 55 | result = response['data'][0] 56 | 57 | result['oid'].should.be(object_id) 58 | result['ok'].should.be(1.0) 59 | result['str'].should.be(str(object_id)) 60 | 61 | def test_disconnect_database(self): 62 | """[DatabaseTestCase] - Disconnect the database""" 63 | Database.init(["localhost:27027"], dbname='test') 64 | Database.disconnect() 65 | 66 | Database._instance.should.be.none 67 | 68 | def test_raises_error_when_disconnect_a_not_connected_database(self): 69 | """[DatabaseTestCase] - Raises ValueError when disconnect from a not connected database""" 70 | Database.disconnect.when.called_with().throw(ValueError, "Database isn't initialized") 71 | 72 | def test_raises_error_when_could_not_find_node(self): 73 | """[DatabaseTestCase] - Raises DatabaseError when could not find valid nodes""" 74 | 75 | database = Database.init(["localhost:27030"], dbname='test') 76 | 77 | def send_message(): 78 | database.send_message('', callback=self.stop) 79 | self.wait() 80 | 81 | send_message.when.called.throw(DatabaseError, 'could not find an available node') 82 | 83 | def test_run_command(self): 84 | """[DatabaseTestCase] - Run a database command""" 85 | 86 | database = Database.init(["localhost:27027"], dbname='test') 87 | database.command('ismaster', callback=self.stop) 88 | 89 | response, error = self.wait() 90 | response['ok'].should.be.ok 91 | -------------------------------------------------------------------------------- /tests/test_node.py: -------------------------------------------------------------------------------- 1 | # coding:utf-8 2 | import unittest 3 | import sure 4 | from mongotor.node import ReadPreference, Node 5 | 6 | 7 | class ReadPreferenceTestCase(unittest.TestCase): 8 | 9 | def setUp(self): 10 | class Database: 11 | dbname = 'test' 12 | 
13 | self.primary = Node(host='localhost', port=27027, database=Database) 14 | self.secondary1 = Node(host='localhost', port=27028, database=Database) 15 | self.secondary2 = Node(host='localhost', port=27029, database=Database) 16 | 17 | self.primary.available = True 18 | self.primary.is_primary = True 19 | 20 | self.secondary1.available = True 21 | self.secondary1.is_secondary = True 22 | 23 | def test_read_preference_default(self): 24 | """[ReadPreferenceTestCase] - get primary node by default""" 25 | node_found = ReadPreference.select_node([self.secondary1, 26 | self.secondary2, self.primary]) 27 | 28 | node_found.should.be.eql(self.primary) 29 | 30 | def test_read_preference_primary(self): 31 | """[ReadPreferenceTestCase] - get primary node when preference is PRIMARY""" 32 | node_found = ReadPreference.select_node([self.secondary1, 33 | self.secondary2, self.primary], ReadPreference.PRIMARY) 34 | 35 | node_found.should.be.eql(self.primary) 36 | 37 | def test_read_preference_primary_preferred_up(self): 38 | """[ReadPreferenceTestCase] - get primary node when preference is PRIMARY_PREFERRED and primary is up""" 39 | 40 | node_found = ReadPreference.select_node([self.secondary1, 41 | self.secondary2, self.primary], ReadPreference.PRIMARY_PREFERRED) 42 | 43 | node_found.should.be.eql(self.primary) 44 | 45 | def test_read_preference_primary_preferred_down(self): 46 | """[ReadPreferenceTestCase] - get secondary node when preference is PRIMARY_PREFERRED and primary is down""" 47 | 48 | self.primary.available = False 49 | node_found = ReadPreference.select_node([self.secondary1, 50 | self.secondary2, self.primary], ReadPreference.PRIMARY_PREFERRED) 51 | 52 | node_found.should.be.eql(self.secondary1) 53 | 54 | def test_read_preference_secondary(self): 55 | """[ReadPreferenceTestCase] - get secondary node when preference is SECONDARY""" 56 | 57 | node_found = ReadPreference.select_node([self.secondary1, 58 | self.secondary2, self.primary], ReadPreference.SECONDARY) 59 | 
60 | node_found.should.be.eql(self.secondary1) 61 | 62 | def test_read_preference_secondary_preferred(self): 63 | """[ReadPreferenceTestCase] - get secondary node when preference is SECONDARY_PREFERRED""" 64 | 65 | node_found = ReadPreference.select_node([self.secondary1, 66 | self.secondary2, self.primary], ReadPreference.SECONDARY_PREFERRED) 67 | 68 | node_found.should.be.eql(self.secondary1) 69 | 70 | def test_read_preference_secondary_preferred_down(self): 71 | """[ReadPreferenceTestCase] - get primary node when preference is SECONDARY_PREFERRED and secondary is down""" 72 | 73 | self.secondary1.available = False 74 | node_found = ReadPreference.select_node([self.secondary1, 75 | self.secondary2, self.primary], ReadPreference.SECONDARY_PREFERRED) 76 | 77 | node_found.should.be.eql(self.primary) -------------------------------------------------------------------------------- /tests/test_pool.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from tornado.ioloop import IOLoop 3 | from tornado import testing 4 | from bson import ObjectId 5 | from mongotor.pool import ConnectionPool 6 | from mongotor.database import Database 7 | from mongotor.errors import TooManyConnections 8 | from mongotor import message 9 | import sure 10 | 11 | 12 | class ConnectionPoolTestCase(testing.AsyncTestCase): 13 | 14 | def get_new_ioloop(self): 15 | return IOLoop.instance() 16 | 17 | def test_get_connection(self): 18 | """[ConnectionPoolTestCase] - Can get a connection""" 19 | pool = ConnectionPool('localhost', 27027, dbname='test') 20 | pool.connection(self.stop) 21 | conn = self.wait() 22 | 23 | conn.should.be.a('mongotor.connection.Connection') 24 | 25 | def test_wait_for_connection_when_maxconnection_is_reached(self): 26 | """[ConnectionPoolTestCase] - Wait for a connection when maxconnections is reached""" 27 | 28 | pool = ConnectionPool('localhost', 27027, dbname='test', maxconnections=1) 29 | 30 | 
pool.connection(self.stop) 31 | conn1 = self.wait() 32 | 33 | pool.connection(self.stop) 34 | 35 | conn1.close() 36 | conn2 = self.wait() 37 | 38 | conn1.should.be.a('mongotor.connection.Connection') 39 | conn2.should.be.a('mongotor.connection.Connection') 40 | pool._connections.should.be.equal(1) 41 | 42 | def test_raise_too_many_connection_when_maxconnection_is_reached(self): 43 | """[ConnectionPoolTestCase] - Raise TooManyConnections connection when maxconnections is reached""" 44 | 45 | pool = ConnectionPool('localhost', 27027, dbname='test', maxconnections=10) 46 | 47 | connections = [] 48 | for i in xrange(10): 49 | pool.connection(self.stop) 50 | connections.append(self.wait()) 51 | 52 | pool.connection(self.stop) 53 | self.wait.when.called_with().should.throw(TooManyConnections) 54 | 55 | def test_close_connection_stream_should_be_release_from_pool(self): 56 | """[ConnectionPoolTestCase] - Release connection from pool when stream is closed""" 57 | 58 | pool = ConnectionPool('localhost', 27027, dbname='test', maxconnections=10) 59 | 60 | pool.connection(self.stop) 61 | connection = self.wait() 62 | 63 | def release(conn): 64 | conn.should.be.equal(connection) 65 | _release(conn) 66 | self.stop() 67 | 68 | pool._idle_connections.should.have.length_of(9) 69 | 70 | _release = pool.release 71 | pool.release = release 72 | connection._stream.close() 73 | 74 | self.wait() 75 | 76 | pool._connections.should.be.equal(0) 77 | pool._idle_connections.should.have.length_of(10) 78 | 79 | def test_maxusage_in_pool_connections(self): 80 | """[ConnectionPoolTestCase] - test maxusage in connections""" 81 | pool = ConnectionPool('localhost', 27027, dbname='test', maxconnections=1, maxusage=299) 82 | 83 | message_test = message.query(0, 'mongotor_test.$cmd', 0, 1, 84 | {'driverOIDTest': ObjectId()}) 85 | 86 | for i in xrange(300): 87 | pool.connection(self.stop) 88 | connection = self.wait() 89 | 90 | connection.send_message_with_response(message_test, callback=self.stop) 91 | 
self.wait() 92 | 93 | pool.connection(self.stop) 94 | new_connection = self.wait() 95 | 96 | new_connection.usage.should.be.equal(0) 97 | new_connection.should_not.be.equal(connection) 98 | new_connection.send_message_with_response(message_test, callback=self.stop) 99 | 100 | self.wait() 101 | 102 | new_connection.usage.should.be.equal(1) 103 | 104 | def test_load_in_pool_connections(self): 105 | """[ConnectionPoolTestCase] - test load in connections""" 106 | pool = ConnectionPool('localhost', 27027, dbname='test', maxconnections=10, maxusage=29) 107 | 108 | message_test = message.query(0, 'mongotor_test.$cmd', 0, 1, 109 | {'driverOIDTest': ObjectId()}) 110 | 111 | for i in xrange(300): 112 | pool.connection(self.stop) 113 | connection = self.wait() 114 | 115 | connection.send_message_with_response(message_test, callback=self.stop) 116 | self.wait() 117 | 118 | pool._idle_connections.should.have.length_of(0) 119 | 120 | for i in xrange(300): 121 | pool.connection(self.stop) 122 | connection = self.wait() 123 | 124 | connection.send_message_with_response(message_test, callback=self.stop) 125 | self.wait() 126 | 127 | pool._idle_connections.should.have.length_of(0) 128 | 129 | def test_load_two_in_pool_connections(self): 130 | """[ConnectionPoolTestCase] - test load two in connections""" 131 | pool = ConnectionPool('localhost', 27027, dbname='test', maxconnections=10, maxusage=29) 132 | 133 | message_test = message.query(0, 'mongotor_test.$cmd', 0, 1, 134 | {'driverOIDTest': ObjectId()}) 135 | 136 | for i in xrange(30000): 137 | pool.connection(self.stop) 138 | connection = self.wait() 139 | 140 | connection.send_message_with_response(message_test, callback=self.stop) 141 | self.wait() 142 | 143 | pool._idle_connections.should.have.length_of(0) 144 | pool._connections.should.be.equal(0) 145 | 146 | def test_check_connections_when_use_cursors(self): 147 | """[ConnectionPoolTestCase] - check connections when use cursors""" 148 | db = Database.init('localhost:27027', 
dbname='test', maxconnections=10, maxusage=29) 149 | 150 | try: 151 | for i in range(2): 152 | db.cards.insert({'_id': ObjectId(), 'range': i}, callback=self.stop) 153 | self.wait() 154 | 155 | db._nodes[0].pool._connections.should.be.equal(0) 156 | 157 | db.cards.find({}, callback=self.stop) 158 | self.wait() 159 | 160 | db._nodes[0].pool._connections.should.be.equal(0) 161 | finally: 162 | Database.disconnect() 163 | -------------------------------------------------------------------------------- /tests/test_replicaset.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from tornado.ioloop import IOLoop 3 | from tornado import testing 4 | from bson import ObjectId 5 | from mongotor.errors import DatabaseError 6 | from mongotor.database import Database 7 | from mongotor.node import ReadPreference 8 | import sure 9 | import os 10 | import time 11 | 12 | 13 | class ReplicaSetTestCase(testing.AsyncTestCase): 14 | 15 | def get_new_ioloop(self): 16 | return IOLoop.instance() 17 | 18 | def tearDown(self): 19 | super(ReplicaSetTestCase, self).tearDown() 20 | Database.disconnect() 21 | 22 | def test_configure_nodes(self): 23 | """[ReplicaSetTestCase] - Configure nodes""" 24 | 25 | db = Database.init(["localhost:27027", "localhost:27028"], dbname='test') 26 | db._connect(callback=self.stop) 27 | self.wait() 28 | 29 | master_node = ReadPreference.select_primary_node(Database()._nodes) 30 | secondary_node = ReadPreference.select_node(Database()._nodes, mode=ReadPreference.SECONDARY) 31 | 32 | master_node.host.should.be('localhost') 33 | master_node.port.should.be(27027) 34 | 35 | secondary_node.host.should.be('localhost') 36 | secondary_node.port.should.be(27028) 37 | 38 | nodes = Database()._nodes 39 | nodes.should.have.length_of(2) 40 | 41 | def test_raises_error_when_mode_is_secondary_and_secondary_is_down(self): 42 | """[ReplicaSetTestCase] - Raise error when mode is secondary and secondary is down""" 43 | 
os.system('make mongo-kill-node2') 44 | time.sleep(1) # stops are fast 45 | 46 | try: 47 | db = Database.init(["localhost:27027", "localhost:27028"], dbname='test') 48 | db._connect(callback=self.stop) 49 | self.wait() 50 | 51 | Database().send_message.when.called_with((None, ''), 52 | read_preference=ReadPreference.SECONDARY)\ 53 | .throw(DatabaseError) 54 | finally: 55 | os.system('make mongo-start-node2') 56 | time.sleep(5) # wait to become secondary again 57 | 58 | 59 | class SecondaryPreferredTestCase(testing.AsyncTestCase): 60 | 61 | def get_new_ioloop(self): 62 | return IOLoop.instance() 63 | 64 | def tearDown(self): 65 | super(SecondaryPreferredTestCase, self).tearDown() 66 | Database._instance = None 67 | 68 | def test_find_on_secondary(self): 69 | """[SecondaryPreferredTestCase] - test find document from secondary""" 70 | db = Database.init(["localhost:27027", "localhost:27028"], dbname='test', 71 | read_preference=ReadPreference.SECONDARY_PREFERRED) 72 | db._connect(callback=self.stop) 73 | self.wait() 74 | 75 | doc = {'_id': ObjectId()} 76 | db.test.insert(doc, callback=self.stop) 77 | self.wait() 78 | 79 | time.sleep(2) 80 | db.test.find_one(doc, callback=self.stop) 81 | doc_found, error = self.wait() 82 | 83 | doc_found.should.be.eql(doc) 84 | -------------------------------------------------------------------------------- /tests/util.py: -------------------------------------------------------------------------------- 1 | ## copied from tornado source code: 2 | 3 | import sys 4 | 5 | # Encapsulate the choice of unittest or unittest2 here. 6 | # To be used as 'from tests.util import unittest'. 
7 | if sys.version_info >= (2, 7): 8 | import unittest 9 | else: 10 | import unittest2 as unittest 11 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py26, py27, py26-tornado23, py27-tornado23, py26-tornado24, py27-tornado24 3 | 4 | [testenv] 5 | commands=nosetests 6 | 7 | [testenv:py26] 8 | basepython = python2.6 9 | deps = 10 | unittest2 11 | tornado>=3.0 12 | pymongo>=2.3 13 | nose>=1.1.2 14 | fudge>=1.0.3 15 | sure>=1.0.6 16 | 17 | [testenv:py27] 18 | basepython = python2.7 19 | deps = 20 | tornado>=3.0 21 | pymongo>=2.3 22 | nose>=1.1.2 23 | fudge>=1.0.3 24 | sure>=1.0.6 25 | 26 | [testenv:py26-tornado23] 27 | basepython = python2.6 28 | deps = 29 | unittest2 30 | tornado==2.3 31 | pymongo>=2.3 32 | nose>=1.1.2 33 | fudge>=1.0.3 34 | sure>=1.0.6 35 | 36 | [testenv:py27-tornado23] 37 | basepython = python2.7 38 | deps = 39 | tornado==2.3 40 | pymongo>=2.3 41 | nose>=1.1.2 42 | fudge>=1.0.3 43 | sure>=1.0.6 44 | 45 | [testenv:py26-tornado24] 46 | basepython = python2.6 47 | deps = 48 | unittest2 49 | tornado==2.4 50 | pymongo>=2.3 51 | nose>=1.1.2 52 | fudge>=1.0.3 53 | sure>=1.0.6 54 | 55 | [testenv:py27-tornado24] 56 | basepython = python2.7 57 | deps = 58 | tornado==2.4 59 | pymongo>=2.3 60 | nose>=1.1.2 61 | fudge>=1.0.3 62 | sure>=1.0.6 63 | --------------------------------------------------------------------------------