├── .gitignore ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── README.rst ├── THANKS ├── changelog.rst ├── docs ├── Makefile ├── api.rst ├── changelog.rst ├── conf.py ├── index.rst ├── installation.rst ├── make.bat └── tutorial.rst ├── examples └── gen_example.py ├── momoko ├── __init__.py ├── connection.py └── exceptions.py ├── perf_test.py ├── setup.py ├── tcproxy ├── .gitignore ├── ChangeLog ├── LICENSE ├── Makefile ├── README.md ├── TODO └── src │ ├── Makefile │ ├── ae.c │ ├── ae.h │ ├── ae_epoll.c │ ├── ae_kqueue.c │ ├── ae_select.c │ ├── anet.c │ ├── anet.h │ ├── config.h │ ├── fmacros.h │ ├── policy.c │ ├── policy.h │ ├── policy.rl │ ├── tcproxy.c │ ├── util.c │ ├── util.h │ ├── zmalloc.c │ └── zmalloc.h └── tests.py /.gitignore: -------------------------------------------------------------------------------- 1 | MANIFEST 2 | build/ 3 | dist/ 4 | *.pyc 5 | *.pth 6 | __pycache__ 7 | *.egg-info/ 8 | *.egg/ 9 | docs/_build/ 10 | *.sublime-project 11 | *.sublime-workspace 12 | *.swp 13 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - 2.7 5 | - 3.4 6 | - 3.5 7 | - 3.6 8 | - pypy 9 | - pypy3.5 10 | 11 | addons: 12 | postgresql: "9.4" 13 | 14 | before_script: 15 | - psql -c 'CREATE DATABASE momoko_test;' -U postgres 16 | - psql -U postgres momoko_test -c 'CREATE EXTENSION IF NOT EXISTS hstore' 17 | - make -C tcproxy 18 | - env 19 | 20 | env: 21 | global: 22 | - MOMOKO_TEST_HSTORE=1 23 | - MOMOKO_TEST_JSON=1 24 | - MOMOKO_TEST_HOST=127.0.0.1 25 | - PGHOST=127.0.0.1 26 | matrix: 27 | - MOMOKO_PSYCOPG2_IMPL=psycopg2 TORNADO_VER="<5.0.0" 28 | - MOMOKO_PSYCOPG2_IMPL=psycopg2cffi TORNADO_VER="<5.0.0" 29 | - MOMOKO_PSYCOPG2_IMPL=psycopg2 TORNADO_VER=">=5.0.0" 30 | - MOMOKO_PSYCOPG2_IMPL=psycopg2cffi TORNADO_VER=">=5.0.0" 31 | 32 | install: 'pip install tornado"${TORNADO_VER}" ${MOMOKO_PSYCOPG2_IMPL} 
unittest2' 33 | script: python setup.py test 34 | 35 | matrix: 36 | exclude: 37 | - python: pypy 38 | env: MOMOKO_PSYCOPG2_IMPL=psycopg2 TORNADO_VER="<5.0.0" 39 | - python: pypy 40 | env: MOMOKO_PSYCOPG2_IMPL=psycopg2 TORNADO_VER=">=5.0.0" 41 | - python: pypy3.5 42 | env: MOMOKO_PSYCOPG2_IMPL=psycopg2 TORNADO_VER="<5.0.0" 43 | - python: pypy3.5 44 | env: MOMOKO_PSYCOPG2_IMPL=psycopg2 TORNADO_VER=">=5.0.0" 45 | include: 46 | # https://github.com/travis-ci/travis-ci/issues/9815 47 | - python: 3.7 48 | dist: xenial 49 | sudo: true 50 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2011-2014 by Frank Smit 2 | and Zaar Hai 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to deal 6 | in the Software without restriction, including without limitation the rights 7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in 12 | all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 20 | THE SOFTWARE. 
21 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst LICENSE changelog.rst THANKS tests.py 2 | recursive-include docs * 3 | recursive-include examples * 4 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Momoko 2 | ====== 3 | 4 | .. image:: https://img.shields.io/pypi/v/momoko.svg 5 | :target: https://pypi.python.org/pypi/momoko 6 | 7 | .. image:: https://img.shields.io/travis/FSX/momoko.svg 8 | :target: https://travis-ci.org/FSX/momoko 9 | 10 | .. image:: https://img.shields.io/pypi/dm/momoko.svg 11 | :target: https://pypi.python.org/pypi/momoko 12 | 13 | 14 | Momoko wraps Psycopg2_'s functionality for use in Tornado_. Have a look at tutorial_ or full documentation_. 15 | 16 | **Important:** This is the 2.x version of Momoko. It requires 4.0 <= Tornado < **6.0**, uses futures instead of callbacks 17 | and introduces a slightly different API compared to 1.x version. While transition is very 18 | straightforward, the API is not backward compatible with 1.x! 19 | 20 | .. _Psycopg2: http://initd.org/psycopg/ 21 | .. _Tornado: http://www.tornadoweb.org/ 22 | .. _tutorial: http://momoko.readthedocs.org/en/master/tutorial.html 23 | .. _documentation: http://momoko.readthedocs.org/en/master 24 | 25 | Maintainer wanted 26 | ----------------- 27 | Unfortunately none of the developers of this project actively use it anymore in their work. Test-covered pull requests will be happily accepted, but no active development is planned so far. If you have serious intentions to maintain this project, please get in touch. 
28 | 29 | Installation 30 | ------------ 31 | 32 | With pip:: 33 | 34 | pip install momoko 35 | 36 | Or manually:: 37 | 38 | python setup.py install 39 | 40 | 41 | Testing 42 | ------- 43 | 44 | Set the following environment variables with your own values before running the 45 | unit tests:: 46 | 47 | make -C tcproxy 48 | export MOMOKO_TEST_DB='your_db' 49 | export MOMOKO_TEST_USER='your_user' 50 | export MOMOKO_TEST_PASSWORD='your_password' 51 | export MOMOKO_TEST_HOST='localhost' 52 | export MOMOKO_TEST_PORT='5432' 53 | 54 | And run the tests with:: 55 | 56 | python setup.py test 57 | -------------------------------------------------------------------------------- /THANKS: -------------------------------------------------------------------------------- 1 | People who contributed code and fixed bugs: 2 | https://github.com/FSX/momoko/graphs/contributors 3 | -------------------------------------------------------------------------------- /changelog.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | 2.2.5.1 (2018-11-05) 5 | -------------------- 6 | * Updated README (mainly for PyPi) 7 | 8 | 2.2.5 (2018-11-05) 9 | ------------------ 10 | * Explicitly declaring and testing Python 3.5, 3.6, and 3.7 support 11 | * Tornado 5.x support (Tornado 4.x is supported as well) 12 | * Dropped support for Python 2.6 and 3.3 13 | 14 | 2.2.4 (2016-10-31) 15 | ------------------ 16 | * Resiliency to PostgreSQL restarts (`issue 147`_) 17 | * Provide a useful `__repr__()` method for `ConnectionContainer` (`issue 146`_) 18 | * Dropped support for Python 3.2 - Tornado stopped supporting it as well 19 | * Fixed potential conflict in connection management (`issue 139`_) 20 | 21 | .. _issue 147: https://github.com/FSX/momoko/issues/147 22 | .. _issue 146: https://github.com/FSX/momoko/issues/146 23 | .. 
_issue 139: https://github.com/FSX/momoko/issues/139 24 | 25 | 2.2.3 (2016-03-10) 26 | ------------------ 27 | * Matching ``execute`` parameters behaviour to those of psycopg2. (`issue 136`_). 28 | 29 | .. _issue 136: https://github.com/FSX/momoko/issues/136 30 | 31 | 2.2.2 (2015-12-02) 32 | ------------------ 33 | * Doc fixes (`issue 131`_). Thanks to gward_. 34 | * Makefile fix (`issue 132`_). Thanks to bitwolaiye_. 35 | * Catching all synchronous exceptions (`issue 134`_). Thanks to m-messiah_. 36 | * Catching ``IOError``\ s in IOLoop handlers (`issue 127`_). 37 | 38 | .. _issue 127: https://github.com/FSX/momoko/issues/127 39 | .. _issue 131: https://github.com/FSX/momoko/issues/131 40 | .. _issue 132: https://github.com/FSX/momoko/issues/132 41 | .. _issue 134: https://github.com/FSX/momoko/issues/134 42 | .. _bitwolaiye: https://github.com/bitwolaiye 43 | .. _gward: https://github.com/gward 44 | .. _m-messiah: https://github.com/m-messiah 45 | 46 | 2.2.1 (2015-10-13) 47 | ------------------ 48 | * Wait for pending connections during connection acquiring (`issue 122`_). Thanks to jbowes_. 49 | 50 | .. _issue 122: https://github.com/FSX/momoko/issues/122 51 | .. _jbowes: https://github.com/jbowes 52 | 53 | 2.2.0 (2015-09-20) 54 | ------------------ 55 | * Fixed serious flaw with connection retrials. `More details`_. 56 | * Fixed ping to handle failures properly (`issue 115`_). 57 | * NOTE: gcc is now required to run tests - we supply built-in version of `tcproxy`_ for connection failure simulation. 58 | 59 | .. _More details: https://github.com/FSX/momoko/commit/85183f5370181f75a29e876f5211d99c40b4ba5e 60 | .. _issue 115: https://github.com/FSX/momoko/issues/115 61 | .. _tcproxy: https://github.com/dccmx/tcproxy 62 | 63 | 2.1.1 (2015-08-03) 64 | ------------------ 65 | * Fixed JSON/HSTORE support with named cursors (`issue 112`_). Thanks to helminster_. 66 | 67 | .. _issue 112: https://github.com/FSX/momoko/issues/112 68 | .. 
_helminster: https://github.com/helminster 69 | 70 | 2.1.0 (2015-07-08) 71 | ------------------ 72 | * Auto shrink support. Thanks to `John Chumnanvech`_. 73 | 74 | .. _John Chumnanvech: https://github.com/jchumnanvech 75 | 76 | 2.0.0 (2015-05-10) 77 | ------------------ 78 | * Full rewrite using Futures_ 79 | * NOTE: The new API is similar but not backwards compatible. Make sure to read documentation first. 80 | 81 | .. _Futures: http://tornado.readthedocs.org/en/latest/concurrent.html 82 | 83 | 1.1.6 (2015-04-26) 84 | ------------------ 85 | * Added register_json 86 | * Docs: fix typos, spelling, grammatical errors; improve unclear wording 87 | * Removed support for psycopg2ct 88 | 89 | 90 | 1.1.5 (2014-11-17) 91 | ------------------ 92 | 93 | * Catching ALL types of early error. Fixes `issue 79`_. 94 | 95 | .. _issue 79: https://github.com/FSX/momoko/issues/79 96 | 97 | 98 | 1.1.4 (2014-07-21) 99 | ------------------ 100 | 101 | * Tornado 4.0 compatibility: backported old ``Task`` class for Tornado 4.0 compatibility. 102 | 103 | 104 | 1.1.3 (2014-05-21) 105 | ------------------ 106 | 107 | * Fixed hstore. 108 | 109 | 110 | 1.1.2 (2014-03-06) 111 | ------------------ 112 | 113 | * Fixed a minor Python 3.2 issue. 114 | 115 | 116 | 1.1.1 (2014-03-06) 117 | ------------------ 118 | 119 | Fixes: 120 | 121 | * ``Connection.transaction`` does not break when passed SQL strings are of ``unicode`` type 122 | 123 | 124 | 1.1.0 (2014-02-24) 125 | ------------------ 126 | 127 | New features: 128 | 129 | * Transparent automatic reconnects if database disappears and comes back. 130 | * Session init commands (``setsession``). 131 | * Dynamic pool size stretching. New connections will be opened under 132 | load up-to predefined limit. 133 | * API for manual connection management with ``getconn``/``putconn``. Useful for server-side cursors. 134 | * A lot of internal improvements and cleanup. 
135 | 136 | Fixes: 137 | 138 | * Connections are managed explicitly - eliminates transaction problems reported. 139 | * ``connection_factory`` (and ``cursor_factory``) arguments handled properly by ``Pool``. 140 | 141 | 142 | 1.0.0 (2013-05-01) 143 | ------------------ 144 | 145 | * Fix code example in documentation. By matheuspatury in `pull request 46`_ 146 | 147 | .. _pull request 46: https://github.com/FSX/momoko/pull/46 148 | 149 | 150 | 1.0.0b2 (2013-02-28) 151 | -------------------- 152 | 153 | * Tested on CPython 2.6, 2.7, 3.2, 3.3 and PyPy with Psycopg2_, psycopg2ct_ and psycopg2cffi_. 154 | * Add and remove a database connection to and from the IOLoop for each operation. 155 | See `pull request 38`_ and commits 189323211b_ and 92940db0a0_ for more information. 156 | * Replaced dynamic connection pool with a static one. 157 | * Add support for hstore_. 158 | 159 | .. _Psycopg2: http://initd.org/psycopg/ 160 | .. _psycopg2ct: http://pypi.python.org/pypi/psycopg2ct 161 | .. _psycopg2cffi: http://pypi.python.org/pypi/psycopg2cffi 162 | .. _pull request 38: https://github.com/FSX/momoko/pull/38 163 | .. _189323211b: https://github.com/FSX/momoko/commit/189323211bcb44ea158f41ddf87d4240c0e657d6 164 | .. _92940db0a0: https://github.com/FSX/momoko/commit/92940db0a0f6d780724f42d3d66f1b75a78430ff 165 | .. _hstore: http://www.postgresql.org/docs/9.2/static/hstore.html 166 | 167 | 168 | 1.0.0b1 (2012-12-16) 169 | -------------------- 170 | 171 | This is a beta release. It means that the code has not been tested thoroughly 172 | yet. This first beta release is meant to provide all the functionality of the 173 | previous version plus a few additions. 174 | 175 | * Most of the code has been rewritten. 176 | * The mogrify_ method has been added. 177 | * Added support for transactions. 178 | * The query chain and batch have been removed, because ``tornado.gen`` can be used instead. 179 | * Error reporting has been improved by passing the raised exception to the callback. 
180 | A callback accepts two arguments: the cursor and the error. 181 | * ``Op``, ``WaitOp`` and ``WaitAllOps`` in ``momoko.utils`` are wrappers for 182 | classes in ``tornado.gen`` which raise the error again when one occurs. 183 | And the user can capture the exception in the request handler. 184 | * A complete set of tests has been added in the ``momoko`` module: ``momoko.tests``. 185 | These can be run with ``python setup.py test``. 186 | 187 | .. _mogrify: http://initd.org/psycopg/docs/cursor.html#cursor.mogrify 188 | 189 | 190 | 0.5.0 (2012-07-30) 191 | ------------------ 192 | 193 | * Removed all Adisp related code. 194 | * Refactored connection pool and connection polling. 195 | * Just pass all unspecified arguments to ``BlockingPool`` and ``AsyncPool``. So 196 | ``connection_factory`` can be used again. 197 | 198 | 199 | 0.4.0 (2011-12-15) 200 | ------------------ 201 | 202 | * Reorganized classes and files. 203 | * Renamed ``momoko.Client`` to ``momoko.AsyncClient``. 204 | * Renamed ``momoko.Pool`` to ``momoko.AsyncPool``. 205 | * Added a client and pool for blocking connections, ``momoko.BlockingClient`` 206 | and ``momoko.BlockingPool``. 207 | * Added ``PoolError`` to the import list in ``__init__.py``. 208 | * Added an example that uses Tornado's gen_ module and Swift_. 209 | * Callbacks are now optional for ``AsyncClient``. 210 | * ``AsyncPool`` and ``Poller`` now accept an ioloop argument. [fzzbt_] 211 | * Unit tests have been added. [fzzbt_] 212 | 213 | .. _gen: http://www.tornadoweb.org/documentation/gen.html 214 | .. _Swift: http://code.naeseth.com/swirl/ 215 | .. _fzzbt: https://github.com/fzzbt 216 | 217 | 218 | 0.3.0 (2011-08-07) 219 | ------------------ 220 | 221 | * Renamed ``momoko.Momoko`` to ``momoko.Client``. 222 | * Programming in blocking-style is now possible with ``AdispClient``. 223 | * Support for Python 3 has been added. 224 | * The batch and chain functions now accept different arguments. See the 225 | documentation for details. 
226 | 227 | 228 | 0.2.0 (2011-04-30) 229 | ------------------ 230 | 231 | * Removed ``executemany`` from ``Momoko``, because it can not be used in asynchronous mode. 232 | * Added a wrapper class, ``Momoko``, for ``Pool``, ``BatchQuery`` and ``QueryChain``. 233 | * Added the ``QueryChain`` class for executing a chain of queries (and callables) 234 | in a certain order. 235 | * Added the ``BatchQuery`` class for executing batches of queries at the same time. 236 | * Improved ``Pool._clean_pool``. It threw an ``IndexError`` when more than one 237 | connection needed to be closed. 238 | 239 | 240 | 0.1.0 (2011-03-13) 241 | ------------------- 242 | 243 | * Initial release. 244 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | ifdef VIRTUAL_ENV 11 | VENV_PPATH = $(shell echo $(VIRTUAL_ENV)/lib/*/site-packages) 12 | SRC_PPATH = $(shell find $(VIRTUAL_ENV)/src -mindepth 1 -maxdepth 1 -type d 2>/dev/null|xargs echo |tr ' ' :) 13 | ifdef PYTHONPATH 14 | PYTHONPATH = $(VENV_PPATH):$(SRC_PATH):$(PYTHONPATH) 15 | else 16 | PYTHONPATH = $(VENV_PPATH):$(SRC_PATH) 17 | export PYTHONPATH 18 | endif 19 | endif 20 | 21 | # Internal variables. 22 | PAPEROPT_a4 = -D latex_paper_size=a4 23 | PAPEROPT_letter = -D latex_paper_size=letter 24 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 25 | # the i18n builder cannot share the environment and doctrees with the others 26 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
27 | 28 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 29 | 30 | help: 31 | @echo "Please use \`make ' where is one of" 32 | @echo " html to make standalone HTML files" 33 | @echo " dirhtml to make HTML files named index.html in directories" 34 | @echo " singlehtml to make a single large HTML file" 35 | @echo " pickle to make pickle files" 36 | @echo " json to make JSON files" 37 | @echo " htmlhelp to make HTML files and a HTML help project" 38 | @echo " qthelp to make HTML files and a qthelp project" 39 | @echo " devhelp to make HTML files and a Devhelp project" 40 | @echo " epub to make an epub" 41 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 42 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 43 | @echo " text to make text files" 44 | @echo " man to make manual pages" 45 | @echo " texinfo to make Texinfo files" 46 | @echo " info to make Texinfo files and run them through makeinfo" 47 | @echo " gettext to make PO message catalogs" 48 | @echo " changes to make an overview of all changed/added/deprecated items" 49 | @echo " linkcheck to check all external links for integrity" 50 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 51 | 52 | clean: 53 | -rm -rf $(BUILDDIR)/* 54 | 55 | html: 56 | $(SPHINXBUILD) -a -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 57 | @echo 58 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 59 | 60 | dirhtml: 61 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 62 | @echo 63 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 64 | 65 | singlehtml: 66 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 67 | @echo 68 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 
69 | 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | json: 76 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 77 | @echo 78 | @echo "Build finished; now you can process the JSON files." 79 | 80 | htmlhelp: 81 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 82 | @echo 83 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 84 | ".hhp project file in $(BUILDDIR)/htmlhelp." 85 | 86 | qthelp: 87 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 88 | @echo 89 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 90 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 91 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Momoko.qhcp" 92 | @echo "To view the help file:" 93 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Momoko.qhc" 94 | 95 | devhelp: 96 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 97 | @echo 98 | @echo "Build finished." 99 | @echo "To view the help file:" 100 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Momoko" 101 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Momoko" 102 | @echo "# devhelp" 103 | 104 | epub: 105 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 106 | @echo 107 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 108 | 109 | latex: 110 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 111 | @echo 112 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 113 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 114 | "(use \`make latexpdf' here to do that automatically)." 115 | 116 | latexpdf: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo "Running LaTeX files through pdflatex..." 119 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 120 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 
121 | 122 | text: 123 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 124 | @echo 125 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 126 | 127 | man: 128 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 129 | @echo 130 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 131 | 132 | texinfo: 133 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 134 | @echo 135 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 136 | @echo "Run \`make' in that directory to run these through makeinfo" \ 137 | "(use \`make info' here to do that automatically)." 138 | 139 | info: 140 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 141 | @echo "Running Texinfo files through makeinfo..." 142 | make -C $(BUILDDIR)/texinfo info 143 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 144 | 145 | gettext: 146 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 147 | @echo 148 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 149 | 150 | changes: 151 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 152 | @echo 153 | @echo "The overview file is in $(BUILDDIR)/changes." 154 | 155 | linkcheck: 156 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 157 | @echo 158 | @echo "Link check complete; look for any errors in the above output " \ 159 | "or in $(BUILDDIR)/linkcheck/output.txt." 160 | 161 | doctest: 162 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 163 | @echo "Testing of doctests in the sources finished, look at the " \ 164 | "results in $(BUILDDIR)/doctest/output.txt." 165 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | .. _api: 2 | .. module:: momoko 3 | 4 | API 5 | === 6 | 7 | Classes, methods and stuff. 
8 | 9 | 10 | Connections 11 | ----------- 12 | 13 | .. autoclass:: momoko.Pool 14 | :members: 15 | 16 | .. autoclass:: momoko.Connection 17 | :members: 18 | 19 | .. autofunction:: momoko.connect 20 | 21 | 22 | Exceptions 23 | ---------- 24 | 25 | .. autoclass:: momoko.PoolError 26 | :members: 27 | 28 | .. autoclass:: momoko.PartiallyConnectedError 29 | :members: 30 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | ../changelog.rst -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Momoko documentation build configuration file, created by 5 | # sphinx-quickstart on Tue Dec 11 22:52:15 2012. 6 | # 7 | # This file is execfile()d with the current directory set to its containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys, os 16 | 17 | # If extensions (or modules to document with autodoc) are in another directory, 18 | # add these directories to sys.path here. If the directory is relative to the 19 | # documentation root, use os.path.abspath to make it absolute, like shown here. 20 | sys.path.insert(0, os.path.abspath('..')) 21 | 22 | # -- General configuration ----------------------------------------------------- 23 | 24 | # If your documentation needs a minimal Sphinx version, state it here. 25 | #needs_sphinx = '1.0' 26 | 27 | # Add any Sphinx extension module names here, as strings. They can be extensions 28 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
29 | extensions = ['sphinx.ext.autodoc'] 30 | 31 | # Add any paths that contain templates here, relative to this directory. 32 | templates_path = ['_templates'] 33 | 34 | # The suffix of source filenames. 35 | source_suffix = '.rst' 36 | 37 | # The encoding of source files. 38 | #source_encoding = 'utf-8-sig' 39 | 40 | # The master toctree document. 41 | master_doc = 'index' 42 | 43 | # General information about the project. 44 | project = 'Momoko' 45 | copyright = '2011-2015, Frank Smit & Zaar Hai' 46 | 47 | # The version info for the project you're documenting, acts as replacement for 48 | # |version| and |release|, also used in various other places throughout the 49 | # built documents. 50 | # 51 | # The short X.Y version. 52 | version = '2.2.5.1' 53 | # The full version, including alpha/beta/rc tags. 54 | release = '2.2.5.1' 55 | 56 | # The language for content autogenerated by Sphinx. Refer to documentation 57 | # for a list of supported languages. 58 | #language = None 59 | 60 | # There are two options for replacing |today|: either, you set today to some 61 | # non-false value, then it is used: 62 | #today = '' 63 | # Else, today_fmt is used as the format for a strftime call. 64 | #today_fmt = '%B %d, %Y' 65 | 66 | # List of patterns, relative to source directory, that match files and 67 | # directories to ignore when looking for source files. 68 | exclude_patterns = ['_build'] 69 | 70 | # The reST default role (used for this markup: `text`) to use for all documents. 71 | #default_role = None 72 | 73 | # If true, '()' will be appended to :func: etc. cross-reference text. 74 | #add_function_parentheses = True 75 | 76 | # If true, the current module name will be prepended to all description 77 | # unit titles (such as .. function::). 78 | #add_module_names = True 79 | 80 | # If true, sectionauthor and moduleauthor directives will be shown in the 81 | # output. They are ignored by default. 
82 | #show_authors = False 83 | 84 | # The name of the Pygments (syntax highlighting) style to use. 85 | pygments_style = 'sphinx' 86 | 87 | # A list of ignored prefixes for module index sorting. 88 | #modindex_common_prefix = [] 89 | 90 | 91 | # -- Options for HTML output --------------------------------------------------- 92 | 93 | # The theme to use for HTML and HTML Help pages. See the documentation for 94 | # a list of builtin themes. 95 | # html_theme = 'default' 96 | # html_theme = 'michiko' 97 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 98 | if on_rtd: 99 | html_theme = 'default' 100 | else: 101 | html_theme = 'nature' 102 | 103 | # Theme options are theme-specific and customize the look and feel of a theme 104 | # further. For a list of options available for each theme, see the 105 | # documentation. 106 | #html_theme_options = {} 107 | 108 | # Add any paths that contain custom themes here, relative to this directory. 109 | # html_theme_path = [] 110 | # html_theme_path = ['_themes'] 111 | 112 | # The name for this set of Sphinx documents. If None, it defaults to 113 | # " v documentation". 114 | #html_title = None 115 | 116 | # A shorter title for the navigation bar. Default is the same as html_title. 117 | #html_short_title = None 118 | 119 | # The name of an image file (relative to this directory) to place at the top 120 | # of the sidebar. 121 | #html_logo = None 122 | 123 | # The name of an image file (within the static path) to use as favicon of the 124 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 125 | # pixels large. 126 | #html_favicon = None 127 | 128 | # Add any paths that contain custom static files (such as style sheets) here, 129 | # relative to this directory. They are copied after the builtin static files, 130 | # so a file named "default.css" will overwrite the builtin "default.css". 
131 | # html_static_path = ['_static'] 132 | 133 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 134 | # using the given strftime format. 135 | #html_last_updated_fmt = '%b %d, %Y' 136 | 137 | # If true, SmartyPants will be used to convert quotes and dashes to 138 | # typographically correct entities. 139 | #html_use_smartypants = True 140 | 141 | # Custom sidebar templates, maps document names to template names. 142 | #html_sidebars = {} 143 | 144 | # Additional templates that should be rendered to pages, maps page names to 145 | # template names. 146 | #html_additional_pages = {} 147 | 148 | # If false, no module index is generated. 149 | #html_domain_indices = True 150 | 151 | # If false, no index is generated. 152 | #html_use_index = True 153 | 154 | # If true, the index is split into individual pages for each letter. 155 | #html_split_index = False 156 | 157 | # If true, links to the reST sources are added to the pages. 158 | #html_show_sourcelink = True 159 | 160 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 161 | #html_show_sphinx = True 162 | 163 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 164 | #html_show_copyright = True 165 | 166 | # If true, an OpenSearch description file will be output, and all pages will 167 | # contain a tag referring to it. The value of this option must be the 168 | # base URL from which the finished HTML is served. 169 | #html_use_opensearch = '' 170 | 171 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 172 | #html_file_suffix = None 173 | 174 | # Output file base name for HTML help builder. 175 | htmlhelp_basename = 'Momokodoc' 176 | 177 | 178 | # -- Options for LaTeX output -------------------------------------------------- 179 | 180 | latex_elements = { 181 | # The paper size ('letterpaper' or 'a4paper'). 182 | #'papersize': 'letterpaper', 183 | 184 | # The font size ('10pt', '11pt' or '12pt'). 
185 | #'pointsize': '10pt', 186 | 187 | # Additional stuff for the LaTeX preamble. 188 | #'preamble': '', 189 | } 190 | 191 | # Grouping the document tree into LaTeX files. List of tuples 192 | # (source start file, target name, title, author, documentclass [howto/manual]). 193 | latex_documents = [ 194 | ('index', 'Momoko.tex', 'Momoko Documentation', 195 | 'Frank Smit', 'manual'), 196 | ] 197 | 198 | # The name of an image file (relative to this directory) to place at the top of 199 | # the title page. 200 | #latex_logo = None 201 | 202 | # For "manual" documents, if this is true, then toplevel headings are parts, 203 | # not chapters. 204 | #latex_use_parts = False 205 | 206 | # If true, show page references after internal links. 207 | #latex_show_pagerefs = False 208 | 209 | # If true, show URL addresses after external links. 210 | #latex_show_urls = False 211 | 212 | # Documents to append as an appendix to all manuals. 213 | #latex_appendices = [] 214 | 215 | # If false, no module index is generated. 216 | #latex_domain_indices = True 217 | 218 | 219 | # -- Options for manual page output -------------------------------------------- 220 | 221 | # One entry per manual page. List of tuples 222 | # (source start file, name, description, authors, manual section). 223 | man_pages = [ 224 | ('index', 'momoko', 'Momoko Documentation', 225 | ['Frank Smit'], 1) 226 | ] 227 | 228 | # If true, show URL addresses after external links. 229 | #man_show_urls = False 230 | 231 | 232 | # -- Options for Texinfo output ------------------------------------------------ 233 | 234 | # Grouping the document tree into Texinfo files. 
List of tuples 235 | # (source start file, target name, title, author, 236 | # dir menu entry, description, category) 237 | texinfo_documents = [ 238 | ('index', 'Momoko', 'Momoko Documentation', 239 | 'Frank Smit', 'Momoko', "Momoko wraps Psycopg2's functionality for use in Tornado.", 240 | 'Miscellaneous'), 241 | ] 242 | 243 | # Documents to append as an appendix to all manuals. 244 | #texinfo_appendices = [] 245 | 246 | # If false, no module index is generated. 247 | #texinfo_domain_indices = True 248 | 249 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 250 | #texinfo_show_urls = 'footnote' 251 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. _overview: 2 | 3 | Momoko 4 | ====== 5 | 6 | Momoko wraps Psycopg2_'s functionality for use in Tornado_. 7 | 8 | The latest source code can be found on Github_ and bug reports can be sent 9 | there too. All releases will be uploaded to PyPi_. 10 | 11 | 12 | .. _Psycopg2: http://initd.org/psycopg 13 | .. _Tornado: http://www.tornadoweb.org 14 | .. _Github: https://github.com/FSX/momoko 15 | .. _PyPi: https://pypi.python.org/pypi/Momoko 16 | 17 | Contents: 18 | 19 | .. toctree:: 20 | :maxdepth: 1 21 | 22 | changelog 23 | installation 24 | tutorial 25 | api 26 | 27 | 28 | 29 | Indices and tables 30 | ------------------ 31 | 32 | * :ref:`genindex` 33 | * :ref:`search` 34 | 35 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | .. _installation: 2 | 3 | Installation 4 | ============ 5 | 6 | Momoko supports Python 2 and 3 and PyPy with psycopg2cffi_. 7 | And the only dependencies are Tornado_ and Psycopg2_ (or psycopg2cffi_). 
8 | Installation is easy using *easy_install* or pip_:: 9 | 10 | pip install momoko 11 | 12 | The latest source code can always be cloned from the `Github repository`_ with:: 13 | 14 | git clone git://github.com/FSX/momoko.git 15 | cd momoko 16 | python setup.py install 17 | 18 | Psycopg2 is used by default when installing Momoko, but psycopg2cffi 19 | can also be used by setting the ``MOMOKO_PSYCOPG2_IMPL`` environment variable to 20 | ``psycopg2cffi`` before running ``setup.py``. For example:: 21 | 22 | # 'psycopg2' or 'psycopg2cffi' 23 | export MOMOKO_PSYCOPG2_IMPL='psycopg2cffi' 24 | 25 | The unit tests all use this variable. It needs to be set if something else is used 26 | instead of Psycopg2 when running the unit tests. Besides ``MOMOKO_PSYCOPG2_IMPL`` 27 | there are also other variables that need to be set for the unit tests. 28 | 29 | Here's an example for the environment variables:: 30 | 31 | export MOMOKO_TEST_DB='your_db' # Default: momoko_test 32 | export MOMOKO_TEST_USER='your_user' # Default: postgres 33 | export MOMOKO_TEST_PASSWORD='your_password' # Empty by default 34 | export MOMOKO_TEST_HOST='localhost' # Empty by default 35 | export MOMOKO_TEST_PORT='5432' # Default: 5432 36 | 37 | # Set to '0' if hstore extension isn't enabled 38 | export MOMOKO_TEST_HSTORE='1' # Default: 0 39 | 40 | Momoko tests use tcproxy_ for simulating Postgres server unavailability. The copy 41 | of tcproxy is bundled with Momoko, but you need to build it first:: 42 | 43 | make -C tcproxy 44 | 45 | Finally, running the tests is easy:: 46 | 47 | python setup.py test 48 | 49 | 50 | .. _tcproxy: https://github.com/dccmx/tcproxy 51 | .. _psycopg2cffi: http://pypi.python.org/pypi/psycopg2cffi 52 | .. _Tornado: http://www.tornadoweb.org/ 53 | .. _Psycopg2: http://initd.org/psycopg/ 54 | .. _pip: http://www.pip-installer.org/ 55 | ..
_Github repository: https://github.com/FSX/momoko 56 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. linkcheck to check all external links for integrity 37 | echo. doctest to run all doctests embedded in the documentation if enabled 38 | goto end 39 | ) 40 | 41 | if "%1" == "clean" ( 42 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 43 | del /q /s %BUILDDIR%\* 44 | goto end 45 | ) 46 | 47 | if "%1" == "html" ( 48 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 49 | if errorlevel 1 exit /b 1 50 | echo. 
51 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 52 | goto end 53 | ) 54 | 55 | if "%1" == "dirhtml" ( 56 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 57 | if errorlevel 1 exit /b 1 58 | echo. 59 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 60 | goto end 61 | ) 62 | 63 | if "%1" == "singlehtml" ( 64 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 65 | if errorlevel 1 exit /b 1 66 | echo. 67 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 68 | goto end 69 | ) 70 | 71 | if "%1" == "pickle" ( 72 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 73 | if errorlevel 1 exit /b 1 74 | echo. 75 | echo.Build finished; now you can process the pickle files. 76 | goto end 77 | ) 78 | 79 | if "%1" == "json" ( 80 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 81 | if errorlevel 1 exit /b 1 82 | echo. 83 | echo.Build finished; now you can process the JSON files. 84 | goto end 85 | ) 86 | 87 | if "%1" == "htmlhelp" ( 88 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 89 | if errorlevel 1 exit /b 1 90 | echo. 91 | echo.Build finished; now you can run HTML Help Workshop with the ^ 92 | .hhp project file in %BUILDDIR%/htmlhelp. 93 | goto end 94 | ) 95 | 96 | if "%1" == "qthelp" ( 97 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 98 | if errorlevel 1 exit /b 1 99 | echo. 100 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 101 | .qhcp project file in %BUILDDIR%/qthelp, like this: 102 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Momoko.qhcp 103 | echo.To view the help file: 104 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Momoko.ghc 105 | goto end 106 | ) 107 | 108 | if "%1" == "devhelp" ( 109 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 110 | if errorlevel 1 exit /b 1 111 | echo. 112 | echo.Build finished. 
113 | goto end 114 | ) 115 | 116 | if "%1" == "epub" ( 117 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 118 | if errorlevel 1 exit /b 1 119 | echo. 120 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 121 | goto end 122 | ) 123 | 124 | if "%1" == "latex" ( 125 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 129 | goto end 130 | ) 131 | 132 | if "%1" == "text" ( 133 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 134 | if errorlevel 1 exit /b 1 135 | echo. 136 | echo.Build finished. The text files are in %BUILDDIR%/text. 137 | goto end 138 | ) 139 | 140 | if "%1" == "man" ( 141 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 142 | if errorlevel 1 exit /b 1 143 | echo. 144 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 145 | goto end 146 | ) 147 | 148 | if "%1" == "texinfo" ( 149 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 150 | if errorlevel 1 exit /b 1 151 | echo. 152 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 153 | goto end 154 | ) 155 | 156 | if "%1" == "gettext" ( 157 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 158 | if errorlevel 1 exit /b 1 159 | echo. 160 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 161 | goto end 162 | ) 163 | 164 | if "%1" == "changes" ( 165 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 166 | if errorlevel 1 exit /b 1 167 | echo. 168 | echo.The overview file is in %BUILDDIR%/changes. 169 | goto end 170 | ) 171 | 172 | if "%1" == "linkcheck" ( 173 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 174 | if errorlevel 1 exit /b 1 175 | echo. 176 | echo.Link check complete; look for any errors in the above output ^ 177 | or in %BUILDDIR%/linkcheck/output.txt. 
178 | goto end 179 | ) 180 | 181 | if "%1" == "doctest" ( 182 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 183 | if errorlevel 1 exit /b 1 184 | echo. 185 | echo.Testing of doctests in the sources finished, look at the ^ 186 | results in %BUILDDIR%/doctest/output.txt. 187 | goto end 188 | ) 189 | 190 | :end 191 | -------------------------------------------------------------------------------- /docs/tutorial.rst: -------------------------------------------------------------------------------- 1 | .. _tutorial: 2 | 3 | Tutorial 4 | ======== 5 | 6 | This tutorial will demonstrate all the functionality found in Momoko. It's assumed a 7 | working PostgreSQL database is available, and everything is done in the context of a 8 | simple tornado web application. Not everything is explained: because Momoko just 9 | wraps Psycopg2, the `Psycopg2 documentation`_ must be used alongside Momoko's. 10 | 11 | 12 | The principle 13 | ------------- 14 | Almost every method of :py:meth:`~momoko.Pool` and :py:meth:`~momoko.Connection` 15 | returns a `future`_. There are some notable exceptions, like 16 | :py:meth:`~momoko.Pool.close`; be sure to consult API documentation for the 17 | details. 18 | 19 | These future objects can be simply ``yield``-ed in Tornado methods decorated with ``gen.coroutine``. 20 | For SQL execution related methods these futures resolve to corresponding cursor objects. 
21 | 22 | Trivial example 23 | --------------- 24 | Here is the simplest synchronous version of connect/select code:: 25 | 26 | import psycopg2 27 | conn = psycopg2.connect(dsn="...") 28 | cursor = conn.cursor() 29 | cursor.execute("SELECT 1") 30 | rows = cursor.fetchall() 31 | 32 | And this is how the same code looks with Momoko/Tornado:: 33 | 34 | import momoko 35 | from tornado.ioloop import IOLoop 36 | ioloop = IOLoop.instance() 37 | 38 | conn = momoko.Connection(dsn="...") 39 | future = conn.connect() 40 | ioloop.add_future(future, lambda x: ioloop.stop()) 41 | ioloop.start() 42 | future.result() # raises exception on connection error 43 | 44 | future = conn.execute("SELECT 1") 45 | ioloop.add_future(future, lambda x: ioloop.stop()) 46 | ioloop.start() 47 | cursor = future.result() 48 | rows = cursor.fetchall() 49 | 50 | We create a connection object. Then we invoke the ``connect()`` method, which returns a future that 51 | resolves to the connection object itself when the connection is ready (we already have the connection 52 | object at hand, thus we just wait until the future is ready, ignoring its result). 53 | 54 | Next we call ``execute()``, which returns a future that resolves to a ready-to-use cursor object. 55 | And we use IOLoop again to wait for this future to be ready. 56 | 57 | Now you know how to use :py:meth:`~momoko.Connection` for working with stand-alone 58 | connections to PostgreSQL in asynchronous mode. 59 | 60 | Introducing Pool 61 | ---------------- 62 | The real power of Momoko comes with :py:meth:`~momoko.Pool`. It provides several 63 | nice features that make it useful in production environments: 64 | 65 | Connection pooling 66 | It manages several connections and distributes query requests between them. 67 | If all connections are busy, outstanding query requests are waiting in a queue 68 | Automatic pool growing (stretching) 69 | You can allow automatic stretching - i.e.
if all connections are busy and more 70 | requests are coming, Pool will open more connections up a certain limit 71 | Automatic reconnects 72 | If connections get terminated (database server restart, etc) Pool will automatically 73 | reconnect them and transparently retry query if it failed due to dead connection. 74 | 75 | 76 | Boilerplate 77 | ^^^^^^^^^^^ 78 | 79 | Here's the code that's needed for the rest of this tutorial. Each example will replace parts 80 | or extend upon this code. The code is kept simple and minimal; its purpose is just 81 | to demonstrate Momoko's functionality. Here it goes:: 82 | 83 | from tornado import gen 84 | from tornado.ioloop import IOLoop 85 | from tornado.httpserver import HTTPServer 86 | from tornado.options import parse_command_line 87 | from tornado import web 88 | 89 | import psycopg2 90 | import momoko 91 | 92 | 93 | class BaseHandler(web.RequestHandler): 94 | @property 95 | def db(self): 96 | return self.application.db 97 | 98 | 99 | class TutorialHandler(BaseHandler): 100 | def get(self): 101 | self.write('Some text here!') 102 | self.finish() 103 | 104 | 105 | if __name__ == '__main__': 106 | parse_command_line() 107 | application = web.Application([ 108 | (r'/', TutorialHandler) 109 | ], debug=True) 110 | 111 | ioloop = IOLoop.instance() 112 | 113 | application.db = momoko.Pool( 114 | dsn='dbname=your_db user=your_user password=very_secret_password ' 115 | 'host=localhost port=5432', 116 | size=1, 117 | ioloop=ioloop, 118 | ) 119 | 120 | # this is a one way to run ioloop in sync 121 | future = application.db.connect() 122 | ioloop.add_future(future, lambda f: ioloop.stop()) 123 | ioloop.start() 124 | future.result() # raises exception on connection error 125 | 126 | http_server = HTTPServer(application) 127 | http_server.listen(8888, 'localhost') 128 | ioloop.start() 129 | 130 | For more information about all the parameters passed to ``momoko.Pool`` see 131 | :py:class:`momoko.Pool` in the API documentation. 
132 | 133 | 134 | Using Pool 135 | ---------- 136 | 137 | :py:meth:`~momoko.Pool.execute`, :py:meth:`~momoko.Pool.callproc`, :py:meth:`~momoko.Pool.transaction` 138 | and :py:meth:`~momoko.Pool.mogrify` are methods of :py:class:`momoko.Pool` which 139 | can be used to query the database. (Actually, ``mogrify()`` is only used to 140 | escape strings, but it needs a connection). All these methods, except ``mogrify()``, 141 | return a cursor or an exception object. All of the described retrieval methods in 142 | Psycopg2's documentation — fetchone_, fetchmany_, fetchall_, etc. — can be used 143 | to fetch the results. 144 | 145 | First, lets rewrite our trivial example using Tornado web handlers:: 146 | 147 | class TutorialHandler(BaseHandler): 148 | @gen.coroutine 149 | def get(self): 150 | cursor = yield self.db.execute("SELECT 1;") 151 | self.write("Results: %s" % cursor.fetchone()) 152 | self.finish() 153 | 154 | To execute several queries in parallel, accumulate corresponding futures and 155 | yield them at once:: 156 | 157 | class TutorialHandler(BaseHandler): 158 | @gen.coroutine 159 | def get(self): 160 | try: 161 | f1 = self.db.execute('select 1;') 162 | f2 = self.db.execute('select 2;') 163 | f3 = self.db.execute('select 3;') 164 | yield [f1, f2, f3] 165 | 166 | cursor1 = f1.result() 167 | cursor2 = f2.result() 168 | cursor3 = f3.result() 169 | 170 | except (psycopg2.Warning, psycopg2.Error) as error: 171 | self.write(str(error)) 172 | else: 173 | self.write('Q1: %r
' % (cursor1.fetchall(),)) 174 | self.write('Q2: %r
' % (cursor2.fetchall(),)) 175 | self.write('Q3: %r
' % (cursor3.fetchall(),)) 176 | 177 | self.finish() 178 | 179 | All the above examples use :py:meth:`~momoko.Pool.execute`, but work 180 | with :py:meth:`~momoko.Pool.callproc`, :py:meth:`~momoko.Pool.transaction` and 181 | :py:meth:`~momoko.Pool.mogrify` too. 182 | 183 | 184 | Advanced 185 | -------- 186 | 187 | Manual connection management 188 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 189 | You can manually acquire connection from the pool using the :py:meth:`~momoko.Pool.getconn` method. 190 | This is very useful, for example, for server-side cursors. 191 | 192 | It is important to return the connection back to the pool once you're done with it, even if an error occurs 193 | in the middle of your work. Use either 194 | :py:meth:`~momoko.Pool.putconn` 195 | method or 196 | :py:meth:`~momoko.Pool.manage` 197 | manager to return the connection. 198 | 199 | Here is the server-side cursor example (based on the code in momoko unittests):: 200 | 201 | @gen.coroutine 202 | def get(self): 203 | int_count = 1000 204 | offset = 0 205 | chunk = 10 206 | try: 207 | conn = yield self.db.getconn() 208 | with self.db.manage(conn): 209 | yield conn.execute("BEGIN") 210 | yield conn.execute("DECLARE all_ints CURSOR FOR SELECT * FROM unit_test_int_table") 211 | while offset < int_count: 212 | cursor = yield conn.execute("FETCH %s FROM all_ints", (chunk,)) 213 | rows = cursor.fetchall() 214 | # Do something with results... 215 | offset += chunk 216 | yield conn.execute("CLOSE all_ints") 217 | yield conn.execute("COMMIT") 218 | 219 | except Exception as error: 220 | self.write(str(error)) 221 | 222 | .. _Psycopg2 documentation: http://initd.org/psycopg/docs/cursor.html 223 | .. _tornado.gen: http://tornado.readthedocs.org/en/stable/gen.html 224 | .. _fetchone: http://initd.org/psycopg/docs/cursor.html#cursor.fetchone 225 | .. _fetchmany: http://initd.org/psycopg/docs/cursor.html#cursor.fetchmany 226 | .. _fetchall: http://initd.org/psycopg/docs/cursor.html#cursor.fetchall 227 | ..
_Task: http://tornado.readthedocs.org/en/stable/gen.html#tornado.gen.Task 228 | .. _Wait: http://tornado.readthedocs.org/en/stable/gen.html#tornado.gen.Wait 229 | .. _WaitAll: http://tornado.readthedocs.org/en/stable/gen.html#tornado.gen.WaitAll 230 | .. _exceptions: http://initd.org/psycopg/docs/module.html#exceptions 231 | .. _future: http://tornado.readthedocs.org/en/latest/concurrent.html 232 | -------------------------------------------------------------------------------- /examples/gen_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | This example uses Tornado's gen_. 5 | 6 | .. _gen: http://www.tornadoweb.org/documentation/gen.html 7 | """ 8 | 9 | from __future__ import print_function 10 | 11 | import os 12 | 13 | import tornado.web 14 | import tornado.ioloop 15 | import tornado.options 16 | from tornado import gen 17 | import tornado.httpserver 18 | 19 | import momoko 20 | 21 | 22 | db_database = os.environ.get('MOMOKO_TEST_DB', 'momoko_test') 23 | db_user = os.environ.get('MOMOKO_TEST_USER', 'postgres') 24 | db_password = os.environ.get('MOMOKO_TEST_PASSWORD', '') 25 | db_host = os.environ.get('MOMOKO_TEST_HOST', '') 26 | db_port = os.environ.get('MOMOKO_TEST_PORT', 5432) 27 | enable_hstore = True if os.environ.get('MOMOKO_TEST_HSTORE', False) == '1' else False 28 | dsn = 'dbname=%s user=%s password=%s host=%s port=%s' % ( 29 | db_database, db_user, db_password, db_host, db_port) 30 | 31 | assert (db_database or db_user or db_password or db_host or db_port) is not None, ( 32 | 'Environment variables for the examples are not set. 
Please set the following ' 33 | 'variables: MOMOKO_TEST_DB, MOMOKO_TEST_USER, MOMOKO_TEST_PASSWORD, ' 34 | 'MOMOKO_TEST_HOST, MOMOKO_TEST_PORT') 35 | 36 | 37 | class BaseHandler(tornado.web.RequestHandler): 38 | @property 39 | def db(self): 40 | return self.application.db 41 | 42 | 43 | class OverviewHandler(BaseHandler): 44 | def get(self): 45 | self.write(""" 46 | 55 | """) 56 | self.finish() 57 | 58 | 59 | class MogrifyHandler(BaseHandler): 60 | @gen.coroutine 61 | def get(self): 62 | try: 63 | sql = yield self.db.mogrify("SELECT %s;", (1,)) 64 | self.write("SQL: %s
" % sql) 65 | except Exception as error: 66 | self.write(str(error)) 67 | 68 | self.finish() 69 | 70 | 71 | class SingleQueryHandler(BaseHandler): 72 | @gen.coroutine 73 | def get(self): 74 | try: 75 | cursor = yield self.db.execute("SELECT pg_sleep(%s);", (1,)) 76 | self.write("Query results: %s
\n" % cursor.fetchall()) 77 | except Exception as error: 78 | self.write(str(error)) 79 | 80 | self.finish() 81 | 82 | 83 | class HstoreQueryHandler(BaseHandler): 84 | @gen.coroutine 85 | def get(self): 86 | if enable_hstore: 87 | try: 88 | cursor = yield self.db.execute("SELECT 'a=>b, c=>d'::hstore;") 89 | self.write("Query results: %s
" % cursor.fetchall()) 90 | cursor = yield self.db.execute("SELECT %s;", ({"e": "f", "g": "h"},)) 91 | self.write("Query results: %s
" % cursor.fetchall()) 92 | except Exception as error: 93 | self.write(str(error)) 94 | else: 95 | self.write("hstore is not enabled") 96 | 97 | self.finish() 98 | 99 | 100 | class JsonQueryHandler(BaseHandler): 101 | @gen.coroutine 102 | def get(self): 103 | if self.db.server_version >= 90200: 104 | try: 105 | cursor = yield self.db.execute('SELECT \'{"a": "b", "c": "d"}\'::json;') 106 | self.write("Query results: %s
" % cursor.fetchall()) 107 | except Exception as error: 108 | self.write(str(error)) 109 | else: 110 | self.write("json is not enabled") 111 | 112 | self.finish() 113 | 114 | 115 | class MultiQueryHandler(BaseHandler): 116 | @gen.coroutine 117 | def get(self): 118 | cursor1, cursor2, cursor3 = yield [ 119 | self.db.execute("SELECT 1;"), 120 | self.db.mogrify("SELECT 2;"), 121 | self.db.execute("SELECT %s;", (3*1,)) 122 | ] 123 | 124 | self.write("Query 1 results: %s
" % cursor1.fetchall()) 125 | self.write("Query 2 results: %s
" % cursor2) 126 | self.write("Query 3 results: %s" % cursor3.fetchall()) 127 | 128 | self.finish() 129 | 130 | 131 | class TransactionHandler(BaseHandler): 132 | @gen.coroutine 133 | def get(self): 134 | try: 135 | cursors = yield self.db.transaction(( 136 | "SELECT 1, 12, 22, 11;", 137 | "SELECT 55, 22, 78, 13;", 138 | "SELECT 34, 13, 12, 34;", 139 | "SELECT 23, 12, 22, 23;", 140 | "SELECT 42, 23, 22, 11;", 141 | ("SELECT 49, %s, 23, 11;", ("STR",)), 142 | )) 143 | 144 | for i, cursor in enumerate(cursors): 145 | self.write("Query %s results: %s
" % (i, cursor.fetchall())) 146 | except Exception as error: 147 | self.write(str(error)) 148 | 149 | self.finish() 150 | 151 | 152 | class ConnectionQueryHandler(BaseHandler): 153 | def __init__(self, *args, **kwargs): 154 | self.http_connection_closed = False 155 | super(ConnectionQueryHandler, self).__init__(*args, **kwargs) 156 | 157 | @gen.coroutine 158 | def get(self): 159 | try: 160 | connection = yield self.db.getconn() 161 | with self.db.manage(connection): 162 | for i in range(5): 163 | if self.http_connection_closed: 164 | break 165 | cursor = yield connection.execute("SELECT pg_sleep(1);") 166 | self.write('Query %d results: %s
\n' % (i+1, cursor.fetchall())) 167 | self.flush() 168 | except Exception as error: 169 | self.write(str(error)) 170 | 171 | self.finish() 172 | 173 | def on_connection_close(self): 174 | self.http_connection_closed = True 175 | 176 | 177 | def main(): 178 | try: 179 | tornado.options.parse_command_line() 180 | application = tornado.web.Application([ 181 | (r'/', OverviewHandler), 182 | (r'/mogrify', MogrifyHandler), 183 | (r'/query', SingleQueryHandler), 184 | (r'/hstore', HstoreQueryHandler), 185 | (r'/json', JsonQueryHandler), 186 | (r'/transaction', TransactionHandler), 187 | (r'/multi_query', MultiQueryHandler), 188 | (r'/connection', ConnectionQueryHandler), 189 | ], debug=True) 190 | 191 | ioloop = tornado.ioloop.IOLoop.instance() 192 | 193 | application.db = momoko.Pool( 194 | dsn=dsn, 195 | size=1, 196 | max_size=3, 197 | ioloop=ioloop, 198 | setsession=("SET TIME ZONE UTC",), 199 | raise_connect_errors=False, 200 | ) 201 | 202 | # this is a one way to run ioloop in sync 203 | future = application.db.connect() 204 | ioloop.add_future(future, lambda f: ioloop.stop()) 205 | ioloop.start() 206 | 207 | if enable_hstore: 208 | future = application.db.register_hstore() 209 | # This is the other way to run ioloop in sync 210 | ioloop.run_sync(lambda: future) 211 | 212 | if application.db.server_version >= 90200: 213 | future = application.db.register_json() 214 | # This is the other way to run ioloop in sync 215 | ioloop.run_sync(lambda: future) 216 | 217 | http_server = tornado.httpserver.HTTPServer(application) 218 | http_server.listen(8888, 'localhost') 219 | ioloop.start() 220 | except KeyboardInterrupt: 221 | print('Exit') 222 | 223 | 224 | if __name__ == '__main__': 225 | main() 226 | -------------------------------------------------------------------------------- /momoko/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | momoko 4 | ====== 5 | 6 | Momoko wraps Psycopg2's 
functionality for use in Tornado. 7 | 8 | Copyright 2011-2014, Frank Smit & Zaar Hai. 9 | MIT, see LICENSE for more details. 10 | """ 11 | 12 | import psycopg2 13 | 14 | from .connection import Pool, Connection, connect 15 | from .exceptions import PoolError, PartiallyConnectedError 16 | 17 | 18 | try: 19 | psycopg2.extensions.POLL_OK 20 | except AttributeError: 21 | import warnings 22 | warnings.warn(RuntimeWarning( 23 | 'Psycopg2 does not have support for asynchronous connections. ' 24 | 'You need at least version 2.2.0 of Psycopg2 to use Momoko.')) 25 | -------------------------------------------------------------------------------- /momoko/connection.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | momoko.connection 4 | ================= 5 | 6 | Connection handling. 7 | 8 | Copyright 2011-2014, Frank Smit & Zaar Hai. 9 | MIT, see LICENSE for more details. 10 | """ 11 | 12 | from __future__ import print_function 13 | 14 | import sys 15 | if sys.version_info[0] >= 3: 16 | basestring = str 17 | 18 | import logging 19 | from functools import partial 20 | from collections import deque 21 | import time 22 | import datetime 23 | from contextlib import contextmanager 24 | 25 | import psycopg2 26 | from psycopg2.extras import register_hstore as _psy_register_hstore 27 | from psycopg2.extras import register_json as _psy_register_json 28 | from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE 29 | 30 | import tornado 31 | from tornado.ioloop import IOLoop 32 | from tornado.concurrent import chain_future, Future 33 | 34 | from .exceptions import PoolError, PartiallyConnectedError 35 | 36 | # Backfill for tornado 5 compatability 37 | # https://www.tornadoweb.org/en/stable/concurrent.html#tornado.concurrent.future_set_exc_info 38 | if tornado.version_info[0] < 5: 39 | def future_set_exc_info(future, exc_info): 40 | future.set_exc_info(exc_info) 41 | else: 42 | from tornado.concurrent import 
future_set_exc_info 43 | 44 | log = logging.getLogger('momoko') 45 | 46 | 47 | class ConnectionContainer(object): 48 | """ 49 | Helper class that stores connections according to their state 50 | """ 51 | def __init__(self): 52 | self.empty() 53 | 54 | def __repr__(self): 55 | return ('<%s at %x: %d free, %d busy, %d dead, %d pending, %d waiting>' 56 | % (self.__class__.__name__, 57 | id(self), 58 | len(self.free), 59 | len(self.busy), 60 | len(self.dead), 61 | len(self.pending), 62 | len(self.waiting_queue))) 63 | 64 | def empty(self): 65 | self.free = deque() 66 | self.busy = set() 67 | self.dead = set() 68 | self.pending = set() 69 | self.waiting_queue = deque() 70 | 71 | def add_free(self, conn): 72 | self.pending.discard(conn) 73 | log.debug("Handling free connection %s", conn.fileno) 74 | 75 | if not self.waiting_queue: 76 | log.debug("No outstanding requests - adding to free pool") 77 | conn.last_used_time = time.time() 78 | self.free.append(conn) 79 | return 80 | 81 | log.debug("There are outstanding requests - resumed future from waiting queue") 82 | self.busy.add(conn) 83 | future = self.waiting_queue.pop() 84 | future.set_result(conn) 85 | 86 | def add_dead(self, conn): 87 | log.debug("Adding dead connection") 88 | self.pending.discard(conn) 89 | self.dead.add(conn) 90 | 91 | # If everything is dead, abort anything pending. 
92 | if not self.pending: 93 | self.abort_waiting_queue(Pool.DatabaseNotAvailable("No database connection available")) 94 | 95 | def acquire(self): 96 | """Occupy free connection""" 97 | future = Future() 98 | if self.free: 99 | conn = self.free.pop() 100 | self.busy.add(conn) 101 | future.set_result(conn) 102 | log.debug("Acquired free connection %s", conn.fileno) 103 | return future 104 | elif self.busy: 105 | log.debug("No free connections, and some are busy - put in waiting queue") 106 | self.waiting_queue.appendleft(future) 107 | return future 108 | elif self.pending: 109 | log.debug("No free connections, but some are pending - put in waiting queue") 110 | self.waiting_queue.appendleft(future) 111 | return future 112 | else: 113 | log.debug("All connections are dead") 114 | return None 115 | 116 | def release(self, conn): 117 | log.debug("About to release connection %s", conn.fileno) 118 | assert conn in self.busy, "Tried to release non-busy connection" 119 | self.busy.remove(conn) 120 | if conn.closed: 121 | log.debug("The connection is dead") 122 | self.dead.add(conn) 123 | else: 124 | log.debug("The connection is alive") 125 | self.add_free(conn) 126 | 127 | def abort_waiting_queue(self, error): 128 | while self.waiting_queue: 129 | future = self.waiting_queue.pop() 130 | future.set_exception(error) 131 | 132 | def close_alive(self): 133 | for conn in self.busy.union(self.free): 134 | if not conn.closed: 135 | conn.close() 136 | 137 | def shrink(self, target_size, delay_in_seconds): 138 | now = time.time() 139 | while len(self.free) > target_size and now - self.free[0].last_used_time > delay_in_seconds: 140 | conn = self.free.popleft() 141 | conn.close() 142 | 143 | @property 144 | def all_dead(self): 145 | return not (self.free or self.busy or self.pending) 146 | 147 | @property 148 | def total(self): 149 | return len(self.free) + len(self.busy) + len(self.dead) + len(self.pending) 150 | 151 | 152 | class Pool(object): 153 | """ 154 | Asynchronous 
connection pool object. All its methods are 155 | asynchronous unless stated otherwise in the method description. 156 | 157 | :param string dsn: 158 | A `Data Source Name`_ string containing one of the following values: 159 | 160 | * **dbname** - the database name 161 | * **user** - user name used to authenticate 162 | * **password** - password used to authenticate 163 | * **host** - database host address (defaults to UNIX socket if not provided) 164 | * **port** - connection port number (defaults to 5432 if not provided) 165 | 166 | Or any other parameter supported by PostgreSQL. See the PostgreSQL 167 | documentation for a complete list of supported parameters_. 168 | 169 | :param connection_factory: 170 | The ``connection_factory`` argument can be used to create non-standard 171 | connections. The class returned should be a subclass of `psycopg2.extensions.connection`_. 172 | See `Connection and cursor factories`_ for details. Defaults to ``None``. 173 | 174 | :param cursor_factory: 175 | The ``cursor_factory`` argument can be used to return a non-standard cursor class. 176 | The class returned should be a subclass of `psycopg2.extensions.cursor`_. 177 | See `Connection and cursor factories`_ for details. Defaults to ``None``. 178 | 179 | :param int size: 180 | Minimal number of connections to maintain. ``size`` connections will be opened 181 | and maintained after calling :py:meth:`momoko.Pool.connect`. 182 | 183 | :param max_size: 184 | if not ``None``, the pool size will dynamically grow on demand up to ``max_size`` 185 | open connections. By default the connections will still be maintained even 186 | when the pool load decreases. See also ``auto_shrink`` parameter. 187 | :type max_size: int or None 188 | 189 | :param ioloop: 190 | Tornado IOloop instance to use. Defaults to Tornado's ``IOLoop.instance()``.
191 | 192 | :param bool raise_connect_errors: 193 | Whether to raise :py:meth:`momoko.PartiallyConnectedError` when failing to 194 | connect to database during :py:meth:`momoko.Pool.connect`. 195 | 196 | :param int reconnect_interval: 197 | If database server becomes unavailable, the pool will try to reestablish 198 | the connection. The attempt frequency is ``reconnect_interval`` 199 | milliseconds. 200 | 201 | :param list setsession: 202 | List of initial SQL commands to be executed once connection is established. 203 | If any of the commands fails, the connection will be closed. 204 | **NOTE:** The commands will be executed as one transaction block. 205 | 206 | :param bool auto_shrink: 207 | Garbage-collect idle connections. Only applicable if ``max_size`` was specified. 208 | Nevertheless, the pool will maintain at least ``size`` connections. 209 | 210 | :param shrink_delay: 211 | A connection is declared idle if it was not used for the ``shrink_delay`` time period. 212 | Idle connections will be garbage-collected if ``auto_shrink`` is set to ``True``. 213 | :type shrink_delay: :py:meth:`datetime.timedelta` 214 | 215 | :param shrink_period: 216 | If ``auto_shrink`` is enabled, this parameter defines how often the pool will check for 217 | idle connections. 218 | :type shrink_period: :py:meth:`datetime.timedelta` 219 | 220 | .. _Data Source Name: http://en.wikipedia.org/wiki/Data_Source_Name 221 | .. _parameters: http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS 222 | .. _psycopg2.extensions.connection: http://initd.org/psycopg/docs/connection.html#connection 223 | ..
_Connection and cursor factories: http://initd.org/psycopg/docs/advanced.html#subclassing-cursor 224 | """ 225 | 226 | class DatabaseNotAvailable(psycopg2.DatabaseError): 227 | """Raised when Pool can not connect to database server""" 228 | 229 | def __init__(self, 230 | dsn, 231 | connection_factory=None, 232 | cursor_factory=None, 233 | size=1, 234 | max_size=None, 235 | ioloop=None, 236 | raise_connect_errors=True, 237 | reconnect_interval=500, 238 | setsession=(), 239 | auto_shrink=False, 240 | shrink_delay=datetime.timedelta(minutes=2), 241 | shrink_period=datetime.timedelta(minutes=2) 242 | ): 243 | 244 | assert size > 0, "The connection pool size must be a number above 0." 245 | 246 | self.size = size 247 | self.max_size = max_size or size 248 | assert self.size <= self.max_size, "The connection pool max size must be of at least 'size'." 249 | 250 | self.dsn = dsn 251 | self.connection_factory = connection_factory 252 | self.cursor_factory = cursor_factory 253 | self.raise_connect_errors = raise_connect_errors 254 | self.reconnect_interval = float(reconnect_interval)/1000 # the parameter is in milliseconds 255 | self.setsession = setsession 256 | 257 | self.connected = False 258 | self.closed = False 259 | self.server_version = None 260 | 261 | self.ioloop = ioloop or IOLoop.instance() 262 | 263 | self.conns = ConnectionContainer() 264 | 265 | self._last_connect_time = 0 266 | self._no_conn_available_error = self.DatabaseNotAvailable("No database connection available") 267 | self.shrink_period = shrink_period 268 | self.shrink_delay = shrink_delay 269 | self.auto_shrink = auto_shrink 270 | if auto_shrink: 271 | self._auto_shrink() 272 | 273 | def _auto_shrink(self): 274 | self.conns.shrink(self.size, self.shrink_delay.seconds) 275 | self.ioloop.add_timeout(self.shrink_period, self._auto_shrink) 276 | 277 | def connect(self): 278 | """ 279 | Returns future that resolves to this Pool object. 
280 | 281 | If some connection failed to connect *and* self.raise_connect_errors 282 | is true, raises :py:meth:`momoko.PartiallyConnectedError`. 283 | """ 284 | future = Future() 285 | pending = [self.size-1] 286 | 287 | def on_connect(fut): 288 | if pending[0]: 289 | pending[0] -= 1 290 | return 291 | # all connection attempts are complete 292 | if self.conns.dead and self.raise_connect_errors: 293 | ecp = PartiallyConnectedError("%s connection(s) failed to connect" % len(self.conns.dead)) 294 | future.set_exception(ecp) 295 | else: 296 | future.set_result(self) 297 | log.debug("All initial connection requests complete") 298 | 299 | for i in range(self.size): 300 | self.ioloop.add_future(self._new_connection(), on_connect) 301 | 302 | return future 303 | 304 | def getconn(self, ping=True): 305 | """ 306 | Acquire connection from the pool. 307 | 308 | You can then use this connection for subsequent queries. 309 | Just use ``connection.execute`` instead of ``Pool.execute``. 310 | 311 | Make sure to return connection to the pool by calling :py:meth:`momoko.Pool.putconn`, 312 | otherwise the connection will remain forever busy and you'll starve your pool. 313 | 314 | Returns a future that resolves to the acquired connection object. 315 | 316 | :param boolean ping: 317 | Whether to ping the connection before returning it by executing :py:meth:`momoko.Connection.ping`. 
318 | """ 319 | rv = self.conns.acquire() 320 | if isinstance(rv, Future): 321 | self._reanimate_and_stretch_if_needed() 322 | future = rv 323 | else: 324 | # Else, all connections are dead 325 | future = Future() 326 | 327 | def on_reanimate_done(fut): 328 | if self.conns.all_dead: 329 | log.debug("all connections are still dead") 330 | future.set_exception(self._no_conn_available_error) 331 | return 332 | f = self.conns.acquire() 333 | assert isinstance(f, Future) 334 | chain_future(f, future) 335 | 336 | self.ioloop.add_future(self._reanimate(), on_reanimate_done) 337 | 338 | if not ping: 339 | return future 340 | else: 341 | return self._ping_future_connection(future) 342 | 343 | def putconn(self, connection): 344 | """ 345 | Return busy connection back to the pool. 346 | 347 | **NOTE:** This is a synchronous method. 348 | 349 | :param Connection connection: 350 | Connection object previously returned by :py:meth:`momoko.Pool.getconn`. 351 | """ 352 | 353 | self.conns.release(connection) 354 | 355 | if self.conns.all_dead: 356 | log.debug("All connections are dead - aborting waiting queue") 357 | self.conns.abort_waiting_queue(self._no_conn_available_error) 358 | 359 | @contextmanager 360 | def manage(self, connection): 361 | """ 362 | Context manager that automatically returns connection to the pool. 363 | You can use it instead of :py:meth:`momoko.Pool.putconn`:: 364 | 365 | connection = yield self.db.getconn() 366 | with self.db.manage(connection): 367 | cursor = yield connection.execute("BEGIN") 368 | ... 369 | """ 370 | assert connection in self.conns.busy, "Can not manage non-busy connection. Where did you get it from?" 371 | try: 372 | yield connection 373 | finally: 374 | self.putconn(connection) 375 | 376 | def ping(self): 377 | """ 378 | Make sure this connection is alive by executing SELECT 1 statement - 379 | i.e. roundtrip to the database. 380 | 381 | See :py:meth:`momoko.Connection.ping` for documentation about the 382 | parameters. 
383 | """ 384 | return self._operate(Connection.ping) 385 | 386 | def execute(self, *args, **kwargs): 387 | """ 388 | Prepare and execute a database operation (query or command). 389 | 390 | See :py:meth:`momoko.Connection.execute` for documentation about the 391 | parameters. 392 | """ 393 | return self._operate(Connection.execute, args, kwargs) 394 | 395 | def callproc(self, *args, **kwargs): 396 | """ 397 | Call a stored database procedure with the given name. 398 | 399 | See :py:meth:`momoko.Connection.callproc` for documentation about the 400 | parameters. 401 | """ 402 | return self._operate(Connection.callproc, args, kwargs) 403 | 404 | def transaction(self, *args, **kwargs): 405 | """ 406 | Run a sequence of SQL queries in a database transaction. 407 | 408 | See :py:meth:`momoko.Connection.transaction` for documentation about the 409 | parameters. 410 | """ 411 | return self._operate(Connection.transaction, args, kwargs) 412 | 413 | def mogrify(self, *args, **kwargs): 414 | """ 415 | Return a query string after arguments binding. 416 | 417 | **NOTE:** This is NOT a synchronous method (contary to `momoko.Connection.mogrify`) 418 | - it asynchronously waits for available connection. For performance 419 | reasons, its better to create dedicated :py:meth:`momoko.Connection` 420 | object and use it directly for mogrification, this operation does not 421 | imply any real operation on the database server. 422 | 423 | See :py:meth:`momoko.Connection.mogrify` for documentation about the 424 | parameters. 425 | """ 426 | return self._operate(Connection.mogrify, args, kwargs, async_=False) 427 | 428 | def register_hstore(self, *args, **kwargs): 429 | """ 430 | Register adapter and typecaster for ``dict-hstore`` conversions. 431 | 432 | See :py:meth:`momoko.Connection.register_hstore` for documentation about 433 | the parameters. This method has no ``globally`` parameter, because it 434 | already registers hstore to all the connections in the pool. 
435 | """ 436 | kwargs["globally"] = True 437 | return self._operate(Connection.register_hstore, args, kwargs) 438 | 439 | def register_json(self, *args, **kwargs): 440 | """ 441 | Create and register typecasters converting ``json`` type to Python objects. 442 | 443 | See :py:meth:`momoko.Connection.register_json` for documentation about 444 | the parameters. This method has no ``globally`` parameter, because it 445 | already registers json to all the connections in the pool. 446 | """ 447 | kwargs["globally"] = True 448 | return self._operate(Connection.register_json, args, kwargs) 449 | 450 | def close(self): 451 | """ 452 | Close the connection pool. 453 | 454 | **NOTE:** This is a synchronous method. 455 | """ 456 | if self.closed: 457 | raise PoolError('connection pool is already closed') 458 | 459 | self.conns.close_alive() 460 | self.conns.empty() 461 | self.closed = True 462 | 463 | def _operate(self, method, args=(), kwargs=None, async_=True, keep=False, connection=None): 464 | kwargs = kwargs or {} 465 | future = Future() 466 | 467 | retry = [] 468 | 469 | def when_available(fut): 470 | try: 471 | conn = fut.result() 472 | except psycopg2.Error: 473 | future_set_exc_info(future, sys.exc_info()) 474 | if retry and not keep: 475 | self.putconn(retry[0]) 476 | return 477 | 478 | log.debug("Obtained connection: %s", conn.fileno) 479 | try: 480 | future_or_result = method(conn, *args, **kwargs) 481 | except Exception: 482 | log.debug("Method failed synchronously") 483 | return self._retry(retry, when_available, conn, keep, future) 484 | 485 | if not async_: 486 | future.set_result(future_or_result) 487 | if not keep: 488 | self.putconn(conn) 489 | return 490 | 491 | def when_done(rfut): 492 | try: 493 | result = rfut.result() 494 | except psycopg2.Error: 495 | log.debug("Method failed Asynchronously") 496 | return self._retry(retry, when_available, conn, keep, future) 497 | 498 | future.set_result(result) 499 | if not keep: 500 | self.putconn(conn) 501 | 502 | 
self.ioloop.add_future(future_or_result, when_done) 503 | 504 | if not connection: 505 | self.ioloop.add_future(self.getconn(ping=False), when_available) 506 | else: 507 | f = Future() 508 | f.set_result(connection) 509 | when_available(f) 510 | return future 511 | 512 | def _retry(self, retry, what, conn, keep, future): 513 | if conn.closed: 514 | if not retry: 515 | retry.append(conn) 516 | self.ioloop.add_future(conn.connect(), what) 517 | return 518 | else: 519 | future.set_exception(self._no_conn_available_error) 520 | else: 521 | future_set_exc_info(future, sys.exc_info()) 522 | if not keep: 523 | self.putconn(conn) 524 | return 525 | 526 | def _reanimate(self): 527 | assert self.conns.dead, "BUG: don't call reanimate when there is no one to reanimate" 528 | 529 | future = Future() 530 | 531 | if self.ioloop.time() - self._last_connect_time < self.reconnect_interval: 532 | log.debug("Not reconnecting - too soon") 533 | future.set_result(None) 534 | return future 535 | 536 | pending = [len(self.conns.dead)-1] 537 | 538 | def on_connect(fut): 539 | if pending[0]: 540 | pending[0] -= 1 541 | return 542 | future.set_result(None) 543 | 544 | while self.conns.dead: 545 | conn = self.conns.dead.pop() 546 | self.ioloop.add_future(self._connect_one(conn), on_connect) 547 | 548 | return future 549 | 550 | def _reanimate_and_stretch_if_needed(self): 551 | if self.conns.dead: 552 | self._reanimate() 553 | return 554 | 555 | if self.conns.total == self.max_size: 556 | return # max size reached 557 | if self.conns.free: 558 | return # no point in stretching if there are free connections 559 | if self.conns.pending: 560 | if len(self.conns.pending) >= len(self.conns.waiting_queue): 561 | return # there are enough outstanding connection requests 562 | 563 | log.debug("Stretching pool") 564 | self._new_connection() 565 | 566 | def _new_connection(self): 567 | log.debug("Opening new connection") 568 | conn = Connection(self.dsn, 569 | 
connection_factory=self.connection_factory, 570 | cursor_factory=self.cursor_factory, 571 | ioloop=self.ioloop, 572 | setsession=self.setsession) 573 | return self._connect_one(conn) 574 | 575 | def _connect_one(self, conn): 576 | future = Future() 577 | self.conns.pending.add(conn) 578 | 579 | def on_connect(fut): 580 | try: 581 | fut.result() 582 | except psycopg2.Error: 583 | self.conns.add_dead(conn) 584 | else: 585 | self.conns.add_free(conn) 586 | self.server_version = conn.server_version 587 | self._last_connect_time = self.ioloop.time() 588 | future.set_result(conn) 589 | 590 | self.ioloop.add_future(conn.connect(), on_connect) 591 | return future 592 | 593 | def _ping_future_connection(self, conn_future): 594 | ping_future = Future() 595 | 596 | def on_connection_available(fut): 597 | try: 598 | conn = fut.result() 599 | except psycopg2.Error: 600 | log.debug("Aborting ping - failed to obtain connection") 601 | ping_future.set_exception(self._no_conn_available_error) 602 | return 603 | 604 | def on_ping_done(ping_fut): 605 | try: 606 | ping_fut.result() 607 | except psycopg2.Error: 608 | if conn.closed: 609 | ping_future.set_exception(self._no_conn_available_error) 610 | else: 611 | future_set_exc_info(ping_future, sys.exc_info()) 612 | self.putconn(conn) 613 | else: 614 | ping_future.set_result(conn) 615 | 616 | f = self._operate(Connection.ping, keep=True, connection=conn) 617 | self.ioloop.add_future(f, on_ping_done) 618 | 619 | self.ioloop.add_future(conn_future, on_connection_available) 620 | 621 | return ping_future 622 | 623 | 624 | class Connection(object): 625 | """ 626 | Asynchronous connection object. All its methods are 627 | asynchronous unless stated otherwide in method description. 
628 | 629 | :param string dsn: 630 | A `Data Source Name`_ string containing one of the following values: 631 | 632 | * **dbname** - the database name 633 | * **user** - user name used to authenticate 634 | * **password** - password used to authenticate 635 | * **host** - database host address (defaults to UNIX socket if not provided) 636 | * **port** - connection port number (defaults to 5432 if not provided) 637 | 638 | Or any other parameter supported by PostgreSQL. See the PostgreSQL 639 | documentation for a complete list of supported parameters_. 640 | 641 | :param connection_factory: 642 | The ``connection_factory`` argument can be used to create non-standard 643 | connections. The class returned should be a subclass of `psycopg2.extensions.connection`_. 644 | See `Connection and cursor factories`_ for details. Defaults to ``None``. 645 | 646 | :param cursor_factory: 647 | The ``cursor_factory`` argument can be used to return a non-standard cursor class. 648 | The class returned should be a subclass of `psycopg2.extensions.cursor`_. 649 | See `Connection and cursor factories`_ for details. Defaults to ``None``. 650 | 651 | :param list setsession: 652 | List of initial SQL commands to be executed once connection is established. 653 | If any of the commands fails, the connection will be closed. 654 | **NOTE:** The commands will be executed as one transaction block. 655 | 656 | .. _Data Source Name: http://en.wikipedia.org/wiki/Data_Source_Name 657 | .. _parameters: http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS 658 | .. _psycopg2.extensions.connection: http://initd.org/psycopg/docs/connection.html#connection 659 | ..
_Connection and cursor factories: http://initd.org/psycopg/docs/advanced.html#subclassing-cursor 660 | """ 661 | def __init__(self, 662 | dsn, 663 | connection_factory=None, 664 | cursor_factory=None, 665 | ioloop=None, 666 | setsession=()): 667 | 668 | self.dsn = dsn 669 | self.connection_factory = connection_factory 670 | self.cursor_factory = cursor_factory 671 | self.ioloop = ioloop or IOLoop.instance() 672 | self.setsession = setsession 673 | 674 | def connect(self): 675 | """ 676 | Initiate asynchronous connect. 677 | Returns future that resolves to this connection object. 678 | """ 679 | kwargs = {"async_": True} 680 | if self.connection_factory: 681 | kwargs["connection_factory"] = self.connection_factory 682 | if self.cursor_factory: 683 | kwargs["cursor_factory"] = self.cursor_factory 684 | 685 | future = Future() 686 | 687 | self.connection = None 688 | try: 689 | self.connection = psycopg2.connect(self.dsn, **kwargs) 690 | except psycopg2.Error: 691 | self.connection = None 692 | future_set_exc_info(future, sys.exc_info()) 693 | return future 694 | 695 | self.fileno = self.connection.fileno() 696 | 697 | if self.setsession: 698 | on_connect_future = Future() 699 | 700 | def on_connect(on_connect_future): 701 | self.ioloop.add_future(self.transaction(self.setsession), lambda x: future.set_result(self)) 702 | 703 | self.ioloop.add_future(on_connect_future, on_connect) 704 | callback = partial(self._io_callback, on_connect_future, self) 705 | else: 706 | callback = partial(self._io_callback, future, self) 707 | 708 | self.ioloop.add_handler(self.fileno, callback, IOLoop.WRITE) 709 | self.ioloop.add_future(future, self._set_server_version) 710 | self.ioloop.add_future(future, self._close_on_fail) 711 | 712 | return future 713 | 714 | def _set_server_version(self, future): 715 | if future.exception(): 716 | return 717 | self.server_version = self.connection.server_version 718 | 719 | def _close_on_fail(self, future): 720 | # If connection attempt evetually 
fails - marks connection as closed by ourselves 721 | # since psycopg2 does not do that for us (on connection attempts) 722 | if future.exception(): 723 | self.connection = None 724 | 725 | def _io_callback(self, future, result, fd=None, events=None): 726 | try: 727 | state = self.connection.poll() 728 | except (psycopg2.Warning, psycopg2.Error) as err: 729 | self.ioloop.remove_handler(self.fileno) 730 | future_set_exc_info(future, sys.exc_info()) 731 | else: 732 | try: 733 | if state == POLL_OK: 734 | self.ioloop.remove_handler(self.fileno) 735 | future.set_result(result) 736 | elif state == POLL_READ: 737 | self.ioloop.update_handler(self.fileno, IOLoop.READ) 738 | elif state == POLL_WRITE: 739 | self.ioloop.update_handler(self.fileno, IOLoop.WRITE) 740 | else: 741 | future.set_exception(psycopg2.OperationalError("poll() returned %s" % state)) 742 | except IOError: 743 | # Can happen when there are quite a lof of outstanding 744 | # requests. See https://github.com/FSX/momoko/issues/127 745 | self.ioloop.remove_handler(self.fileno) 746 | future.set_exception(psycopg2.OperationalError("IOError on socket")) 747 | 748 | def ping(self): 749 | """ 750 | Make sure this connection is alive by executing SELECT 1 statement - 751 | i.e. roundtrip to the database. 752 | 753 | Returns future. If it resolves sucessfully - the connection is alive (or dead otherwise). 754 | """ 755 | return self.execute("SELECT 1 AS ping") 756 | 757 | def execute(self, 758 | operation, 759 | parameters=(), 760 | cursor_factory=None): 761 | """ 762 | Prepare and execute a database operation (query or command). 763 | 764 | :param string operation: An SQL query. 765 | :param tuple/list/dict parameters: 766 | A list, tuple or dict with query parameters. See `Passing parameters to SQL queries`_ 767 | for more information. Defaults to an empty tuple. 768 | :param cursor_factory: 769 | The ``cursor_factory`` argument can be used to create non-standard cursors. 
770 | The class returned must be a subclass of `psycopg2.extensions.cursor`_. 771 | See `Connection and cursor factories`_ for details. Defaults to ``None``. 772 | 773 | Returns future that resolves to cursor object containing result. 774 | 775 | .. _Passing parameters to SQL queries: http://initd.org/psycopg/docs/usage.html#query-parameters 776 | .. _psycopg2.extensions.cursor: http://initd.org/psycopg/docs/extensions.html#psycopg2.extensions.cursor 777 | .. _Connection and cursor factories: http://initd.org/psycopg/docs/advanced.html#subclassing-cursor 778 | """ 779 | kwargs = {"cursor_factory": cursor_factory} if cursor_factory else {} 780 | cursor = self.connection.cursor(**kwargs) 781 | if parameters: 782 | cursor.execute(operation, parameters) 783 | else: 784 | cursor.execute(operation) 785 | 786 | future = Future() 787 | callback = partial(self._io_callback, future, cursor) 788 | self.ioloop.add_handler(self.fileno, callback, IOLoop.WRITE) 789 | return future 790 | 791 | def callproc(self, 792 | procname, 793 | parameters=(), 794 | cursor_factory=None): 795 | """ 796 | Call a stored database procedure with the given name. 797 | 798 | The sequence of parameters must contain one entry for each argument that 799 | the procedure expects. The result of the call is returned as modified copy 800 | of the input sequence. Input parameters are left untouched, output and 801 | input/output parameters replaced with possibly new values. 802 | 803 | The procedure may also provide a result set as output. This must then be 804 | made available through the standard `fetch*()`_ methods. 805 | 806 | :param string procname: The name of the database procedure. 807 | :param tuple/list parameters: 808 | A list or tuple with query parameters. See `Passing parameters to SQL queries`_ 809 | for more information. Defaults to an empty tuple. 810 | :param cursor_factory: 811 | The ``cursor_factory`` argument can be used to create non-standard cursors. 
812 | The class returned must be a subclass of `psycopg2.extensions.cursor`_. 813 | See `Connection and cursor factories`_ for details. Defaults to ``None``. 814 | 815 | Returns future that resolves to cursor object containing result. 816 | 817 | .. _fetch*(): http://initd.org/psycopg/docs/cursor.html#fetch 818 | .. _Passing parameters to SQL queries: http://initd.org/psycopg/docs/usage.html#query-parameters 819 | .. _psycopg2.extensions.cursor: http://initd.org/psycopg/docs/extensions.html#psycopg2.extensions.cursor 820 | .. _Connection and cursor factories: http://initd.org/psycopg/docs/advanced.html#subclassing-cursor 821 | """ 822 | kwargs = {"cursor_factory": cursor_factory} if cursor_factory else {} 823 | cursor = self.connection.cursor(**kwargs) 824 | cursor.callproc(procname, parameters) 825 | 826 | future = Future() 827 | callback = partial(self._io_callback, future, cursor) 828 | self.ioloop.add_handler(self.fileno, callback, IOLoop.WRITE) 829 | return future 830 | 831 | def mogrify(self, operation, parameters=()): 832 | """ 833 | Return a query string after arguments binding. 834 | 835 | The string returned is exactly the one that would be sent to the database 836 | running the execute() method or similar. 837 | 838 | **NOTE:** This is a synchronous method. 839 | 840 | :param string operation: An SQL query. 841 | :param tuple/list parameters: 842 | A list or tuple with query parameters. See `Passing parameters to SQL queries`_ 843 | for more information. Defaults to an empty tuple. 844 | 845 | .. _Passing parameters to SQL queries: http://initd.org/psycopg/docs/usage.html#query-parameters 846 | .. 
_Connection and cursor factories: http://initd.org/psycopg/docs/advanced.html#subclassing-cursor 847 | """ 848 | cursor = self.connection.cursor() 849 | return cursor.mogrify(operation, parameters) 850 | 851 | def transaction(self, 852 | statements, 853 | cursor_factory=None, 854 | auto_rollback=True): 855 | """ 856 | Run a sequence of SQL queries in a database transaction. 857 | 858 | :param tuple/list statements: 859 | List or tuple containing SQL queries with or without parameters. An item 860 | can be a string (SQL query without parameters) or a tuple/list with two items, 861 | an SQL query and a tuple/list/dict with parameters. 862 | 863 | See `Passing parameters to SQL queries`_ for more information. 864 | :param cursor_factory: 865 | The ``cursor_factory`` argument can be used to create non-standard cursors. 866 | The class returned must be a subclass of `psycopg2.extensions.cursor`_. 867 | See `Connection and cursor factories`_ for details. Defaults to ``None``. 868 | :param bool auto_rollback: 869 | If one of the transaction statements fails, try to automatically 870 | execute ROLLBACK to abort the transaction. If ROLLBACK fails, it would 871 | not be raised, but only logged. 872 | 873 | Returns future that resolves to ``list`` of cursors. Each cursor contains the result 874 | of the corresponding transaction statement. 875 | 876 | .. _Passing parameters to SQL queries: http://initd.org/psycopg/docs/usage.html#query-parameters 877 | .. _psycopg2.extensions.cursor: http://initd.org/psycopg/docs/extensions.html#psycopg2.extensions.cursor 878 | .. 
_Connection and cursor factories: http://initd.org/psycopg/docs/advanced.html#subclassing-cursor 879 | """ 880 | cursors = [] 881 | transaction_future = Future() 882 | 883 | queue = self._statement_generator(statements) 884 | 885 | def exec_statements(future): 886 | try: 887 | cursor = future.result() 888 | cursors.append(cursor) 889 | except Exception as error: 890 | if auto_rollback and not self.closed: 891 | self._rollback(transaction_future, error) 892 | else: 893 | future_set_exc_info(transaction_future, sys.exc_info()) 894 | return 895 | 896 | try: 897 | operation, parameters = next(queue) 898 | except StopIteration: 899 | transaction_future.set_result(cursors[1:-1]) 900 | return 901 | 902 | f = self.execute(operation, parameters, cursor_factory) 903 | self.ioloop.add_future(f, exec_statements) 904 | 905 | self.ioloop.add_future(self.execute("BEGIN;"), exec_statements) 906 | return transaction_future 907 | 908 | def _statement_generator(self, statements): 909 | for statement in statements: 910 | if isinstance(statement, basestring): 911 | yield (statement, ()) 912 | else: 913 | yield statement[:2] 914 | yield ('COMMIT;', ()) 915 | 916 | def _rollback(self, transaction_future, error): 917 | def rollback_callback(rb_future): 918 | try: 919 | rb_future.result() 920 | except Exception as rb_error: 921 | log.warn("Failed to ROLLBACK transaction %s", rb_error) 922 | transaction_future.set_exception(error) 923 | self.ioloop.add_future(self.execute("ROLLBACK;"), rollback_callback) 924 | 925 | def _register(self, future, registrator, fut): 926 | try: 927 | cursor = fut.result() 928 | except Exception: 929 | future_set_exc_info(future, sys.exc_info()) 930 | return 931 | 932 | oid, array_oid = cursor.fetchone() 933 | registrator(oid, array_oid) 934 | future.set_result(None) 935 | 936 | def register_hstore(self, globally=False, unicode=False): 937 | """ 938 | Register adapter and typecaster for ``dict-hstore`` conversions. 
939 | 940 | More information on the hstore datatype can be found on the 941 | Psycopg2 |hstoredoc|_. 942 | 943 | :param boolean globally: 944 | Register the adapter globally, not only on this connection. 945 | :param boolean unicode: 946 | If ``True``, keys and values returned from the database will be ``unicode`` 947 | instead of ``str``. The option is not available on Python 3. 948 | 949 | Returns future that resolves to ``None``. 950 | 951 | .. |hstoredoc| replace:: documentation 952 | 953 | .. _hstoredoc: http://initd.org/psycopg/docs/extras.html#hstore-data-type 954 | """ 955 | future = Future() 956 | registrator = partial(_psy_register_hstore, None, globally, unicode) 957 | callback = partial(self._register, future, registrator) 958 | self.ioloop.add_future(self.execute( 959 | "SELECT 'hstore'::regtype::oid AS hstore_oid, 'hstore[]'::regtype::oid AS hstore_arr_oid", 960 | ), callback) 961 | 962 | return future 963 | 964 | def register_json(self, globally=False, loads=None): 965 | """ 966 | Create and register typecasters converting ``json`` type to Python objects. 967 | 968 | More information on the json datatype can be found on the Psycopg2 |regjsondoc|_. 969 | 970 | :param boolean globally: 971 | Register the adapter globally, not only on this connection. 972 | :param function loads: 973 | The function used to parse the data into a Python object. If ``None`` 974 | use ``json.loads()``, where ``json`` is the module chosen according to 975 | the Python version. See psycopg2.extra docs. 976 | 977 | Returns future that resolves to ``None``. 978 | 979 | .. |regjsondoc| replace:: documentation 980 | 981 | .. 
_regjsondoc: http://initd.org/psycopg/docs/extras.html#json-adaptation 982 | """ 983 | future = Future() 984 | registrator = partial(_psy_register_json, None, globally, loads) 985 | callback = partial(self._register, future, registrator) 986 | self.ioloop.add_future(self.execute( 987 | "SELECT 'json'::regtype::oid AS json_oid, 'json[]'::regtype::oid AS json_arr_oid" 988 | ), callback) 989 | 990 | return future 991 | 992 | @property 993 | def closed(self): 994 | """ 995 | Indicates whether the connection is closed or not. 996 | """ 997 | # 0 = open, 1 = closed, 2 = 'something horrible happened' 998 | return self.connection.closed > 0 if self.connection else True 999 | 1000 | def close(self): 1001 | """ 1002 | Closes the connection. 1003 | 1004 | **NOTE:** This is a synchronous method. 1005 | """ 1006 | if self.connection: 1007 | self.connection.close() 1008 | 1009 | 1010 | def connect(*args, **kwargs): 1011 | """ 1012 | Connection factory. 1013 | See :py:meth:`momoko.Connection` for documentation about the parameters. 1014 | 1015 | Returns future that resolves to :py:meth:`momoko.Connection` object or raises exception. 1016 | """ 1017 | return Connection(*args, **kwargs).connect() 1018 | -------------------------------------------------------------------------------- /momoko/exceptions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | momoko.exceptions 4 | ================= 5 | 6 | Exceptions. 7 | 8 | Copyright 2011-2014, Frank Smit & Zaar Hai. 9 | MIT, see LICENSE for more details. 10 | """ 11 | 12 | 13 | class PoolError(Exception): 14 | """ 15 | Raised when something goes wrong in the connection pool. 16 | """ 17 | pass 18 | 19 | 20 | class PartiallyConnectedError(PoolError): 21 | """ 22 | Raised when :py:meth:`momoko.Pool` can not initialize all of the requested connections. 
23 | """ -------------------------------------------------------------------------------- /perf_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from __future__ import absolute_import 3 | 4 | import threading 5 | 6 | from tests import * 7 | 8 | """ 9 | Quick and dirty performance test - async vs threads. 10 | By default Postgresql is configured to support up 100 connections. 11 | Change max_connections to 1100 in /etc/postgresql/9.4/main/postgresql.conf 12 | to run this example. 13 | 14 | NOTE: This benchmark is purely synthetic. In real life you'll be bound by database 15 | query throughput! 16 | 17 | So far, on psycopg2, momoko pool "context switching" is about 4 times slower compared to 18 | python thread contenxt switching when issueing queries that do nothing (SELECT 1). 19 | On my laptop momoko.Pool can do only about 8000 qps, but simple consequtive threading 20 | can do over 30,000. 21 | 22 | On psycopg2cffi, things are bit better - momoko.Pool only about 50% slower. 23 | But that's only because psycopg2cffi is less performant. 24 | 25 | After chaning queries to actually do something (SELECT pg_sleep(0.002)) I was able to 26 | actually measure thread (or Pool) overhead more accurately. 27 | 28 | This is the typical run: 29 | 30 | Threads(1): 27.04 seconds 31 | Pool(1): 28.72 seconds 32 | Threads(10): 2.34 seconds 33 | Pool(10): 2.67 seconds 34 | Threads(100): 0.62 seconds 35 | Pool(100): 1.38 seconds 36 | Threads(1000): 1.16 seconds 37 | Pool(1000): 1.50 seconds 38 | 39 | Looks like threads are at their best when their number is about 100. 40 | At concurrency of 1000, threads get 87% penalty while pool takes only 8%. 
41 | """ 42 | 43 | 44 | class MomokoPoolPerformanceTest(PoolBaseTest): 45 | pool_size = 1 46 | amount = 10000 47 | query = "SELECT pg_sleep(0.002)" 48 | 49 | def run_thread_queries(self, amount=amount, thread_num=1): 50 | conns = [] 51 | for i in range(thread_num): 52 | conn = psycopg2.connect(good_dsn) 53 | conns.append(conn) 54 | 55 | def runner(conn): 56 | for i in range(int(amount/thread_num)): 57 | with conn.cursor() as cur: 58 | cur.execute(self.query) 59 | cur.fetchall() 60 | 61 | start = time.time() 62 | threads = [] 63 | for conn in conns: 64 | thread = threading.Thread(target=runner, args=(conn,)) 65 | thread.start() 66 | threads.append(thread) 67 | for thread in threads: 68 | thread.join() 69 | delta = time.time() - start 70 | for conn in conns: 71 | conn.close() 72 | return delta 73 | 74 | def run_pool_queries(self, amount=amount, thread_num=1): 75 | 76 | db = self.build_pool_sync(dsn=good_dsn, size=thread_num) 77 | 78 | start = time.time() 79 | 80 | def runner(x): 81 | futures = [] 82 | for j in range(amount): 83 | futures.append(db.execute(self.query)) 84 | yield futures 85 | 86 | gen_test(timeout=300)(runner)(self) 87 | delta = time.time() - start 88 | db.close() 89 | return delta 90 | 91 | def test_perf(self): 92 | print("\n") 93 | for threads in (1, 10, 100, 1000): 94 | print("Threads(%s): %.2f seconds" % (threads, self.run_thread_queries(thread_num=threads))) 95 | print("Pool(%s): %.2f seconds" % (threads, self.run_pool_queries(thread_num=threads))) 96 | 97 | 98 | if __name__ == '__main__': 99 | if debug: 100 | FORMAT = '%(asctime)-15s %(levelname)s:%(name)s %(funcName)-15s: %(message)s' 101 | logging.basicConfig(format=FORMAT) 102 | logging.getLogger("momoko").setLevel(logging.DEBUG) 103 | logging.getLogger("unittest").setLevel(logging.DEBUG) 104 | unittest.main() 105 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env python 2 | 3 | import os 4 | 5 | # The multiprocessingimport is to prevent the following error 6 | # after all the tests have been executed. 7 | # Error in atexit._run_exitfuncs: 8 | # TypeError: 'NoneType' object is not callable 9 | 10 | # From: http://article.gmane.org/gmane.comp.python.peak/2509 11 | # Work around setuptools bug 12 | # http://article.gmane.org/gmane.comp.python.peak/2509 13 | import multiprocessing 14 | 15 | try: 16 | from setuptools import setup, Extension, Command 17 | except ImportError: 18 | from distutils.core import setup, Extension, Command 19 | 20 | 21 | dependencies = ['tornado >= 4.0, <6.0', ] 22 | psycopg2_impl = os.environ.get('MOMOKO_PSYCOPG2_IMPL', 'psycopg2') 23 | 24 | if psycopg2_impl == 'psycopg2cffi': 25 | print('Using psycopg2cffi') 26 | dependencies.append('psycopg2cffi') 27 | elif psycopg2_impl == 'psycopg2ct': 28 | print('Using psycopg2ct') 29 | dependencies.append('psycopg2ct') 30 | else: 31 | print('Using psycopg2') 32 | dependencies.append('psycopg2') 33 | 34 | 35 | setup( 36 | name='Momoko', 37 | version='2.2.5.1', 38 | description="Momoko wraps Psycopg2's functionality for use in Tornado.", 39 | long_description=open('README.rst').read(), 40 | author='Frank Smit & Zaar Hai', 41 | author_email='frank@61924.nl', 42 | url='http://momoko.61924.nl/', 43 | packages=['momoko'], 44 | license='MIT', 45 | test_suite='tests', 46 | install_requires=dependencies, 47 | classifiers = [ 48 | 'Development Status :: 5 - Production/Stable', 49 | 'Intended Audience :: Developers', 50 | 'License :: OSI Approved :: MIT License', 51 | 'Programming Language :: Python :: Implementation :: PyPy', 52 | 'Programming Language :: Python :: Implementation :: CPython', 53 | 'Programming Language :: Python :: 2.7', 54 | 'Programming Language :: Python :: 3.3', 55 | 'Programming Language :: Python :: 3.4', 56 | 'Programming Language :: Python :: 3.5', 57 | 'Programming Language :: Python :: 3.6', 58 | 'Programming Language :: Python :: 
3.7', 59 | 'Topic :: Database', 60 | 'Topic :: Database :: Front-Ends' 61 | ] 62 | ) 63 | -------------------------------------------------------------------------------- /tcproxy/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | *.DS_Store 3 | *.log 4 | cachegrind.out.* 5 | callgrind.out.* 6 | massif.out.* 7 | memcheck.out 8 | tcproxy 9 | core 10 | -------------------------------------------------------------------------------- /tcproxy/ChangeLog: -------------------------------------------------------------------------------- 1 | 2011-06-2 Release 0.2.2 2 | * fix a bug that causes infinit loop when process write 3 | 4 | 2011-05-21 Release 0.2.1 5 | * fix ip address parse bug 6 | 7 | 2011-05-20 Release 0.2 8 | * nonblock connect implemented 9 | * semicolon bind to address not port now 10 | * some performance optimization 11 | * some bugfix 12 | 13 | 2011-05-18 Release 0.1.1 14 | * -l option added to write log file 15 | * minor code refinement 16 | 17 | 2011-05-16 Release 0.1 18 | * first public beta release 19 | -------------------------------------------------------------------------------- /tcproxy/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright dccmx. All rights reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to 5 | deal in the Software without restriction, including without limitation the 6 | rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 7 | sell copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 
12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 19 | IN THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /tcproxy/Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # tcproxy - Makefile 3 | # 4 | # Author: dccmx 5 | # 6 | 7 | default: all 8 | 9 | .DEFAULT: 10 | cd src && $(MAKE) $@ 11 | -------------------------------------------------------------------------------- /tcproxy/README.md: -------------------------------------------------------------------------------- 1 | **Copied from https://github.com/dccmx/tcproxy.git. Used for connection issues simulation.** 2 | 3 | tcproxy 4 | ======= 5 | tcproxy is a small efficient tcp proxy that can be used for port forwarding or load balancing. 
6 | 7 | sample usage 8 | ------------ 9 | tcproxy "11212 -> 11211" 10 | tcproxy "192.168.0.1:11212 -> 192.168.0.2:11211" 11 | 12 | not implemented yet 13 | --------------- 14 | tcproxy "any:11212 -> rr{192.168.0.100:11211 192.168.0.101:11211 192.168.0.102:11211}" 15 | tcproxy "any:11212 -> hash{192.168.0.100:11211 192.168.0.101:11211 192.168.0.102:11211}" 16 | -------------------------------------------------------------------------------- /tcproxy/TODO: -------------------------------------------------------------------------------- 1 | failover 2 | tcp pool 3 | thread 4 | 5 | tcproxy "any:11212\ 6 | <-> rr{\ 7 | 192.168.0.100:11211 -> 192.168.0.100:11212\ 8 | 192.168.0.101:11211\ 9 | 192.168.0.102:11211}\ 10 | -> 192.168.0.103:11211" 11 | -------------------------------------------------------------------------------- /tcproxy/src/Makefile: -------------------------------------------------------------------------------- 1 | PROGNAME = tcproxy 2 | 3 | OBJS = tcproxy.o ae.o util.o policy.o zmalloc.o anet.o 4 | 5 | CFLAGS_GEN = -Wall -Werror -g $(CFLAGS) 6 | CFLAGS_DBG = -ggdb $(CFLAGS_GEN) 7 | CFLAGS_OPT = -O3 -Wno-format $(CFLAGS_GEN) 8 | DEBUG ?= 9 | 10 | CCCOLOR="\033[34m" 11 | LINKCOLOR="\033[34;1m" 12 | SRCCOLOR="\033[33m" 13 | BINCOLOR="\033[37;1m" 14 | MAKECOLOR="\033[32;1m" 15 | ENDCOLOR="\033[0m" 16 | 17 | QUIET_CC = @printf ' %b %b\n' $(CCCOLOR)CC$(ENDCOLOR) $(SRCCOLOR)$@$(ENDCOLOR); 18 | QUIET_LINK = @printf ' %b %b\n' $(LINKCOLOR)LINK$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR); 19 | 20 | LDFLAGS += 21 | LIBS += 22 | 23 | all: $(PROGNAME) 24 | 25 | %.o: %.c 26 | $(QUIET_CC)$(CC) -c $(CFLAGS) $(CFLAGS_OPT) $(DEBUG) $(COMPILE_TIME) $< 27 | 28 | $(PROGNAME): $(OBJS) 29 | $(QUIET_LINK)$(CC) -o $(PROGNAME) $(CFLAGS_OPT) $(DEBUG) $(OBJS) $(CCLINK) 30 | @echo 31 | @echo "Make Complete. Read README for how to use." 32 | @echo 33 | @echo "Having problems with it? 
Send complains and bugs to dccmx@dccmx.com" 34 | @echo 35 | 36 | clean: 37 | rm -f $(PROGNAME) core core.[1-9][0-9]* *.o memcheck.out callgrind.out.[1-9][0-9]* massif.out.[1-9][0-9]* 38 | -------------------------------------------------------------------------------- /tcproxy/src/ae.c: -------------------------------------------------------------------------------- 1 | /* A simple event-driven programming library. Originally I wrote this code 2 | * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated 3 | * it in form of a library for easy reuse. 4 | * 5 | * Copyright (c) 2006-2010, Salvatore Sanfilippo 6 | * All rights reserved. 7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions are met: 10 | * 11 | * * Redistributions of source code must retain the above copyright notice, 12 | * this list of conditions and the following disclaimer. 13 | * * Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * * Neither the name of Redis nor the names of its contributors may be used 17 | * to endorse or promote products derived from this software without 18 | * specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 | * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | * POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | 41 | #include "ae.h" 42 | #include "zmalloc.h" 43 | #include "config.h" 44 | 45 | /* Include the best multiplexing layer supported by this system. 46 | * The following should be ordered by performances, descending. */ 47 | #ifdef HAVE_EPOLL 48 | #include "ae_epoll.c" 49 | #else 50 | #ifdef HAVE_KQUEUE 51 | #include "ae_kqueue.c" 52 | #else 53 | #include "ae_select.c" 54 | #endif 55 | #endif 56 | 57 | aeEventLoop *aeCreateEventLoop(int setsize) { 58 | aeEventLoop *eventLoop; 59 | int i; 60 | 61 | if ((eventLoop = zmalloc(sizeof(*eventLoop))) == NULL) goto err; 62 | eventLoop->events = zmalloc(sizeof(aeFileEvent)*setsize); 63 | eventLoop->fired = zmalloc(sizeof(aeFiredEvent)*setsize); 64 | if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err; 65 | eventLoop->setsize = setsize; 66 | eventLoop->timeEventHead = NULL; 67 | eventLoop->timeEventNextId = 0; 68 | eventLoop->stop = 0; 69 | eventLoop->maxfd = -1; 70 | eventLoop->beforesleep = NULL; 71 | if (aeApiCreate(eventLoop) == -1) goto err; 72 | /* Events with mask == AE_NONE are not set. So let's initialize the 73 | * vector with it. 
*/ 74 | for (i = 0; i < setsize; i++) 75 | eventLoop->events[i].mask = AE_NONE; 76 | return eventLoop; 77 | 78 | err: 79 | if (eventLoop) { 80 | zfree(eventLoop->events); 81 | zfree(eventLoop->fired); 82 | zfree(eventLoop); 83 | } 84 | return NULL; 85 | } 86 | 87 | void aeDeleteEventLoop(aeEventLoop *eventLoop) { 88 | aeApiFree(eventLoop); 89 | zfree(eventLoop->events); 90 | zfree(eventLoop->fired); 91 | zfree(eventLoop); 92 | } 93 | 94 | void aeStop(aeEventLoop *eventLoop) { 95 | eventLoop->stop = 1; 96 | } 97 | 98 | int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, 99 | aeFileProc *proc, void *clientData) 100 | { 101 | if (fd >= eventLoop->setsize) return AE_ERR; 102 | aeFileEvent *fe = &eventLoop->events[fd]; 103 | 104 | if (aeApiAddEvent(eventLoop, fd, mask) == -1) 105 | return AE_ERR; 106 | fe->mask |= mask; 107 | if (mask & AE_READABLE) fe->rfileProc = proc; 108 | if (mask & AE_WRITABLE) fe->wfileProc = proc; 109 | fe->clientData = clientData; 110 | if (fd > eventLoop->maxfd) 111 | eventLoop->maxfd = fd; 112 | return AE_OK; 113 | } 114 | 115 | void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask) 116 | { 117 | if (fd >= eventLoop->setsize) return; 118 | aeFileEvent *fe = &eventLoop->events[fd]; 119 | 120 | if (fe->mask == AE_NONE) return; 121 | fe->mask = fe->mask & (~mask); 122 | if (fd == eventLoop->maxfd && fe->mask == AE_NONE) { 123 | /* Update the max fd */ 124 | int j; 125 | 126 | for (j = eventLoop->maxfd-1; j >= 0; j--) 127 | if (eventLoop->events[j].mask != AE_NONE) break; 128 | eventLoop->maxfd = j; 129 | } 130 | aeApiDelEvent(eventLoop, fd, mask); 131 | } 132 | 133 | int aeGetFileEvents(aeEventLoop *eventLoop, int fd) { 134 | if (fd >= eventLoop->setsize) return 0; 135 | aeFileEvent *fe = &eventLoop->events[fd]; 136 | 137 | return fe->mask; 138 | } 139 | 140 | static void aeGetTime(long *seconds, long *milliseconds) 141 | { 142 | struct timeval tv; 143 | 144 | gettimeofday(&tv, NULL); 145 | *seconds = tv.tv_sec; 146 | 
*milliseconds = tv.tv_usec/1000; 147 | } 148 | 149 | static void aeAddMillisecondsToNow(long long milliseconds, long *sec, long *ms) { 150 | long cur_sec, cur_ms, when_sec, when_ms; 151 | 152 | aeGetTime(&cur_sec, &cur_ms); 153 | when_sec = cur_sec + milliseconds/1000; 154 | when_ms = cur_ms + milliseconds%1000; 155 | if (when_ms >= 1000) { 156 | when_sec ++; 157 | when_ms -= 1000; 158 | } 159 | *sec = when_sec; 160 | *ms = when_ms; 161 | } 162 | 163 | long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, 164 | aeTimeProc *proc, void *clientData, 165 | aeEventFinalizerProc *finalizerProc) 166 | { 167 | long long id = eventLoop->timeEventNextId++; 168 | aeTimeEvent *te; 169 | 170 | te = zmalloc(sizeof(*te)); 171 | if (te == NULL) return AE_ERR; 172 | te->id = id; 173 | aeAddMillisecondsToNow(milliseconds,&te->when_sec,&te->when_ms); 174 | te->timeProc = proc; 175 | te->finalizerProc = finalizerProc; 176 | te->clientData = clientData; 177 | te->next = eventLoop->timeEventHead; 178 | eventLoop->timeEventHead = te; 179 | return id; 180 | } 181 | 182 | int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id) 183 | { 184 | aeTimeEvent *te, *prev = NULL; 185 | 186 | te = eventLoop->timeEventHead; 187 | while(te) { 188 | if (te->id == id) { 189 | if (prev == NULL) 190 | eventLoop->timeEventHead = te->next; 191 | else 192 | prev->next = te->next; 193 | if (te->finalizerProc) 194 | te->finalizerProc(eventLoop, te->clientData); 195 | zfree(te); 196 | return AE_OK; 197 | } 198 | prev = te; 199 | te = te->next; 200 | } 201 | return AE_ERR; /* NO event with the specified ID found */ 202 | } 203 | 204 | /* Search the first timer to fire. 205 | * This operation is useful to know how many time the select can be 206 | * put in sleep without to delay any event. 207 | * If there are no timers NULL is returned. 208 | * 209 | * Note that's O(N) since time events are unsorted. 
210 | * Possible optimizations (not needed by Redis so far, but...): 211 | * 1) Insert the event in order, so that the nearest is just the head. 212 | * Much better but still insertion or deletion of timers is O(N). 213 | * 2) Use a skiplist to have this operation as O(1) and insertion as O(log(N)). 214 | */ 215 | static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop) 216 | { 217 | aeTimeEvent *te = eventLoop->timeEventHead; 218 | aeTimeEvent *nearest = NULL; 219 | 220 | while(te) { 221 | if (!nearest || te->when_sec < nearest->when_sec || 222 | (te->when_sec == nearest->when_sec && 223 | te->when_ms < nearest->when_ms)) 224 | nearest = te; 225 | te = te->next; 226 | } 227 | return nearest; 228 | } 229 | 230 | /* Process time events */ 231 | static int processTimeEvents(aeEventLoop *eventLoop) { 232 | int processed = 0; 233 | aeTimeEvent *te; 234 | long long maxId; 235 | 236 | te = eventLoop->timeEventHead; 237 | maxId = eventLoop->timeEventNextId-1; 238 | while(te) { 239 | long now_sec, now_ms; 240 | long long id; 241 | 242 | if (te->id > maxId) { 243 | te = te->next; 244 | continue; 245 | } 246 | aeGetTime(&now_sec, &now_ms); 247 | if (now_sec > te->when_sec || 248 | (now_sec == te->when_sec && now_ms >= te->when_ms)) 249 | { 250 | int retval; 251 | 252 | id = te->id; 253 | retval = te->timeProc(eventLoop, id, te->clientData); 254 | processed++; 255 | /* After an event is processed our time event list may 256 | * no longer be the same, so we restart from head. 257 | * Still we make sure to don't process events registered 258 | * by event handlers itself in order to don't loop forever. 259 | * To do so we saved the max ID we want to handle. 260 | * 261 | * FUTURE OPTIMIZATIONS: 262 | * Note that this is NOT great algorithmically. 
Redis uses 263 | * a single time event so it's not a problem but the right 264 | * way to do this is to add the new elements on head, and 265 | * to flag deleted elements in a special way for later 266 | * deletion (putting references to the nodes to delete into 267 | * another linked list). */ 268 | if (retval != AE_NOMORE) { 269 | aeAddMillisecondsToNow(retval,&te->when_sec,&te->when_ms); 270 | } else { 271 | aeDeleteTimeEvent(eventLoop, id); 272 | } 273 | te = eventLoop->timeEventHead; 274 | } else { 275 | te = te->next; 276 | } 277 | } 278 | return processed; 279 | } 280 | 281 | /* Process every pending time event, then every pending file event 282 | * (that may be registered by time event callbacks just processed). 283 | * Without special flags the function sleeps until some file event 284 | * fires, or when the next time event occurrs (if any). 285 | * 286 | * If flags is 0, the function does nothing and returns. 287 | * if flags has AE_ALL_EVENTS set, all the kind of events are processed. 288 | * if flags has AE_FILE_EVENTS set, file events are processed. 289 | * if flags has AE_TIME_EVENTS set, time events are processed. 290 | * if flags has AE_DONT_WAIT set the function returns ASAP until all 291 | * the events that's possible to process without to wait are processed. 292 | * 293 | * The function returns the number of events processed. */ 294 | int aeProcessEvents(aeEventLoop *eventLoop, int flags) 295 | { 296 | int processed = 0, numevents; 297 | 298 | /* Nothing to do? return ASAP */ 299 | if (!(flags & AE_TIME_EVENTS) && !(flags & AE_FILE_EVENTS)) return 0; 300 | 301 | /* Note that we want call select() even if there are no 302 | * file events to process as long as we want to process time 303 | * events, in order to sleep until the next time event is ready 304 | * to fire. 
*/ 305 | if (eventLoop->maxfd != -1 || 306 | ((flags & AE_TIME_EVENTS) && !(flags & AE_DONT_WAIT))) { 307 | int j; 308 | aeTimeEvent *shortest = NULL; 309 | struct timeval tv, *tvp; 310 | 311 | if (flags & AE_TIME_EVENTS && !(flags & AE_DONT_WAIT)) 312 | shortest = aeSearchNearestTimer(eventLoop); 313 | if (shortest) { 314 | long now_sec, now_ms; 315 | 316 | /* Calculate the time missing for the nearest 317 | * timer to fire. */ 318 | aeGetTime(&now_sec, &now_ms); 319 | tvp = &tv; 320 | tvp->tv_sec = shortest->when_sec - now_sec; 321 | if (shortest->when_ms < now_ms) { 322 | tvp->tv_usec = ((shortest->when_ms+1000) - now_ms)*1000; 323 | tvp->tv_sec --; 324 | } else { 325 | tvp->tv_usec = (shortest->when_ms - now_ms)*1000; 326 | } 327 | if (tvp->tv_sec < 0) tvp->tv_sec = 0; 328 | if (tvp->tv_usec < 0) tvp->tv_usec = 0; 329 | } else { 330 | /* If we have to check for events but need to return 331 | * ASAP because of AE_DONT_WAIT we need to se the timeout 332 | * to zero */ 333 | if (flags & AE_DONT_WAIT) { 334 | tv.tv_sec = tv.tv_usec = 0; 335 | tvp = &tv; 336 | } else { 337 | /* Otherwise we can block */ 338 | tvp = NULL; /* wait forever */ 339 | } 340 | } 341 | 342 | numevents = aeApiPoll(eventLoop, tvp); 343 | for (j = 0; j < numevents; j++) { 344 | aeFileEvent *fe = &eventLoop->events[eventLoop->fired[j].fd]; 345 | int mask = eventLoop->fired[j].mask; 346 | int fd = eventLoop->fired[j].fd; 347 | int rfired = 0; 348 | 349 | /* note the fe->mask & mask & ... code: maybe an already processed 350 | * event removed an element that fired and we still didn't 351 | * processed, so we check if the event is still valid. 
*/ 352 | if (fe->mask & mask & AE_READABLE) { 353 | rfired = 1; 354 | fe->rfileProc(eventLoop,fd,fe->clientData,mask); 355 | } 356 | if (fe->mask & mask & AE_WRITABLE) { 357 | if (!rfired || fe->wfileProc != fe->rfileProc) 358 | fe->wfileProc(eventLoop,fd,fe->clientData,mask); 359 | } 360 | processed++; 361 | } 362 | } 363 | /* Check time events */ 364 | if (flags & AE_TIME_EVENTS) 365 | processed += processTimeEvents(eventLoop); 366 | 367 | return processed; /* return the number of processed file/time events */ 368 | } 369 | 370 | /* Wait for millseconds until the given file descriptor becomes 371 | * writable/readable/exception */ 372 | int aeWait(int fd, int mask, long long milliseconds) { 373 | struct pollfd pfd; 374 | int retmask = 0, retval; 375 | 376 | memset(&pfd, 0, sizeof(pfd)); 377 | pfd.fd = fd; 378 | if (mask & AE_READABLE) pfd.events |= POLLIN; 379 | if (mask & AE_WRITABLE) pfd.events |= POLLOUT; 380 | 381 | if ((retval = poll(&pfd, 1, milliseconds))== 1) { 382 | if (pfd.revents & POLLIN) retmask |= AE_READABLE; 383 | if (pfd.revents & POLLOUT) retmask |= AE_WRITABLE; 384 | return retmask; 385 | } else { 386 | return retval; 387 | } 388 | } 389 | 390 | void aeMain(aeEventLoop *eventLoop) { 391 | eventLoop->stop = 0; 392 | while (!eventLoop->stop) { 393 | if (eventLoop->beforesleep != NULL) 394 | eventLoop->beforesleep(eventLoop); 395 | aeProcessEvents(eventLoop, AE_ALL_EVENTS); 396 | } 397 | } 398 | 399 | char *aeGetApiName(void) { 400 | return aeApiName(); 401 | } 402 | 403 | void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep) { 404 | eventLoop->beforesleep = beforesleep; 405 | } 406 | -------------------------------------------------------------------------------- /tcproxy/src/ae.h: -------------------------------------------------------------------------------- 1 | /* A simple event-driven programming library. 
Originally I wrote this code 2 | * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated 3 | * it in form of a library for easy reuse. 4 | * 5 | * Copyright (c) 2006-2010, Salvatore Sanfilippo 6 | * All rights reserved. 7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions are met: 10 | * 11 | * * Redistributions of source code must retain the above copyright notice, 12 | * this list of conditions and the following disclaimer. 13 | * * Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * * Neither the name of Redis nor the names of its contributors may be used 17 | * to endorse or promote products derived from this software without 18 | * specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | * POSSIBILITY OF SUCH DAMAGE. 
31 | */ 32 | 33 | #ifndef __AE_H__ 34 | #define __AE_H__ 35 | 36 | #define AE_OK 0 37 | #define AE_ERR -1 38 | 39 | #define AE_NONE 0 40 | #define AE_READABLE 1 41 | #define AE_WRITABLE 2 42 | 43 | #define AE_FILE_EVENTS 1 44 | #define AE_TIME_EVENTS 2 45 | #define AE_ALL_EVENTS (AE_FILE_EVENTS|AE_TIME_EVENTS) 46 | #define AE_DONT_WAIT 4 47 | 48 | #define AE_NOMORE -1 49 | 50 | /* Macros */ 51 | #define AE_NOTUSED(V) ((void) V) 52 | 53 | struct aeEventLoop; 54 | 55 | /* Types and data structures */ 56 | typedef void aeFileProc(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask); 57 | typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData); 58 | typedef void aeEventFinalizerProc(struct aeEventLoop *eventLoop, void *clientData); 59 | typedef void aeBeforeSleepProc(struct aeEventLoop *eventLoop); 60 | 61 | /* File event structure */ 62 | typedef struct aeFileEvent { 63 | int mask; /* one of AE_(READABLE|WRITABLE) */ 64 | aeFileProc *rfileProc; 65 | aeFileProc *wfileProc; 66 | void *clientData; 67 | } aeFileEvent; 68 | 69 | /* Time event structure */ 70 | typedef struct aeTimeEvent { 71 | long long id; /* time event identifier. 
*/ 72 | long when_sec; /* seconds */ 73 | long when_ms; /* milliseconds */ 74 | aeTimeProc *timeProc; 75 | aeEventFinalizerProc *finalizerProc; 76 | void *clientData; 77 | struct aeTimeEvent *next; 78 | } aeTimeEvent; 79 | 80 | /* A fired event */ 81 | typedef struct aeFiredEvent { 82 | int fd; 83 | int mask; 84 | } aeFiredEvent; 85 | 86 | /* State of an event based program */ 87 | typedef struct aeEventLoop { 88 | int maxfd; /* highest file descriptor currently registered */ 89 | int setsize; /* max number of file descriptors tracked */ 90 | long long timeEventNextId; 91 | aeFileEvent *events; /* Registered events */ 92 | aeFiredEvent *fired; /* Fired events */ 93 | aeTimeEvent *timeEventHead; 94 | int stop; 95 | void *apidata; /* This is used for polling API specific data */ 96 | aeBeforeSleepProc *beforesleep; 97 | } aeEventLoop; 98 | 99 | /* Prototypes */ 100 | aeEventLoop *aeCreateEventLoop(int setsize); 101 | void aeDeleteEventLoop(aeEventLoop *eventLoop); 102 | void aeStop(aeEventLoop *eventLoop); 103 | int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, 104 | aeFileProc *proc, void *clientData); 105 | void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask); 106 | int aeGetFileEvents(aeEventLoop *eventLoop, int fd); 107 | long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, 108 | aeTimeProc *proc, void *clientData, 109 | aeEventFinalizerProc *finalizerProc); 110 | int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id); 111 | int aeProcessEvents(aeEventLoop *eventLoop, int flags); 112 | int aeWait(int fd, int mask, long long milliseconds); 113 | void aeMain(aeEventLoop *eventLoop); 114 | char *aeGetApiName(void); 115 | void aeSetBeforeSleepProc(aeEventLoop *eventLoop, aeBeforeSleepProc *beforesleep); 116 | 117 | #endif 118 | -------------------------------------------------------------------------------- /tcproxy/src/ae_epoll.c: -------------------------------------------------------------------------------- 1 
| /* Linux epoll(2) based ae.c module 2 | * Copyright (C) 2009-2010 Salvatore Sanfilippo - antirez@gmail.com 3 | * Released under the BSD license. See the COPYING file for more info. */ 4 | 5 | #include 6 | 7 | typedef struct aeApiState { 8 | int epfd; 9 | struct epoll_event *events; 10 | } aeApiState; 11 | 12 | static int aeApiCreate(aeEventLoop *eventLoop) { 13 | aeApiState *state = zmalloc(sizeof(aeApiState)); 14 | 15 | if (!state) return -1; 16 | state->events = zmalloc(sizeof(struct epoll_event)*eventLoop->setsize); 17 | if (!state->events) { 18 | zfree(state); 19 | return -1; 20 | } 21 | state->epfd = epoll_create(1024); /* 1024 is just an hint for the kernel */ 22 | if (state->epfd == -1) { 23 | zfree(state->events); 24 | zfree(state); 25 | return -1; 26 | } 27 | eventLoop->apidata = state; 28 | return 0; 29 | } 30 | 31 | static void aeApiFree(aeEventLoop *eventLoop) { 32 | aeApiState *state = eventLoop->apidata; 33 | 34 | close(state->epfd); 35 | zfree(state->events); 36 | zfree(state); 37 | } 38 | 39 | static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { 40 | aeApiState *state = eventLoop->apidata; 41 | struct epoll_event ee; 42 | /* If the fd was already monitored for some event, we need a MOD 43 | * operation. Otherwise we need an ADD operation. */ 44 | int op = eventLoop->events[fd].mask == AE_NONE ? 
/* ---- ae_epoll.c (remainder): event removal, polling, backend name ----
 * NOTE: in the original tree this and the kqueue section below are two
 * separate translation units (ae_epoll.c / ae_kqueue.c), each included
 * conditionally from ae.c, so the duplicated static names never clash. */

/* Stop monitoring the 'delmask' events for 'fd'.  Any remaining event
 * bits are re-armed with EPOLL_CTL_MOD; when none are left the fd is
 * removed from the epoll instance entirely. */
static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int delmask) {
    aeApiState *state = eventLoop->apidata;
    struct epoll_event ee;
    int remaining = eventLoop->events[fd].mask & (~delmask);

    ee.events = 0;
    if (remaining & AE_READABLE) ee.events |= EPOLLIN;
    if (remaining & AE_WRITABLE) ee.events |= EPOLLOUT;
    ee.data.u64 = 0; /* avoid valgrind warning */
    ee.data.fd = fd;
    if (remaining == AE_NONE) {
        /* Note, Kernel < 2.6.9 requires a non null event pointer even for
         * EPOLL_CTL_DEL. */
        epoll_ctl(state->epfd, EPOLL_CTL_DEL, fd, &ee);
    } else {
        epoll_ctl(state->epfd, EPOLL_CTL_MOD, fd, &ee);
    }
}

/* Wait at most 'tvp' (NULL = block forever) for events and translate the
 * ready descriptors into eventLoop->fired.  Returns how many fired. */
static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
    aeApiState *state = eventLoop->apidata;
    int nready, numevents = 0;
    /* NOTE(review): the millisecond conversion is done in int arithmetic
     * and could overflow for very large tv_sec values — confirm callers
     * only pass short intervals. */
    int timeout_ms = tvp ? (tvp->tv_sec * 1000 + tvp->tv_usec / 1000) : -1;

    nready = epoll_wait(state->epfd, state->events, eventLoop->setsize,
                        timeout_ms);
    if (nready > 0) {
        int i;

        numevents = nready;
        for (i = 0; i < numevents; i++) {
            int mask = 0;
            struct epoll_event *ev = state->events + i;

            if (ev->events & EPOLLIN)  mask |= AE_READABLE;
            if (ev->events & EPOLLOUT) mask |= AE_WRITABLE;
            eventLoop->fired[i].fd = ev->data.fd;
            eventLoop->fired[i].mask = mask;
        }
    }
    return numevents;
}

/* Human-readable name of this polling backend. */
static char *aeApiName(void) {
    return "epoll";
}

/* ---- ae_kqueue.c: Kqueue(2)-based ae.c module ----
 * Copyright (C) 2009 Harish Mallipeddi - harish.mallipeddi@gmail.com
 * Released under the BSD license. See the COPYING file for more info.
 * (The dump stripped the include names; the original pulls in
 * <sys/types.h>, <sys/event.h> and <sys/time.h>.) */

typedef struct aeApiState {
    int kqfd;               /* kqueue(2) descriptor */
    struct kevent *events;  /* per-poll result buffer, setsize entries */
} aeApiState;

/* Allocate the kqueue backend state.  Returns 0 on success, -1 on
 * allocation or kqueue(2) failure; nothing is leaked on error. */
static int aeApiCreate(aeEventLoop *eventLoop) {
    aeApiState *state = zmalloc(sizeof(aeApiState));

    if (!state) return -1;
    state->events = zmalloc(sizeof(struct kevent) * eventLoop->setsize);
    if (!state->events) {
        zfree(state);
        return -1;
    }
    state->kqfd = kqueue();
    if (state->kqfd == -1) {
        zfree(state->events);
        zfree(state);
        return -1;
    }
    eventLoop->apidata = state;

    return 0;
}

/* Close the kqueue descriptor and release all backend memory. */
static void aeApiFree(aeEventLoop *eventLoop) {
    aeApiState *state = eventLoop->apidata;

    close(state->kqfd);
    zfree(state->events);
    zfree(state);
}

/* Register interest in 'mask' events for 'fd' (one kevent call per
 * filter).  Returns 0 on success, -1 on failure.
 * NOTE(review): if the EVFILT_READ registration succeeds but the
 * EVFILT_WRITE one fails, the read filter stays registered — confirm
 * callers treat a -1 return as fatal for this fd. */
static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) {
    aeApiState *state = eventLoop->apidata;
    struct kevent change;

    if (mask & AE_READABLE) {
        EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(state->kqfd, &change, 1, NULL, 0, NULL) == -1) return -1;
    }
    if (mask & AE_WRITABLE) {
        EV_SET(&change, fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
        if (kevent(state->kqfd, &change, 1, NULL, 0, NULL) == -1) return -1;
    }
    return 0;
}

/* Unregister the filters named by 'mask'; deletion errors are ignored,
 * matching the epoll backend. */
static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) {
    aeApiState *state = eventLoop->apidata;
    struct kevent change;

    if (mask & AE_READABLE) {
        EV_SET(&change, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
        kevent(state->kqfd, &change, 1, NULL, 0, NULL);
    }
    if (mask & AE_WRITABLE) {
        EV_SET(&change, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
        kevent(state->kqfd, &change, 1, NULL, 0, NULL);
    }
}

/* Wait at most 'tvp' (NULL = forever) for kernel events and translate
 * them into eventLoop->fired.  Returns the number of fired events. */
static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
    aeApiState *state = eventLoop->apidata;
    int nready, numevents = 0;

    if (tvp == NULL) {
        nready = kevent(state->kqfd, NULL, 0, state->events,
                        eventLoop->setsize, NULL);
    } else {
        struct timespec timeout;
        timeout.tv_sec = tvp->tv_sec;
        timeout.tv_nsec = tvp->tv_usec * 1000;
        nready = kevent(state->kqfd, NULL, 0, state->events,
                        eventLoop->setsize, &timeout);
    }

    if (nready > 0) {
        int i;

        numevents = nready;
        for (i = 0; i < numevents; i++) {
            int mask = 0;
            struct kevent *ev = state->events + i;

            if (ev->filter == EVFILT_READ)  mask |= AE_READABLE;
            if (ev->filter == EVFILT_WRITE) mask |= AE_WRITABLE;
            eventLoop->fired[i].fd = ev->ident;
            eventLoop->fired[i].mask = mask;
        }
    }
    return numevents;
}

/* Human-readable name of this polling backend. */
static char *aeApiName(void) {
    return "kqueue";
}
See the COPYING file for more info. */ 4 | 5 | #include 6 | 7 | typedef struct aeApiState { 8 | fd_set rfds, wfds; 9 | /* We need to have a copy of the fd sets as it's not safe to reuse 10 | * FD sets after select(). */ 11 | fd_set _rfds, _wfds; 12 | } aeApiState; 13 | 14 | static int aeApiCreate(aeEventLoop *eventLoop) { 15 | aeApiState *state = zmalloc(sizeof(aeApiState)); 16 | 17 | if (!state) return -1; 18 | FD_ZERO(&state->rfds); 19 | FD_ZERO(&state->wfds); 20 | eventLoop->apidata = state; 21 | return 0; 22 | } 23 | 24 | static void aeApiFree(aeEventLoop *eventLoop) { 25 | zfree(eventLoop->apidata); 26 | } 27 | 28 | static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { 29 | aeApiState *state = eventLoop->apidata; 30 | 31 | if (mask & AE_READABLE) FD_SET(fd,&state->rfds); 32 | if (mask & AE_WRITABLE) FD_SET(fd,&state->wfds); 33 | return 0; 34 | } 35 | 36 | static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { 37 | aeApiState *state = eventLoop->apidata; 38 | 39 | if (mask & AE_READABLE) FD_CLR(fd,&state->rfds); 40 | if (mask & AE_WRITABLE) FD_CLR(fd,&state->wfds); 41 | } 42 | 43 | static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { 44 | aeApiState *state = eventLoop->apidata; 45 | int retval, j, numevents = 0; 46 | 47 | memcpy(&state->_rfds,&state->rfds,sizeof(fd_set)); 48 | memcpy(&state->_wfds,&state->wfds,sizeof(fd_set)); 49 | 50 | retval = select(eventLoop->maxfd+1, 51 | &state->_rfds,&state->_wfds,NULL,tvp); 52 | if (retval > 0) { 53 | for (j = 0; j <= eventLoop->maxfd; j++) { 54 | int mask = 0; 55 | aeFileEvent *fe = &eventLoop->events[j]; 56 | 57 | if (fe->mask == AE_NONE) continue; 58 | if (fe->mask & AE_READABLE && FD_ISSET(j,&state->_rfds)) 59 | mask |= AE_READABLE; 60 | if (fe->mask & AE_WRITABLE && FD_ISSET(j,&state->_wfds)) 61 | mask |= AE_WRITABLE; 62 | eventLoop->fired[numevents].fd = j; 63 | eventLoop->fired[numevents].mask = mask; 64 | numevents++; 65 | } 66 | } 67 | return numevents; 68 | } 69 | 
70 | static char *aeApiName(void) { 71 | return "select"; 72 | } 73 | -------------------------------------------------------------------------------- /tcproxy/src/anet.c: -------------------------------------------------------------------------------- 1 | /* anet.c -- Basic TCP socket stuff made a bit less boring 2 | * 3 | * Copyright (c) 2006-2010, Salvatore Sanfilippo 4 | * All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions are met: 8 | * 9 | * * Redistributions of source code must retain the above copyright notice, 10 | * this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in the 13 | * documentation and/or other materials provided with the distribution. 14 | * * Neither the name of Redis nor the names of its contributors may be used 15 | * to endorse or promote products derived from this software without 16 | * specific prior written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 | * POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | 31 | #include "fmacros.h" 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | #include 46 | #include 47 | 48 | #include "anet.h" 49 | 50 | static void anetSetError(char *err, const char *fmt, ...) 51 | { 52 | va_list ap; 53 | 54 | if (!err) return; 55 | va_start(ap, fmt); 56 | vsnprintf(err, ANET_ERR_LEN, fmt, ap); 57 | va_end(ap); 58 | } 59 | 60 | int anetNonBlock(char *err, int fd) 61 | { 62 | int flags; 63 | 64 | /* Set the socket nonblocking. 65 | * Note that fcntl(2) for F_GETFL and F_SETFL can't be 66 | * interrupted by a signal. */ 67 | if ((flags = fcntl(fd, F_GETFL)) == -1) { 68 | anetSetError(err, "fcntl(F_GETFL): %s", strerror(errno)); 69 | return ANET_ERR; 70 | } 71 | if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) { 72 | anetSetError(err, "fcntl(F_SETFL,O_NONBLOCK): %s", strerror(errno)); 73 | return ANET_ERR; 74 | } 75 | return ANET_OK; 76 | } 77 | 78 | int anetTcpNoDelay(char *err, int fd) 79 | { 80 | int yes = 1; 81 | if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &yes, sizeof(yes)) == -1) 82 | { 83 | anetSetError(err, "setsockopt TCP_NODELAY: %s", strerror(errno)); 84 | return ANET_ERR; 85 | } 86 | return ANET_OK; 87 | } 88 | 89 | int anetSetSendBuffer(char *err, int fd, int buffsize) 90 | { 91 | if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffsize, sizeof(buffsize)) == -1) 92 | { 93 | anetSetError(err, "setsockopt SO_SNDBUF: %s", strerror(errno)); 94 | return ANET_ERR; 95 | } 96 | return ANET_OK; 97 | } 98 | 99 | int anetTcpKeepAlive(char *err, int fd) 100 | { 101 | int yes = 1; 102 | if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes)) == -1) { 103 | anetSetError(err, "setsockopt SO_KEEPALIVE: %s", strerror(errno)); 104 | return ANET_ERR; 105 | } 106 | return ANET_OK; 107 | } 108 | 109 | int anetResolve(char *err, char *host, char *ipbuf) 110 | { 111 | struct sockaddr_in sa; 112 | 113 | sa.sin_family 
= AF_INET; 114 | if (inet_aton(host, &sa.sin_addr) == 0) { 115 | struct hostent *he; 116 | 117 | he = gethostbyname(host); 118 | if (he == NULL) { 119 | anetSetError(err, "can't resolve: %s", host); 120 | return ANET_ERR; 121 | } 122 | memcpy(&sa.sin_addr, he->h_addr, sizeof(struct in_addr)); 123 | } 124 | strcpy(ipbuf,inet_ntoa(sa.sin_addr)); 125 | return ANET_OK; 126 | } 127 | 128 | static int anetCreateSocket(char *err, int domain) { 129 | int s, on = 1; 130 | if ((s = socket(domain, SOCK_STREAM, 0)) == -1) { 131 | anetSetError(err, "creating socket: %s", strerror(errno)); 132 | return ANET_ERR; 133 | } 134 | 135 | /* Make sure connection-intensive things like the redis benckmark 136 | * will be able to close/open sockets a zillion of times */ 137 | if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1) { 138 | anetSetError(err, "setsockopt SO_REUSEADDR: %s", strerror(errno)); 139 | return ANET_ERR; 140 | } 141 | return s; 142 | } 143 | 144 | #define ANET_CONNECT_NONE 0 145 | #define ANET_CONNECT_NONBLOCK 1 146 | static int anetTcpGenericConnect(char *err, char *addr, int port, int flags) 147 | { 148 | int s; 149 | struct sockaddr_in sa; 150 | 151 | if ((s = anetCreateSocket(err,AF_INET)) == ANET_ERR) 152 | return ANET_ERR; 153 | 154 | sa.sin_family = AF_INET; 155 | sa.sin_port = htons(port); 156 | if (inet_aton(addr, &sa.sin_addr) == 0) { 157 | struct hostent *he; 158 | 159 | he = gethostbyname(addr); 160 | if (he == NULL) { 161 | anetSetError(err, "can't resolve: %s", addr); 162 | close(s); 163 | return ANET_ERR; 164 | } 165 | memcpy(&sa.sin_addr, he->h_addr, sizeof(struct in_addr)); 166 | } 167 | if (flags & ANET_CONNECT_NONBLOCK) { 168 | if (anetNonBlock(err,s) != ANET_OK) 169 | return ANET_ERR; 170 | } 171 | if (connect(s, (struct sockaddr*)&sa, sizeof(sa)) == -1) { 172 | if (errno == EINPROGRESS && 173 | flags & ANET_CONNECT_NONBLOCK) 174 | return s; 175 | 176 | anetSetError(err, "connect: %s", strerror(errno)); 177 | close(s); 178 | return 
ANET_ERR; 179 | } 180 | return s; 181 | } 182 | 183 | int anetTcpConnect(char *err, char *addr, int port) 184 | { 185 | return anetTcpGenericConnect(err,addr,port,ANET_CONNECT_NONE); 186 | } 187 | 188 | int anetTcpNonBlockConnect(char *err, char *addr, int port) 189 | { 190 | return anetTcpGenericConnect(err,addr,port,ANET_CONNECT_NONBLOCK); 191 | } 192 | 193 | int anetUnixGenericConnect(char *err, char *path, int flags) 194 | { 195 | int s; 196 | struct sockaddr_un sa; 197 | 198 | if ((s = anetCreateSocket(err,AF_LOCAL)) == ANET_ERR) 199 | return ANET_ERR; 200 | 201 | sa.sun_family = AF_LOCAL; 202 | strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1); 203 | if (flags & ANET_CONNECT_NONBLOCK) { 204 | if (anetNonBlock(err,s) != ANET_OK) 205 | return ANET_ERR; 206 | } 207 | if (connect(s,(struct sockaddr*)&sa,sizeof(sa)) == -1) { 208 | if (errno == EINPROGRESS && 209 | flags & ANET_CONNECT_NONBLOCK) 210 | return s; 211 | 212 | anetSetError(err, "connect: %s", strerror(errno)); 213 | close(s); 214 | return ANET_ERR; 215 | } 216 | return s; 217 | } 218 | 219 | int anetUnixConnect(char *err, char *path) 220 | { 221 | return anetUnixGenericConnect(err,path,ANET_CONNECT_NONE); 222 | } 223 | 224 | int anetUnixNonBlockConnect(char *err, char *path) 225 | { 226 | return anetUnixGenericConnect(err,path,ANET_CONNECT_NONBLOCK); 227 | } 228 | 229 | /* Like read(2) but make sure 'count' is read before to return 230 | * (unless error or EOF condition is encountered) */ 231 | int anetRead(int fd, char *buf, int count) 232 | { 233 | int nread, totlen = 0; 234 | while(totlen != count) { 235 | nread = read(fd,buf,count-totlen); 236 | if (nread == 0) return totlen; 237 | if (nread == -1) return -1; 238 | totlen += nread; 239 | buf += nread; 240 | } 241 | return totlen; 242 | } 243 | 244 | /* Like write(2) but make sure 'count' is read before to return 245 | * (unless error is encountered) */ 246 | int anetWrite(int fd, char *buf, int count) 247 | { 248 | int nwritten, totlen = 0; 249 | 
while(totlen != count) { 250 | nwritten = write(fd,buf,count-totlen); 251 | if (nwritten == 0) return totlen; 252 | if (nwritten == -1) return -1; 253 | totlen += nwritten; 254 | buf += nwritten; 255 | } 256 | return totlen; 257 | } 258 | 259 | static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len) { 260 | if (bind(s,sa,len) == -1) { 261 | anetSetError(err, "bind: %s", strerror(errno)); 262 | close(s); 263 | return ANET_ERR; 264 | } 265 | 266 | /* Use a backlog of 512 entries. We pass 511 to the listen() call because 267 | * the kernel does: backlogsize = roundup_pow_of_two(backlogsize + 1); 268 | * which will thus give us a backlog of 512 entries */ 269 | if (listen(s, 511) == -1) { 270 | anetSetError(err, "listen: %s", strerror(errno)); 271 | close(s); 272 | return ANET_ERR; 273 | } 274 | return ANET_OK; 275 | } 276 | 277 | int anetTcpServer(char *err, int port, char *bindaddr) 278 | { 279 | int s; 280 | struct sockaddr_in sa; 281 | 282 | if ((s = anetCreateSocket(err,AF_INET)) == ANET_ERR) 283 | return ANET_ERR; 284 | 285 | memset(&sa,0,sizeof(sa)); 286 | sa.sin_family = AF_INET; 287 | sa.sin_port = htons(port); 288 | sa.sin_addr.s_addr = htonl(INADDR_ANY); 289 | if (bindaddr && inet_aton(bindaddr, &sa.sin_addr) == 0) { 290 | anetSetError(err, "invalid bind address"); 291 | close(s); 292 | return ANET_ERR; 293 | } 294 | if (anetListen(err,s,(struct sockaddr*)&sa,sizeof(sa)) == ANET_ERR) 295 | return ANET_ERR; 296 | return s; 297 | } 298 | 299 | int anetUnixServer(char *err, char *path, mode_t perm) 300 | { 301 | int s; 302 | struct sockaddr_un sa; 303 | 304 | if ((s = anetCreateSocket(err,AF_LOCAL)) == ANET_ERR) 305 | return ANET_ERR; 306 | 307 | memset(&sa,0,sizeof(sa)); 308 | sa.sun_family = AF_LOCAL; 309 | strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1); 310 | if (anetListen(err,s,(struct sockaddr*)&sa,sizeof(sa)) == ANET_ERR) 311 | return ANET_ERR; 312 | if (perm) 313 | chmod(sa.sun_path, perm); 314 | return s; 315 | } 316 | 317 | static 
int anetGenericAccept(char *err, int s, struct sockaddr *sa, socklen_t *len) { 318 | int fd; 319 | while(1) { 320 | fd = accept(s,sa,len); 321 | if (fd == -1) { 322 | if (errno == EINTR) 323 | continue; 324 | else { 325 | anetSetError(err, "accept: %s", strerror(errno)); 326 | return ANET_ERR; 327 | } 328 | } 329 | break; 330 | } 331 | return fd; 332 | } 333 | 334 | int anetTcpAccept(char *err, int s, char *ip, int *port) { 335 | int fd; 336 | struct sockaddr_in sa; 337 | socklen_t salen = sizeof(sa); 338 | if ((fd = anetGenericAccept(err,s,(struct sockaddr*)&sa,&salen)) == ANET_ERR) 339 | return ANET_ERR; 340 | 341 | if (ip) strcpy(ip,inet_ntoa(sa.sin_addr)); 342 | if (port) *port = ntohs(sa.sin_port); 343 | return fd; 344 | } 345 | 346 | int anetUnixAccept(char *err, int s) { 347 | int fd; 348 | struct sockaddr_un sa; 349 | socklen_t salen = sizeof(sa); 350 | if ((fd = anetGenericAccept(err,s,(struct sockaddr*)&sa,&salen)) == ANET_ERR) 351 | return ANET_ERR; 352 | 353 | return fd; 354 | } 355 | 356 | int anetPeerToString(int fd, char *ip, int *port) { 357 | struct sockaddr_in sa; 358 | socklen_t salen = sizeof(sa); 359 | 360 | if (getpeername(fd,(struct sockaddr*)&sa,&salen) == -1) { 361 | *port = 0; 362 | ip[0] = '?'; 363 | ip[1] = '\0'; 364 | return -1; 365 | } 366 | if (ip) strcpy(ip,inet_ntoa(sa.sin_addr)); 367 | if (port) *port = ntohs(sa.sin_port); 368 | return 0; 369 | } 370 | -------------------------------------------------------------------------------- /tcproxy/src/anet.h: -------------------------------------------------------------------------------- 1 | /* anet.c -- Basic TCP socket stuff made a bit less boring 2 | * 3 | * Copyright (c) 2006-2010, Salvatore Sanfilippo 4 | * All rights reserved. 
5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions are met: 8 | * 9 | * * Redistributions of source code must retain the above copyright notice, 10 | * this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in the 13 | * documentation and/or other materials provided with the distribution. 14 | * * Neither the name of Redis nor the names of its contributors may be used 15 | * to endorse or promote products derived from this software without 16 | * specific prior written permission. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 | * POSSIBILITY OF SUCH DAMAGE. 
29 | */ 30 | 31 | #ifndef ANET_H 32 | #define ANET_H 33 | 34 | #define ANET_OK 0 35 | #define ANET_ERR -1 36 | #define ANET_ERR_LEN 256 37 | 38 | #if defined(__sun) 39 | #define AF_LOCAL AF_UNIX 40 | #endif 41 | 42 | int anetTcpConnect(char *err, char *addr, int port); 43 | int anetTcpNonBlockConnect(char *err, char *addr, int port); 44 | int anetUnixConnect(char *err, char *path); 45 | int anetUnixNonBlockConnect(char *err, char *path); 46 | int anetRead(int fd, char *buf, int count); 47 | int anetResolve(char *err, char *host, char *ipbuf); 48 | int anetTcpServer(char *err, int port, char *bindaddr); 49 | int anetUnixServer(char *err, char *path, mode_t perm); 50 | int anetTcpAccept(char *err, int serversock, char *ip, int *port); 51 | int anetUnixAccept(char *err, int serversock); 52 | int anetWrite(int fd, char *buf, int count); 53 | int anetNonBlock(char *err, int fd); 54 | int anetTcpNoDelay(char *err, int fd); 55 | int anetTcpKeepAlive(char *err, int fd); 56 | int anetPeerToString(int fd, char *ip, int *port); 57 | 58 | #endif 59 | -------------------------------------------------------------------------------- /tcproxy/src/config.h: -------------------------------------------------------------------------------- 1 | #ifndef __CONFIG_H 2 | #define __CONFIG_H 3 | 4 | #ifdef __APPLE__ 5 | #include 6 | #endif 7 | 8 | /* Test for proc filesystem */ 9 | #ifdef __linux__ 10 | #define HAVE_PROCFS 1 11 | #endif 12 | 13 | /* Test for task_info() */ 14 | #if defined(__APPLE__) 15 | #define HAVE_TASKINFO 1 16 | #endif 17 | 18 | /* Test for backtrace() */ 19 | #if defined(__APPLE__) || defined(__linux__) 20 | #define HAVE_BACKTRACE 1 21 | #endif 22 | 23 | /* Test for polling API */ 24 | #ifdef __linux__ 25 | #define HAVE_EPOLL 1 26 | #endif 27 | 28 | #if (defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined (__NetBSD__) 29 | #define HAVE_KQUEUE 1 30 | #endif 31 | 32 | /* Define aof_fsync to fdatasync() in Linux 
and fsync() for all the rest */ 33 | #ifdef __linux__ 34 | #define aof_fsync fdatasync 35 | #else 36 | #define aof_fsync fsync 37 | #endif 38 | 39 | /* Byte ordering detection */ 40 | #include /* This will likely define BYTE_ORDER */ 41 | 42 | #ifndef BYTE_ORDER 43 | #if (BSD >= 199103) 44 | # include 45 | #else 46 | #if defined(linux) || defined(__linux__) 47 | # include 48 | #else 49 | #define LITTLE_ENDIAN 1234 /* least-significant byte first (vax, pc) */ 50 | #define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ 51 | #define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp)*/ 52 | 53 | #if defined(vax) || defined(ns32000) || defined(sun386) || defined(__i386__) || \ 54 | defined(MIPSEL) || defined(_MIPSEL) || defined(BIT_ZERO_ON_RIGHT) || \ 55 | defined(__alpha__) || defined(__alpha) 56 | #define BYTE_ORDER LITTLE_ENDIAN 57 | #endif 58 | 59 | #if defined(sel) || defined(pyr) || defined(mc68000) || defined(sparc) || \ 60 | defined(is68k) || defined(tahoe) || defined(ibm032) || defined(ibm370) || \ 61 | defined(MIPSEB) || defined(_MIPSEB) || defined(_IBMR2) || defined(DGUX) ||\ 62 | defined(apollo) || defined(__convex__) || defined(_CRAY) || \ 63 | defined(__hppa) || defined(__hp9000) || \ 64 | defined(__hp9000s300) || defined(__hp9000s700) || \ 65 | defined (BIT_ZERO_ON_LEFT) || defined(m68k) || defined(__sparc) 66 | #define BYTE_ORDER BIG_ENDIAN 67 | #endif 68 | #endif /* linux */ 69 | #endif /* BSD */ 70 | #endif /* BYTE_ORDER */ 71 | 72 | #if defined(__BYTE_ORDER) && !defined(BYTE_ORDER) 73 | #if (__BYTE_ORDER == __LITTLE_ENDIAN) 74 | #define BYTE_ORDER LITTLE_ENDIAN 75 | #else 76 | #define BYTE_ORDER BIG_ENDIAN 77 | #endif 78 | #endif 79 | 80 | #if !defined(BYTE_ORDER) || \ 81 | (BYTE_ORDER != BIG_ENDIAN && BYTE_ORDER != LITTLE_ENDIAN) 82 | /* you must determine what the correct bit order is for 83 | * your compiler - the next line is an intentional error 84 | * which will force your compiles to bomb until you fix 85 | * the above 
macros. 86 | */ 87 | #error "Undefined or invalid BYTE_ORDER" 88 | #endif 89 | 90 | #endif 91 | -------------------------------------------------------------------------------- /tcproxy/src/fmacros.h: -------------------------------------------------------------------------------- 1 | #ifndef _REDIS_FMACRO_H 2 | #define _REDIS_FMACRO_H 3 | 4 | #define _BSD_SOURCE 5 | 6 | #if defined(__linux__) || defined(__OpenBSD__) 7 | #define _XOPEN_SOURCE 700 8 | #else 9 | #define _XOPEN_SOURCE 10 | #endif 11 | 12 | #define _LARGEFILE_SOURCE 13 | #define _FILE_OFFSET_BITS 64 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /tcproxy/src/policy.c: -------------------------------------------------------------------------------- 1 | 2 | #line 1 "policy.rl" 3 | #include 4 | #include 5 | #include 6 | 7 | #include "policy.h" 8 | 9 | static Hostent host; 10 | static int addr_p; 11 | static int have_addr; 12 | 13 | 14 | #line 92 "policy.rl" 15 | 16 | 17 | 18 | #line 19 "policy.c" 19 | static const char _policy_parser_actions[] = { 20 | 0, 1, 3, 1, 4, 1, 6, 1, 21 | 7, 1, 8, 1, 9, 1, 10, 2, 22 | 0, 3, 2, 2, 4, 2, 3, 4, 23 | 2, 5, 1, 3, 8, 0, 3, 4, 24 | 0, 3, 2, 4, 5, 8, 0, 3, 25 | 2, 4 26 | }; 27 | 28 | static const unsigned char _policy_parser_key_offsets[] = { 29 | 0, 0, 4, 9, 11, 12, 19, 21, 30 | 24, 26, 29, 31, 34, 37, 38, 40, 31 | 43, 44, 47, 48, 49, 50, 51, 52, 32 | 53, 55, 57, 62, 67, 73, 74, 75, 33 | 76, 78, 82, 86, 90, 94, 96, 97, 34 | 98, 99, 100, 101, 102, 103, 104, 106, 35 | 109, 111, 114, 116, 119, 122, 125, 126, 36 | 129, 130, 135, 140, 141, 142, 143, 144, 37 | 145, 146, 147, 148, 149, 151, 153, 156, 38 | 158, 161, 163, 166, 169, 170, 172, 176, 39 | 180, 184, 188, 190, 193, 194, 197, 198, 40 | 203, 208, 209, 210, 211, 212, 213, 214, 41 | 215, 216, 217, 218, 221, 223, 225, 227, 42 | 229, 229, 232, 235 43 | }; 44 | 45 | static const char _policy_parser_trans_keys[] = { 46 | 97, 108, 48, 57, 32, 45, 46, 48, 47 | 57, 32, 45, 
62, 32, 97, 104, 108, 48 | 114, 48, 57, 48, 57, 46, 48, 57, 49 | 48, 57, 46, 48, 57, 48, 57, 58, 50 | 48, 57, 58, 48, 57, 58, 48, 57, 51 | 46, 48, 57, 46, 46, 48, 57, 46, 52 | 110, 121, 97, 115, 104, 32, 123, 32, 53 | 123, 32, 97, 108, 48, 57, 32, 46, 54 | 125, 48, 57, 32, 97, 108, 125, 48, 55 | 57, 110, 121, 58, 48, 57, 32, 125, 56 | 48, 57, 32, 125, 48, 57, 32, 125, 57 | 48, 57, 32, 125, 48, 57, 32, 125, 58 | 111, 99, 97, 108, 104, 111, 115, 116, 59 | 48, 57, 46, 48, 57, 48, 57, 46, 60 | 48, 57, 48, 57, 58, 48, 57, 58, 61 | 48, 57, 46, 48, 57, 46, 46, 48, 62 | 57, 46, 32, 46, 125, 48, 57, 32, 63 | 46, 125, 48, 57, 111, 99, 97, 108, 64 | 104, 111, 115, 116, 114, 32, 123, 48, 65 | 57, 46, 48, 57, 48, 57, 46, 48, 66 | 57, 48, 57, 58, 48, 57, 58, 48, 67 | 57, 58, 48, 57, 32, 45, 48, 57, 68 | 32, 45, 48, 57, 32, 45, 48, 57, 69 | 32, 45, 48, 57, 32, 45, 46, 48, 70 | 57, 46, 46, 48, 57, 46, 32, 45, 71 | 46, 48, 57, 32, 45, 46, 48, 57, 72 | 110, 121, 111, 99, 97, 108, 104, 111, 73 | 115, 116, 46, 48, 57, 48, 57, 48, 74 | 57, 48, 57, 48, 57, 46, 48, 57, 75 | 46, 48, 57, 0 76 | }; 77 | 78 | static const char _policy_parser_single_lengths[] = { 79 | 0, 2, 3, 2, 1, 5, 0, 1, 80 | 0, 1, 0, 1, 1, 1, 0, 1, 81 | 1, 1, 1, 1, 1, 1, 1, 1, 82 | 2, 2, 3, 3, 4, 1, 1, 1, 83 | 0, 2, 2, 2, 2, 2, 1, 1, 84 | 1, 1, 1, 1, 1, 1, 0, 1, 85 | 0, 1, 0, 1, 1, 1, 1, 1, 86 | 1, 3, 3, 1, 1, 1, 1, 1, 87 | 1, 1, 1, 1, 2, 0, 1, 0, 88 | 1, 0, 1, 1, 1, 0, 2, 2, 89 | 2, 2, 2, 1, 1, 1, 1, 3, 90 | 3, 1, 1, 1, 1, 1, 1, 1, 91 | 1, 1, 1, 1, 0, 0, 0, 0, 92 | 0, 1, 1, 0 93 | }; 94 | 95 | static const char _policy_parser_range_lengths[] = { 96 | 0, 1, 1, 0, 0, 1, 1, 1, 97 | 1, 1, 1, 1, 1, 0, 1, 1, 98 | 0, 1, 0, 0, 0, 0, 0, 0, 99 | 0, 0, 1, 1, 1, 0, 0, 0, 100 | 1, 1, 1, 1, 1, 0, 0, 0, 101 | 0, 0, 0, 0, 0, 0, 1, 1, 102 | 1, 1, 1, 1, 1, 1, 0, 1, 103 | 0, 1, 1, 0, 0, 0, 0, 0, 104 | 0, 0, 0, 0, 0, 1, 1, 1, 105 | 1, 1, 1, 1, 0, 1, 1, 1, 106 | 1, 1, 0, 1, 0, 1, 0, 1, 107 | 1, 0, 0, 0, 0, 0, 0, 0, 108 | 0, 0, 0, 1, 1, 1, 
1, 1, 109 | 0, 1, 1, 0 110 | }; 111 | 112 | static const short _policy_parser_index_offsets[] = { 113 | 0, 0, 4, 9, 12, 14, 21, 23, 114 | 26, 28, 31, 33, 36, 39, 41, 43, 115 | 46, 48, 51, 53, 55, 57, 59, 61, 116 | 63, 66, 69, 74, 79, 85, 87, 89, 117 | 91, 93, 97, 101, 105, 109, 112, 114, 118 | 116, 118, 120, 122, 124, 126, 128, 130, 119 | 133, 135, 138, 140, 143, 146, 149, 151, 120 | 154, 156, 161, 166, 168, 170, 172, 174, 121 | 176, 178, 180, 182, 184, 187, 189, 192, 122 | 194, 197, 199, 202, 205, 207, 209, 213, 123 | 217, 221, 225, 228, 231, 233, 236, 238, 124 | 243, 248, 250, 252, 254, 256, 258, 260, 125 | 262, 264, 266, 268, 271, 273, 275, 277, 126 | 279, 280, 283, 286 127 | }; 128 | 129 | static const char _policy_parser_indicies[] = { 130 | 2, 3, 1, 0, 4, 5, 6, 7, 131 | 0, 8, 9, 0, 10, 0, 10, 12, 132 | 13, 14, 15, 11, 0, 16, 0, 17, 133 | 18, 0, 19, 0, 20, 21, 0, 22, 134 | 0, 24, 23, 0, 24, 25, 0, 24, 135 | 0, 26, 0, 20, 27, 0, 20, 0, 136 | 17, 28, 0, 17, 0, 29, 0, 25, 137 | 0, 30, 0, 31, 0, 32, 0, 33, 138 | 34, 0, 35, 36, 0, 36, 38, 39, 139 | 37, 0, 40, 41, 43, 42, 0, 44, 140 | 38, 39, 43, 37, 0, 45, 0, 46, 141 | 0, 47, 0, 48, 0, 40, 43, 49, 142 | 0, 40, 43, 50, 0, 40, 43, 51, 143 | 0, 40, 43, 52, 0, 40, 43, 0, 144 | 53, 0, 54, 0, 55, 0, 56, 0, 145 | 57, 0, 58, 0, 59, 0, 46, 0, 146 | 60, 0, 61, 62, 0, 63, 0, 64, 147 | 65, 0, 66, 0, 47, 67, 0, 47, 148 | 46, 0, 64, 68, 0, 64, 0, 61, 149 | 69, 0, 61, 0, 40, 41, 43, 70, 150 | 0, 40, 41, 43, 51, 0, 71, 0, 151 | 72, 0, 73, 0, 74, 0, 75, 0, 152 | 76, 0, 77, 0, 25, 0, 78, 0, 153 | 79, 80, 0, 81, 0, 82, 83, 0, 154 | 84, 0, 85, 86, 0, 87, 0, 89, 155 | 88, 0, 89, 90, 0, 89, 0, 91, 156 | 0, 4, 5, 92, 0, 4, 5, 93, 157 | 0, 4, 5, 94, 0, 4, 5, 95, 158 | 0, 4, 5, 0, 85, 96, 0, 85, 159 | 0, 82, 97, 0, 82, 0, 4, 5, 160 | 6, 98, 0, 4, 5, 6, 94, 0, 161 | 99, 0, 90, 0, 100, 0, 101, 0, 162 | 102, 0, 103, 0, 104, 0, 105, 0, 163 | 106, 0, 90, 0, 107, 108, 0, 109, 164 | 0, 110, 0, 111, 0, 112, 0, 0, 165 | 107, 113, 0, 107, 111, 0, 0, 
0 166 | }; 167 | 168 | static const char _policy_parser_trans_targs[] = { 169 | 0, 2, 89, 91, 3, 4, 69, 87, 170 | 3, 4, 5, 99, 19, 21, 59, 67, 171 | 7, 8, 17, 9, 10, 15, 11, 12, 172 | 14, 13, 100, 16, 18, 20, 22, 23, 173 | 24, 25, 26, 25, 26, 27, 29, 38, 174 | 28, 46, 57, 107, 28, 30, 31, 32, 175 | 33, 34, 35, 36, 37, 39, 40, 41, 176 | 42, 43, 44, 45, 47, 48, 55, 49, 177 | 50, 53, 51, 52, 54, 56, 58, 60, 178 | 61, 62, 63, 64, 65, 66, 68, 25, 179 | 26, 70, 71, 85, 72, 73, 83, 74, 180 | 75, 77, 76, 78, 79, 80, 81, 82, 181 | 84, 86, 88, 90, 92, 93, 94, 95, 182 | 96, 97, 98, 6, 105, 101, 102, 103, 183 | 104, 106 184 | }; 185 | 186 | static const char _policy_parser_trans_actions[] = { 187 | 13, 31, 15, 15, 5, 5, 1, 21, 188 | 0, 0, 0, 36, 27, 0, 27, 0, 189 | 1, 1, 1, 1, 1, 1, 1, 1, 190 | 24, 1, 18, 1, 1, 1, 0, 0, 191 | 0, 11, 11, 0, 0, 31, 15, 15, 192 | 7, 1, 21, 7, 0, 1, 1, 24, 193 | 18, 3, 3, 3, 3, 1, 1, 1, 194 | 1, 1, 1, 1, 1, 1, 1, 1, 195 | 1, 1, 1, 1, 1, 1, 21, 1, 196 | 1, 1, 1, 1, 1, 1, 0, 9, 197 | 9, 1, 1, 1, 1, 1, 1, 1, 198 | 1, 24, 1, 18, 3, 3, 3, 3, 199 | 1, 1, 21, 1, 1, 1, 1, 1, 200 | 1, 1, 1, 1, 21, 3, 3, 3, 201 | 3, 21 202 | }; 203 | 204 | static const char _policy_parser_eof_actions[] = { 205 | 0, 13, 13, 13, 13, 13, 13, 13, 206 | 13, 13, 13, 13, 13, 13, 13, 13, 207 | 13, 13, 13, 13, 13, 13, 13, 13, 208 | 13, 13, 13, 13, 13, 13, 13, 13, 209 | 13, 13, 13, 13, 13, 13, 13, 13, 210 | 13, 13, 13, 13, 13, 13, 13, 13, 211 | 13, 13, 13, 13, 13, 13, 13, 13, 212 | 13, 13, 13, 13, 13, 13, 13, 13, 213 | 13, 13, 13, 13, 13, 13, 13, 13, 214 | 13, 13, 13, 13, 13, 13, 13, 13, 215 | 13, 13, 13, 13, 13, 13, 13, 13, 216 | 13, 13, 13, 13, 13, 13, 13, 13, 217 | 13, 13, 13, 7, 7, 7, 7, 7, 218 | 7, 7, 7, 0 219 | }; 220 | 221 | static const int policy_parser_start = 1; 222 | 223 | 224 | #line 95 "policy.rl" 225 | 226 | Policy *ParsePolicy(const char *p) { 227 | Policy *policy = malloc(sizeof(Policy)); 228 | 229 | memset(policy, 0, sizeof(Policy)); 230 | host.addr = NULL; 231 | 
232 | #line 237 "policy.c" 233 | { 234 | policy->cs = policy_parser_start; 235 | } 236 | 237 | #line 102 "policy.rl" 238 | 239 | policy->p = p; 240 | policy->pe = p + strlen(p); 241 | policy->eof = policy->pe; 242 | 243 | 244 | #line 249 "policy.c" 245 | { 246 | int _klen; 247 | unsigned int _trans; 248 | const char *_acts; 249 | unsigned int _nacts; 250 | const char *_keys; 251 | 252 | if ( ( policy->p) == ( policy->pe) ) 253 | goto _test_eof; 254 | if ( policy->cs == 0 ) 255 | goto _out; 256 | _resume: 257 | _keys = _policy_parser_trans_keys + _policy_parser_key_offsets[ policy->cs]; 258 | _trans = _policy_parser_index_offsets[ policy->cs]; 259 | 260 | _klen = _policy_parser_single_lengths[ policy->cs]; 261 | if ( _klen > 0 ) { 262 | const char *_lower = _keys; 263 | const char *_mid; 264 | const char *_upper = _keys + _klen - 1; 265 | while (1) { 266 | if ( _upper < _lower ) 267 | break; 268 | 269 | _mid = _lower + ((_upper-_lower) >> 1); 270 | if ( (*( policy->p)) < *_mid ) 271 | _upper = _mid - 1; 272 | else if ( (*( policy->p)) > *_mid ) 273 | _lower = _mid + 1; 274 | else { 275 | _trans += (unsigned int)(_mid - _keys); 276 | goto _match; 277 | } 278 | } 279 | _keys += _klen; 280 | _trans += _klen; 281 | } 282 | 283 | _klen = _policy_parser_range_lengths[ policy->cs]; 284 | if ( _klen > 0 ) { 285 | const char *_lower = _keys; 286 | const char *_mid; 287 | const char *_upper = _keys + (_klen<<1) - 2; 288 | while (1) { 289 | if ( _upper < _lower ) 290 | break; 291 | 292 | _mid = _lower + (((_upper-_lower) >> 1) & ~1); 293 | if ( (*( policy->p)) < _mid[0] ) 294 | _upper = _mid - 2; 295 | else if ( (*( policy->p)) > _mid[1] ) 296 | _lower = _mid + 2; 297 | else { 298 | _trans += (unsigned int)((_mid - _keys)>>1); 299 | goto _match; 300 | } 301 | } 302 | _trans += _klen; 303 | } 304 | 305 | _match: 306 | _trans = _policy_parser_indicies[_trans]; 307 | policy->cs = _policy_parser_trans_targs[_trans]; 308 | 309 | if ( _policy_parser_trans_actions[_trans] == 0 ) 310 
| goto _again; 311 | 312 | _acts = _policy_parser_actions + _policy_parser_trans_actions[_trans]; 313 | _nacts = (unsigned int) *_acts++; 314 | while ( _nacts-- > 0 ) 315 | { 316 | switch ( *_acts++ ) 317 | { 318 | case 0: 319 | #line 18 "policy.rl" 320 | { 321 | addr_p = 0; 322 | host.addr = NULL; 323 | have_addr = 0; 324 | } 325 | break; 326 | case 1: 327 | #line 24 "policy.rl" 328 | { 329 | have_addr = 1; 330 | } 331 | break; 332 | case 2: 333 | #line 28 "policy.rl" 334 | { 335 | host.port = 0; 336 | } 337 | break; 338 | case 3: 339 | #line 32 "policy.rl" 340 | { 341 | if (host.addr == NULL) { 342 | host.addr = malloc(16 * sizeof(char)); 343 | } 344 | host.addr[addr_p] = (*( policy->p)); 345 | addr_p++; 346 | } 347 | break; 348 | case 4: 349 | #line 40 "policy.rl" 350 | { 351 | host.port = host.port * 10 + ((*( policy->p)) - '0'); 352 | } 353 | break; 354 | case 5: 355 | #line 44 "policy.rl" 356 | { 357 | host.addr[addr_p] = '\0'; 358 | } 359 | break; 360 | case 6: 361 | #line 48 "policy.rl" 362 | { 363 | if (!have_addr) { 364 | free(host.addr); 365 | host.addr = NULL; 366 | } 367 | policy->listen = host; 368 | host.addr = NULL; 369 | } 370 | break; 371 | case 7: 372 | #line 57 "policy.rl" 373 | { 374 | if (!have_addr) { 375 | free(host.addr); 376 | host.addr = NULL; 377 | } 378 | policy->nhost++; 379 | policy->hosts = realloc(policy->hosts, sizeof(Hostent) * policy->nhost); 380 | policy->hosts[policy->nhost - 1] = host; 381 | host.addr = NULL; 382 | } 383 | break; 384 | case 8: 385 | #line 68 "policy.rl" 386 | { 387 | policy->type = PROXY_RR; 388 | } 389 | break; 390 | case 9: 391 | #line 72 "policy.rl" 392 | { 393 | policy->type = PROXY_HASH; 394 | } 395 | break; 396 | case 10: 397 | #line 76 "policy.rl" 398 | { 399 | LogFatal("policy syntax error around:\"%s\"\n", ( policy->p)); 400 | } 401 | break; 402 | #line 407 "policy.c" 403 | } 404 | } 405 | 406 | _again: 407 | if ( policy->cs == 0 ) 408 | goto _out; 409 | if ( ++( policy->p) != ( policy->pe) ) 410 | 
goto _resume; 411 | _test_eof: {} 412 | if ( ( policy->p) == ( policy->eof) ) 413 | { 414 | const char *__acts = _policy_parser_actions + _policy_parser_eof_actions[ policy->cs]; 415 | unsigned int __nacts = (unsigned int) *__acts++; 416 | while ( __nacts-- > 0 ) { 417 | switch ( *__acts++ ) { 418 | case 7: 419 | #line 57 "policy.rl" 420 | { 421 | if (!have_addr) { 422 | free(host.addr); 423 | host.addr = NULL; 424 | } 425 | policy->nhost++; 426 | policy->hosts = realloc(policy->hosts, sizeof(Hostent) * policy->nhost); 427 | policy->hosts[policy->nhost - 1] = host; 428 | host.addr = NULL; 429 | } 430 | break; 431 | case 10: 432 | #line 76 "policy.rl" 433 | { 434 | LogFatal("policy syntax error around:\"%s\"\n", ( policy->p)); 435 | } 436 | break; 437 | #line 442 "policy.c" 438 | } 439 | } 440 | } 441 | 442 | _out: {} 443 | } 444 | 445 | #line 108 "policy.rl" 446 | 447 | if (policy->cs == 448 | #line 453 "policy.c" 449 | 0 450 | #line 109 "policy.rl" 451 | ) { 452 | free(policy); 453 | return NULL; 454 | } 455 | 456 | return policy; 457 | } 458 | 459 | void FreePolicy(Policy *policy) { 460 | int i; 461 | free(policy->listen.addr); 462 | for (i = 0; i < policy->nhost; i++) { 463 | free(policy->hosts[i].addr); 464 | } 465 | free(policy->hosts); 466 | free(policy); 467 | } 468 | -------------------------------------------------------------------------------- /tcproxy/src/policy.h: -------------------------------------------------------------------------------- 1 | #ifndef _POLICY_H_ 2 | #define _POLICY_H_ 3 | 4 | #include "util.h" 5 | 6 | #define PROXY_RR 0 7 | #define PROXY_HASH 1 8 | 9 | typedef struct Hostent { 10 | char *addr; 11 | int port; 12 | } Hostent; 13 | 14 | typedef struct Policy { 15 | Hostent listen; 16 | 17 | int type; 18 | 19 | Hostent *hosts; 20 | int nhost; 21 | 22 | int curhost; 23 | 24 | //ragel stuff 25 | const char *p, *pe, *eof; 26 | int cs; 27 | } Policy; 28 | 29 | void FreePolicy(Policy *policy); 30 | Policy *ParsePolicy(const char *str); 31 | 
32 | #endif /* _POLICY_H_ */ 33 | 34 | -------------------------------------------------------------------------------- /tcproxy/src/policy.rl: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "policy.h" 6 | 7 | static Hostent host; 8 | static int addr_p; 9 | static int have_addr; 10 | 11 | %%{ 12 | machine policy_parser; 13 | access policy->; 14 | variable p policy->p; 15 | variable pe policy->pe; 16 | variable eof policy->eof; 17 | 18 | action init_host { 19 | addr_p = 0; 20 | host.addr = NULL; 21 | have_addr = 0; 22 | } 23 | 24 | action have_addr { 25 | have_addr = 1; 26 | } 27 | 28 | action init_port { 29 | host.port = 0; 30 | } 31 | 32 | action append_addr { 33 | if (host.addr == NULL) { 34 | host.addr = malloc(16 * sizeof(char)); 35 | } 36 | host.addr[addr_p] = fc; 37 | addr_p++; 38 | } 39 | 40 | action append_port { 41 | host.port = host.port * 10 + (fc - '0'); 42 | } 43 | 44 | action finish_addr { 45 | host.addr[addr_p] = '\0'; 46 | } 47 | 48 | action listen_addr { 49 | if (!have_addr) { 50 | free(host.addr); 51 | host.addr = NULL; 52 | } 53 | policy->listen = host; 54 | host.addr = NULL; 55 | } 56 | 57 | action append_host { 58 | if (!have_addr) { 59 | free(host.addr); 60 | host.addr = NULL; 61 | } 62 | policy->nhost++; 63 | policy->hosts = realloc(policy->hosts, sizeof(Hostent) * policy->nhost); 64 | policy->hosts[policy->nhost - 1] = host; 65 | host.addr = NULL; 66 | } 67 | 68 | action set_rr { 69 | policy->type = PROXY_RR; 70 | } 71 | 72 | action set_hash { 73 | policy->type = PROXY_HASH; 74 | } 75 | 76 | action error { 77 | LogFatal("policy syntax error around:\"%s\"\n", fpc); 78 | } 79 | 80 | ws = (' '); 81 | port = (digit {1,5}); 82 | dottedip = (digit {1,3} '.' digit {1,3} '.' digit {1,3} '.' digit {1,3}); 83 | addr = ('localhost' | 'any' | dottedip) $append_addr %finish_addr; 84 | host = ((addr ':' >have_addr)? 
/* Parse a proxy-policy string, e.g.
 *   "127.0.0.1:6379 -> rr{192.168.0.100:6379 192.168.0.101:6379}"
 * into a freshly heap-allocated Policy.  Returns NULL if the string
 * does not match the grammar (the ragel machine ends in the error state).
 *
 * NOTE(review): the parser actions accumulate into the file-scope statics
 * `host`, `addr_p` and `have_addr`, so this function is not reentrant and
 * not thread-safe -- confirm it is only called once from ParseArgs. */
Policy *ParsePolicy(const char *p) {
  Policy *policy = malloc(sizeof(Policy));

  memset(policy, 0, sizeof(Policy));
  host.addr = NULL;
  /* ragel: initialise policy->cs to the machine's start state */
  %% write init;

  /* ragel scans [p, pe); eof == pe so EOF actions (append_host) fire */
  policy->p = p;
  policy->pe = p + strlen(p);
  policy->eof = policy->pe;

  /* ragel: run the state machine over the buffer */
  %% write exec;

  /* error state -> syntax error (the error action may already have
   * aborted via LogFatal when a log file is configured) */
  if (policy->cs == %%{write error;}%%) {
    free(policy);
    return NULL;
  }

  return policy;
}
void Usage() { 40 | printf("usage:\n" 41 | " tcproxy [options] \"proxy policy\"\n" 42 | "options:\n" 43 | " -l file specify log file\n" 44 | " -d run in background\n" 45 | " -v show detailed log\n" 46 | " --version show version and exit\n" 47 | " -h show help and exit\n\n" 48 | "examples:\n" 49 | " tcproxy \"11212 -> 11211\"\n" 50 | " tcproxy \"127.0.0.1:6379 -> rr{192.168.0.100:6379 192.168.0.101:6379}\"\n\n" 51 | ); 52 | exit(EXIT_SUCCESS); 53 | } 54 | 55 | void ParseArgs(int argc, char **argv) { 56 | int i, j; 57 | const char *logfile = "stderr"; 58 | int loglevel = kError; 59 | 60 | InitLogger(loglevel, NULL); 61 | 62 | for (i = 1; i < argc; i++) { 63 | if (argv[i][0] == '-') { 64 | if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help")) { 65 | Usage(); 66 | } else if (!strcmp(argv[i], "--version")) { 67 | printf("tcproxy "VERSION"\n\n"); 68 | exit(EXIT_SUCCESS); 69 | } else if (!strcmp(argv[i], "-d")) { 70 | run_daemonize = 1; 71 | } else if (!strcmp(argv[i], "-l")) { 72 | if (++i >= argc) LogFatal("file name must be specified"); 73 | logfile = argv[i]; 74 | } else if (!strncmp(argv[i], "-v", 2)) { 75 | for (j = 1; argv[i][j] != '\0'; j++) { 76 | if (argv[i][j] == 'v') loglevel++; 77 | else LogFatal("invalid argument %s", argv[i]);; 78 | } 79 | } else { 80 | LogFatal("unknow option %s\n", argv[i]); 81 | } 82 | } else { 83 | policy = ParsePolicy(argv[i]); 84 | } 85 | } 86 | 87 | InitLogger(loglevel, logfile); 88 | 89 | if (policy == NULL) { 90 | LogFatal("policy not valid"); 91 | } 92 | } 93 | 94 | void SignalHandler(int signo) { 95 | if (signo == SIGINT || signo == SIGTERM) { 96 | el->stop = 1; 97 | } 98 | } 99 | 100 | void RemoteDown(Client *r) { 101 | r->remote->OnRemoteDown(r->remote); 102 | } 103 | 104 | Client *AllocRemote(Client *c) { 105 | Client *r = malloc(sizeof(Client)); 106 | r->flags = 0; 107 | int fd = anetTcpNonBlockConnect(error_, policy->hosts[0].addr, policy->hosts[0].port); 108 | 109 | if (r == NULL || fd == -1) return NULL; 110 | 
LogDebug("connect remote fd %d", fd); 111 | anetNonBlock(NULL, fd); 112 | anetTcpNoDelay(NULL, fd); 113 | r->fd = fd; 114 | r->remote = c; 115 | r->OnError = RemoteDown; 116 | r->blist = AllocBufferList(3); 117 | if (aeCreateFileEvent(el, r->fd, AE_READABLE, ReadIncome, r) == AE_ERR) { 118 | close(fd); 119 | return NULL; 120 | } 121 | 122 | LogDebug("new remote %d %d", r->fd, c->fd); 123 | 124 | return r; 125 | } 126 | 127 | void FreeClient(Client *c) { 128 | if (c == NULL) return; 129 | LogDebug("free client %d", c->fd); 130 | aeDeleteFileEvent(el, c->fd, AE_READABLE); 131 | aeDeleteFileEvent(el, c->fd, AE_WRITABLE); 132 | close(c->fd); 133 | FreeRemote(c->remote); 134 | FreeBufferList(c->blist); 135 | free(c); 136 | } 137 | 138 | void CloseAfterSent(Client *c) { 139 | int len; 140 | if (BufferListGetData(c->blist, &len) == NULL) { 141 | // no data remains to be sent, close this client 142 | FreeClient(c); 143 | } else { 144 | c->flags |= CLIENT_CLOSE_AFTER_SENT; 145 | } 146 | } 147 | 148 | void ReAllocRemote(Client *c) { 149 | // TODO 150 | } 151 | 152 | Client *AllocClient(int fd) { 153 | Client *c = malloc(sizeof(Client)); 154 | c->flags = 0; 155 | if (c == NULL) return NULL; 156 | 157 | anetNonBlock(NULL, fd); 158 | anetTcpNoDelay(NULL, fd); 159 | 160 | c->fd = fd; 161 | c->blist = AllocBufferList(3); 162 | c->remote = AllocRemote(c); 163 | c->OnError = FreeClient; 164 | // c->OnRemoteDown = ReAllocRemote; 165 | c->OnRemoteDown = CloseAfterSent; // freeclient temprarily before hot switch done 166 | if (c->remote == NULL) { 167 | close(fd); 168 | free(c); 169 | return NULL; 170 | } 171 | 172 | LogDebug("New client fd:%d remotefd:%d", c->fd, c->remote->fd); 173 | 174 | return c; 175 | } 176 | 177 | void FreeRemote(Client *r) { 178 | LogDebug("free remote"); 179 | aeDeleteFileEvent(el, r->fd, AE_READABLE); 180 | aeDeleteFileEvent(el, r->fd, AE_WRITABLE); 181 | close(r->fd); 182 | FreeBufferList(r->blist); 183 | free(r); 184 | } 185 | 186 | void 
/* Event-loop read callback, shared by client and remote fds: drain the
 * readable socket into the *peer's* buffer list and arm the peer's write
 * event so SendOutcome forwards the data.  privdata is the Client whose
 * fd became readable; bytes always flow into c->remote->blist. */
void ReadIncome(aeEventLoop *el, int fd, void *privdata, int mask) {
  LogDebug("read in come");
  Client *c = (Client*)privdata;
  Client *r = c->remote;
  char *buf;
  int len, nread = 0;

  while (1) {
    /* NULL means the peer's buffer list is full: stop reading and let
     * TCP flow control push back on the sender. */
    buf = BufferListGetSpace(r->blist, &len);
    if (buf == NULL) break;
    nread = recv(fd, buf, len, 0);
    if (nread == -1) {
      if (errno == EAGAIN) {
        // no data
        nread = 0;
      } else {
        // connection error
        goto ERROR;
      }
    } else if (nread == 0) {
      // connection closed
      LogInfo("connection closed");
      goto ERROR;
    }

    if (nread) {
      /* Commit the received bytes and make sure the peer is armed
       * for writing so they get forwarded. */
      BufferListPush(r->blist, nread);
      SetWriteEvent(r);
      LogDebug("set write");
    } else {
      break; /* EAGAIN: kernel receive buffer drained */
    }
  }

  return;

ERROR:
  /* For a downstream client OnError == FreeClient; for a remote it is
   * RemoteDown, which notifies the paired client via OnRemoteDown. */
  c->OnError(c);
}
aeCreateFileEvent(el, listen_fd, AE_READABLE, AcceptTcpHandler, NULL) == AE_ERR) { 329 | LogFatal("listen failed: %s", strerror(errno)); 330 | } 331 | 332 | LogInfo("listenning on %s:%d", (policy->listen.addr? policy->listen.addr : "any"), policy->listen.port); 333 | for (i = 0; i < policy->nhost; i++) { 334 | if (policy->hosts[i].addr == NULL) policy->hosts[i].addr = strdup("127.0.0.1"); 335 | LogInfo("proxy to %s:%d", policy->hosts[i].addr, policy->hosts[i].port); 336 | } 337 | 338 | aeMain(el); 339 | 340 | aeDeleteEventLoop(el); 341 | 342 | FreePolicy(policy); 343 | 344 | return 0; 345 | } 346 | -------------------------------------------------------------------------------- /tcproxy/src/util.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "util.h" 12 | 13 | static LogLevel log_level = kDebug; 14 | static FILE *log_file = NULL; 15 | static char now_str[sizeof("2011/11/11 11:11:11")]; 16 | static const char *LevelName[] = { 17 | "NONE", 18 | "FATAL", 19 | "CRITICAL", 20 | "ERROR", 21 | "WARNING", 22 | "INFO", 23 | "DEBUG", 24 | }; 25 | 26 | static void UpdateTime() { 27 | static time_t now = 0; 28 | time_t t = time(NULL); 29 | 30 | //update time every second 31 | if (t - now == 0) return; 32 | now = t; 33 | 34 | struct tm tm; 35 | localtime_r(&now, &tm); 36 | sprintf(now_str, "%04d/%02d/%02d %02d:%02d:%02d", 37 | 1900 + tm.tm_year, tm.tm_mon + 1, tm.tm_mday, 38 | tm.tm_hour, tm.tm_min, tm.tm_sec); 39 | } 40 | 41 | void LogPrint(LogLevel level, const char *fmt, ...) { 42 | va_list args; 43 | if (level > log_level) return; 44 | va_start(args, fmt); 45 | if (log_file) vfprintf(log_file, fmt, args); 46 | va_end(args); 47 | fflush(log_file); 48 | } 49 | 50 | void LogInternal(LogLevel level, const char *fmt, ...) 
{ 51 | va_list args; 52 | if (level > log_level) return; 53 | UpdateTime(); 54 | if (log_file) fprintf(log_file, "%s [%s] ", now_str, LevelName[level]); 55 | va_start(args, fmt); 56 | if (log_file) vfprintf(log_file, fmt, args); 57 | va_end(args); 58 | fflush(log_file); 59 | } 60 | 61 | void InitLogger(LogLevel level, const char *filename) { 62 | log_level = level; 63 | 64 | if (filename == NULL || strcmp(filename, "stderr") == 0 || strcmp(filename, "") == 0) { 65 | log_file = stderr; 66 | } else if (strcmp(filename, "stdout") == 0) { 67 | log_file = stdout; 68 | } else { 69 | log_file = fopen(filename, "a+"); 70 | } 71 | } 72 | 73 | 74 | void Daemonize() { 75 | int fd; 76 | 77 | if (fork() != 0) exit(0); /* parent exits */ 78 | 79 | setsid(); /* create a new session */ 80 | 81 | if ((fd = open("/dev/null", O_RDWR, 0)) != -1) { 82 | dup2(fd, STDIN_FILENO); 83 | dup2(fd, STDOUT_FILENO); 84 | dup2(fd, STDERR_FILENO); 85 | if (fd > STDERR_FILENO) close(fd); 86 | } 87 | } 88 | 89 | BufferList *AllocBufferList(int n) { 90 | BufferList *blist = malloc(sizeof(BufferList)); 91 | BufferListNode *buf = malloc(sizeof(BufferListNode)); 92 | BufferListNode *pre; 93 | int i; 94 | 95 | buf->size = 0; 96 | buf->next = NULL; 97 | 98 | blist->head = buf; 99 | pre = blist->head = blist->write_node = buf; 100 | 101 | for (i = 1; i < n; i++) { 102 | buf = malloc(sizeof(BufferListNode)); 103 | buf->size = 0; 104 | buf->next = NULL; 105 | pre->next = buf; 106 | pre = buf; 107 | } 108 | 109 | blist->tail = buf; 110 | 111 | blist->read_pos = 0; 112 | 113 | return blist; 114 | } 115 | 116 | void FreeBufferList(BufferList *blist) { 117 | BufferListNode *cur = blist->head; 118 | while (cur != NULL) { 119 | blist->head = cur->next; 120 | free(cur); 121 | cur = blist->head; 122 | } 123 | free(blist); 124 | } 125 | 126 | // get free space from current write node 127 | char *BufferListGetSpace(BufferList *blist, int *len) { 128 | if (blist->write_node == blist->tail && blist->write_node->size == 
BUFFER_CHUNK_SIZE) { 129 | *len = 0; 130 | LogDebug("tail full"); 131 | return NULL; 132 | } 133 | *len = BUFFER_CHUNK_SIZE - blist->write_node->size; 134 | return blist->write_node->data + blist->write_node->size; 135 | } 136 | 137 | // push data into buffer 138 | void BufferListPush(BufferList *blist, int len) { 139 | blist->write_node->size += len; 140 | LogDebug("head %p tail %p cur %p data %d", blist->head, blist->tail, blist->write_node, blist->head->size - blist->read_pos); 141 | if (blist->write_node->size == BUFFER_CHUNK_SIZE && blist->write_node != blist->tail) { 142 | // move to next chunk 143 | blist->write_node = blist->write_node->next; 144 | } 145 | } 146 | 147 | // always get data from head 148 | char *BufferListGetData(BufferList *blist, int *len) { 149 | if (blist->head == blist->write_node && blist->read_pos == blist->head->size) { 150 | *len = 0; 151 | LogDebug("head empty"); 152 | return NULL; 153 | } 154 | *len = blist->head->size - blist->read_pos; 155 | return blist->head->data + blist->read_pos; 156 | } 157 | 158 | // pop data out from buffer 159 | void BufferListPop(BufferList *blist, int len) { 160 | blist->read_pos += len; 161 | LogDebug("head %p tail %p cur %p data %d", blist->head, blist->tail, blist->write_node, blist->head->size - blist->read_pos); 162 | if (blist->read_pos == blist->head->size && blist->head != blist->write_node) { 163 | // head empty, and head is not the node we are writing into, move to tail 164 | BufferListNode *cur = blist->head; 165 | blist->head = blist->head->next; 166 | blist->tail->next = cur; 167 | blist->tail = cur; 168 | cur->size = 0; 169 | cur->next = NULL; 170 | blist->read_pos = 0; 171 | if (blist->head == NULL) { 172 | // there is only one chunk in buffer list 173 | LogDebug("head null"); 174 | exit(0); 175 | blist->head = blist->tail; 176 | } 177 | } 178 | // else leave it there, further get data will return NULL 179 | } 180 | 
-------------------------------------------------------------------------------- /tcproxy/src/util.h: -------------------------------------------------------------------------------- 1 | #ifndef _UTIL_H_ 2 | #define _UTIL_H_ 3 | 4 | #define BUFFER_CHUNK_SIZE 1024*1024*2 5 | 6 | typedef enum LogLevel { 7 | kNone = 0, 8 | kFatal, 9 | kCritical, 10 | kError, 11 | kWarning, 12 | kInfo, 13 | kDebug, 14 | } LogLevel; 15 | 16 | #define LogInfo(s...) do {\ 17 | LogInternal(kInfo, s);\ 18 | LogPrint(kInfo, "\n"); \ 19 | }while(0) 20 | 21 | #define LogWarning(s...) do {\ 22 | LogInternal(kWarning, s);\ 23 | LogPrint(kWarning, "\n"); \ 24 | }while(0) 25 | 26 | #define LogError(s...) do {\ 27 | LogInternal(kError, s);\ 28 | LogPrint(kError, "\n"); \ 29 | }while(0) 30 | 31 | #define LogCritical(s...) do {\ 32 | LogInternal(kCritical, s);\ 33 | LogPrint(kCritical, "\n"); \ 34 | }while(0) 35 | 36 | #define LogFatal(s...) do {\ 37 | LogInternal(kFatal, s);\ 38 | LogPrint(kFatal, "\n"); \ 39 | exit(EXIT_FAILURE);\ 40 | }while(0) 41 | 42 | #ifdef DEBUG 43 | #define LogDebug(s...) do {\ 44 | LogInternal(kDebug, s);\ 45 | LogPrint(kDebug, " [%s]", __PRETTY_FUNCTION__);\ 46 | LogPrint(kDebug, "\n"); \ 47 | }while(0) 48 | #else 49 | #define LogDebug(s...) 
50 | #endif 51 | 52 | void InitLogger(LogLevel level, const char *filename); 53 | void LogInternal(LogLevel level, const char *fmt, ...); 54 | void LogPrint(LogLevel level, const char *fmt, ...); 55 | 56 | typedef struct BufferListNode { 57 | char data[BUFFER_CHUNK_SIZE]; 58 | int size; 59 | struct BufferListNode *next; 60 | } BufferListNode; 61 | 62 | typedef struct BufferList { 63 | BufferListNode *head; 64 | BufferListNode *tail; 65 | int read_pos; 66 | BufferListNode *write_node; 67 | } BufferList; 68 | 69 | BufferList *AllocBufferList(int n); 70 | 71 | void FreeBufferList(BufferList *blist); 72 | char *BufferListGetData(BufferList *blist, int *len); 73 | char *BufferListGetSpace(BufferList *blist, int *len); 74 | void BufferListPop(BufferList *blist, int len); 75 | void BufferListPush(BufferList *blist, int len); 76 | 77 | void Daemonize(); 78 | 79 | #endif /* _UTIL_H_ */ 80 | -------------------------------------------------------------------------------- /tcproxy/src/zmalloc.c: -------------------------------------------------------------------------------- 1 | /* zmalloc - total amount of allocated memory aware version of malloc() 2 | * 3 | * Copyright (c) 2009-2010, Salvatore Sanfilippo 4 | * All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions are met: 8 | * 9 | * * Redistributions of source code must retain the above copyright notice, 10 | * this list of conditions and the following disclaimer. 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in the 13 | * documentation and/or other materials provided with the distribution. 14 | * * Neither the name of Redis nor the names of its contributors may be used 15 | * to endorse or promote products derived from this software without 16 | * specific prior written permission. 
17 | * 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 | * POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | 31 | #include 32 | #include 33 | 34 | /* This function provide us access to the original libc free(). This is useful 35 | * for instance to free results obtained by backtrace_symbols(). We need 36 | * to define this function before including zmalloc.h that may shadow the 37 | * free implementation if we use jemalloc or another non standard allocator. */ 38 | void zlibc_free(void *ptr) { 39 | free(ptr); 40 | } 41 | 42 | #include 43 | #include 44 | #include "config.h" 45 | #include "zmalloc.h" 46 | 47 | #ifdef HAVE_MALLOC_SIZE 48 | #define PREFIX_SIZE (0) 49 | #else 50 | #if defined(__sun) || defined(__sparc) || defined(__sparc__) 51 | #define PREFIX_SIZE (sizeof(long long)) 52 | #else 53 | #define PREFIX_SIZE (sizeof(size_t)) 54 | #endif 55 | #endif 56 | 57 | /* Explicitly override malloc/free etc when using tcmalloc. 
*/ 58 | #if defined(USE_TCMALLOC) 59 | #define malloc(size) tc_malloc(size) 60 | #define calloc(count,size) tc_calloc(count,size) 61 | #define realloc(ptr,size) tc_realloc(ptr,size) 62 | #define free(ptr) tc_free(ptr) 63 | #elif defined(USE_JEMALLOC) 64 | #define malloc(size) je_malloc(size) 65 | #define calloc(count,size) je_calloc(count,size) 66 | #define realloc(ptr,size) je_realloc(ptr,size) 67 | #define free(ptr) je_free(ptr) 68 | #endif 69 | 70 | #ifdef HAVE_ATOMIC 71 | #define update_zmalloc_stat_add(__n) __sync_add_and_fetch(&used_memory, (__n)) 72 | #define update_zmalloc_stat_sub(__n) __sync_sub_and_fetch(&used_memory, (__n)) 73 | #else 74 | #define update_zmalloc_stat_add(__n) do { \ 75 | pthread_mutex_lock(&used_memory_mutex); \ 76 | used_memory += (__n); \ 77 | pthread_mutex_unlock(&used_memory_mutex); \ 78 | } while(0) 79 | 80 | #define update_zmalloc_stat_sub(__n) do { \ 81 | pthread_mutex_lock(&used_memory_mutex); \ 82 | used_memory -= (__n); \ 83 | pthread_mutex_unlock(&used_memory_mutex); \ 84 | } while(0) 85 | 86 | #endif 87 | 88 | #define update_zmalloc_stat_alloc(__n,__size) do { \ 89 | size_t _n = (__n); \ 90 | if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \ 91 | if (zmalloc_thread_safe) { \ 92 | update_zmalloc_stat_add(_n); \ 93 | } else { \ 94 | used_memory += _n; \ 95 | } \ 96 | } while(0) 97 | 98 | #define update_zmalloc_stat_free(__n) do { \ 99 | size_t _n = (__n); \ 100 | if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \ 101 | if (zmalloc_thread_safe) { \ 102 | update_zmalloc_stat_sub(_n); \ 103 | } else { \ 104 | used_memory -= _n; \ 105 | } \ 106 | } while(0) 107 | 108 | static size_t used_memory = 0; 109 | static int zmalloc_thread_safe = 0; 110 | pthread_mutex_t used_memory_mutex = PTHREAD_MUTEX_INITIALIZER; 111 | 112 | static void zmalloc_oom(size_t size) { 113 | fprintf(stderr, "zmalloc: Out of memory trying to allocate %zu bytes\n", 114 | size); 115 | fflush(stderr); 116 | abort(); 117 | } 
118 | 119 | void *zmalloc(size_t size) { 120 | void *ptr = malloc(size+PREFIX_SIZE); 121 | 122 | if (!ptr) zmalloc_oom(size); 123 | #ifdef HAVE_MALLOC_SIZE 124 | update_zmalloc_stat_alloc(zmalloc_size(ptr),size); 125 | return ptr; 126 | #else 127 | *((size_t*)ptr) = size; 128 | update_zmalloc_stat_alloc(size+PREFIX_SIZE,size); 129 | return (char*)ptr+PREFIX_SIZE; 130 | #endif 131 | } 132 | 133 | void *zcalloc(size_t size) { 134 | void *ptr = calloc(1, size+PREFIX_SIZE); 135 | 136 | if (!ptr) zmalloc_oom(size); 137 | #ifdef HAVE_MALLOC_SIZE 138 | update_zmalloc_stat_alloc(zmalloc_size(ptr),size); 139 | return ptr; 140 | #else 141 | *((size_t*)ptr) = size; 142 | update_zmalloc_stat_alloc(size+PREFIX_SIZE,size); 143 | return (char*)ptr+PREFIX_SIZE; 144 | #endif 145 | } 146 | 147 | void *zrealloc(void *ptr, size_t size) { 148 | #ifndef HAVE_MALLOC_SIZE 149 | void *realptr; 150 | #endif 151 | size_t oldsize; 152 | void *newptr; 153 | 154 | if (ptr == NULL) return zmalloc(size); 155 | #ifdef HAVE_MALLOC_SIZE 156 | oldsize = zmalloc_size(ptr); 157 | newptr = realloc(ptr,size); 158 | if (!newptr) zmalloc_oom(size); 159 | 160 | update_zmalloc_stat_free(oldsize); 161 | update_zmalloc_stat_alloc(zmalloc_size(newptr),size); 162 | return newptr; 163 | #else 164 | realptr = (char*)ptr-PREFIX_SIZE; 165 | oldsize = *((size_t*)realptr); 166 | newptr = realloc(realptr,size+PREFIX_SIZE); 167 | if (!newptr) zmalloc_oom(size); 168 | 169 | *((size_t*)newptr) = size; 170 | update_zmalloc_stat_free(oldsize); 171 | update_zmalloc_stat_alloc(size,size); 172 | return (char*)newptr+PREFIX_SIZE; 173 | #endif 174 | } 175 | 176 | /* Provide zmalloc_size() for systems where this function is not provided by 177 | * malloc itself, given that in that case we store an header with this 178 | * information as the first bytes of every allocation. 
*/ 179 | #ifndef HAVE_MALLOC_SIZE 180 | size_t zmalloc_size(void *ptr) { 181 | void *realptr = (char*)ptr-PREFIX_SIZE; 182 | size_t size = *((size_t*)realptr); 183 | /* Assume at least that all the allocations are padded at sizeof(long) by 184 | * the underlying allocator. */ 185 | if (size&(sizeof(long)-1)) size += sizeof(long)-(size&(sizeof(long)-1)); 186 | return size+PREFIX_SIZE; 187 | } 188 | #endif 189 | 190 | void zfree(void *ptr) { 191 | #ifndef HAVE_MALLOC_SIZE 192 | void *realptr; 193 | size_t oldsize; 194 | #endif 195 | 196 | if (ptr == NULL) return; 197 | #ifdef HAVE_MALLOC_SIZE 198 | update_zmalloc_stat_free(zmalloc_size(ptr)); 199 | free(ptr); 200 | #else 201 | realptr = (char*)ptr-PREFIX_SIZE; 202 | oldsize = *((size_t*)realptr); 203 | update_zmalloc_stat_free(oldsize+PREFIX_SIZE); 204 | free(realptr); 205 | #endif 206 | } 207 | 208 | char *zstrdup(const char *s) { 209 | size_t l = strlen(s)+1; 210 | char *p = zmalloc(l); 211 | 212 | memcpy(p,s,l); 213 | return p; 214 | } 215 | 216 | size_t zmalloc_used_memory(void) { 217 | size_t um; 218 | 219 | if (zmalloc_thread_safe) { 220 | #ifdef HAVE_ATOMIC 221 | um = __sync_add_and_fetch(&used_memory, 0); 222 | #else 223 | pthread_mutex_lock(&used_memory_mutex); 224 | um = used_memory; 225 | pthread_mutex_unlock(&used_memory_mutex); 226 | #endif 227 | } 228 | else { 229 | um = used_memory; 230 | } 231 | 232 | return um; 233 | } 234 | 235 | void zmalloc_enable_thread_safeness(void) { 236 | zmalloc_thread_safe = 1; 237 | } 238 | 239 | /* Get the RSS information in an OS-specific way. 240 | * 241 | * WARNING: the function zmalloc_get_rss() is not designed to be fast 242 | * and may not be called in the busy loops where Redis tries to release 243 | * memory expiring or swapping out objects. 244 | * 245 | * For this kind of "fast RSS reporting" usages use instead the 246 | * function RedisEstimateRSS() that is a much faster (and less precise) 247 | * version of the funciton. 
/* Return this process's resident set size in bytes, read from
 * /proc/<pid>/stat (HAVE_PROCFS build).  Returns 0 on any failure. */
size_t zmalloc_get_rss(void) {
    int page = sysconf(_SC_PAGESIZE);
    size_t rss;
    char buf[4096];
    char filename[256];
    int fd, count, nread;
    char *p, *x;

    snprintf(filename,256,"/proc/%d/stat",getpid());
    if ((fd = open(filename,O_RDONLY)) == -1) return 0;
    /* Read at most sizeof(buf)-1 and NUL-terminate: the original read a
     * full 4096 bytes and then ran strchr() over a buffer that was never
     * terminated, reading past the valid data when the file filled it. */
    nread = read(fd,buf,sizeof(buf)-1);
    close(fd);
    if (nread <= 0) return 0;
    buf[nread] = '\0';

    p = buf;
    count = 23; /* RSS is the 24th field in /proc/<pid>/stat */
    while(p && count--) {
        p = strchr(p,' ');
        if (p) p++;
    }
    if (!p) return 0;
    x = strchr(p,' ');
    if (!x) return 0;
    *x = '\0';

    rss = strtoll(p,NULL,10);
    rss *= page; /* field is in pages; convert to bytes */
    return rss;
}
*/ 313 | return zmalloc_used_memory();
314 | }
315 | #endif
316 |
317 | /* Fragmentation = RSS / allocated-bytes */
/* NOTE(review): divides by zmalloc_used_memory() without a zero check;
 * presumably only called once allocations exist -- verify callers. */
318 | float zmalloc_get_fragmentation_ratio(void) {
319 | return (float)zmalloc_get_rss()/zmalloc_used_memory();
320 | }
321 |
-------------------------------------------------------------------------------- /tcproxy/src/zmalloc.h: -------------------------------------------------------------------------------- 1 | /* zmalloc - total amount of allocated memory aware version of malloc()
2 | *
3 | * Copyright (c) 2009-2010, Salvatore Sanfilippo
4 | * All rights reserved.
5 | *
6 | * Redistribution and use in source and binary forms, with or without
7 | * modification, are permitted provided that the following conditions are met:
8 | *
9 | * * Redistributions of source code must retain the above copyright notice,
10 | * this list of conditions and the following disclaimer.
11 | * * Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in the
13 | * documentation and/or other materials provided with the distribution.
14 | * * Neither the name of Redis nor the names of its contributors may be used
15 | * to endorse or promote products derived from this software without
16 | * specific prior written permission.
17 | *
18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 | * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 | * POSSIBILITY OF SUCH DAMAGE.
29 | */
30 |
31 | #ifndef __ZMALLOC_H
32 | #define __ZMALLOC_H
33 |
34 | /* Double expansion needed for stringification of macro values. */
35 | #define __xstr(s) __str(s)
36 | #define __str(s) #s
37 |
/* Pick the malloc implementation at compile time. Each branch defines
 * ZMALLOC_LIB (a human-readable name string) and, where the allocator
 * can report a block's usable size, HAVE_MALLOC_SIZE plus a
 * zmalloc_size(p) macro mapped onto the allocator's native query. */
38 | #if defined(USE_TCMALLOC)
39 | #define ZMALLOC_LIB ("tcmalloc-" __xstr(TC_VERSION_MAJOR) "." __xstr(TC_VERSION_MINOR))
/* NOTE(review): header name lost in flattening -- upstream includes
 * <google/tcmalloc.h> here; confirm against the original file. */
40 | #include
/* tc_malloc_size() only exists from tcmalloc 1.6 onwards. */
41 | #if (TC_VERSION_MAJOR == 1 && TC_VERSION_MINOR >= 6) || (TC_VERSION_MAJOR > 1)
42 | #define HAVE_MALLOC_SIZE 1
43 | #define zmalloc_size(p) tc_malloc_size(p)
44 | #else
45 | #error "Newer version of tcmalloc required"
46 | #endif
47 |
48 | #elif defined(USE_JEMALLOC)
49 | #define ZMALLOC_LIB ("jemalloc-" __xstr(JEMALLOC_VERSION_MAJOR) "." __xstr(JEMALLOC_VERSION_MINOR) "."
__xstr(JEMALLOC_VERSION_BUGFIX))
50 | #define JEMALLOC_MANGLE
/* NOTE(review): header name lost in flattening -- upstream includes
 * <jemalloc/jemalloc.h> here; confirm against the original file. */
51 | #include
/* malloc_usable_size() is exposed via JEMALLOC_P from jemalloc 2.1 on. */
52 | #if (JEMALLOC_VERSION_MAJOR == 2 && JEMALLOC_VERSION_MINOR >= 1) || (JEMALLOC_VERSION_MAJOR > 2)
53 | #define HAVE_MALLOC_SIZE 1
54 | #define zmalloc_size(p) JEMALLOC_P(malloc_usable_size)(p)
55 | #else
56 | #error "Newer version of jemalloc required"
57 | #endif
58 |
59 | #elif defined(__APPLE__)
/* NOTE(review): header name lost in flattening -- upstream includes
 * <malloc/malloc.h> here (for malloc_size); confirm. */
60 | #include
61 | #define HAVE_MALLOC_SIZE 1
62 | #define zmalloc_size(p) malloc_size(p)
63 | #endif
64 |
/* Plain libc malloc: no native size query, ZMALLOC_LIB falls back. */
65 | #ifndef ZMALLOC_LIB
66 | #define ZMALLOC_LIB "libc"
67 | #endif
68 |
/* Public API: malloc/calloc/realloc/free/strdup wrappers that keep the
 * used_memory counter, plus accounting and RSS introspection helpers. */
69 | void *zmalloc(size_t size);
70 | void *zcalloc(size_t size);
71 | void *zrealloc(void *ptr, size_t size);
72 | void zfree(void *ptr);
73 | char *zstrdup(const char *s);
74 | size_t zmalloc_used_memory(void);
75 | void zmalloc_enable_thread_safeness(void);
76 | float zmalloc_get_fragmentation_ratio(void);
77 | size_t zmalloc_get_rss(void);
/* Escape hatch: free with the real libc free(), bypassing accounting. */
78 | void zlibc_free(void *ptr);
79 |
/* When no allocator-native size query exists, zmalloc_size is a real
 * function (defined in zmalloc.c) instead of a macro. */
80 | #ifndef HAVE_MALLOC_SIZE
81 | size_t zmalloc_size(void *ptr);
82 | #endif
83 |
84 | #endif /* __ZMALLOC_H */
85 | --------------------------------------------------------------------------------