├── .github └── ISSUE_TEMPLATE.md ├── .gitignore ├── .travis.yml ├── ChangeLog ├── LICENSE ├── MANIFEST.in ├── README.md ├── appveyor.yml ├── docs ├── Makefile ├── conf.py ├── index.rst └── themes │ └── acid │ ├── conf.py.conf │ ├── layout.html │ ├── sourcelink.html │ ├── static │ └── acid.css │ └── theme.conf ├── examples ├── address-book.py ├── dirtybench-gdbm.py ├── dirtybench.py ├── keystore │ ├── README.md │ ├── __init__.py │ ├── interfaces.py │ ├── lmdb.py │ ├── main.py │ ├── static │ │ └── index.html │ ├── web.py │ └── webapi.py ├── nastybench.py ├── parabench.py └── words.gz ├── lib ├── lmdb.h ├── mdb.c ├── midl.c ├── midl.h ├── py-lmdb │ └── preload.h ├── win32-stdint │ └── stdint.h └── win32 │ ├── inttypes.h │ └── unistd.h ├── lmdb ├── __init__.py ├── __main__.py ├── cffi.py ├── cpython.c └── tool.py ├── misc ├── cursor-del-break.c ├── cursor_put_pyparse.diff ├── gdb.commands ├── helpers.sh ├── its7733.c ├── readers_mrb_env.patch ├── run_in_vm.py ├── runtests-travisci.sh ├── test_monster_acid_trace.diff ├── tox.ini ├── windows_build.py └── windows_setup.py ├── setup.py └── tests ├── crash_test.py ├── cursor_test.py ├── env_test.py ├── iteration_test.py ├── package_test.py ├── testlib.py ├── tool_test.py └── txn_test.py /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Edit this issue template as appropriate. If none of it seems relevant to your 2 | issue, then you may delete the entire template, however, I may not respond to 3 | issues that do not include important information that is mentioned in this 4 | template! 5 | 6 | 7 | ### Affected Operating Systems 8 | 9 | * Linux 10 | * Windows 11 | * BSD 12 | * [Other] 13 | 14 | ### Affected py-lmdb Version 15 | 16 | e.g. "print lmdb.__version__" 17 | 18 | ### py-lmdb Installation Method 19 | 20 | e.g. sudo pip install lmdb 21 | 22 | ### Using bundled or distribution-provided LMDB library? 23 | 24 | Bundled 25 | 26 | ### Distribution name and LMDB library version 27 | 28 | Use "print lmdb.version()" from a Python prompt. 29 | 30 | ### Machine "free -m" output 31 | 32 | e.g. 33 | 34 | ``` 35 | total used free shared buffers cached 36 | Mem: 24154 23874 279 21 386 8175 37 | -/+ buffers/cache: 15313 8840 38 | Swap: 0 0 0 39 | ``` 40 | 41 | ### Other important machine info 42 | 43 | Running under cgroups? Containers? Weird filesystems in use? Network 44 | filesystem? Patched kernel? ... 45 | 46 | 47 | ### Describe Your Problem 48 | 49 | XXXXX 50 | 51 | 52 | ### Errors/exceptions Encountered 53 | 54 | e.g. 55 | 56 | ``` 57 | Traceback (most recent call last): 58 | File "", line 1, in 59 | MemoryError 60 | ``` 61 | 62 | 63 | 64 | ### Describe What You Expected To Happen 65 | 66 | e.g. 67 | 68 | I expected the transaction to commit successfully. 69 | 70 | 71 | 72 | ### Describe What Happened Instead 73 | 74 | e.g. 75 | 76 | The Python process crashed. 
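If you are unsure of either version, both can be printed with a short snippet; this is only a convenience and relies solely on the `lmdb.__version__` attribute and `lmdb.version()` function already mentioned above:

```
import lmdb
print(lmdb.__version__)  # py-lmdb binding version
print(lmdb.version())    # (major, minor, patch) of the LMDB library in use
```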
77 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | MANIFEST 2 | __pycache__ 3 | build 4 | dist 5 | docs/_build 6 | ll.sh 7 | lmdb.egg-info 8 | lmdb/_config.py 9 | lo.sh 10 | old 11 | .tox/ 12 | tests/test.py 13 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | matrix: 4 | exclude: 5 | - python: 3.2 6 | env: LMDB_FORCE_CPYTHON=1 7 | - python: pypy 8 | env: LMDB_FORCE_CPYTHON=1 9 | 10 | python: 11 | - 2.6 12 | - 2.7 13 | - 3.3 14 | - 3.4 15 | - 3.5 16 | - 3.6 17 | - nightly 18 | - pypy 19 | 20 | env: 21 | - LMDB_FORCE_CFFI=1 22 | - LMDB_FORCE_CPYTHON=1 23 | install: 24 | - sudo apt-get install gdb 25 | - pip install cffi 26 | 27 | - python setup.py develop 28 | - if [[ $TRAVIS_PYTHON_VERSION == '3.2' ]]; then pip install -I py==1.4.20 pytest==2.5.2; fi 29 | before_script: 30 | - source misc/helpers.sh 31 | 32 | script: 33 | - > 34 | if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; 35 | then 36 | sudo ./misc/runtests-travisci.sh 37 | else 38 | with_gdb python -m pytest tests; 39 | fi 40 | 41 | notifications: 42 | email: false 43 | -------------------------------------------------------------------------------- /ChangeLog: -------------------------------------------------------------------------------- 1 | 2 | 2017-09-30 v0.94 3 | 4 | * CPython argument parsing now matches the behaviour of CFFI, and most sane 5 | Python APIs: a bool parameter is considered to be true if it is any truthy 6 | value, not just if it is exactly True. Reported by Nic Watson. 7 | 8 | 9 | 2017-07-16 v0.93 10 | 11 | * py-lmdb is now built with AppVeyor CI, providing early feedback on Windows 12 | build health. Egg and wheel artifacts are being generated, removing the need 13 | for a dedicated Windows build machine; however, there is no mechanism to 14 | publish these to PyPI yet. 15 | 16 | * The "warm" tool command did not function on Python 3.x. Reported by Github 17 | user dev351. 18 | 19 | * Tests now pass on non-4kb page-sized machines, such as ppc64le. Reported by 20 | Jonathan J. Helmus. 21 | 22 | * Windows Python 3.6 eggs and wheels are now available on PyPI, and tests are run 23 | against 3.6. Reported by Ofek Lev. 24 | 25 | * Python 3.2 is no longer supported, due to yet more pointless breakage 26 | introduced in pip/pkg_resources. 27 | 28 | * py-lmdb currently does not support LMDB >=0.9.19 due to interface changes in 29 | LMDB. Support will appear in a future release. 30 | 31 | 32 | 2016-10-17 v0.92 33 | 34 | * Changes to support __all__ caused the CPython module to fail to import at 35 | runtime on Python 3. This was hidden during testing as the CFFI module was 36 | successfully imported. 37 | 38 | 39 | 2016-10-17 v0.91 40 | 41 | * The docstring for NotFoundError was clarified to indicate that it is 42 | not raised in normal circumstances. 43 | 44 | * CFFI open_db() would always attempt to use a write transaction, even if the 45 | environment was opened with readonly=True. Now both CPython and CFFI will 46 | use a read-only transaction in this case. Reported by Github user 47 | handloomweaver. 48 | 49 | * The source distribution previously did not include a LICENSE file, and may 50 | have included random cached junk from the source tree during build. Reported 51 | by Thomas Petazzoni.
52 | 53 | * Transaction.id() was broken on Python 2.5. 54 | 55 | * Repair Travis CI build again. 56 | 57 | * CFFI Cursor did not correctly return empty strings for key()/value()/item() 58 | when iternext()/iterprev() had reached the start/end of the database. 59 | Detected by tests contributed by Ong Teck Wu. 60 | 61 | * The package can now be imported from within a CPython subinterpreter. Fix 62 | contributed by Vitaly Repin. 63 | 64 | * lmdb.tool --delete would not delete keys in some circumstances. Fix 65 | contributed by Vitaly Repin. 66 | 67 | * Calls to Cursor.set_range_dup() could lead to memory corruption due to 68 | Cursor's idea of the key and value failing to be updated correctly. Reported 69 | by Michael Lazarev. 70 | 71 | * The lmdb.tool copy command now supports a --compact flag. Contributed by 72 | Achal Dave. 73 | 74 | * The lmdb.tool edit command selects the correct database when --delete is 75 | specified. Contributed by ispequalnp. 76 | 77 | * lmdb.tool correctly supports the -r flag to select a read-only environment. 78 | Contributed by ispequalnp. 79 | 80 | * The lmdb.tool --txn_size parameter was removed, as it was never implemented, 81 | and its original function is no longer necessary with modern LMDB. Reported 82 | by Achal Dave. 83 | 84 | * The documentation template was updated to fix broken links. Contributed by 85 | Adam Chainz. 86 | 87 | * The Travis CI build configuration was heavily refactored by Alexander Zhukov. 88 | Automated tests are running under Travis CI once more. 89 | 90 | * The CPython extension module did not define __all__. It is now defined 91 | contain the same names as on CFFI. 92 | 93 | * Both implementations were updated to remove lmdb.open() from __all__, 94 | ensuring "from lmdb import *" does not shadow the builtin open(). The 95 | function can still be invoked using its fully qualified name, and the alias 96 | "Environment" may be used when "from lmdb import *" is used. Reported by 97 | Alexander Zhukov. 98 | 99 | * The CPython extension exported BadRSlotError, instead of BadRslotError. The 100 | exception's name was corrected to match CFFI. 101 | 102 | * Environment.open_db() now supports integerdup=True, dupfixed=True, and 103 | integerkey=True flags. Based on a patch by Jonathan Heyman. 104 | 105 | 106 | 2016-07-11 v0.90 107 | 108 | * This release was deleted from PyPI due to an erroneous pull request 109 | upgrading the bundled LMDB to mdb.master. 110 | 111 | 112 | 2016-02-12 v0.89 113 | 114 | * LMDB 0.9.18 is bundled. 115 | 116 | * CPython Iterator.next() was incorrectly defined as pointing at the 117 | implementation for Cursor.next(), triggering a crash if the method was ever 118 | invoked manually. Reported by Kimikazu Kato. 119 | 120 | 121 | 2016-01-24 v0.88 122 | 123 | * LMDB 0.9.17 is bundled. 124 | 125 | * Transaction.id() is exposed. 126 | 127 | * Binary wheels are built for Python 3.5 Windows 32/64-bit. 128 | 129 | 130 | 2015-08-11 v0.87 131 | 132 | * Environment.set_mapsize() was added to allow runtime adjustment of the 133 | environment map size. 134 | 135 | * Remove non-determinism from setup.py, to support Debian's reproducible 136 | builds project. Patch by Chris Lamb. 137 | 138 | * Documentation correctness and typo fixes. Patch by Gustav Larsson. 139 | 140 | * examples/keystore: beginnings of example that integrates py-lmdb with an 141 | asynchronous IO loop. 142 | 143 | 144 | 2015-06-07 v0.86 145 | 146 | * LMDB_FORCE_SYSTEM builds were broken by the GIL/page fault change. This 147 | release fixes the problem. 
148 | 149 | * Various cosmetic fixes to documentation. 150 | 151 | 152 | 2015-06-06 v0.85 153 | 154 | * New exception class: lmdb.BadDbiError. 155 | 156 | * Environment.copy() and Environment.copyfd() now support compact=True, to 157 | trigger database compaction while copying. 158 | 159 | * Various small documentation updates. 160 | 161 | * CPython set_range_dup() and set_key_dup() both invoked MDB_GET_BOTH, however 162 | set_range_dup() should have instead invoked MDB_GET_BOTH_RANGE. Fix by 163 | Matthew Battifarano. 164 | 165 | * lmdb.tool module was broken on Win32, since Win32 lacks signal.SIGWINCH. Fix 166 | suggested by David Khess. 167 | 168 | * LMDB 0.9.14 is bundled along with extra fixes from mdb.RE/0.9 (release 169 | engineering) branch. 170 | 171 | * CPython previously lacked a Cursor.close() method. Problem was noticed by 172 | Jos Vos. 173 | 174 | * Several memory leaks affecting the CFFI implementation when running on 175 | CPython were fixed, apparent only when repeatedly opening and discarding a 176 | large number of environments. Noticed by Jos Vos. 177 | 178 | * The CPython extension previously did not support weakrefs on Environment 179 | objects, and the implementation for Transaction objects was flawed. The 180 | extension now correctly invalidates weakrefs during deallocation. 181 | 182 | * Both variants now try to avoid taking page faults with the GIL held, 183 | accomplished by touching one byte of every page in a value during reads. 184 | This does not guarantee faults will never occur with the GIL held, but it 185 | drastically reduces the possibility. The binding should now be suitable for 186 | use in multi-threaded applications with databases containing >2KB values 187 | where the entire database does not fit in RAM. 188 | 189 | 190 | 2014-09-22 v0.84 191 | 192 | * LMDB 0.9.14 is bundled. 193 | 194 | * CFFI Cursor.putmulti() could crash when append=False and a key already 195 | existed. 196 | 197 | 198 | 2014-06-24 v0.83 199 | 200 | * LMDB 0.9.13 is bundled along with extra fixes from upstream Git. 201 | 202 | * Environment.__enter__() and __exit__() are implemented, allowing 203 | Environments to behave like context managers. 204 | 205 | * Cursor.close(), __enter__() and __exit__() are implemented, allowing Cursors 206 | to be explicitly closed. In CFFI this mechanism *must* be used when many 207 | cursors are used within a single transaction, otherwise a resource leak will 208 | occur. 209 | 210 | * Dependency tracking in CFFI is now much faster, especially on PyPy, however 211 | at a cost: Cursor use must always be wrapped in a context manager, or 212 | .close() must be manually invoked for discarded Cursors when the parent 213 | transaction is long lived. 214 | 215 | * Fixed crash in CFFI Cursor.putmulti(). 216 | 217 | 218 | 2014-05-26 v0.82 219 | 220 | * Both variants now implement max_spare_txns, reducing the cost of creating a 221 | read-only transaction 4x for an uncontended database and by up to 20x for 222 | very read-busy environments. By default only 1 read-only transaction is 223 | cached, adjust max_spare_txns= parameter if your script operates multiple 224 | simultaneous read transactions. 225 | 226 | * Patch from Vladimir Vladimirov implementing MDB_NOLOCK. 227 | 228 | * The max_spare_iters and max_spare_cursors parameters were removed, neither 229 | ever had any effect. 230 | 231 | * Cursor.putmulti() implemented based on a patch from Luke Kenneth Casson 232 | Leighton. 
This function moves the loop required to batch populate a 233 | database out of Python and into C. 234 | 235 | * The bundled LMDB 0.9.11 has been updated with several fixes from upstream 236 | Git. 237 | 238 | * The cost of using keyword arguments in the CPython extension was 239 | significantly reduced. 240 | 241 | 242 | 2014-04-26 v0.81 243 | 244 | * On Python 2.x the extension module would silently interpret Unicode 245 | instances as buffer objects, causing UCS-2/UCS-4 string data to end up in 246 | the database. This was never intentional and now raises TypeError. Any 247 | Unicode data passed to py-lmdb must explicitly be encoded with .encode() 248 | first. 249 | 250 | * open_db()'s name argument was renamed to key, and its semantics now match 251 | get() and put(): in other words the key must be a bytestring, and passing 252 | Unicode will raise TypeError. 253 | 254 | * The extension module now builds under Python 3.4 on Windows. 255 | 256 | 257 | 2014-04-21 v0.80 258 | 259 | * Both variants now build successfully as 32 bit / 64bit binaries on 260 | Windows under Visual Studio 9.0, the compiler for Python 2.7. This enables 261 | py-lmdb to be installed via pip on Windows without requiring a compiler to 262 | be available. In future, .egg/.whl releases will be pre-built for all recent 263 | Python versions on Windows. 264 | 265 | Known bugs: Environment.copy() and Environment.copyfd() currently produce a 266 | database that cannot be reopened. 267 | 268 | * The lmdb.enable_drop_gil() function was removed. Its purpose was 269 | experimental at best, confusing at worst. 270 | 271 | 272 | 2014-03-17 v0.79 273 | 274 | * CPython Cursor.delete() lacked dupdata argument, fixed. 275 | 276 | * Fixed minor bug where CFFI _get_cursor() did not note its idea of 277 | the current key and value were up to date. 278 | 279 | * Cursor.replace() and Cursor.pop() updated for MDB_DUPSORT databases. For 280 | pop(), the first data item is popped and returned. For replace(), the first 281 | data item is returned, and all duplicates for the key are replaced. 282 | 283 | * Implement remaining Cursor methods necessary for working with MDB_DUPSORT 284 | databases: next_dup(), next_nodup(), prev_dup(), prev_nodup(), first_dup(), 285 | last_dup(), set_key_dup(), set_range_dup(), iternext_dup(), 286 | iternext_nodup(), iterprev_dup(), iterprev_nodup(). 287 | 288 | * The default for Transaction.put(dupdata=...) and Cursor.put(dupdata=...) has 289 | changed from False to True. The previous default did not reflect LMDB's 290 | normal mode of operation. 291 | 292 | * LMDB 0.9.11 is bundled along with extra fixes from upstream Git. 293 | 294 | 295 | 2014-01-18 v0.78 296 | 297 | * Patch from bra-fsn to fix LMDB_LIBDIR. 298 | 299 | * Various inaccurate documentation improvements. 300 | 301 | * Initial work towards Windows/Microsoft Visual C++ 9.0 build. 302 | 303 | * LMDB 0.9.11 is now bundled. 304 | 305 | * To work around install failures minimum CFFI version is now >=0.8.0. 306 | 307 | * ticket #38: remove all buffer object hacks. This results in ~50% slowdown 308 | for cursor enumeration, but results in far simpler object lifetimes. A 309 | future version may introduce a better mechanism for achieving the same 310 | performance without loss of sanity. 311 | 312 | 313 | 2013-11-30 v0.77 314 | 315 | * Added Environment.max_key_size(), Environment.max_readers(). 316 | 317 | * CFFI now raises the correct Error subclass associated with an MDB_* return 318 | code. 319 | 320 | * Numerous CFFI vs. 
CPython behavioural inconsistencies have been fixed. 321 | 322 | * An endless variety of Unicode related 2.x/3.x/CPython/CFFI fixes were made. 323 | 324 | * LMDB 0.9.10 is now bundled, along with some extra fixes from Git. 325 | 326 | * Added Environment(meminit=...) option. 327 | 328 | 329 | 2013-10-28 v0.76 330 | 331 | * Added support for Environment(..., readahead=False). 332 | 333 | * LMDB 0.9.9 is now bundled. 334 | 335 | * Many Python 2.5 and 3.x fixes were made. Future changes are automatically 336 | tested via Travis CI . 337 | 338 | * When multiple cursors exist, and one cursor performs a mutation, 339 | remaining cursors may have returned corrupt results via key(), value(), 340 | or item(). Mutations are now explicitly tracked and cause the cursor's 341 | data to be refreshed in this case. 342 | 343 | * setup.py was adjusted to ensure the distutils default of '-DNDEBUG' is never 344 | defined while building LMDB. This caused many important checks in the engine 345 | to be disabled. 346 | 347 | * The old 'transactionless' API was removed. A future version may support the 348 | same API, but the implementation will be different. 349 | 350 | * Transaction.pop() and Cursor.pop() helpers added, to complement 351 | Transaction.replace() and Cursor.replace(). 352 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The OpenLDAP Public License 2 | Version 2.8, 17 August 2003 3 | 4 | Redistribution and use of this software and associated documentation 5 | ("Software"), with or without modification, are permitted provided 6 | that the following conditions are met: 7 | 8 | 1. Redistributions in source form must retain copyright statements 9 | and notices, 10 | 11 | 2. Redistributions in binary form must reproduce applicable copyright 12 | statements and notices, this list of conditions, and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution, and 15 | 16 | 3. Redistributions must contain a verbatim copy of this document. 17 | 18 | The OpenLDAP Foundation may revise this license from time to time. 19 | Each revision is distinguished by a version number. You may use 20 | this Software under terms of this license revision or under the 21 | terms of any subsequent revision of the license. 22 | 23 | THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS 24 | CONTRIBUTORS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, 25 | INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 26 | AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 27 | SHALL THE OPENLDAP FOUNDATION, ITS CONTRIBUTORS, OR THE AUTHOR(S) 28 | OR OWNER(S) OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, 29 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 32 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 34 | ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 | POSSIBILITY OF SUCH DAMAGE. 36 | 37 | The names of the authors and copyright holders must not be used in 38 | advertising or otherwise to promote the sale, use or other dealing 39 | in this Software without specific, written prior permission. 
Title 40 | to copyright in this Software shall at all times remain with copyright 41 | holders. 42 | 43 | OpenLDAP is a registered trademark of the OpenLDAP Foundation. 44 | 45 | Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, 46 | California, USA. All Rights Reserved. Permission to copy and 47 | distribute verbatim copies of this document is granted. 48 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | graft . 2 | graft lib 3 | graft tests 4 | include ChangeLog 5 | include LICENSE 6 | prune docs/_build 7 | prune lmdb/__pycache__ 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # py-lmdb Needs a Maintainer! 3 | 4 | I simply don't have time for this project right now, and still the issues keep 5 | piling in. Are you a heavy py-lmdb user and understand most bits of the API? 6 | Got some spare time to give a binding you use a little love? Dab hand at C and 7 | CFFI? Access to a Visual Studio build machine? Please drop me an e-mail: dw at 8 | botanicus dot net. TLC and hand-holding will be provided as necessary, I just 9 | have no bandwidth left to write new code. 10 | 11 | 12 | ### CI State 13 | 14 | | Platform | Branch | Status | 15 | | -------- | ------ | ------ | 16 | | UNIX | ``master`` | [![master](https://travis-ci.org/dw/py-lmdb.png?branch=master)](https://travis-ci.org/dw/py-lmdb/branches) | 17 | | Windows | ``master`` | [![master](https://ci.appveyor.com/api/projects/status/cx2sau39bufi3t0t/branch/master?svg=true)](https://ci.appveyor.com/project/dw/py-lmdb/branch/master) | 18 | | UNIX | ``release`` | [![release](https://travis-ci.org/dw/py-lmdb.png?branch=release)](https://travis-ci.org/dw/py-lmdb/branches) | 19 | | Windows | ``release`` | [![release](https://ci.appveyor.com/api/projects/status/cx2sau39bufi3t0t/branch/release?svg=true)](https://ci.appveyor.com/project/dw/py-lmdb/branch/release) | 20 | 21 | If you care whether the tests are passing, check out the repository and execute 22 | the tests under your desired target Python release, as the Travis CI build has 23 | a bad habit of breaking due to external factors approximately every 3 months. 24 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | environment: 2 | matrix: 3 | - PYTHON: 'C:\Python26' 4 | - PYTHON: 'C:\Python26-x64' 5 | - PYTHON: 'C:\Python27' 6 | - PYTHON: 'C:\Python27-x64' 7 | - PYTHON: 'C:\Python33' 8 | #- PYTHON: 'C:\Python33-x64' 9 | - PYTHON: 'C:\Python34' 10 | #- PYTHON: 'C:\Python34-x64' 11 | - PYTHON: 'C:\Python35' 12 | - PYTHON: 'C:\Python35-x64' 13 | - PYTHON: 'C:\Python36' 14 | - PYTHON: 'C:\Python36-x64' 15 | 16 | artifacts: 17 | - { 18 | path: 'dist\*.egg', 19 | type: 'file' 20 | } 21 | - { 22 | path: 'dist\*.whl', 23 | type: 'file' 24 | } 25 | 26 | install: 27 | # http://springflex.blogspot.co.uk/2014/02/how-to-fix-valueerror-when-trying-to.html. Works for 33-x64 only. 28 | - 'COPY "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvars64.bat" "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvarsamd64.bat"' 29 | - '%PYTHON%\scripts\pip.exe install wheel pytest .' 
30 | 31 | build_script: 32 | - '%PYTHON%\python.exe setup.py bdist_egg' 33 | - '%PYTHON%\python.exe setup.py bdist_wheel' 34 | 35 | test_script: 36 | - 'cd tests' 37 | - '%PYTHON%\python.exe -m pytest .' 38 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 16 | 17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 18 | 19 | help: 20 | @echo "Please use \`make ' where is one of" 21 | @echo " html to make standalone HTML files" 22 | @echo " dirhtml to make HTML files named index.html in directories" 23 | @echo " singlehtml to make a single large HTML file" 24 | @echo " pickle to make pickle files" 25 | @echo " json to make JSON files" 26 | @echo " htmlhelp to make HTML files and a HTML help project" 27 | @echo " qthelp to make HTML files and a qthelp project" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 31 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 32 | @echo " text to make text files" 33 | @echo " man to make manual pages" 34 | @echo " texinfo to make Texinfo files" 35 | @echo " info to make Texinfo files and run them through makeinfo" 36 | @echo " gettext to make PO message catalogs" 37 | @echo " changes to make an overview of all changed/added/deprecated items" 38 | @echo " linkcheck to check all external links for integrity" 39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 40 | 41 | clean: 42 | -rm -rf $(BUILDDIR)/* 43 | 44 | html: 45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 48 | 49 | dirhtml: 50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 51 | @echo 52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 53 | 54 | singlehtml: 55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 56 | @echo 57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 58 | 59 | pickle: 60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 61 | @echo 62 | @echo "Build finished; now you can process the pickle files." 63 | 64 | json: 65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 66 | @echo 67 | @echo "Build finished; now you can process the JSON files." 68 | 69 | htmlhelp: 70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 71 | @echo 72 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 73 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
74 | 75 | qthelp: 76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 77 | @echo 78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/lmdb.qhcp" 81 | @echo "To view the help file:" 82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/lmdb.qhc" 83 | 84 | devhelp: 85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 86 | @echo 87 | @echo "Build finished." 88 | @echo "To view the help file:" 89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/lmdb" 90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/lmdb" 91 | @echo "# devhelp" 92 | 93 | epub: 94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 95 | @echo 96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 97 | 98 | latex: 99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 100 | @echo 101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 103 | "(use \`make latexpdf' here to do that automatically)." 104 | 105 | latexpdf: 106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 107 | @echo "Running LaTeX files through pdflatex..." 108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 110 | 111 | text: 112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 113 | @echo 114 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 115 | 116 | man: 117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 118 | @echo 119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 120 | 121 | texinfo: 122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 123 | @echo 124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 125 | @echo "Run \`make' in that directory to run these through makeinfo" \ 126 | "(use \`make info' here to do that automatically)." 127 | 128 | info: 129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 130 | @echo "Running Texinfo files through makeinfo..." 131 | make -C $(BUILDDIR)/texinfo info 132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 133 | 134 | gettext: 135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 136 | @echo 137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 138 | 139 | changes: 140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 141 | @echo 142 | @echo "The overview file is in $(BUILDDIR)/changes." 143 | 144 | linkcheck: 145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 146 | @echo 147 | @echo "Link check complete; look for any errors in the above output " \ 148 | "or in $(BUILDDIR)/linkcheck/output.txt." 149 | 150 | doctest: 151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 152 | @echo "Testing of doctests in the sources finished, look at the " \ 153 | "results in $(BUILDDIR)/doctest/output.txt." 154 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # lmdb documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Feb 5 00:39:26 2013. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 
7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | sys.path.insert(0, os.path.abspath('..')) 20 | 21 | # -- General configuration ----------------------------------------------------- 22 | 23 | # If your documentation needs a minimal Sphinx version, state it here. 24 | #needs_sphinx = '1.0' 25 | 26 | # Add any Sphinx extension module names here, as strings. They can be extensions 27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 28 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] 29 | 30 | intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)} 31 | 32 | # Add any paths that contain templates here, relative to this directory. 33 | # templates_path = ['_templates'] 34 | 35 | # The suffix of source filenames. 36 | source_suffix = '.rst' 37 | 38 | # The encoding of source files. 39 | #source_encoding = 'utf-8-sig' 40 | 41 | # The master toctree document. 42 | master_doc = 'index' 43 | 44 | # General information about the project. 45 | project = u'lmdb' 46 | copyright = u'2013, David Wilson' 47 | 48 | # The version info for the project you're documenting, acts as replacement for 49 | # |version| and |release|, also used in various other places throughout the 50 | # built documents. 51 | # 52 | 53 | def grep_version(): 54 | path = os.path.join(os.path.dirname(__file__), '../lmdb/__init__.py') 55 | with open(path) as fp: 56 | for line in fp: 57 | if line.startswith('__version__'): 58 | return eval(line.split()[-1]) 59 | 60 | # The short X.Y version. 61 | version = grep_version() 62 | # The full version, including alpha/beta/rc tags. 63 | release = version 64 | 65 | # The language for content autogenerated by Sphinx. Refer to documentation 66 | # for a list of supported languages. 67 | #language = None 68 | 69 | # There are two options for replacing |today|: either, you set today to some 70 | # non-false value, then it is used: 71 | #today = '' 72 | # Else, today_fmt is used as the format for a strftime call. 73 | #today_fmt = '%B %d, %Y' 74 | 75 | # List of patterns, relative to source directory, that match files and 76 | # directories to ignore when looking for source files. 77 | exclude_patterns = ['_build'] 78 | 79 | # The reST default role (used for this markup: `text`) to use for all documents. 80 | #default_role = None 81 | 82 | # If true, '()' will be appended to :func: etc. cross-reference text. 83 | #add_function_parentheses = True 84 | 85 | # If true, the current module name will be prepended to all description 86 | # unit titles (such as .. function::). 87 | #add_module_names = True 88 | 89 | # If true, sectionauthor and moduleauthor directives will be shown in the 90 | # output. They are ignored by default. 91 | #show_authors = False 92 | 93 | # The name of the Pygments (syntax highlighting) style to use. 94 | pygments_style = 'sphinx' 95 | 96 | # A list of ignored prefixes for module index sorting. 
97 | #modindex_common_prefix = [] 98 | 99 | 100 | # -- Options for HTML output --------------------------------------------------- 101 | 102 | # The theme to use for HTML and HTML Help pages. See the documentation for 103 | # a list of builtin themes. 104 | html_theme = 'acid' 105 | 106 | # Theme options are theme-specific and customize the look and feel of a theme 107 | # further. For a list of options available for each theme, see the 108 | # documentation. 109 | html_theme_options = { 110 | 'github_repo': 'https://github.com/dw/py-lmdb/' 111 | } 112 | 113 | # Add any paths that contain custom themes here, relative to this directory. 114 | html_theme_path = ['themes'] 115 | 116 | # The name for this set of Sphinx documents. If None, it defaults to 117 | # " v documentation". 118 | #html_title = None 119 | 120 | # A shorter title for the navigation bar. Default is the same as html_title. 121 | #html_short_title = None 122 | 123 | # The name of an image file (relative to this directory) to place at the top 124 | # of the sidebar. 125 | #html_logo = None 126 | 127 | # The name of an image file (within the static path) to use as favicon of the 128 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 129 | # pixels large. 130 | #html_favicon = None 131 | 132 | # Add any paths that contain custom static files (such as style sheets) here, 133 | # relative to this directory. They are copied after the builtin static files, 134 | # so a file named "default.css" will overwrite the builtin "default.css". 135 | html_static_path = ['_static'] 136 | 137 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 138 | # using the given strftime format. 139 | #html_last_updated_fmt = '%b %d, %Y' 140 | 141 | # If true, SmartyPants will be used to convert quotes and dashes to 142 | # typographically correct entities. 143 | #html_use_smartypants = True 144 | 145 | # Custom sidebar templates, maps document names to template names. 146 | html_sidebars = {} 147 | 148 | # Additional templates that should be rendered to pages, maps page names to 149 | # template names. 150 | #html_additional_pages = {} 151 | 152 | # If false, no module index is generated. 153 | #html_domain_indices = True 154 | 155 | # If false, no index is generated. 156 | html_use_index = False 157 | 158 | # If true, the index is split into individual pages for each letter. 159 | #html_split_index = False 160 | 161 | # If true, links to the reST sources are added to the pages. 162 | html_show_sourcelink = False 163 | 164 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 165 | html_show_sphinx = False 166 | 167 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 168 | #html_show_copyright = True 169 | 170 | # If true, an OpenSearch description file will be output, and all pages will 171 | # contain a tag referring to it. The value of this option must be the 172 | # base URL from which the finished HTML is served. 173 | #html_use_opensearch = '' 174 | 175 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 176 | #html_file_suffix = None 177 | 178 | # Output file base name for HTML help builder. 179 | htmlhelp_basename = 'lmdbdoc' 180 | 181 | 182 | # -- Options for LaTeX output -------------------------------------------------- 183 | 184 | latex_elements = { 185 | # The paper size ('letterpaper' or 'a4paper'). 186 | #'papersize': 'letterpaper', 187 | 188 | # The font size ('10pt', '11pt' or '12pt'). 
189 | #'pointsize': '10pt', 190 | 191 | # Additional stuff for the LaTeX preamble. 192 | #'preamble': '', 193 | } 194 | 195 | # Grouping the document tree into LaTeX files. List of tuples 196 | # (source start file, target name, title, author, documentclass [howto/manual]). 197 | latex_documents = [ 198 | ('index', 'lmdb.tex', u'lmdb Documentation', 199 | u'David Wilson', 'manual'), 200 | ] 201 | 202 | # The name of an image file (relative to this directory) to place at the top of 203 | # the title page. 204 | #latex_logo = None 205 | 206 | # For "manual" documents, if this is true, then toplevel headings are parts, 207 | # not chapters. 208 | #latex_use_parts = False 209 | 210 | # If true, show page references after internal links. 211 | #latex_show_pagerefs = False 212 | 213 | # If true, show URL addresses after external links. 214 | #latex_show_urls = False 215 | 216 | # Documents to append as an appendix to all manuals. 217 | #latex_appendices = [] 218 | 219 | # If false, no module index is generated. 220 | #latex_domain_indices = True 221 | 222 | 223 | # -- Options for manual page output -------------------------------------------- 224 | 225 | # One entry per manual page. List of tuples 226 | # (source start file, name, description, authors, manual section). 227 | man_pages = [ 228 | ('index', 'lmdb', u'lmdb Documentation', 229 | [u'David Wilson'], 1) 230 | ] 231 | 232 | # If true, show URL addresses after external links. 233 | #man_show_urls = False 234 | 235 | 236 | # -- Options for Texinfo output ------------------------------------------------ 237 | 238 | # Grouping the document tree into Texinfo files. List of tuples 239 | # (source start file, target name, title, author, 240 | # dir menu entry, description, category) 241 | texinfo_documents = [ 242 | ('index', 'lmdb', u'lmdb Documentation', 243 | u'David Wilson', 'lmdb', 'One line description of project.', 244 | 'Miscellaneous'), 245 | ] 246 | 247 | # Documents to append as an appendix to all manuals. 248 | #texinfo_appendices = [] 249 | 250 | # If false, no module index is generated. 251 | #texinfo_domain_indices = True 252 | 253 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 
254 | #texinfo_show_urls = 'footnote' 255 | 256 | 257 | 258 | import sys 259 | 260 | class Mock(object): 261 | def __init__(self, *args, **kwargs): 262 | pass 263 | 264 | def __call__(self, *args, **kwargs): 265 | return Mock() 266 | 267 | @classmethod 268 | def __getattr__(cls, name): 269 | if name in ('__file__', '__path__'): 270 | return '/dev/null' 271 | elif 0 and name[0] == name[0].upper(): 272 | mockType = type(name, (), {}) 273 | mockType.__module__ = __name__ 274 | return mockType 275 | else: 276 | return Mock() 277 | 278 | MOCK_MODULES = ['cffi'] 279 | for mod_name in MOCK_MODULES: 280 | sys.modules[mod_name] = Mock() 281 | -------------------------------------------------------------------------------- /docs/themes/acid/conf.py.conf: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import sys, os 4 | extensions = [] 5 | templates_path = ['{{ template_dir }}', 'templates', '_templates', '.templates'] 6 | source_suffix = '{{ project.suffix }}' 7 | master_doc = 'index' 8 | project = u'{{ project.name }}' 9 | copyright = u'{{ project.copyright }}' 10 | version = '{{ project.version }}' 11 | release = '{{ project.version }}' 12 | exclude_patterns = ['_build'] 13 | pygments_style = 'sphinx' 14 | html_theme = '{{ project.theme }}' 15 | html_theme_path = ['.', '_theme', '.theme'] 16 | htmlhelp_basename = '{{ project.slug }}' 17 | file_insertion_enabled = False 18 | latex_documents = [ 19 | ('index', '{{ project.slug }}.tex', u'{{ project.name }} Documentation', 20 | u'{{ project.copyright }}', 'manual'), 21 | ] 22 | -------------------------------------------------------------------------------- /docs/themes/acid/layout.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | {# Strip out the default script files we provide, but keep some, like mathjax. #} 5 | {% set script_files = [] %} 6 | {%- set url_root = pathto('', 1) %} 7 | {%- if url_root == '#' %}{% set url_root = '' %}{% endif %} 8 | {%- set titlesuffix = " — "|safe + docstitle|e %} 9 | 10 | {%- macro sidebar() %} 11 |
13 | {%- if logo %} 17 | {%- endif %} 19 | {% if theme_github_repo %} 21 | GitHub Repository 23 | {% endif %} 26 | Docs for last release 30 | Docs for master branch 33 | {%- block sidebartoc %} 34 | {%- include "localtoc.html" %} 35 | {%- endblock %} 36 | {%- block sidebarrel %} 37 | {%- include "relations.html" %} 38 | {%- endblock %} 41 | {%- endmacro %} 43 | {{ title|striptags|e }}{{ titlesuffix }} 46 | {%- for cssfile in css_files %} 48 | {%- endfor %} 63 | {%- for scriptfile in script_files %} 65 | {%- endfor %} 68 | {%- block content %} 69 | {%- block sidebar1 %} {# possible location for sidebar #} {% endblock %} 72 | {%- block document %} 76 | {% block body %} {% endblock %} 80 | {%- endblock %} 82 | {%- block sidebar2 %}{{ sidebar() }}{% endblock %} 85 | {%- endblock %} 87 | {% macro relbar() %}{% endmacro %} 88 | {%- block relbar2 %}{{ relbar() }}{% endblock %} 107 | {% if not using_theme %} 108 | {# Keep this here, so that the RTD logo doesn't stomp on the bottom of the theme. #}
112 | {% endif %} 113 | 114 | 115 | 127 | 128 | -------------------------------------------------------------------------------- /docs/themes/acid/sourcelink.html: -------------------------------------------------------------------------------- 1 | {% if 0 %}{% endif %} 2 | -------------------------------------------------------------------------------- /docs/themes/acid/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = basic 3 | stylesheet = acid.css 4 | pygments_style = sphinx 5 | 6 | [options] 7 | github_repo = 8 | 9 | rightsidebar = false 10 | stickysidebar = false 11 | collapsiblesidebar = false 12 | externalrefs = false 13 | 14 | footerbgcolor = #11303d 15 | footertextcolor = #ffffff 16 | sidebarbgcolor = #1c4e63 17 | sidebarbtncolor = #3c6e83 18 | sidebartextcolor = #ffffff 19 | sidebarlinkcolor = #98dbcc 20 | relbarbgcolor = #133f52 21 | relbartextcolor = #ffffff 22 | relbarlinkcolor = #ffffff 23 | bgcolor = #ffffff 24 | textcolor = #000000 25 | headbgcolor = #f2f2f2 26 | headtextcolor = #20435c 27 | headlinkcolor = #c60f0f 28 | linkcolor = #355f7c 29 | visitedlinkcolor = #355f7c 30 | codebgcolor = #eeffcc 31 | codetextcolor = #333333 32 | 33 | bodyfont = sans-serif 34 | headfont = 'Trebuchet MS', sans-serif 35 | -------------------------------------------------------------------------------- /examples/address-book.py: -------------------------------------------------------------------------------- 1 | 2 | import lmdb 3 | 4 | # Open (and create if necessary) our database environment. Must specify 5 | # max_dbs=... since we're opening subdbs. 6 | env = lmdb.open('/tmp/address-book.lmdb', max_dbs=10) 7 | 8 | # Now create subdbs for home and business addresses. 9 | home_db = env.open_db('home') 10 | business_db = env.open_db('business') 11 | 12 | 13 | # Add some telephone numbers to each DB: 14 | with env.begin(write=True) as txn: 15 | txn.put('mum', '012345678', db=home_db) 16 | txn.put('dad', '011232211', db=home_db) 17 | txn.put('dentist', '044415121', db=home_db) 18 | txn.put('hospital', '078126321', db=home_db) 19 | 20 | txn.put('vendor', '0917465628', db=business_db) 21 | txn.put('customer', '0553211232', db=business_db) 22 | txn.put('coworker', '0147652935', db=business_db) 23 | txn.put('boss', '0123151232', db=business_db) 24 | txn.put('manager', '0644810485', db=business_db) 25 | 26 | 27 | # Iterate each DB to show the keys are sorted: 28 | with env.begin() as txn: 29 | for name, db in ('home', home_db), ('business', business_db): 30 | print 'DB:', name 31 | for key, value in txn.cursor(db=db): 32 | print ' ', key, value 33 | print 34 | 35 | 36 | # Now let's update some phone numbers. We can specify the default subdb when 37 | # starting the transaction, rather than pass it in every time: 38 | with env.begin(write=True, db=home_db) as txn: 39 | print 'Updating number for dentist' 40 | txn.put('dentist', '099991231') 41 | 42 | print 'Deleting number for hospital' 43 | txn.delete('hospital') 44 | print 45 | 46 | print 'Home DB is now:' 47 | for key, value in txn.cursor(): 48 | print ' ', key, value 49 | print 50 | 51 | 52 | # Now let's look up a number in the business DB 53 | with env.begin(db=business_db) as txn: 54 | print 'Boss telephone number:', txn.get('boss') 55 | print 56 | 57 | 58 | # We got fired, time to delete all keys from the business DB. 
59 | with env.begin(write=True) as txn: 60 | print 'Deleting all numbers from business DB:' 61 | txn.drop(business_db, delete=False) 62 | 63 | print 'Adding number for recruiter to business DB' 64 | txn.put('recruiter', '04123125324', db=business_db) 65 | 66 | print 'Business DB is now:' 67 | for key, value in txn.cursor(db=business_db): 68 | print ' ', key, value 69 | print 70 | -------------------------------------------------------------------------------- /examples/dirtybench-gdbm.py: -------------------------------------------------------------------------------- 1 | 2 | from pprint import pprint 3 | import os 4 | import shutil 5 | import tempfile 6 | 7 | from time import time as now 8 | import random 9 | import gdbm 10 | 11 | MAP_SIZE = 1048576 * 400 12 | DB_PATH = '/ram/testdb-gdbm' 13 | 14 | if os.path.exists('/ram'): 15 | DB_PATH = '/ram/testdb-gdbm' 16 | else: 17 | DB_PATH = tempfile.mktemp(prefix='dirtybench-gdbm') 18 | 19 | 20 | def x(): 21 | big = '' # '*' * 400 22 | 23 | if os.path.exists(DB_PATH): 24 | os.unlink(DB_PATH) 25 | 26 | t0 = now() 27 | words = set(file('/usr/share/dict/words').readlines()) 28 | words.update([w.upper() for w in words]) 29 | words.update([w[::-1] for w in words]) 30 | words.update([w[::-1].upper() for w in words]) 31 | words.update(['-'.join(w) for w in words]) 32 | #words.update(['+'.join(w) for w in words]) 33 | #words.update(['/'.join(w) for w in words]) 34 | words = list(words) 35 | alllen = sum(len(w) for w in words) 36 | avglen = alllen / len(words) 37 | print 'permutate %d words avglen %d took %.2fsec' % (len(words), avglen, now()-t0) 38 | 39 | getword = iter(words).next 40 | 41 | env = gdbm.open(DB_PATH, 'c') 42 | 43 | run = True 44 | t0 = now() 45 | last = t0 46 | while run: 47 | try: 48 | for _ in xrange(50000): 49 | word = getword() 50 | env[word] = big or word 51 | except StopIteration: 52 | run = False 53 | 54 | t1 = now() 55 | if (t1 - last) > 2: 56 | print '%.2fs (%d/sec)' % (t1-t0, len(words)/(t1-t0)) 57 | last = t1 58 | 59 | t1 = now() 60 | print 'done all %d in %.2fs (%d/sec)' % (len(words), t1-t0, len(words)/(t1-t0)) 61 | last = t1 62 | 63 | print 64 | print 65 | 66 | t0 = now() 67 | lst = sum(env[k] and 1 for k in env.keys()) 68 | t1 = now() 69 | print 'enum %d (key, value) pairs took %.2f sec' % ((lst), t1-t0) 70 | 71 | t0 = now() 72 | lst = sum(1 or env[k] for k in reversed(env.keys())) 73 | t1 = now() 74 | print 'reverse enum %d (key, value) pairs took %.2f sec' % ((lst), t1-t0) 75 | 76 | t0 = now() 77 | for word in words: 78 | env[word] 79 | t1 = now() 80 | print 'rand lookup all keys %.2f sec (%d/sec)' % (t1-t0, lst/(t1-t0)) 81 | 82 | t0 = now() 83 | for word in words: 84 | hash(env[word]) 85 | t1 = now() 86 | print 'per txn rand lookup+hash all keys %.2f sec (%d/sec)' % (t1-t0, lst/(t1-t0)) 87 | 88 | t0 = now() 89 | for word in words: 90 | hash(env[word]) 91 | t1 = now() 92 | print 'rand lookup+hash all keys %.2f sec (%d/sec)' % (t1-t0, lst/(t1-t0)) 93 | 94 | t0 = now() 95 | for word in words: 96 | env[word] 97 | t1 = now() 98 | print 'rand lookup all buffers %.2f sec (%d/sec)' % (t1-t0, lst/(t1-t0)) 99 | 100 | t0 = now() 101 | for word in words: 102 | hash(env[word]) 103 | t1 = now() 104 | print 'rand lookup+hash all buffers %.2f sec (%d/sec)' % (t1-t0, lst/(t1-t0)) 105 | 106 | 107 | # 108 | # get+put 109 | # 110 | 111 | getword = iter(sorted(words)).next 112 | run = True 113 | t0 = now() 114 | last = t0 115 | while run: 116 | try: 117 | for _ in xrange(50000): 118 | word = getword() 119 | old = env[word] 120 | env[word] = 
word 121 | except StopIteration: 122 | run = False 123 | 124 | t1 = now() 125 | if (t1 - last) > 2: 126 | print '%.2fs (%d/sec)' % (t1-t0, len(words)/(t1-t0)) 127 | last = t1 128 | 129 | t1 = now() 130 | print 'get+put all %d in %.2fs (%d/sec)' % (len(words), t1-t0, len(words)/(t1-t0)) 131 | last = t1 132 | 133 | 134 | # 135 | # REPLACE 136 | # 137 | 138 | getword = iter(sorted(words)).next 139 | run = True 140 | t0 = now() 141 | last = t0 142 | while run: 143 | try: 144 | for _ in xrange(50000): 145 | word = getword() 146 | old = env[word] 147 | except StopIteration: 148 | run = False 149 | 150 | t1 = now() 151 | if (t1 - last) > 2: 152 | print '%.2fs (%d/sec)' % (t1-t0, len(words)/(t1-t0)) 153 | last = t1 154 | 155 | t1 = now() 156 | print 'replace all %d in %.2fs (%d/sec)' % (len(words), t1-t0, len(words)/(t1-t0)) 157 | last = t1 158 | 159 | 160 | 161 | 162 | x() 163 | -------------------------------------------------------------------------------- /examples/dirtybench.py: -------------------------------------------------------------------------------- 1 | 2 | from pprint import pprint 3 | import atexit 4 | import gzip 5 | import itertools 6 | import os 7 | import shutil 8 | import sys 9 | import tempfile 10 | 11 | from time import time as now 12 | import random 13 | import lmdb 14 | 15 | MAP_SIZE = 1048576 * 400 16 | DB_PATH = '/ram/testdb' 17 | USE_SPARSE_FILES = sys.platform != 'darwin' 18 | 19 | if os.path.exists('/ram'): 20 | DB_PATH = '/ram/testdb' 21 | else: 22 | DB_PATH = tempfile.mktemp(prefix='dirtybench') 23 | 24 | 25 | env = None 26 | @atexit.register 27 | def cleanup(): 28 | if env: 29 | env.close() 30 | if os.path.exists(DB_PATH): 31 | shutil.rmtree(DB_PATH) 32 | 33 | 34 | def reopen_env(**kwargs): 35 | if env: 36 | env.close() 37 | if os.path.exists(DB_PATH): 38 | shutil.rmtree(DB_PATH) 39 | return lmdb.open(DB_PATH, map_size=MAP_SIZE, writemap=USE_SPARSE_FILES, **kwargs) 40 | 41 | 42 | def case(title, **params): 43 | def wrapper(func): 44 | t0 = now() 45 | count = func() 46 | t1 = now() 47 | print('%40s: %2.3fs %8d/sec' % (title, t1-t0, count/(t1-t0))) 48 | return func 49 | return wrapper 50 | 51 | 52 | def x(): 53 | big = '' # '*' * 400 54 | 55 | t0 = now() 56 | words_path = os.path.join(os.path.dirname(__file__), 'words.gz') 57 | words = set(gzip.open(words_path).read().splitlines()) 58 | words.update([w.upper() for w in words]) 59 | words.update([w[::-1] for w in words]) 60 | words.update([w[::-1].upper() for w in words]) 61 | words.update(['-'.join(w) for w in words]) 62 | #words.update(['+'.join(w) for w in words]) 63 | #words.update(['/'.join(w) for w in words]) 64 | words = list(words) 65 | alllen = sum(len(w) for w in words) 66 | avglen = alllen / len(words) 67 | print 'permutate %d words avglen %d took %.2fsec' % (len(words), avglen, now()-t0) 68 | print 'DB_PATH:', DB_PATH 69 | 70 | words_sorted = sorted(words) 71 | items = [(w, big or w) for w in words] 72 | items_sorted = [(w, big or w) for w in words_sorted] 73 | 74 | global env 75 | env = reopen_env() 76 | 77 | @case('insert') 78 | def test(): 79 | with env.begin(write=True) as txn: 80 | for word in words: 81 | txn.put(word, big or word) 82 | return len(words) 83 | 84 | 85 | st = env.stat() 86 | print 87 | print 'stat:', st 88 | print 'k+v size %.2fkb avg %d, on-disk size: %.2fkb avg %d' %\ 89 | ((2*alllen) / 1024., (2*alllen)/len(words), 90 | (st['psize'] * st['leaf_pages']) / 1024., 91 | (st['psize'] * st['leaf_pages']) / len(words)) 92 | print 93 | 94 | 95 | @case('enum (key, value) pairs') 96 | def 
test(): 97 | with env.begin() as txn: 98 | return sum(1 for _ in txn.cursor()) 99 | 100 | 101 | @case('reverse enum (key, value) pairs') 102 | def test(): 103 | with env.begin() as txn: 104 | return sum(1 for _ in txn.cursor().iterprev()) 105 | 106 | 107 | @case('enum (key, value) buffers') 108 | def test(): 109 | with env.begin(buffers=True) as txn: 110 | return sum(1 for _ in txn.cursor()) 111 | 112 | 113 | print 114 | 115 | 116 | @case('rand lookup') 117 | def test(): 118 | with env.begin() as txn: 119 | for word in words: 120 | txn.get(word) 121 | return len(words) 122 | 123 | 124 | @case('per txn rand lookup') 125 | def test(): 126 | for word in words: 127 | with env.begin() as txn: 128 | txn.get(word) 129 | return len(words) 130 | 131 | 132 | @case('rand lookup+hash') 133 | def test(): 134 | with env.begin() as txn: 135 | for word in words: 136 | hash(txn.get(word)) 137 | return len(words) 138 | 139 | 140 | @case('rand lookup buffers') 141 | def test(): 142 | with env.begin(buffers=True) as txn: 143 | for word in words: 144 | txn.get(word) 145 | return len(words) 146 | 147 | 148 | @case('rand lookup+hash buffers') 149 | def test(): 150 | with env.begin(buffers=True) as txn: 151 | for word in words: 152 | hash(txn.get(word)) 153 | return len(words) 154 | 155 | 156 | @case('rand lookup buffers (cursor)') 157 | def test(): 158 | with env.begin(buffers=True) as txn: 159 | cursget = txn.cursor().get 160 | for word in words: 161 | cursget(word) 162 | return len(words) 163 | 164 | 165 | print 166 | 167 | 168 | @case('get+put') 169 | def test(): 170 | with env.begin(write=True) as txn: 171 | for word in words: 172 | txn.get(word) 173 | txn.put(word, word) 174 | return len(words) 175 | 176 | 177 | @case('replace') 178 | def test(): 179 | with env.begin(write=True) as txn: 180 | for word in words: 181 | txn.replace(word, word) 182 | return len(words) 183 | 184 | 185 | @case('get+put (cursor)') 186 | def test(): 187 | with env.begin(write=True) as txn: 188 | with txn.cursor() as cursor: 189 | for word in words: 190 | cursor.get(word) 191 | cursor.put(word, word) 192 | return len(words) 193 | 194 | 195 | @case('replace (cursor)') 196 | def test(): 197 | with env.begin(write=True) as txn: 198 | with txn.cursor() as cursor: 199 | for word in words: 200 | cursor.replace(word, word) 201 | return len(words) 202 | 203 | 204 | print 205 | 206 | 207 | env = reopen_env() 208 | @case('insert (rand)') 209 | def test(): 210 | with env.begin(write=True) as txn: 211 | for word in words: 212 | txn.put(word, big or word) 213 | return len(words) 214 | 215 | 216 | env = reopen_env() 217 | @case('insert (seq)') 218 | def test(): 219 | with env.begin(write=True) as txn: 220 | for word in words_sorted: 221 | txn.put(word, big or word) 222 | return len(words) 223 | 224 | 225 | env = reopen_env() 226 | @case('insert (rand), reuse cursor') 227 | def test(): 228 | with env.begin(write=True) as txn: 229 | curs = txn.cursor() 230 | for word in words: 231 | curs.put(word, big or word) 232 | return len(words) 233 | env = reopen_env() 234 | 235 | 236 | @case('insert (seq), reuse cursor') 237 | def test(): 238 | with env.begin(write=True) as txn: 239 | curs = txn.cursor() 240 | for word in words_sorted: 241 | curs.put(word, big or word) 242 | return len(words) 243 | 244 | 245 | env = reopen_env() 246 | @case('insert, putmulti') 247 | def test(): 248 | with env.begin(write=True) as txn: 249 | txn.cursor().putmulti(items) 250 | return len(words) 251 | 252 | 253 | env = reopen_env() 254 | @case('insert, putmulti+generator') 255 
| def test(): 256 | with env.begin(write=True) as txn: 257 | txn.cursor().putmulti((w, big or w) for w in words) 258 | return len(words) 259 | 260 | 261 | print 262 | 263 | 264 | env = reopen_env() 265 | @case('append') 266 | def test(): 267 | with env.begin(write=True) as txn: 268 | for word in words_sorted: 269 | txn.put(word, big or word, append=True) 270 | return len(words) 271 | 272 | 273 | env = reopen_env() 274 | @case('append, reuse cursor') 275 | def test(): 276 | with env.begin(write=True) as txn: 277 | curs = txn.cursor() 278 | for word in words_sorted: 279 | curs.put(word, big or word, append=True) 280 | return len(words) 281 | 282 | 283 | env = reopen_env() 284 | @case('append+putmulti') 285 | def test(): 286 | with env.begin(write=True) as txn: 287 | txn.cursor().putmulti(items_sorted, append=True) 288 | return len(words) 289 | 290 | 291 | print 292 | st = env.stat() 293 | print 'stat:', st 294 | print 'k+v size %.2fkb avg %d, on-disk size: %.2fkb avg %d' %\ 295 | ((2*alllen) / 1024., (2*alllen)/len(words), 296 | (st['psize'] * st['leaf_pages']) / 1024., 297 | (st['psize'] * st['leaf_pages']) / len(words)) 298 | 299 | x() 300 | -------------------------------------------------------------------------------- /examples/keystore/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Ultra minimal async IO example 3 | 4 | This requires a recent version of Twisted to run. From the parent directory, 5 | execute it as: 6 | 7 | python -m keystore.main 8 | 9 | It wraps LMDB in a very basic REST API, demonstrating how to handle database IO 10 | using a thread pool without blocking the main thread's select loop. 11 | 12 | The internals are structured relatively sanely, but the `keystore.lmdb` 13 | implementation itself is fairly horrid: a single GET will always cause a large 14 | amount of machinery to run. However, given a large and slow enough (non RAM, 15 | spinning rust) DB, this example will successfully keep the main loop responsive 16 | at all times even though multiple LMDB `mdb_get()` invocations are running 17 | concurrently. 18 | 19 | Not shown here, but may be added sometime later to demonstrate the techniques: 20 | 21 | * Combining multiple reads into a single transaction and context switch. 22 | * Combining writes into a single write transaction and context switch. 23 | 24 | 25 | ## Calling LMDB synchronously 26 | 27 | This example never calls LMDB directly from the main loop, even though in some 28 | restricted circumstances that may be completely safe. Such a situation might 29 | look like: 30 | 31 | * The database is guaranteed to always be in RAM. 32 | * Database writes are never contended. 33 | * Disk IO is very fast, or `sync=False` is used. 34 | 35 | In almost every case, it is likely better to design an application that handles 36 | the possibility that calls into LMDB will trigger slow IO, if not now then at 37 | some point in 10 years when all the original developers have left your project. 
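
For illustration, here is a minimal sketch of the first technique listed above (it is not part of the example code; `env`, `reactor` and `pool` are assumed to be set up as in `keystore/lmdb.py` and `keystore/main.py`):

    import twisted.internet.threads

    def get_many(keys):
        # Service several lookups from a single read-only transaction in a
        # worker thread, paying for one dispatch/context switch instead of
        # one per key.
        def task():
            with env.begin() as txn:
                return [txn.get(key) for key in keys]
        return twisted.internet.threads.deferToThreadPool(reactor, pool, task)

The returned deferred fires with a list of values (None for missing keys), mirroring how `keystore/lmdb.py` dispatches its single-key operations.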
38 | -------------------------------------------------------------------------------- /examples/keystore/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rwightman/py-lmdb/a71bf7fce4f8e5f6a0a4f5d3ee86410b647b9962/examples/keystore/__init__.py -------------------------------------------------------------------------------- /examples/keystore/interfaces.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import absolute_import 3 | import zope.interface 4 | 5 | 6 | class IKeyStoreSync(zope.interface.Interface): 7 | def get(key): 8 | """ 9 | Fetch a key 10 | @return None or bytestring value. 11 | """ 12 | 13 | def put(key, value): 14 | """ 15 | Create or overwrite a key. 16 | @param key Bytestring key. 17 | @param value Bytestring alue. 18 | @return deferred producing None or error. 19 | """ 20 | 21 | def delete(key): 22 | """ 23 | Delete a key. 24 | @param key Bytestring key. 25 | @return deferred producing None or error. 26 | """ 27 | 28 | def seek(key): 29 | """ 30 | Seek to a key, or the next highest key. 31 | @param key Bytestring key. 32 | """ 33 | 34 | def first(): 35 | """ 36 | Seek to the first key in the store. 37 | @return True if positioned on a key. 38 | """ 39 | 40 | def last(): 41 | """ 42 | Seek to the last key in the store. 43 | @return True if positioned on a key. 44 | """ 45 | 46 | def next(): 47 | """ 48 | Seek to the next key in the store. 49 | @return True if positioned on a key. 50 | """ 51 | 52 | def prev(): 53 | """ 54 | Seek to the previous key in the store. 55 | @return True if positioned on a key. 56 | """ 57 | 58 | 59 | class IKeyStore(zope.interface.Interface): 60 | def get(key): 61 | """ 62 | Fetch a key. 63 | @return deferred producing None or bytestring value. 64 | """ 65 | 66 | def getKeys(key, count): 67 | """ 68 | Fetch a list of keys. 69 | @param key 70 | Bytestring first key to return, or None for first/last key 71 | in space. 72 | @param count Number of keys including first key to return. 73 | """ 74 | 75 | def getKeysReverse(key, count): 76 | """ 77 | Fetch a list of keys, walking in reverse. 78 | @param key 79 | Bytestring first key to return, or None for first/last key 80 | in space. 81 | @param count Number of keys including first key to return. 82 | """ 83 | 84 | def getItems(key, count): 85 | """ 86 | Fetch a list of (key, value) tuples. 87 | @param key 88 | Bytestring first key to return, or None for first/last key 89 | in space. 90 | @param count Number of keys including first key to return. 91 | """ 92 | 93 | def getItemsReverse(key, count): 94 | """ 95 | Fetch a list of (key, value) tuples. 96 | @param key 97 | Bytestring first key to return, or None for first/last key 98 | in space. 99 | @param count Number of keys including first key to return. 100 | """ 101 | 102 | def put(key): 103 | """ 104 | Create or overwrite a key. 105 | 106 | @param key Bytestring key. 107 | @param value Bytestring alue. 108 | @return deferred producing None or error. 109 | """ 110 | 111 | def delete(key): 112 | """ 113 | Delete a key. 114 | @param key Bytestring key. 115 | @return deferred producing None or error. 116 | """ 117 | 118 | def putGroup(func): 119 | """ 120 | Execute a function in the context of synchronous (IKeyStoreSync) 121 | transaction, in a private thread. 122 | 123 | @param func Function accepting IKeyStoreSync parameter. 124 | @returns deferred producing None or error. 
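        Hypothetical usage (``store`` is assumed to be an IKeyStore
        implementation such as LmdbKeyStore):

            def populate(sync):
                sync.put(b'k1', b'v1')
                sync.put(b'k2', b'v2')

            d = store.putGroup(populate)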
125 | """ 126 | -------------------------------------------------------------------------------- /examples/keystore/lmdb.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import absolute_import 3 | import operator 4 | 5 | import twisted.internet.defer 6 | import twisted.internet.threads 7 | import zope.interface 8 | 9 | import keystore.interfaces 10 | 11 | 12 | class LmdbKeyStoreSync(object): 13 | zope.interface.implements(keystore.interfaces.IKeyStoreSync) 14 | cursor = None 15 | 16 | def __init__(self, env, write): 17 | self.txn = env.begin(write=write) 18 | 19 | def __enter__(self): 20 | return self 21 | 22 | def __exit__(self, e_type, e_val, e_tb): 23 | if e_type: 24 | self.txn.abort() 25 | else: 26 | self.txn.commit() 27 | self.txn = None 28 | 29 | def get(self, key): 30 | return self.txn.get(key) 31 | 32 | def put(self, key, value): 33 | self.txn.put(key, value) 34 | 35 | def delete(self, key): 36 | self.txn.delete(key) 37 | 38 | # 39 | # Cursor. 40 | # 41 | 42 | @property 43 | def key(self): 44 | return self.cursor.key() 45 | 46 | @property 47 | def value(self): 48 | return self.cursor.value() 49 | 50 | def seek(self, key): 51 | self.cursor.set_range(key) 52 | 53 | def next(self): 54 | return self.cursor.next() 55 | 56 | def prev(self): 57 | return self.cursor.prev() 58 | 59 | 60 | def _reader_task(env, func): 61 | with LmdbKeyStoreSync(env, write=False) as sync: 62 | return func(sync) 63 | 64 | 65 | def _writer_task(env, func): 66 | with LmdbKeyStoreSync(env, write=True) as sync: 67 | return func(sync) 68 | 69 | 70 | class LmdbKeyStore(object): 71 | zope.interface.implements(keystore.interfaces.IKeyStore) 72 | 73 | def __init__(self, reactor, pool, env): 74 | self.reactor = reactor 75 | self.pool = pool 76 | self.env = env 77 | 78 | def _call_in_thread(self, func): 79 | return twisted.internet.threads.deferToThreadPool( 80 | self.reactor, 81 | self.pool, 82 | func) 83 | 84 | def get(self, key): 85 | twisted.python.log.msg('get(%r, %r)', key) 86 | get = lambda sync: sync.get(key) 87 | return self._call_in_thread(lambda: _reader_task(self.env, get)) 88 | 89 | def _get_forward(self, sync, key, count, getter): 90 | positioned = sync.seek(key) 91 | out = [] 92 | for x in xrange(count): 93 | if not positioned: 94 | break 95 | out.append(getter(sync)) 96 | positioned = sync.next() 97 | return out 98 | 99 | def _get_reverse(self, sync, key, count, getter): 100 | out = [] 101 | positioned = sync.seek(key) 102 | if not positioned: 103 | positioned = sync.last() 104 | for x in xrange(count): 105 | if not positioned: 106 | break 107 | out.append(sync.key) 108 | positioned = sync.prev() 109 | return out 110 | 111 | _key_getter = operator.attrgetter('key') 112 | _item_getter = operator.attrgetter('key', 'value') 113 | 114 | def getKeys(self, key, count): 115 | get = lambda sync: self._get_forward(sync, key, count, 116 | self._key_getter) 117 | return self._call_in_thread(lambda: _reader_task(self.env, get)) 118 | 119 | def getKeysReverse(self, key, count): 120 | get = lambda sync: self._get_reverse(sync, key, count, 121 | self._key_getter) 122 | return self._call_in_thread(lambda: _reader_task(self.env, get)) 123 | 124 | def getItems(self, key, count): 125 | get = lambda sync: self._get_forward(sync, key, count, 126 | self._item_getter) 127 | return self._call_in_thread(lambda: _reader_task(self.env, get)) 128 | 129 | def getItemsReverse(self, key, count): 130 | get = lambda sync: self._get_reverse(sync, key, count, 131 | 
self._item_getter) 132 | return self._call_in_thread(lambda: _reader_task(self.env, get)) 133 | 134 | def put(self, key, value): 135 | twisted.python.log.msg('put(%r, %r)', key, value) 136 | put = lambda sync: sync.put(key, value) 137 | return self._call_in_thread(lambda: _writer_task(self.env, put)) 138 | 139 | def delete(self, key): 140 | delete = lambda sync: sync.delete(key) 141 | return self._call_in_thread(lambda: _writer_task(self.env, delete)) 142 | 143 | def putGroup(self, func): 144 | return self._call_in_thread(lambda: _writer_task(self.env, func)) 145 | -------------------------------------------------------------------------------- /examples/keystore/main.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import absolute_import 3 | import webbrowser 4 | 5 | import twisted.internet.reactor 6 | 7 | import lmdb 8 | import keystore.lmdb 9 | import keystore.webapi 10 | 11 | 12 | def main(): 13 | port = 9999 14 | interface = '127.0.0.1' 15 | url = 'http://%s:%d/' % (interface, port) 16 | env = lmdb.open('/tmp/foo') 17 | reactor = twisted.internet.reactor 18 | pool = reactor.getThreadPool() 19 | store = keystore.lmdb.LmdbKeyStore(reactor, pool, env) 20 | site = keystore.webapi.create_site(store) 21 | reactor.listenTCP(port, site, interface=interface) 22 | reactor.callLater(0, webbrowser.open, url) 23 | reactor.run() 24 | 25 | if __name__ == '__main__': 26 | main() 27 | -------------------------------------------------------------------------------- /examples/keystore/static/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 45 | 46 | 59 | 60 | 61 |
[index.html markup not preserved in this dump: a small test page with "Request path:" and "Request body:" form fields for driving the REST API by hand.]
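The operations the page drives can also be exercised directly from Python. A hypothetical smoke test, assuming the server from `keystore/main.py` is running on its default `127.0.0.1:9999` with the store mounted under `/db` as in `keystore/webapi.py`:

    import http.client  # Python 3 standard library

    conn = http.client.HTTPConnection('127.0.0.1', 9999)
    conn.request('PUT', '/db/greeting', body=b'hello')   # create/overwrite -> 202
    conn.getresponse().read()
    conn.request('GET', '/db/greeting')                  # fetch it back
    print(conn.getresponse().read())                     # b'hello'
    conn.request('DELETE', '/db/greeting')               # remove it -> 204
    conn.getresponse().read()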
72 | -------------------------------------------------------------------------------- /examples/keystore/web.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import absolute_import 3 | import twisted.python.failure 4 | import twisted.python.log 5 | import twisted.web.http 6 | import twisted.web.resource 7 | import twisted.web.server 8 | 9 | 10 | class Response(object): 11 | def __init__(self, body=None, headers=None, status=None): 12 | self.body = body 13 | self.headers = headers or {} 14 | self.status = status 15 | 16 | 17 | class DeferrableResourceRender(object): 18 | def __init__(self, resource_, request): 19 | self.resource = resource_ 20 | self.request = request 21 | self.closed = False 22 | 23 | def _notify_finish(self, resp): 24 | self.closed = resp 25 | 26 | def _on_render_error(self, failure): 27 | twisted.python.log.err(failure) 28 | if not self.closed: 29 | self.request.setResponseCode(twisted.web.http.INTERNAL_SERVER_ERROR) 30 | self.request.finish() 31 | 32 | def _on_render_done(self, resp): 33 | if not isinstance(resp, Response): 34 | exc = TypeError("render() didn't return Response, got %r" % (resp,)) 35 | self._on_render_error(twisted.python.failure.Failure(exc)) 36 | return 37 | 38 | if resp.body is not None and type(resp.body) is not bytes: 39 | exc = TypeError("render() returned %r, not bytes" % 40 | (type(resp.body),)) 41 | self._on_render_error(twisted.python.failure.Failure(exc)) 42 | return 43 | 44 | if self.closed: 45 | return 46 | 47 | if resp.status is not None: 48 | self.request.setResponseCode(resp.status) 49 | for key, value in resp.headers.iteritems(): 50 | self.request.setHeader(key, value) 51 | if resp.body is not None: 52 | self.request.setHeader('Content-Length', len(resp.body)) 53 | self.request.write(resp.body) 54 | self.request.finish() 55 | 56 | def start_render(self): 57 | method = self.request.method 58 | handler = getattr(self.resource, 'render_' + method, None) 59 | if handler is None: 60 | self._on_render_done(Response(status=405)) 61 | return 62 | 63 | self.request.notifyFinish().addBoth(self._notify_finish) 64 | d = twisted.internet.defer.maybeDeferred(handler, self.request) 65 | d.addCallbacks(self._on_render_done, self._on_render_error) 66 | return twisted.web.server.NOT_DONE_YET 67 | 68 | 69 | class DeferrableResource(twisted.web.resource.Resource): 70 | def render(self, request): 71 | render = DeferrableResourceRender(self, request) 72 | return render.start_render() 73 | -------------------------------------------------------------------------------- /examples/keystore/webapi.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import absolute_import 3 | import pkg_resources 4 | import keystore.web 5 | import twisted.web.server 6 | 7 | 8 | def read_resource(path): 9 | return pkg_resources.resource_string('keystore', path) 10 | 11 | 12 | class KeyResource(keystore.web.DeferrableResource): 13 | isLeaf = True 14 | 15 | def __init__(self, store): 16 | keystore.web.DeferrableResource.__init__(self) 17 | self.store = store 18 | 19 | def _get_done(self, value): 20 | if value is None: 21 | return keystore.web.Response(status=404) 22 | else: 23 | return keystore.web.Response(body=value) 24 | 25 | def render_GET(self, request): 26 | d = self.store.get(request.path) 27 | d.addCallback(self._get_done) 28 | return d 29 | 30 | def render_PUT(self, request): 31 | value = request.content.read() 32 | d = self.store.put(request.path, value) 33 | 
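        # The deferred fires only once the thread-pool write transaction has
        # committed, so the 202 below is sent post-commit.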
d.addCallback(lambda _: keystore.web.Response(status=202)) 34 | return d 35 | 36 | def render_DELETE(self, request): 37 | d = self.store.delete(request.path) 38 | d.addCallback(lambda _: keystore.web.Response(status=204)) 39 | return d 40 | 41 | 42 | class NamespaceResource(keystore.web.DeferrableResource): 43 | isLeaf = False 44 | 45 | def __init__(self, store): 46 | keystore.web.DeferrableResource.__init__(self) 47 | self.store = store 48 | 49 | def getChild(self, path, request): 50 | return KeyResource(self.store) 51 | 52 | 53 | class StaticResource(twisted.web.resource.Resource): 54 | def __init__(self, pkg_path): 55 | twisted.web.resource.Resource.__init__(self) 56 | self.data = read_resource(pkg_path) 57 | 58 | def render(self, request): 59 | return self.data 60 | 61 | 62 | def create_site(store): 63 | root = twisted.web.resource.Resource() 64 | root.putChild('', StaticResource('static/index.html')) 65 | root.putChild('db', NamespaceResource(store)) 66 | return twisted.web.server.Site(root) 67 | -------------------------------------------------------------------------------- /examples/nastybench.py: -------------------------------------------------------------------------------- 1 | 2 | # Roughly approximates some of Symas microbenchmark. 3 | 4 | from time import time 5 | import random 6 | import shutil 7 | import os 8 | import tempfile 9 | 10 | import lmdb 11 | 12 | 13 | val = ' ' * 100 14 | MAX_KEYS = int(1e6) 15 | 16 | t0 = time() 17 | 18 | urandom = file('/dev/urandom', 'rb', 1048576).read 19 | 20 | keys = set() 21 | while len(keys) < MAX_KEYS: 22 | for _ in xrange(min(1000, MAX_KEYS - len(keys))): 23 | keys.add(urandom(16)) 24 | 25 | print 'make %d keys in %.2fsec' % (len(keys), time() - t0) 26 | keys = list(keys) 27 | 28 | if os.path.exists('/ram'): 29 | DB_PATH = '/ram/dbtest' 30 | else: 31 | DB_PATH = tempfile.mktemp(prefix='nastybench') 32 | 33 | if os.path.exists(DB_PATH): 34 | shutil.rmtree(DB_PATH) 35 | 36 | env = lmdb.open(DB_PATH, map_size=1048576 * 1024, 37 | metasync=False, sync=False, map_async=True) 38 | 39 | nextkey = iter(keys).next 40 | run = True 41 | while run: 42 | with env.begin(write=True) as txn: 43 | try: 44 | for _ in xrange(10000): 45 | txn.put(nextkey(), val) 46 | except StopIteration: 47 | run = False 48 | 49 | d = time() - t0 50 | env.sync(True) 51 | print 'insert %d keys in %.2fsec (%d/sec)' % (len(keys), d, len(keys) / d) 52 | 53 | 54 | 55 | nextkey = iter(keys).next 56 | t0 = time() 57 | 58 | with env.begin() as txn: 59 | try: 60 | while 1: 61 | txn.get(nextkey()) 62 | except StopIteration: 63 | pass 64 | 65 | d = time() - t0 66 | print 'random lookup %d keys in %.2fsec (%d/sec)' % (len(keys), d, len(keys)/d) 67 | 68 | 69 | nextkey = iter(keys).next 70 | t0 = time() 71 | 72 | with env.begin(buffers=True) as txn: 73 | try: 74 | while 1: 75 | txn.get(nextkey()) 76 | except StopIteration: 77 | pass 78 | 79 | d = time() - t0 80 | print 'random lookup %d buffers in %.2fsec (%d/sec)' % (len(keys), d, len(keys)/d) 81 | 82 | 83 | nextkey = iter(keys).next 84 | t0 = time() 85 | 86 | with env.begin(buffers=True) as txn: 87 | try: 88 | while 1: 89 | hash(txn.get(nextkey())) 90 | except StopIteration: 91 | pass 92 | 93 | d = time() - t0 94 | print 'random lookup+hash %d buffers in %.2fsec (%d/sec)' % (len(keys), d, len(keys)/d) 95 | 96 | 97 | 98 | nextkey = iter(keys).next 99 | t0 = time() 100 | 101 | with env.begin(buffers=True) as txn: 102 | nextrec = txn.cursor().iternext().next 103 | try: 104 | while 1: 105 | nextrec() 106 | except StopIteration: 107 | 
pass 108 | 109 | d = time() - t0 110 | print 'seq read %d buffers in %.2fsec (%d/sec)' % (len(keys), d, len(keys)/d) 111 | -------------------------------------------------------------------------------- /examples/parabench.py: -------------------------------------------------------------------------------- 1 | 2 | # Roughly approximates some of Symas microbenchmark. 3 | 4 | import multiprocessing 5 | import os 6 | import random 7 | import shutil 8 | import sys 9 | import tempfile 10 | import time 11 | 12 | try: 13 | import affinity 14 | except: 15 | affinity = False 16 | import lmdb 17 | 18 | 19 | USE_SPARSE_FILES = sys.platform != 'darwin' 20 | DB_PATH = '/ram/dbtest' 21 | MAX_KEYS = int(4e6) 22 | 23 | if os.path.exists('/ram'): 24 | DB_PATH = '/ram/dbtest' 25 | else: 26 | DB_PATH = tempfile.mktemp(prefix='parabench') 27 | 28 | 29 | def open_env(): 30 | return lmdb.open(DB_PATH, 31 | map_size=1048576 * 1024, 32 | metasync=False, 33 | sync=False, 34 | map_async=True, 35 | writemap=USE_SPARSE_FILES) 36 | 37 | 38 | def make_keys(): 39 | t0 = time.time() 40 | urandom = file('/dev/urandom', 'rb', 1048576).read 41 | 42 | keys = set() 43 | while len(keys) < MAX_KEYS: 44 | for _ in xrange(min(1000, MAX_KEYS - len(keys))): 45 | keys.add(urandom(16)) 46 | 47 | print 'make %d keys in %.2fsec' % (len(keys), time.time() - t0) 48 | keys = list(keys) 49 | 50 | nextkey = iter(keys).next 51 | run = True 52 | val = ' ' * 100 53 | env = open_env() 54 | while run: 55 | with env.begin(write=True) as txn: 56 | try: 57 | for _ in xrange(10000): 58 | txn.put(nextkey(), val) 59 | except StopIteration: 60 | run = False 61 | 62 | d = time.time() - t0 63 | env.sync(True) 64 | env.close() 65 | print 'insert %d keys in %.2fsec (%d/sec)' % (len(keys), d, len(keys) / d) 66 | 67 | 68 | if 'drop' in sys.argv and os.path.exists(DB_PATH): 69 | shutil.rmtree(DB_PATH) 70 | 71 | if not os.path.exists(DB_PATH): 72 | make_keys() 73 | 74 | 75 | env = open_env() 76 | with env.begin() as txn: 77 | keys = list(txn.cursor().iternext(values=False)) 78 | env.close() 79 | 80 | 81 | def run(idx): 82 | if affinity: 83 | affinity.set_process_affinity_mask(os.getpid(), 1 << idx) 84 | 85 | env = open_env() 86 | k = list(keys) 87 | random.shuffle(k) 88 | k = k[:1000] 89 | 90 | while 1: 91 | with env.begin() as txn: 92 | nextkey = iter(k).next 93 | try: 94 | while 1: 95 | hash(txn.get(nextkey())) 96 | except StopIteration: 97 | pass 98 | arr[idx] += len(k) 99 | 100 | 101 | 102 | nproc = int(sys.argv[1]) 103 | arr = multiprocessing.Array('L', xrange(nproc)) 104 | for x in xrange(nproc): 105 | arr[x] = 0 106 | procs = [multiprocessing.Process(target=run, args=(x,)) for x in xrange(nproc)] 107 | [p.start() for p in procs] 108 | 109 | 110 | t0 = time.time() 111 | while True: 112 | time.sleep(2) 113 | d = time.time() - t0 114 | lk = sum(arr) 115 | print 'lookup %d keys in %.2fsec (%d/sec)' % (lk, d, lk / d) 116 | 117 | -------------------------------------------------------------------------------- /examples/words.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rwightman/py-lmdb/a71bf7fce4f8e5f6a0a4f5d3ee86410b647b9962/examples/words.gz -------------------------------------------------------------------------------- /lib/midl.c: -------------------------------------------------------------------------------- 1 | /** @file midl.c 2 | * @brief ldap bdb back-end ID List functions */ 3 | /* $OpenLDAP$ */ 4 | /* This work is part of OpenLDAP Software . 
5 | * 6 | * Copyright 2000-2015 The OpenLDAP Foundation. 7 | * All rights reserved. 8 | * 9 | * Redistribution and use in source and binary forms, with or without 10 | * modification, are permitted only as authorized by the OpenLDAP 11 | * Public License. 12 | * 13 | * A copy of this license is available in the file LICENSE in the 14 | * top-level directory of the distribution or, alternatively, at 15 | * . 16 | */ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include "midl.h" 24 | 25 | /** @defgroup internal LMDB Internals 26 | * @{ 27 | */ 28 | /** @defgroup idls ID List Management 29 | * @{ 30 | */ 31 | #define CMP(x,y) ( (x) < (y) ? -1 : (x) > (y) ) 32 | 33 | unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id ) 34 | { 35 | /* 36 | * binary search of id in ids 37 | * if found, returns position of id 38 | * if not found, returns first position greater than id 39 | */ 40 | unsigned base = 0; 41 | unsigned cursor = 1; 42 | int val = 0; 43 | unsigned n = ids[0]; 44 | 45 | while( 0 < n ) { 46 | unsigned pivot = n >> 1; 47 | cursor = base + pivot + 1; 48 | val = CMP( ids[cursor], id ); 49 | 50 | if( val < 0 ) { 51 | n = pivot; 52 | 53 | } else if ( val > 0 ) { 54 | base = cursor; 55 | n -= pivot + 1; 56 | 57 | } else { 58 | return cursor; 59 | } 60 | } 61 | 62 | if( val > 0 ) { 63 | ++cursor; 64 | } 65 | return cursor; 66 | } 67 | 68 | #if 0 /* superseded by append/sort */ 69 | int mdb_midl_insert( MDB_IDL ids, MDB_ID id ) 70 | { 71 | unsigned x, i; 72 | 73 | x = mdb_midl_search( ids, id ); 74 | assert( x > 0 ); 75 | 76 | if( x < 1 ) { 77 | /* internal error */ 78 | return -2; 79 | } 80 | 81 | if ( x <= ids[0] && ids[x] == id ) { 82 | /* duplicate */ 83 | assert(0); 84 | return -1; 85 | } 86 | 87 | if ( ++ids[0] >= MDB_IDL_DB_MAX ) { 88 | /* no room */ 89 | --ids[0]; 90 | return -2; 91 | 92 | } else { 93 | /* insert id */ 94 | for (i=ids[0]; i>x; i--) 95 | ids[i] = ids[i-1]; 96 | ids[x] = id; 97 | } 98 | 99 | return 0; 100 | } 101 | #endif 102 | 103 | MDB_IDL mdb_midl_alloc(int num) 104 | { 105 | MDB_IDL ids = malloc((num+2) * sizeof(MDB_ID)); 106 | if (ids) { 107 | *ids++ = num; 108 | *ids = 0; 109 | } 110 | return ids; 111 | } 112 | 113 | void mdb_midl_free(MDB_IDL ids) 114 | { 115 | if (ids) 116 | free(ids-1); 117 | } 118 | 119 | void mdb_midl_shrink( MDB_IDL *idp ) 120 | { 121 | MDB_IDL ids = *idp; 122 | if (*(--ids) > MDB_IDL_UM_MAX && 123 | (ids = realloc(ids, (MDB_IDL_UM_MAX+2) * sizeof(MDB_ID)))) 124 | { 125 | *ids++ = MDB_IDL_UM_MAX; 126 | *idp = ids; 127 | } 128 | } 129 | 130 | static int mdb_midl_grow( MDB_IDL *idp, int num ) 131 | { 132 | MDB_IDL idn = *idp-1; 133 | /* grow it */ 134 | idn = realloc(idn, (*idn + num + 2) * sizeof(MDB_ID)); 135 | if (!idn) 136 | return ENOMEM; 137 | *idn++ += num; 138 | *idp = idn; 139 | return 0; 140 | } 141 | 142 | int mdb_midl_need( MDB_IDL *idp, unsigned num ) 143 | { 144 | MDB_IDL ids = *idp; 145 | num += ids[0]; 146 | if (num > ids[-1]) { 147 | num = (num + num/4 + (256 + 2)) & -256; 148 | if (!(ids = realloc(ids-1, num * sizeof(MDB_ID)))) 149 | return ENOMEM; 150 | *ids++ = num - 2; 151 | *idp = ids; 152 | } 153 | return 0; 154 | } 155 | 156 | int mdb_midl_append( MDB_IDL *idp, MDB_ID id ) 157 | { 158 | MDB_IDL ids = *idp; 159 | /* Too big? 
*/ 160 | if (ids[0] >= ids[-1]) { 161 | if (mdb_midl_grow(idp, MDB_IDL_UM_MAX)) 162 | return ENOMEM; 163 | ids = *idp; 164 | } 165 | ids[0]++; 166 | ids[ids[0]] = id; 167 | return 0; 168 | } 169 | 170 | int mdb_midl_append_list( MDB_IDL *idp, MDB_IDL app ) 171 | { 172 | MDB_IDL ids = *idp; 173 | /* Too big? */ 174 | if (ids[0] + app[0] >= ids[-1]) { 175 | if (mdb_midl_grow(idp, app[0])) 176 | return ENOMEM; 177 | ids = *idp; 178 | } 179 | memcpy(&ids[ids[0]+1], &app[1], app[0] * sizeof(MDB_ID)); 180 | ids[0] += app[0]; 181 | return 0; 182 | } 183 | 184 | int mdb_midl_append_range( MDB_IDL *idp, MDB_ID id, unsigned n ) 185 | { 186 | MDB_ID *ids = *idp, len = ids[0]; 187 | /* Too big? */ 188 | if (len + n > ids[-1]) { 189 | if (mdb_midl_grow(idp, n | MDB_IDL_UM_MAX)) 190 | return ENOMEM; 191 | ids = *idp; 192 | } 193 | ids[0] = len + n; 194 | ids += len; 195 | while (n) 196 | ids[n--] = id++; 197 | return 0; 198 | } 199 | 200 | void mdb_midl_xmerge( MDB_IDL idl, MDB_IDL merge ) 201 | { 202 | MDB_ID old_id, merge_id, i = merge[0], j = idl[0], k = i+j, total = k; 203 | idl[0] = (MDB_ID)-1; /* delimiter for idl scan below */ 204 | old_id = idl[j]; 205 | while (i) { 206 | merge_id = merge[i--]; 207 | for (; old_id < merge_id; old_id = idl[--j]) 208 | idl[k--] = old_id; 209 | idl[k--] = merge_id; 210 | } 211 | idl[0] = total; 212 | } 213 | 214 | /* Quicksort + Insertion sort for small arrays */ 215 | 216 | #define SMALL 8 217 | #define MIDL_SWAP(a,b) { itmp=(a); (a)=(b); (b)=itmp; } 218 | 219 | void 220 | mdb_midl_sort( MDB_IDL ids ) 221 | { 222 | /* Max possible depth of int-indexed tree * 2 items/level */ 223 | int istack[sizeof(int)*CHAR_BIT * 2]; 224 | int i,j,k,l,ir,jstack; 225 | MDB_ID a, itmp; 226 | 227 | ir = (int)ids[0]; 228 | l = 1; 229 | jstack = 0; 230 | for(;;) { 231 | if (ir - l < SMALL) { /* Insertion sort */ 232 | for (j=l+1;j<=ir;j++) { 233 | a = ids[j]; 234 | for (i=j-1;i>=1;i--) { 235 | if (ids[i] >= a) break; 236 | ids[i+1] = ids[i]; 237 | } 238 | ids[i+1] = a; 239 | } 240 | if (jstack == 0) break; 241 | ir = istack[jstack--]; 242 | l = istack[jstack--]; 243 | } else { 244 | k = (l + ir) >> 1; /* Choose median of left, center, right */ 245 | MIDL_SWAP(ids[k], ids[l+1]); 246 | if (ids[l] < ids[ir]) { 247 | MIDL_SWAP(ids[l], ids[ir]); 248 | } 249 | if (ids[l+1] < ids[ir]) { 250 | MIDL_SWAP(ids[l+1], ids[ir]); 251 | } 252 | if (ids[l] < ids[l+1]) { 253 | MIDL_SWAP(ids[l], ids[l+1]); 254 | } 255 | i = l+1; 256 | j = ir; 257 | a = ids[l+1]; 258 | for(;;) { 259 | do i++; while(ids[i] > a); 260 | do j--; while(ids[j] < a); 261 | if (j < i) break; 262 | MIDL_SWAP(ids[i],ids[j]); 263 | } 264 | ids[l+1] = ids[j]; 265 | ids[j] = a; 266 | jstack += 2; 267 | if (ir-i+1 >= j-l) { 268 | istack[jstack] = ir; 269 | istack[jstack-1] = i; 270 | ir = j-1; 271 | } else { 272 | istack[jstack] = j-1; 273 | istack[jstack-1] = l; 274 | l = i; 275 | } 276 | } 277 | } 278 | } 279 | 280 | unsigned mdb_mid2l_search( MDB_ID2L ids, MDB_ID id ) 281 | { 282 | /* 283 | * binary search of id in ids 284 | * if found, returns position of id 285 | * if not found, returns first position greater than id 286 | */ 287 | unsigned base = 0; 288 | unsigned cursor = 1; 289 | int val = 0; 290 | unsigned n = (unsigned)ids[0].mid; 291 | 292 | while( 0 < n ) { 293 | unsigned pivot = n >> 1; 294 | cursor = base + pivot + 1; 295 | val = CMP( id, ids[cursor].mid ); 296 | 297 | if( val < 0 ) { 298 | n = pivot; 299 | 300 | } else if ( val > 0 ) { 301 | base = cursor; 302 | n -= pivot + 1; 303 | 304 | } else { 305 | return cursor; 
306 | } 307 | } 308 | 309 | if( val > 0 ) { 310 | ++cursor; 311 | } 312 | return cursor; 313 | } 314 | 315 | int mdb_mid2l_insert( MDB_ID2L ids, MDB_ID2 *id ) 316 | { 317 | unsigned x, i; 318 | 319 | x = mdb_mid2l_search( ids, id->mid ); 320 | 321 | if( x < 1 ) { 322 | /* internal error */ 323 | return -2; 324 | } 325 | 326 | if ( x <= ids[0].mid && ids[x].mid == id->mid ) { 327 | /* duplicate */ 328 | return -1; 329 | } 330 | 331 | if ( ids[0].mid >= MDB_IDL_UM_MAX ) { 332 | /* too big */ 333 | return -2; 334 | 335 | } else { 336 | /* insert id */ 337 | ids[0].mid++; 338 | for (i=(unsigned)ids[0].mid; i>x; i--) 339 | ids[i] = ids[i-1]; 340 | ids[x] = *id; 341 | } 342 | 343 | return 0; 344 | } 345 | 346 | int mdb_mid2l_append( MDB_ID2L ids, MDB_ID2 *id ) 347 | { 348 | /* Too big? */ 349 | if (ids[0].mid >= MDB_IDL_UM_MAX) { 350 | return -2; 351 | } 352 | ids[0].mid++; 353 | ids[ids[0].mid] = *id; 354 | return 0; 355 | } 356 | 357 | /** @} */ 358 | /** @} */ 359 | -------------------------------------------------------------------------------- /lib/midl.h: -------------------------------------------------------------------------------- 1 | /** @file midl.h 2 | * @brief LMDB ID List header file. 3 | * 4 | * This file was originally part of back-bdb but has been 5 | * modified for use in libmdb. Most of the macros defined 6 | * in this file are unused, just left over from the original. 7 | * 8 | * This file is only used internally in libmdb and its definitions 9 | * are not exposed publicly. 10 | */ 11 | /* $OpenLDAP$ */ 12 | /* This work is part of OpenLDAP Software . 13 | * 14 | * Copyright 2000-2015 The OpenLDAP Foundation. 15 | * All rights reserved. 16 | * 17 | * Redistribution and use in source and binary forms, with or without 18 | * modification, are permitted only as authorized by the OpenLDAP 19 | * Public License. 20 | * 21 | * A copy of this license is available in the file LICENSE in the 22 | * top-level directory of the distribution or, alternatively, at 23 | * . 24 | */ 25 | 26 | #ifndef _MDB_MIDL_H_ 27 | #define _MDB_MIDL_H_ 28 | 29 | #include 30 | 31 | #ifdef __cplusplus 32 | extern "C" { 33 | #endif 34 | 35 | /** @defgroup internal LMDB Internals 36 | * @{ 37 | */ 38 | 39 | /** @defgroup idls ID List Management 40 | * @{ 41 | */ 42 | /** A generic unsigned ID number. These were entryIDs in back-bdb. 43 | * Preferably it should have the same size as a pointer. 44 | */ 45 | typedef size_t MDB_ID; 46 | 47 | /** An IDL is an ID List, a sorted array of IDs. The first 48 | * element of the array is a counter for how many actual 49 | * IDs are in the list. In the original back-bdb code, IDLs are 50 | * sorted in ascending order. For libmdb IDLs are sorted in 51 | * descending order. 52 | */ 53 | typedef MDB_ID *MDB_IDL; 54 | 55 | /* IDL sizes - likely should be even bigger 56 | * limiting factors: sizeof(ID), thread stack size 57 | */ 58 | #define MDB_IDL_LOGN 16 /* DB_SIZE is 2^16, UM_SIZE is 2^17 */ 59 | #define MDB_IDL_DB_SIZE (1<. 11 | * 12 | * OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | * 14 | * Individual files and/or contributed packages may be copyright by 15 | * other parties and/or subject to additional restrictions. 16 | * 17 | * This work also contains materials derived from public sources. 18 | * 19 | * Additional information about OpenLDAP can be obtained at 20 | * . 
21 | */ 22 | 23 | #ifndef LMDB_PRELOAD_H 24 | #define LMDB_PRELOAD_H 25 | 26 | /** 27 | * Touch a byte from every page in `x`, causing any read faults necessary for 28 | * copying the value to occur. This should be called with the GIL released, in 29 | * order to dramatically decrease the chances of a page fault being taken with 30 | * the GIL held. 31 | * 32 | * We do this since PyMalloc cannot be invoked with the GIL released, and we 33 | * cannot know the size of the MDB result value before dropping the GIL. This 34 | * seems the simplest and cheapest compromise to ensuring multithreaded Python 35 | * apps don't hard stall when dealing with a database larger than RAM. 36 | */ 37 | static void preload(int rc, void *x, size_t size) { 38 | if(rc == 0) { 39 | volatile char j; 40 | int i; 41 | for(i = 0; i < size; i += 4096) { 42 | j = ((volatile char *)x)[i]; 43 | } 44 | (void) j; /* -Wunused-variable */ 45 | } 46 | } 47 | 48 | #endif /* !LMDB_PRELOAD_H */ 49 | -------------------------------------------------------------------------------- /lib/win32-stdint/stdint.h: -------------------------------------------------------------------------------- 1 | // ISO C9x compliant stdint.h for Microsoft Visual Studio 2 | // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 3 | // 4 | // Copyright (c) 2006-2013 Alexander Chemeris 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright notice, 10 | // this list of conditions and the following disclaimer. 11 | // 12 | // 2. Redistributions in binary form must reproduce the above copyright 13 | // notice, this list of conditions and the following disclaimer in the 14 | // documentation and/or other materials provided with the distribution. 15 | // 16 | // 3. Neither the name of the product nor the names of its contributors may 17 | // be used to endorse or promote products derived from this software 18 | // without specific prior written permission. 19 | // 20 | // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 21 | // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 22 | // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 23 | // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 26 | // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 27 | // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 28 | // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 29 | // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | // 31 | /////////////////////////////////////////////////////////////////////////////// 32 | 33 | #ifndef _MSC_VER // [ 34 | #error "Use this header only with Microsoft Visual C++ compilers!" 
35 | #endif // _MSC_VER ] 36 | 37 | #ifndef _MSC_STDINT_H_ // [ 38 | #define _MSC_STDINT_H_ 39 | 40 | #if _MSC_VER > 1000 41 | #pragma once 42 | #endif 43 | 44 | #if _MSC_VER >= 1600 // [ 45 | #include 46 | #else // ] _MSC_VER >= 1600 [ 47 | 48 | #include 49 | 50 | // For Visual Studio 6 in C++ mode and for many Visual Studio versions when 51 | // compiling for ARM we should wrap include with 'extern "C++" {}' 52 | // or compiler give many errors like this: 53 | // error C2733: second C linkage of overloaded function 'wmemchr' not allowed 54 | #ifdef __cplusplus 55 | extern "C" { 56 | #endif 57 | # include 58 | #ifdef __cplusplus 59 | } 60 | #endif 61 | 62 | // Define _W64 macros to mark types changing their size, like intptr_t. 63 | #ifndef _W64 64 | # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 65 | # define _W64 __w64 66 | # else 67 | # define _W64 68 | # endif 69 | #endif 70 | 71 | 72 | // 7.18.1 Integer types 73 | 74 | // 7.18.1.1 Exact-width integer types 75 | 76 | // Visual Studio 6 and Embedded Visual C++ 4 doesn't 77 | // realize that, e.g. char has the same size as __int8 78 | // so we give up on __intX for them. 79 | #if (_MSC_VER < 1300) 80 | typedef signed char int8_t; 81 | typedef signed short int16_t; 82 | typedef signed int int32_t; 83 | typedef unsigned char uint8_t; 84 | typedef unsigned short uint16_t; 85 | typedef unsigned int uint32_t; 86 | #else 87 | typedef signed __int8 int8_t; 88 | typedef signed __int16 int16_t; 89 | typedef signed __int32 int32_t; 90 | typedef unsigned __int8 uint8_t; 91 | typedef unsigned __int16 uint16_t; 92 | typedef unsigned __int32 uint32_t; 93 | #endif 94 | typedef signed __int64 int64_t; 95 | typedef unsigned __int64 uint64_t; 96 | 97 | 98 | // 7.18.1.2 Minimum-width integer types 99 | typedef int8_t int_least8_t; 100 | typedef int16_t int_least16_t; 101 | typedef int32_t int_least32_t; 102 | typedef int64_t int_least64_t; 103 | typedef uint8_t uint_least8_t; 104 | typedef uint16_t uint_least16_t; 105 | typedef uint32_t uint_least32_t; 106 | typedef uint64_t uint_least64_t; 107 | 108 | // 7.18.1.3 Fastest minimum-width integer types 109 | typedef int8_t int_fast8_t; 110 | typedef int16_t int_fast16_t; 111 | typedef int32_t int_fast32_t; 112 | typedef int64_t int_fast64_t; 113 | typedef uint8_t uint_fast8_t; 114 | typedef uint16_t uint_fast16_t; 115 | typedef uint32_t uint_fast32_t; 116 | typedef uint64_t uint_fast64_t; 117 | 118 | // 7.18.1.4 Integer types capable of holding object pointers 119 | #ifdef _WIN64 // [ 120 | typedef signed __int64 intptr_t; 121 | typedef unsigned __int64 uintptr_t; 122 | #else // _WIN64 ][ 123 | typedef _W64 signed int intptr_t; 124 | typedef _W64 unsigned int uintptr_t; 125 | #endif // _WIN64 ] 126 | 127 | // 7.18.1.5 Greatest-width integer types 128 | typedef int64_t intmax_t; 129 | typedef uint64_t uintmax_t; 130 | 131 | 132 | // 7.18.2 Limits of specified-width integer types 133 | 134 | #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 135 | 136 | // 7.18.2.1 Limits of exact-width integer types 137 | #define INT8_MIN ((int8_t)_I8_MIN) 138 | #define INT8_MAX _I8_MAX 139 | #define INT16_MIN ((int16_t)_I16_MIN) 140 | #define INT16_MAX _I16_MAX 141 | #define INT32_MIN ((int32_t)_I32_MIN) 142 | #define INT32_MAX _I32_MAX 143 | #define INT64_MIN ((int64_t)_I64_MIN) 144 | #define INT64_MAX _I64_MAX 145 | #define UINT8_MAX _UI8_MAX 146 | #define UINT16_MAX _UI16_MAX 147 | #define UINT32_MAX _UI32_MAX 148 | 
#define UINT64_MAX _UI64_MAX 149 | 150 | // 7.18.2.2 Limits of minimum-width integer types 151 | #define INT_LEAST8_MIN INT8_MIN 152 | #define INT_LEAST8_MAX INT8_MAX 153 | #define INT_LEAST16_MIN INT16_MIN 154 | #define INT_LEAST16_MAX INT16_MAX 155 | #define INT_LEAST32_MIN INT32_MIN 156 | #define INT_LEAST32_MAX INT32_MAX 157 | #define INT_LEAST64_MIN INT64_MIN 158 | #define INT_LEAST64_MAX INT64_MAX 159 | #define UINT_LEAST8_MAX UINT8_MAX 160 | #define UINT_LEAST16_MAX UINT16_MAX 161 | #define UINT_LEAST32_MAX UINT32_MAX 162 | #define UINT_LEAST64_MAX UINT64_MAX 163 | 164 | // 7.18.2.3 Limits of fastest minimum-width integer types 165 | #define INT_FAST8_MIN INT8_MIN 166 | #define INT_FAST8_MAX INT8_MAX 167 | #define INT_FAST16_MIN INT16_MIN 168 | #define INT_FAST16_MAX INT16_MAX 169 | #define INT_FAST32_MIN INT32_MIN 170 | #define INT_FAST32_MAX INT32_MAX 171 | #define INT_FAST64_MIN INT64_MIN 172 | #define INT_FAST64_MAX INT64_MAX 173 | #define UINT_FAST8_MAX UINT8_MAX 174 | #define UINT_FAST16_MAX UINT16_MAX 175 | #define UINT_FAST32_MAX UINT32_MAX 176 | #define UINT_FAST64_MAX UINT64_MAX 177 | 178 | // 7.18.2.4 Limits of integer types capable of holding object pointers 179 | #ifdef _WIN64 // [ 180 | # define INTPTR_MIN INT64_MIN 181 | # define INTPTR_MAX INT64_MAX 182 | # define UINTPTR_MAX UINT64_MAX 183 | #else // _WIN64 ][ 184 | # define INTPTR_MIN INT32_MIN 185 | # define INTPTR_MAX INT32_MAX 186 | # define UINTPTR_MAX UINT32_MAX 187 | #endif // _WIN64 ] 188 | 189 | // 7.18.2.5 Limits of greatest-width integer types 190 | #define INTMAX_MIN INT64_MIN 191 | #define INTMAX_MAX INT64_MAX 192 | #define UINTMAX_MAX UINT64_MAX 193 | 194 | // 7.18.3 Limits of other integer types 195 | 196 | #ifdef _WIN64 // [ 197 | # define PTRDIFF_MIN _I64_MIN 198 | # define PTRDIFF_MAX _I64_MAX 199 | #else // _WIN64 ][ 200 | # define PTRDIFF_MIN _I32_MIN 201 | # define PTRDIFF_MAX _I32_MAX 202 | #endif // _WIN64 ] 203 | 204 | #define SIG_ATOMIC_MIN INT_MIN 205 | #define SIG_ATOMIC_MAX INT_MAX 206 | 207 | #ifndef SIZE_MAX // [ 208 | # ifdef _WIN64 // [ 209 | # define SIZE_MAX _UI64_MAX 210 | # else // _WIN64 ][ 211 | # define SIZE_MAX _UI32_MAX 212 | # endif // _WIN64 ] 213 | #endif // SIZE_MAX ] 214 | 215 | // WCHAR_MIN and WCHAR_MAX are also defined in 216 | #ifndef WCHAR_MIN // [ 217 | # define WCHAR_MIN 0 218 | #endif // WCHAR_MIN ] 219 | #ifndef WCHAR_MAX // [ 220 | # define WCHAR_MAX _UI16_MAX 221 | #endif // WCHAR_MAX ] 222 | 223 | #define WINT_MIN 0 224 | #define WINT_MAX _UI16_MAX 225 | 226 | #endif // __STDC_LIMIT_MACROS ] 227 | 228 | 229 | // 7.18.4 Limits of other integer types 230 | 231 | #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 232 | 233 | // 7.18.4.1 Macros for minimum-width integer constants 234 | 235 | #define INT8_C(val) val##i8 236 | #define INT16_C(val) val##i16 237 | #define INT32_C(val) val##i32 238 | #define INT64_C(val) val##i64 239 | 240 | #define UINT8_C(val) val##ui8 241 | #define UINT16_C(val) val##ui16 242 | #define UINT32_C(val) val##ui32 243 | #define UINT64_C(val) val##ui64 244 | 245 | // 7.18.4.2 Macros for greatest-width integer constants 246 | // These #ifndef's are needed to prevent collisions with . 247 | // Check out Issue 9 for the details. 
248 | #ifndef INTMAX_C // [ 249 | # define INTMAX_C INT64_C 250 | #endif // INTMAX_C ] 251 | #ifndef UINTMAX_C // [ 252 | # define UINTMAX_C UINT64_C 253 | #endif // UINTMAX_C ] 254 | 255 | #endif // __STDC_CONSTANT_MACROS ] 256 | 257 | #endif // _MSC_VER >= 1600 ] 258 | 259 | #endif // _MSC_STDINT_H_ ] 260 | -------------------------------------------------------------------------------- /lib/win32/inttypes.h: -------------------------------------------------------------------------------- 1 | // ISO C9x compliant inttypes.h for Microsoft Visual Studio 2 | // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 3 | // 4 | // Copyright (c) 2006-2013 Alexander Chemeris 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright notice, 10 | // this list of conditions and the following disclaimer. 11 | // 12 | // 2. Redistributions in binary form must reproduce the above copyright 13 | // notice, this list of conditions and the following disclaimer in the 14 | // documentation and/or other materials provided with the distribution. 15 | // 16 | // 3. Neither the name of the product nor the names of its contributors may 17 | // be used to endorse or promote products derived from this software 18 | // without specific prior written permission. 19 | // 20 | // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 21 | // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 22 | // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 23 | // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 26 | // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 27 | // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 28 | // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 29 | // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | // 31 | /////////////////////////////////////////////////////////////////////////////// 32 | 33 | #ifndef _MSC_VER // [ 34 | #error "Use this header only with Microsoft Visual C++ compilers!" 
35 | #endif // _MSC_VER ] 36 | 37 | #ifndef _MSC_INTTYPES_H_ // [ 38 | #define _MSC_INTTYPES_H_ 39 | 40 | #if _MSC_VER > 1000 41 | #pragma once 42 | #endif 43 | 44 | #include "stdint.h" 45 | 46 | // 7.8 Format conversion of integer types 47 | 48 | typedef struct { 49 | intmax_t quot; 50 | intmax_t rem; 51 | } imaxdiv_t; 52 | 53 | // 7.8.1 Macros for format specifiers 54 | 55 | #if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 56 | 57 | // The fprintf macros for signed integers are: 58 | #define PRId8 "d" 59 | #define PRIi8 "i" 60 | #define PRIdLEAST8 "d" 61 | #define PRIiLEAST8 "i" 62 | #define PRIdFAST8 "d" 63 | #define PRIiFAST8 "i" 64 | 65 | #define PRId16 "hd" 66 | #define PRIi16 "hi" 67 | #define PRIdLEAST16 "hd" 68 | #define PRIiLEAST16 "hi" 69 | #define PRIdFAST16 "hd" 70 | #define PRIiFAST16 "hi" 71 | 72 | #define PRId32 "I32d" 73 | #define PRIi32 "I32i" 74 | #define PRIdLEAST32 "I32d" 75 | #define PRIiLEAST32 "I32i" 76 | #define PRIdFAST32 "I32d" 77 | #define PRIiFAST32 "I32i" 78 | 79 | #define PRId64 "I64d" 80 | #define PRIi64 "I64i" 81 | #define PRIdLEAST64 "I64d" 82 | #define PRIiLEAST64 "I64i" 83 | #define PRIdFAST64 "I64d" 84 | #define PRIiFAST64 "I64i" 85 | 86 | #define PRIdMAX "I64d" 87 | #define PRIiMAX "I64i" 88 | 89 | #define PRIdPTR "Id" 90 | #define PRIiPTR "Ii" 91 | 92 | // The fprintf macros for unsigned integers are: 93 | #define PRIo8 "o" 94 | #define PRIu8 "u" 95 | #define PRIx8 "x" 96 | #define PRIX8 "X" 97 | #define PRIoLEAST8 "o" 98 | #define PRIuLEAST8 "u" 99 | #define PRIxLEAST8 "x" 100 | #define PRIXLEAST8 "X" 101 | #define PRIoFAST8 "o" 102 | #define PRIuFAST8 "u" 103 | #define PRIxFAST8 "x" 104 | #define PRIXFAST8 "X" 105 | 106 | #define PRIo16 "ho" 107 | #define PRIu16 "hu" 108 | #define PRIx16 "hx" 109 | #define PRIX16 "hX" 110 | #define PRIoLEAST16 "ho" 111 | #define PRIuLEAST16 "hu" 112 | #define PRIxLEAST16 "hx" 113 | #define PRIXLEAST16 "hX" 114 | #define PRIoFAST16 "ho" 115 | #define PRIuFAST16 "hu" 116 | #define PRIxFAST16 "hx" 117 | #define PRIXFAST16 "hX" 118 | 119 | #define PRIo32 "I32o" 120 | #define PRIu32 "I32u" 121 | #define PRIx32 "I32x" 122 | #define PRIX32 "I32X" 123 | #define PRIoLEAST32 "I32o" 124 | #define PRIuLEAST32 "I32u" 125 | #define PRIxLEAST32 "I32x" 126 | #define PRIXLEAST32 "I32X" 127 | #define PRIoFAST32 "I32o" 128 | #define PRIuFAST32 "I32u" 129 | #define PRIxFAST32 "I32x" 130 | #define PRIXFAST32 "I32X" 131 | 132 | #define PRIo64 "I64o" 133 | #define PRIu64 "I64u" 134 | #define PRIx64 "I64x" 135 | #define PRIX64 "I64X" 136 | #define PRIoLEAST64 "I64o" 137 | #define PRIuLEAST64 "I64u" 138 | #define PRIxLEAST64 "I64x" 139 | #define PRIXLEAST64 "I64X" 140 | #define PRIoFAST64 "I64o" 141 | #define PRIuFAST64 "I64u" 142 | #define PRIxFAST64 "I64x" 143 | #define PRIXFAST64 "I64X" 144 | 145 | #define PRIoMAX "I64o" 146 | #define PRIuMAX "I64u" 147 | #define PRIxMAX "I64x" 148 | #define PRIXMAX "I64X" 149 | 150 | #define PRIoPTR "Io" 151 | #define PRIuPTR "Iu" 152 | #define PRIxPTR "Ix" 153 | #define PRIXPTR "IX" 154 | 155 | // The fscanf macros for signed integers are: 156 | #define SCNd8 "d" 157 | #define SCNi8 "i" 158 | #define SCNdLEAST8 "d" 159 | #define SCNiLEAST8 "i" 160 | #define SCNdFAST8 "d" 161 | #define SCNiFAST8 "i" 162 | 163 | #define SCNd16 "hd" 164 | #define SCNi16 "hi" 165 | #define SCNdLEAST16 "hd" 166 | #define SCNiLEAST16 "hi" 167 | #define SCNdFAST16 "hd" 168 | #define SCNiFAST16 "hi" 169 | 170 | #define SCNd32 "ld" 171 | #define SCNi32 "li" 172 | #define SCNdLEAST32 
"ld" 173 | #define SCNiLEAST32 "li" 174 | #define SCNdFAST32 "ld" 175 | #define SCNiFAST32 "li" 176 | 177 | #define SCNd64 "I64d" 178 | #define SCNi64 "I64i" 179 | #define SCNdLEAST64 "I64d" 180 | #define SCNiLEAST64 "I64i" 181 | #define SCNdFAST64 "I64d" 182 | #define SCNiFAST64 "I64i" 183 | 184 | #define SCNdMAX "I64d" 185 | #define SCNiMAX "I64i" 186 | 187 | #ifdef _WIN64 // [ 188 | # define SCNdPTR "I64d" 189 | # define SCNiPTR "I64i" 190 | #else // _WIN64 ][ 191 | # define SCNdPTR "ld" 192 | # define SCNiPTR "li" 193 | #endif // _WIN64 ] 194 | 195 | // The fscanf macros for unsigned integers are: 196 | #define SCNo8 "o" 197 | #define SCNu8 "u" 198 | #define SCNx8 "x" 199 | #define SCNX8 "X" 200 | #define SCNoLEAST8 "o" 201 | #define SCNuLEAST8 "u" 202 | #define SCNxLEAST8 "x" 203 | #define SCNXLEAST8 "X" 204 | #define SCNoFAST8 "o" 205 | #define SCNuFAST8 "u" 206 | #define SCNxFAST8 "x" 207 | #define SCNXFAST8 "X" 208 | 209 | #define SCNo16 "ho" 210 | #define SCNu16 "hu" 211 | #define SCNx16 "hx" 212 | #define SCNX16 "hX" 213 | #define SCNoLEAST16 "ho" 214 | #define SCNuLEAST16 "hu" 215 | #define SCNxLEAST16 "hx" 216 | #define SCNXLEAST16 "hX" 217 | #define SCNoFAST16 "ho" 218 | #define SCNuFAST16 "hu" 219 | #define SCNxFAST16 "hx" 220 | #define SCNXFAST16 "hX" 221 | 222 | #define SCNo32 "lo" 223 | #define SCNu32 "lu" 224 | #define SCNx32 "lx" 225 | #define SCNX32 "lX" 226 | #define SCNoLEAST32 "lo" 227 | #define SCNuLEAST32 "lu" 228 | #define SCNxLEAST32 "lx" 229 | #define SCNXLEAST32 "lX" 230 | #define SCNoFAST32 "lo" 231 | #define SCNuFAST32 "lu" 232 | #define SCNxFAST32 "lx" 233 | #define SCNXFAST32 "lX" 234 | 235 | #define SCNo64 "I64o" 236 | #define SCNu64 "I64u" 237 | #define SCNx64 "I64x" 238 | #define SCNX64 "I64X" 239 | #define SCNoLEAST64 "I64o" 240 | #define SCNuLEAST64 "I64u" 241 | #define SCNxLEAST64 "I64x" 242 | #define SCNXLEAST64 "I64X" 243 | #define SCNoFAST64 "I64o" 244 | #define SCNuFAST64 "I64u" 245 | #define SCNxFAST64 "I64x" 246 | #define SCNXFAST64 "I64X" 247 | 248 | #define SCNoMAX "I64o" 249 | #define SCNuMAX "I64u" 250 | #define SCNxMAX "I64x" 251 | #define SCNXMAX "I64X" 252 | 253 | #ifdef _WIN64 // [ 254 | # define SCNoPTR "I64o" 255 | # define SCNuPTR "I64u" 256 | # define SCNxPTR "I64x" 257 | # define SCNXPTR "I64X" 258 | #else // _WIN64 ][ 259 | # define SCNoPTR "lo" 260 | # define SCNuPTR "lu" 261 | # define SCNxPTR "lx" 262 | # define SCNXPTR "lX" 263 | #endif // _WIN64 ] 264 | 265 | #endif // __STDC_FORMAT_MACROS ] 266 | 267 | // 7.8.2 Functions for greatest-width integer types 268 | 269 | // 7.8.2.1 The imaxabs function 270 | #define imaxabs _abs64 271 | 272 | // 7.8.2.2 The imaxdiv function 273 | 274 | // This is modified version of div() function from Microsoft's div.c found 275 | // in %MSVC.NET%\crt\src\div.c 276 | #ifdef STATIC_IMAXDIV // [ 277 | static 278 | #else // STATIC_IMAXDIV ][ 279 | _inline 280 | #endif // STATIC_IMAXDIV ] 281 | imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) 282 | { 283 | imaxdiv_t result; 284 | 285 | result.quot = numer / denom; 286 | result.rem = numer % denom; 287 | 288 | if (numer < 0 && result.rem > 0) { 289 | // did division wrong; must fix up 290 | ++result.quot; 291 | result.rem -= denom; 292 | } 293 | 294 | return result; 295 | } 296 | 297 | // 7.8.2.3 The strtoimax and strtoumax functions 298 | #define strtoimax _strtoi64 299 | #define strtoumax _strtoui64 300 | 301 | // 7.8.2.4 The wcstoimax and wcstoumax functions 302 | #define wcstoimax _wcstoi64 303 | #define wcstoumax _wcstoui64 304 | 305 | 
306 | #endif // _MSC_INTTYPES_H_ ] 307 | -------------------------------------------------------------------------------- /lib/win32/unistd.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rwightman/py-lmdb/a71bf7fce4f8e5f6a0a4f5d3ee86410b647b9962/lib/win32/unistd.h -------------------------------------------------------------------------------- /lmdb/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013 The py-lmdb authors, all rights reserved. 2 | # 3 | # Redistribution and use in source and binary forms, with or without 4 | # modification, are permitted only as authorized by the OpenLDAP 5 | # Public License. 6 | # 7 | # A copy of this license is available in the file LICENSE in the 8 | # top-level directory of the distribution or, alternatively, at 9 | # . 10 | # 11 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 12 | # 13 | # Individual files and/or contributed packages may be copyright by 14 | # other parties and/or subject to additional restrictions. 15 | # 16 | # This work also contains materials derived from public sources. 17 | # 18 | # Additional information about OpenLDAP can be obtained at 19 | # . 20 | 21 | """ 22 | cffi wrapper for OpenLDAP's "Lightning" MDB database. 23 | 24 | Please see https://lmdb.readthedocs.io/ 25 | """ 26 | 27 | import os 28 | import sys 29 | 30 | def _reading_docs(): 31 | # Hack: disable speedups while testing or reading docstrings. Don't check 32 | # for basename for embedded python - variable 'argv' does not exists there. 33 | if not(hasattr(sys, 'argv')): 34 | return False 35 | 36 | basename = os.path.basename(sys.argv[0]) 37 | return any(x in basename for x in ('sphinx-build', 'pydoc')) 38 | 39 | try: 40 | if _reading_docs() or os.getenv('LMDB_FORCE_CFFI') is not None: 41 | raise ImportError 42 | from lmdb.cpython import * 43 | from lmdb.cpython import open 44 | from lmdb.cpython import __all__ 45 | except ImportError: 46 | if (not _reading_docs()) and os.getenv('LMDB_FORCE_CPYTHON') is not None: 47 | raise 48 | from lmdb.cffi import * 49 | from lmdb.cffi import open 50 | from lmdb.cffi import __all__ 51 | from lmdb.cffi import __doc__ 52 | 53 | __version__ = '0.93' 54 | 55 | # Hack to support Python v2.5 'python -mlmdb' 56 | if __name__ == '__main__': 57 | import lmdb.tool 58 | import atexit 59 | atexit.register(lmdb.tool.main) 60 | -------------------------------------------------------------------------------- /lmdb/__main__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013 The py-lmdb authors, all rights reserved. 2 | # 3 | # Redistribution and use in source and binary forms, with or without 4 | # modification, are permitted only as authorized by the OpenLDAP 5 | # Public License. 6 | # 7 | # A copy of this license is available in the file LICENSE in the 8 | # top-level directory of the distribution or, alternatively, at 9 | # . 10 | # 11 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 12 | # 13 | # Individual files and/or contributed packages may be copyright by 14 | # other parties and/or subject to additional restrictions. 15 | # 16 | # This work also contains materials derived from public sources. 17 | # 18 | # Additional information about OpenLDAP can be obtained at 19 | # . 
20 | 21 | # Hack to support Python >=v2.6 'pythom -mlmdb' 22 | from __future__ import absolute_import 23 | import lmdb.tool 24 | lmdb.tool.main() 25 | -------------------------------------------------------------------------------- /lmdb/tool.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2013 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 21 | # 22 | 23 | """ 24 | Basic tools for working with LMDB. 25 | 26 | copy: Consistent high speed backup an environment. 27 | %prog copy -e source.lmdb target.lmdb 28 | 29 | copyfd: Consistent high speed backup an environment to stdout. 30 | %prog copyfd -e source.lmdb > target.lmdb/data.mdb 31 | 32 | drop: Delete one or more sub-databases. 33 | %prog drop db1 34 | 35 | dump: Dump one or more databases to disk in 'cdbmake' format. 36 | Usage: dump [db1=file1.cdbmake db2=file2.cdbmake] 37 | 38 | If no databases are given, dumps the main database to 'main.cdbmake'. 39 | 40 | edit: Add/delete/replace values from a database. 41 | %prog edit --set key=value --set-file key=/path \\ 42 | --add key=value --add-file key=/path/to/file \\ 43 | --delete key 44 | 45 | get: Read one or more values from a database. 46 | %prog get [ [ [..]]] 47 | 48 | readers: Display readers in the lock table 49 | %prog readers -e /path/to/db [-c] 50 | 51 | If -c is specified, clear stale readers. 52 | 53 | restore: Read one or more database from disk in 'cdbmake' format. 54 | %prog restore db1=file1.cdbmake db2=file2.cdbmake 55 | 56 | The special db name ":main:" may be used to indicate the main DB. 57 | 58 | rewrite: Re-create an environment using MDB_APPEND 59 | %prog rewrite -e src.lmdb -E dst.lmdb [ [ ..]] 60 | 61 | If no databases are given, rewrites only the main database. 62 | 63 | shell: Open interactive console with ENV set to the open environment. 64 | 65 | stat: Print environment statistics. 66 | 67 | warm: Read environment into page cache sequentially. 68 | 69 | watch: Show live environment statistics 70 | """ 71 | 72 | from __future__ import absolute_import 73 | from __future__ import with_statement 74 | import array 75 | import collections 76 | import csv 77 | import functools 78 | import optparse 79 | import os 80 | import pprint 81 | import signal 82 | import string 83 | import struct 84 | import sys 85 | import time 86 | 87 | # Python3.x bikeshedded trechery. 
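# Use io.BytesIO when available, falling back to cStringIO and then the
# pure-Python StringIO module.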
88 | try: 89 | from io import BytesIO as StringIO 90 | except ImportError: 91 | try: 92 | from cStringIO import StringIO # type: ignore 93 | except ImportError: 94 | from StringIO import StringIO # type: ignore 95 | 96 | import lmdb 97 | 98 | 99 | BUF_SIZE = 10485760 100 | ENV = None 101 | DB = None 102 | 103 | # How strings get encoded to and decoded from DB 104 | ENCODING = 'utf-8' 105 | 106 | 107 | def _to_bytes(s): 108 | """Given either a Python 2.x or 3.x str, return either a str (Python 2.x) 109 | or a bytes instance (Python 3.x).""" 110 | return globals().get('unicode', str)(s).encode(ENCODING) 111 | 112 | 113 | def isprint(c): 114 | """Return ``True`` if the character `c` can be printed visibly and without 115 | adversely affecting printing position (e.g. newline).""" 116 | return c in string.printable and ord(c) > 16 117 | 118 | 119 | def xxd(s): 120 | """Return a vaguely /usr/bin/xxd formatted representation of the bytestring 121 | `s`.""" 122 | sio = StringIO() 123 | pr = _to_bytes('') 124 | for idx, ch in enumerate(s): 125 | ch = chr(ch) 126 | if not (idx % 16): 127 | if idx: 128 | sio.write(_to_bytes(' ')) 129 | sio.write(pr) 130 | sio.write(_to_bytes('\n')) 131 | sio.write(_to_bytes('%07x:' % idx)) 132 | pr = _to_bytes('') 133 | if not (idx % 2): 134 | sio.write(_to_bytes(' ')) 135 | # import pdb; pdb.set_trace() 136 | sio.write(_to_bytes('%02x' % (ord(ch),))) 137 | pr += _to_bytes(ch) if isprint(ch) else _to_bytes('.') 138 | 139 | if idx % 16: 140 | need = 15 - (idx % 16) 141 | # fill remainder of last line. 142 | sio.write(_to_bytes(' ') * need) 143 | sio.write(_to_bytes(' ') * (need // 2)) 144 | sio.write(_to_bytes(' ')) 145 | sio.write(pr) 146 | 147 | sio.write(_to_bytes('\n')) 148 | return sio.getvalue().decode(ENCODING) 149 | 150 | 151 | def make_parser(): 152 | parser = optparse.OptionParser() 153 | parser.prog = 'python -mlmdb' 154 | parser.usage = '%prog [options] \n' + __doc__.rstrip() 155 | parser.add_option('-e', '--env', help='Environment file to open') 156 | parser.add_option('-d', '--db', help='Database to open (default: main)') 157 | parser.add_option('-r', '--read', help='Open environment read-only') 158 | parser.add_option('-S', '--map_size', type='int', default='10', 159 | help='Map size in megabytes (default: 10)') 160 | parser.add_option('-s', '--use-single-file', action='store_true', 161 | help='The database was created as a single file and not a subdirectory') 162 | # FIXME: implement --all 163 | # parser.add_option('-a', '--all', action='store_true', 164 | # help='Make "dump" dump all databases') 165 | parser.add_option('-E', '--target_env', 166 | help='Target environment file for "dumpfd"') 167 | parser.add_option('-x', '--xxd', action='store_true', 168 | help='Print values in xxd format') 169 | parser.add_option('-M', '--max-dbs', type='int', default=128, 170 | help='Maximum open DBs (default: 128)') 171 | parser.add_option('--out-fd', type='int', default=1, 172 | help='"copyfd" command target fd') 173 | group = parser.add_option_group('Options for "copy" command') 174 | group.add_option('--compact', action='store_true', default=False, 175 | help='Perform compaction while copying.') 176 | group = parser.add_option_group('Options for "edit" command') 177 | group.add_option('--set', action='append', 178 | help='List of key=value pairs to set.') 179 | group.add_option('--set-file', action='append', 180 | help='List of key pairs to read from files.') 181 | group.add_option('--add', action='append', 182 | help='List of key=value pairs to add.') 183 | 
group.add_option('--add-file', action='append', 184 | help='List of key pairs to read from files.') 185 | group.add_option('--delete', action='append', 186 | help='List of key=value pairs to delete.') 187 | group = parser.add_option_group('Options for "readers" command') 188 | group.add_option('-c', '--clean', action='store_true', 189 | help='Clean stale readers? (default: no)') 190 | group = parser.add_option_group('Options for "watch" command') 191 | group.add_option('--csv', action='store_true', 192 | help='Generate CSV instead of terminal output.') 193 | group.add_option('--interval', type='int', default=1, 194 | help='Interval size (default: 1sec)') 195 | group.add_option('--window', type='int', default=10, 196 | help='Average window size (default: 10)') 197 | return parser 198 | 199 | 200 | def die(fmt, *args): 201 | if args: 202 | fmt %= args 203 | sys.stderr.write('lmdb.tool: %s\n' % (fmt,)) 204 | raise SystemExit(1) 205 | 206 | 207 | def dump_cursor_to_fp(cursor, fp): 208 | for key, value in cursor: 209 | fp.write(_to_bytes('+%d,%d:' % (len(key), len(value)))) 210 | fp.write(key) 211 | fp.write(_to_bytes('->')) 212 | fp.write(value) 213 | fp.write(_to_bytes('\n')) 214 | fp.write(_to_bytes('\n')) 215 | 216 | 217 | def db_map_from_args(args): 218 | db_map = {} 219 | 220 | for arg in args: 221 | dbname, sep, path = arg.partition('=') 222 | if not sep: 223 | die('DB specification missing "=": %r', arg) 224 | 225 | if dbname == ':main:': 226 | dbname = None 227 | if dbname in db_map: 228 | die('DB specified twice: %r', arg) 229 | db_map[dbname] = (ENV.open_db(_to_bytes(dbname) if dbname else None), path) 230 | 231 | if not db_map: 232 | db_map[':main:'] = (ENV.open_db(None), 'main.cdbmake') 233 | return db_map 234 | 235 | 236 | def cmd_copy(opts, args): 237 | if len(args) != 1: 238 | die('Please specify output directory (see --help)') 239 | 240 | output_dir = args[0] 241 | if os.path.exists(output_dir): 242 | die('Output directory %r already exists.', output_dir) 243 | 244 | os.makedirs(output_dir, int('0755', 8)) 245 | print('Running copy to %r....' % (output_dir,)) 246 | ENV.copy(output_dir, compact=opts.compact) 247 | 248 | 249 | def cmd_copyfd(opts, args): 250 | if args: 251 | die('"copyfd" command takes no arguments (see --help)') 252 | 253 | try: 254 | fp = os.fdopen(opts.out_fd, 'w', 0) 255 | except OSError: 256 | e = sys.exc_info()[1] 257 | die('Bad --out-fd %d: %s', opts.out_fd, e) 258 | 259 | ENV.copyfd(opts.out_fd) 260 | 261 | 262 | def cmd_dump(opts, args): 263 | db_map = db_map_from_args(args) 264 | with ENV.begin(buffers=True) as txn: 265 | for dbname, (db, path) in db_map.items(): 266 | with open(path, 'wb', BUF_SIZE) as fp: 267 | print('Dumping to %r...' 
% (path,)) 268 | cursor = txn.cursor(db=db) 269 | dump_cursor_to_fp(cursor, fp) 270 | 271 | 272 | def restore_cursor_from_fp(txn, fp, db): 273 | read = fp.read 274 | read1 = functools.partial(read, 1) 275 | read_until = lambda sep: ''.join(iter(read1, sep)) # NOQA: E731 276 | 277 | rec_nr = 0 278 | 279 | while True: 280 | rec_nr += 1 281 | plus = read(1) 282 | if plus == '\n': 283 | break 284 | elif plus != '+': 285 | die('bad or missing plus, line/record #%d', rec_nr) 286 | 287 | try: 288 | klen = int(read_until(','), 10) 289 | dlen = int(read_until(':'), 10) 290 | except ValueError: 291 | die('bad or missing length, line/record #%d', rec_nr) 292 | 293 | key = read(klen) 294 | if read(2) != '->': 295 | die('bad or missing separator, line/record #%d', rec_nr) 296 | 297 | data = read(dlen) 298 | if (len(key) + len(data)) != (klen + dlen): 299 | die('short key or data, line/record #%d', rec_nr) 300 | 301 | if read(1) != '\n': 302 | die('bad line ending, line/record #%d', rec_nr) 303 | 304 | txn.put(key, data, db=db) 305 | 306 | return rec_nr 307 | 308 | 309 | def cmd_drop(opts, args): 310 | if not args: 311 | die('Must specify at least one sub-database (see --help)') 312 | 313 | dbs = map(ENV.open_db, (map(_to_bytes, args))) 314 | for idx, db in enumerate(dbs): 315 | name = args[idx] 316 | if name == ':main:': 317 | die('Cannot drop main DB') 318 | print('Dropping DB %r...' % (name,)) 319 | with ENV.begin(write=True) as txn: 320 | txn.drop(db) 321 | 322 | 323 | def cmd_readers(opts, args): 324 | if opts.clean: 325 | print('Cleaned %d stale entries.' % (ENV.reader_check(),)) 326 | print(ENV.readers()) 327 | 328 | 329 | def cmd_restore(opts, args): 330 | db_map = db_map_from_args(args) 331 | with ENV.begin(buffers=True, write=True) as txn: 332 | for dbname, (db, path) in db_map.items(): 333 | with open(path, 'rb', BUF_SIZE) as fp: 334 | print('Restoring from %r...' 
% (path,)) 335 | count = restore_cursor_from_fp(txn, fp, db) 336 | print('Loaded %d keys from %r' % (count, path)) 337 | 338 | 339 | def delta(hst): 340 | return [(hst[i] - hst[i - 1]) for i in range(1, len(hst))] 341 | 342 | 343 | SYS_BLOCK = '/sys/block' 344 | 345 | 346 | def _find_diskstat(path): 347 | if not os.path.exists(SYS_BLOCK): 348 | return 349 | st = os.stat(path) 350 | devs = '%s:%s' % (st.st_dev >> 8, st.st_dev & 0xff) 351 | 352 | def maybe(rootpath): 353 | dpath = os.path.join(rootpath, 'dev') 354 | if os.path.exists(dpath): 355 | with open(dpath) as fp: 356 | if fp.read().strip() == devs: 357 | return os.path.join(rootpath, 'stat') 358 | 359 | for name in os.listdir(SYS_BLOCK): 360 | basepath = os.path.join(SYS_BLOCK, name) 361 | statpath = maybe(basepath) 362 | if statpath: 363 | return statpath 364 | for name in os.listdir(basepath): 365 | base2path = os.path.join(basepath, name) 366 | statpath = maybe(base2path) 367 | if statpath: 368 | return statpath 369 | 370 | 371 | class DiskStatter(object): 372 | FIELDS = ( 373 | 'reads', 374 | 'reads_merged', 375 | 'sectors_read', 376 | 'read_ms', 377 | 'writes', 378 | 'writes_merged', 379 | 'sectors_written', 380 | 'write_ms', 381 | 'io_count', 382 | 'io_ms', 383 | 'total_ms' 384 | ) 385 | 386 | def __init__(self, path): 387 | self.fp = open(path) 388 | self.refresh() 389 | 390 | def refresh(self): 391 | self.fp.seek(0) 392 | vars(self).update((self.FIELDS[i], int(s)) 393 | for i, s in enumerate(self.fp.read().split())) 394 | 395 | 396 | def cmd_watch(opts, args): 397 | info = None 398 | stat = None 399 | 400 | def window(func): 401 | history = collections.deque() 402 | 403 | def windowfunc(): 404 | history.append(func()) 405 | if len(history) > opts.window: 406 | history.popleft() 407 | if len(history) <= 1: 408 | return 0 409 | n = sum(delta(history)) / float(len(history) - 1) 410 | return n / opts.interval 411 | return windowfunc 412 | 413 | envmb = lambda: (info['last_pgno'] * stat['psize']) / 1048576. 
# NOQA 414 | 415 | cols = [ 416 | ('%d', 'Depth', lambda: stat['depth']), 417 | ('%d', 'Branch', lambda: stat['branch_pages']), 418 | ('%d', 'Leaf', lambda: stat['leaf_pages']), 419 | ('%+d', 'Leaf/s', window(lambda: stat['leaf_pages'])), 420 | ('%d', 'Oflow', lambda: stat['overflow_pages']), 421 | ('%+d', 'Oflow/s', window(lambda: stat['overflow_pages'])), 422 | ('%d', 'Recs', lambda: stat['entries']), 423 | ('%+d', 'Recs/s', window(lambda: stat['entries'])), 424 | ('%d', 'Rdrs', lambda: info['num_readers']), 425 | ('%.2f', 'EnvMb', envmb), 426 | ('%+.2f', 'EnvMb/s', window(envmb)), 427 | ('%d', 'Txs', lambda: info['last_txnid']), 428 | ('%+.2f', 'Txs/s', window(lambda: info['last_txnid'])) 429 | ] 430 | 431 | statter = None 432 | statpath = _find_diskstat(ENV.path()) 433 | if statpath: 434 | statter = DiskStatter(statpath) 435 | cols += [ 436 | ('%+d', 'SctRd/s', window(lambda: statter.sectors_read)), 437 | ('%+d', 'SctWr/s', window(lambda: statter.sectors_written)), 438 | ] 439 | 440 | term_width = 0 441 | widths = [len(head) for _, head, _ in cols] 442 | 443 | if opts.csv: 444 | writer = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL) 445 | writer.writerow([head for _, head, _ in cols]) 446 | 447 | cnt = 0 448 | try: 449 | while True: 450 | stat = ENV.stat() 451 | info = ENV.info() 452 | if statter: 453 | statter.refresh() 454 | 455 | vals = [] 456 | for i, (fmt, head, func) in enumerate(cols): 457 | val = fmt % func() 458 | vals.append(val) 459 | widths[i] = max(widths[i], len(val)) 460 | 461 | if opts.csv: 462 | writer.writerow(vals) 463 | else: 464 | if term_width != _TERM_WIDTH or not (cnt % (_TERM_HEIGHT - 2)): 465 | for i, (fmt, head, func) in enumerate(cols): 466 | sys.stdout.write(head.rjust(widths[i] + 1)) 467 | sys.stdout.write('\n') 468 | term_width = _TERM_WIDTH 469 | for i, val in enumerate(vals): 470 | sys.stdout.write(val.rjust(widths[i] + 1)) 471 | sys.stdout.write('\n') 472 | 473 | time.sleep(opts.interval) 474 | cnt += 1 475 | except KeyboardInterrupt: 476 | pass 477 | 478 | 479 | def cmd_warm(opts, args): 480 | stat = ENV.stat() 481 | info = ENV.info() 482 | 483 | bufsize = 32768 484 | last_offset = stat['psize'] * info['last_pgno'] 485 | buf = array.array('B', _to_bytes('\x00' * bufsize)) 486 | t0 = time.time() 487 | 488 | if opts.use_single_file: 489 | fp = open(opts.env, 'rb', bufsize) 490 | else: 491 | fp = open(opts.env + '/data.mdb', 'rb', bufsize) 492 | while fp.tell() < last_offset: 493 | fp.readinto(buf) 494 | print('Warmed %.2fmb in %dms' % 495 | (last_offset / 1048576., 1000 * (time.time() - t0))) 496 | 497 | 498 | def cmd_rewrite(opts, args): 499 | if not opts.target_env: 500 | die('Must specify target environment path with -E') 501 | 502 | src_info = ENV.info() 503 | target_env = lmdb.open(opts.target_env, 504 | map_size=src_info['map_size'] * 2, 505 | max_dbs=opts.max_dbs, sync=False, 506 | writemap=True, map_async=True, 507 | metasync=False) 508 | 509 | dbs = [] 510 | for arg in args: 511 | name = None if arg == ':main:' else arg 512 | src_db = ENV.open_db(name) 513 | dst_db = target_env.open_db(name) 514 | dbs.append((arg, src_db, dst_db)) 515 | 516 | if not dbs: 517 | dbs.append((':main:', ENV.open_db(None), target_env.open_db(None))) 518 | 519 | for name, src_db, dst_db in dbs: 520 | print('Writing %r...' 
% (name,)) 521 | with target_env.begin(db=dst_db, write=True) as wtxn: 522 | with ENV.begin(db=src_db, buffers=True) as rtxn: 523 | for key, value in rtxn.cursor(): 524 | wtxn.put(key, value, append=True) 525 | 526 | print('Syncing..') 527 | target_env.sync(True) 528 | 529 | 530 | def cmd_get(opts, args): 531 | print_header = len(args) > 1 532 | 533 | with ENV.begin(buffers=True, db=DB) as txn: 534 | for arg in args: 535 | value = txn.get(_to_bytes(arg)) 536 | if value is None: 537 | print('%r: missing' % (arg,)) 538 | continue 539 | if print_header: 540 | print('%r:' % (arg,)) 541 | if opts.xxd: 542 | print(xxd(value)) 543 | else: 544 | print(value) 545 | 546 | 547 | def cmd_edit(opts, args): 548 | if args: 549 | die('Edit command only takes options, not arguments (see --help)') 550 | 551 | with ENV.begin(write=True) as txn: 552 | cursor = txn.cursor(db=DB) 553 | for elem in opts.add or []: 554 | key, _, value = _to_bytes(elem).partition(_to_bytes('=')) 555 | cursor.put(key, value, overwrite=False) 556 | 557 | for elem in opts.set or []: 558 | key, _, value = _to_bytes(elem).partition(_to_bytes('=')) 559 | cursor.put(key, value) 560 | 561 | for key in opts.delete or []: 562 | txn.delete(_to_bytes(key), db=DB) 563 | 564 | for elem in opts.add_file or []: 565 | key, _, path = _to_bytes(elem).partition(_to_bytes('=')) 566 | with open(path, 'rb') as fp: 567 | cursor.put(key, fp.read(), overwrite=False) 568 | 569 | for elem in opts.set_file or []: 570 | key, _, path = _to_bytes(elem).partition(_to_bytes('=')) 571 | with open(path, 'rb') as fp: 572 | cursor.put(key, fp.read()) 573 | 574 | 575 | def cmd_shell(opts, args): 576 | import code 577 | import readline # NOQA 578 | code.InteractiveConsole(globals()).interact() 579 | 580 | 581 | def cmd_stat(opts, args): 582 | pprint.pprint(ENV.stat()) 583 | pprint.pprint(ENV.info()) 584 | 585 | 586 | def _get_term_width(default=(80, 25)): 587 | try: 588 | import fcntl # No fcntl on win32 589 | import termios # No termios on win32 590 | s = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234') 591 | height, width = struct.unpack('hh', s) 592 | return width, height 593 | except: 594 | return default 595 | 596 | 597 | def _on_sigwinch(*args): 598 | global _TERM_WIDTH, _TERM_HEIGHT 599 | _TERM_WIDTH, _TERM_HEIGHT = _get_term_width() 600 | 601 | 602 | def main(): 603 | parser = make_parser() 604 | opts, args = parser.parse_args() 605 | 606 | if not args: 607 | die('Please specify a command (see --help)') 608 | if not opts.env: 609 | die('Please specify environment (--env)') 610 | 611 | global ENV 612 | ENV = lmdb.open(opts.env, map_size=opts.map_size * 1048576, subdir=not opts.use_single_file, 613 | max_dbs=opts.max_dbs, create=False, readonly=opts.read == 'READ') 614 | 615 | if opts.db: 616 | global DB 617 | DB = ENV.open_db(opts.db) 618 | 619 | if hasattr(signal, 'SIGWINCH'): # Disable on win32. 
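# _on_sigwinch() keeps the cached terminal dimensions fresh so cmd_watch can
# redraw its column headers after the window is resized.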
620 | signal.signal(signal.SIGWINCH, _on_sigwinch) 621 | _on_sigwinch() 622 | 623 | func = globals().get('cmd_' + args[0]) 624 | if not func: 625 | die('No such command: %r' % (args[0],)) 626 | 627 | func(opts, args[1:]) 628 | 629 | 630 | if __name__ == '__main__': 631 | main() 632 | -------------------------------------------------------------------------------- /misc/cursor-del-break.c: -------------------------------------------------------------------------------- 1 | // http://www.openldap.org/its/index.cgi/Software%20Bugs?id=7722 2 | // gcc -g -I src/py-lmdb/lib -o cursor-del-break cursor-del-break.c 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "lmdb.h" 10 | #include "mdb.c" 11 | #include "midl.c" 12 | 13 | 14 | void check(int x) 15 | { 16 | if(x) { 17 | fprintf(stderr, "eek %s\n", mdb_strerror(x)); 18 | _exit(1); 19 | } 20 | } 21 | 22 | #define RECS 2048 23 | #define DB_PATH "/ram/tdb" 24 | 25 | MDB_dbi dbi; 26 | MDB_txn *txn; 27 | MDB_env *env; 28 | MDB_cursor *c1; 29 | 30 | char recpattern[256]; 31 | MDB_val keyv; 32 | MDB_val valv; 33 | 34 | void new_txn(void) 35 | { 36 | if(txn) { 37 | fprintf(stderr, "commit\n"); 38 | check(mdb_txn_commit(txn)); 39 | } 40 | check(mdb_txn_begin(env, NULL, 0, &txn)); 41 | } 42 | 43 | int main(void) 44 | { 45 | check(mdb_env_create(&env)); 46 | check(mdb_env_set_mapsize(env, 1048576UL*1024UL*3UL)); 47 | check(mdb_env_set_maxreaders(env, 126)); 48 | check(mdb_env_set_maxdbs(env, 1)); 49 | if(! access(DB_PATH, X_OK)) { 50 | system("rm -rf " DB_PATH); 51 | } 52 | check(mkdir(DB_PATH, 0777)); 53 | check(mdb_env_open(env, DB_PATH, MDB_MAPASYNC|MDB_NOSYNC|MDB_NOMETASYNC, 0644)); 54 | new_txn(); 55 | check(mdb_dbi_open(txn, NULL, 0, &dbi)); 56 | 57 | // make pattern 58 | int i; 59 | for(i = 0; i < sizeof recpattern; i++) { 60 | recpattern[i] = i % 256; 61 | } 62 | 63 | for(i = 0; i < RECS; i++) { 64 | char keybuf[40]; 65 | keyv.mv_size = sprintf(keybuf, "%08x", i); 66 | keyv.mv_data = keybuf; 67 | valv.mv_size = sizeof recpattern; 68 | valv.mv_data = recpattern; 69 | check(mdb_put(txn, dbi, &keyv, &valv, 0)); 70 | } 71 | 72 | new_txn(); 73 | 74 | check(mdb_cursor_open(txn, dbi, &c1)); 75 | check(mdb_cursor_get(c1, &keyv, &valv, MDB_FIRST)); 76 | check(mdb_del(txn, dbi, &keyv, NULL)); 77 | 78 | for(i = 1; i < RECS; i++) { 79 | check(mdb_cursor_get(c1, &keyv, &valv, MDB_NEXT)); 80 | char keybuf[40]; 81 | int sz = sprintf(keybuf, "%08x", i); 82 | check((!(sz==keyv.mv_size)) || memcmp(keyv.mv_data, keybuf, sz)); 83 | check(memcmp(valv.mv_data, recpattern, sizeof recpattern)); 84 | printf("%d\n", i); 85 | check(mdb_del(txn, dbi, &keyv, NULL)); 86 | } 87 | 88 | new_txn(); 89 | } 90 | -------------------------------------------------------------------------------- /misc/cursor_put_pyparse.diff: -------------------------------------------------------------------------------- 1 | diff --git a/lmdb/cpython.c b/lmdb/cpython.c 2 | index dd1c8b9..ced5ea3 100644 3 | --- a/lmdb/cpython.c 4 | +++ b/lmdb/cpython.c 5 | @@ -2319,11 +2319,25 @@ cursor_put(CursorObject *self, PyObject *args, PyObject *kwds) 6 | {ARG_BOOL, OVERWRITE_S, OFFSET(cursor_put, overwrite)}, 7 | {ARG_BOOL, APPEND_S, OFFSET(cursor_put, append)} 8 | }; 9 | + static char *keywords[] = { 10 | + "key", "value", "dupdata", "overwrite", "append", NULL 11 | + }; 12 | + PyObject *key, *val; 13 | int flags; 14 | int rc; 15 | 16 | static PyObject *cache = NULL; 17 | - if(parse_args(self->valid, SPECSIZE(), argspec, &cache, args, kwds, &arg)) { 18 | + if(! 
self->valid) { 19 | + return err_invalid(); 20 | + } 21 | + if(! PyArg_ParseTupleAndKeywords(args, kwds, "OO|iii", keywords, 22 | + &key, &val, &arg.dupdata, &arg.overwrite, 23 | + &arg.append)) { 24 | + return NULL; 25 | + } 26 | + 27 | + if(val_from_buffer(&arg.key, key) || 28 | + val_from_buffer(&arg.val, val)) { 29 | return NULL; 30 | } 31 | 32 | -------------------------------------------------------------------------------- /misc/gdb.commands: -------------------------------------------------------------------------------- 1 | # vim:syntax=gdb 2 | # 3 | # Print a backtrace if a program crashes. Run using: 4 | # gdb -x misc/gdb.commands --args argv0 argv1 .. 5 | # 6 | 7 | set confirm off 8 | 9 | define hook-stop 10 | init-if-undefined $_exitcode = 999 11 | if $_exitcode == 999 12 | echo Abnormal stop.\n 13 | backtrace 14 | quit 2 15 | else 16 | echo Normal exit.\n 17 | quit $_exitcode 18 | end 19 | end 20 | 21 | run 22 | -------------------------------------------------------------------------------- /misc/helpers.sh: -------------------------------------------------------------------------------- 1 | quiet() { 2 | "$@" > /tmp/$$ || { cat /tmp/$$; return 1; } 3 | } 4 | 5 | clean() { 6 | git clean -qdfx 7 | find /usr/local/lib -name '*lmdb*' | xargs rm -rf 8 | find /usr/lib -name '*lmdb*' | xargs rm -rf 9 | } 10 | 11 | with_gdb() { 12 | gdb --batch -x misc/gdb.commands --args "$@" 13 | } 14 | 15 | native() { 16 | clean 17 | unset LMDB_FORCE_CFFI 18 | export LMDB_FORCE_CPYTHON=1 19 | quiet $1 setup.py develop 20 | quiet $1 -c 'import lmdb.cpython' 21 | with_gdb $1 -m pytest tests || fail=1 22 | } 23 | 24 | cffi() { 25 | clean 26 | unset LMDB_FORCE_CPYTHON 27 | export LMDB_FORCE_CFFI=1 28 | quiet $1 setup.py develop 29 | quiet $1 -c 'import lmdb.cffi' 30 | with_gdb $1 -m pytest tests || fail=1 31 | } 32 | -------------------------------------------------------------------------------- /misc/its7733.c: -------------------------------------------------------------------------------- 1 | // http://www.openldap.org/its/index.cgi/Software%20Bugs?id=7733 2 | // gcc -g -I ../lib -o its7733 its7733.c 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "lmdb.h" 10 | #include "mdb.c" 11 | #include "midl.c" 12 | 13 | 14 | void check(int x) 15 | { 16 | if(x) { 17 | fprintf(stderr, "eek %s\n", mdb_strerror(x)); 18 | _exit(1); 19 | } 20 | } 21 | 22 | #define DB_PATH "/ram/tdb" 23 | 24 | MDB_dbi dbi; 25 | MDB_txn *txn; 26 | MDB_env *env; 27 | MDB_cursor *c1; 28 | 29 | MDB_val keyv; 30 | MDB_val valv; 31 | 32 | void new_txn(void) 33 | { 34 | if(txn) { 35 | fprintf(stderr, "commit\n"); 36 | check(mdb_txn_commit(txn)); 37 | } 38 | check(mdb_txn_begin(env, NULL, 0, &txn)); 39 | } 40 | 41 | 42 | void put(const char *k) 43 | { 44 | keyv.mv_size = strlen(k); 45 | keyv.mv_data = k; 46 | valv.mv_size = 0; 47 | valv.mv_data = ""; 48 | check(mdb_put(txn, dbi, &keyv, &valv, 0)); 49 | } 50 | 51 | int main(void) 52 | { 53 | check(mdb_env_create(&env)); 54 | check(mdb_env_set_mapsize(env, 1048576UL*1024UL*3UL)); 55 | check(mdb_env_set_maxreaders(env, 126)); 56 | check(mdb_env_set_maxdbs(env, 1)); 57 | if(! 
access(DB_PATH, X_OK)) { 58 | system("rm -rf " DB_PATH); 59 | } 60 | check(mkdir(DB_PATH, 0777)); 61 | check(mdb_env_open(env, DB_PATH, MDB_MAPASYNC|MDB_NOSYNC|MDB_NOMETASYNC, 0644)); 62 | new_txn(); 63 | check(mdb_dbi_open(txn, NULL, 0, &dbi)); 64 | 65 | put("a"); 66 | put("b"); 67 | put("baa"); 68 | put("d"); 69 | 70 | new_txn(); 71 | 72 | check(mdb_cursor_open(txn, dbi, &c1)); 73 | check(mdb_cursor_get(c1, &keyv, &valv, MDB_LAST)); 74 | check(mdb_cursor_del(c1, 0)); 75 | check(mdb_cursor_del(c1, 0)); 76 | new_txn(); 77 | } 78 | -------------------------------------------------------------------------------- /misc/readers_mrb_env.patch: -------------------------------------------------------------------------------- 1 | Store the address of the MDB_env structure that owns a reader in addition to 2 | its PID and TID, to allow multiple MDB_envs to be open on the same thread, 3 | since on mdb_env_close(), LMDB unconditionally obliterates any readers with a 4 | matching PID. This patch extends the test to (PID, MDB_env*). 5 | 6 | diff --git a/lib/mdb.c b/lib/mdb.c 7 | index fd0a3b5..f2ebdfa 100644 8 | --- a/lib/mdb.c 9 | +++ b/lib/mdb.c 10 | @@ -536,6 +536,8 @@ typedef struct MDB_rxbody { 11 | txnid_t mrb_txnid; 12 | /** The process ID of the process owning this reader txn. */ 13 | MDB_PID_T mrb_pid; 14 | + /** MDB_env within the process owning this reader txn. */ 15 | + void * mrb_env; 16 | /** The thread ID of the thread owning this txn. */ 17 | pthread_t mrb_tid; 18 | } MDB_rxbody; 19 | @@ -547,6 +549,7 @@ typedef struct MDB_reader { 20 | /** shorthand for mrb_txnid */ 21 | #define mr_txnid mru.mrx.mrb_txnid 22 | #define mr_pid mru.mrx.mrb_pid 23 | +#define mr_env mru.mrx.mrb_env 24 | #define mr_tid mru.mrx.mrb_tid 25 | /** cache line alignment */ 26 | char pad[(sizeof(MDB_rxbody)+CACHELINE-1) & ~(CACHELINE-1)]; 27 | @@ -2285,6 +2288,7 @@ mdb_txn_renew0(MDB_txn *txn) 28 | return MDB_READERS_FULL; 29 | } 30 | ti->mti_readers[i].mr_pid = pid; 31 | + ti->mti_readers[i].mr_env = env; 32 | ti->mti_readers[i].mr_tid = tid; 33 | if (i == nr) 34 | ti->mti_numreaders = ++nr; 35 | @@ -4254,7 +4258,8 @@ mdb_env_close0(MDB_env *env, int excl) 36 | * me_txkey with its destructor must be disabled first. 37 | */ 38 | for (i = env->me_numreaders; --i >= 0; ) 39 | - if (env->me_txns->mti_readers[i].mr_pid == pid) 40 | + if (env->me_txns->mti_readers[i].mr_pid == pid 41 | + && env->me_txns->mti_readers[i].mr_env == env) 42 | env->me_txns->mti_readers[i].mr_pid = 0; 43 | #ifdef _WIN32 44 | if (env->me_rmutex) { 45 | -------------------------------------------------------------------------------- /misc/run_in_vm.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 
21 | # 22 | 23 | from __future__ import absolute_import 24 | import atexit 25 | import json 26 | import os 27 | import shutil 28 | import socket 29 | import subprocess 30 | import sys 31 | import tempfile 32 | 33 | 34 | def run(*args): 35 | if os.path.exists('build'): 36 | shutil.rmtree('build') 37 | try: 38 | subprocess.check_call(args) 39 | except: 40 | print '!!! COMMAND WAS:', args 41 | raise 42 | 43 | 44 | def qmp_write(fp, o): 45 | buf = json.dumps(o) + '\n' 46 | fp.write(buf.replace('{', '{ ')) 47 | 48 | 49 | def qmp_read(fp): 50 | s = fp.readline() 51 | return json.loads(s) 52 | 53 | 54 | def qmp_say_hello(fp): 55 | assert 'QMP' in qmp_read(fp) 56 | qmp_write(fp, {'execute': 'qmp_capabilities'}) 57 | assert qmp_read(fp)['return'] == {} 58 | 59 | 60 | def qmp_command(fp, name, args): 61 | qmp_write(fp, {'execute': name, 'arguments': args}) 62 | while True: 63 | o = qmp_read(fp) 64 | if 'return' not in o: 65 | print 'skip', o 66 | continue 67 | print 'cmd out', o 68 | return o['return'] 69 | 70 | 71 | def qmp_monitor(fp, cmd): 72 | return qmp_command(fp, 'human-monitor-command', { 73 | 'command-line': cmd 74 | }) 75 | 76 | 77 | def main(): 78 | vm = sys.argv[1] 79 | cmdline = sys.argv[2:] 80 | 81 | rsock, wsock = socket.socketpair() 82 | rfp = rsock.makefile('r+b', 1) 83 | 84 | qemu_path = '/usr/local/bin/qemu-system-x86_64' 85 | qemu_args = ['sudo', qemu_path, '-enable-kvm', '-m', '1024', 86 | '-qmp', 'stdio', '-nographic', '-S', 87 | '-vnc', '127.0.0.1:0', 88 | '-net', 'user,hostfwd=tcp:127.0.0.1:9422-:22', 89 | '-net', 'nic,model=virtio', 90 | '-drive', 'file=%s,if=virtio' % (vm,)] 91 | print ' '.join(qemu_args).replace('qmp', 'monitor') 92 | exit() 93 | proc = subprocess.Popen(qemu_args, 94 | stdin=wsock.fileno(), stdout=wsock.fileno() 95 | ) 96 | 97 | qmp_say_hello(rfp) 98 | assert '' == qmp_monitor(rfp, 'loadvm 1') 99 | assert '' == qmp_monitor(rfp, 'cont') 100 | import time 101 | time.sleep(100) 102 | qmp_monitor(rfp, 'quit') 103 | 104 | if __name__ == '__main__': 105 | main() 106 | -------------------------------------------------------------------------------- /misc/runtests-travisci.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | source misc/helpers.sh 4 | 5 | quiet() { 6 | "$@" > /tmp/$$ || { cat /tmp/$$; return 1; } 7 | } 8 | 9 | # Delete Travis PyPy or it'll supercede the PPA version. 
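# Preinstalled setuptools copies are likewise removed so the ez_setup
# bootstrap below can install fresh copies for the Python 2.5/2.6 interpreters.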
10 | rm -rf /usr/local/pypy/bin /usr/local/lib/pypy2.7 11 | find /usr/lib -name '*setuptools*' | xargs rm -rf 12 | find /usr/local/lib -name '*setuptools*' | xargs rm -rf 13 | 14 | quiet add-apt-repository -y ppa:fkrull/deadsnakes 15 | quiet add-apt-repository -y ppa:pypy 16 | quiet apt-get -qq update 17 | quiet apt-get install --force-yes -qq python{2.5,2.6}-dev libffi-dev gdb 18 | 19 | wget -qO ez_setup_24.py \ 20 | https://raw.githubusercontent.com/pypa/setuptools/bootstrap-py24/ez_setup.py 21 | wget -q https://raw.githubusercontent.com/pypa/setuptools/bootstrap/ez_setup.py 22 | 23 | quiet python2.5 ez_setup_24.py 24 | quiet python2.6 ez_setup.py 25 | 26 | quiet python2.5 -measy_install py==1.4.20 pytest==2.5.2 27 | quiet python2.6 -measy_install py==1.4.20 pytest==2.5.2 cffi 28 | 29 | native python2.5 30 | native python2.6 31 | cffi python2.6 32 | 33 | 34 | [ "$fail" ] && exit 1 35 | exit 0 36 | -------------------------------------------------------------------------------- /misc/test_monster_acid_trace.diff: -------------------------------------------------------------------------------- 1 | diff --git a/tests/crash_test.py b/tests/crash_test.py 2 | index 1ac3b46..3785bd5 100644 3 | --- a/tests/crash_test.py 4 | +++ b/tests/crash_test.py 5 | @@ -173,21 +173,43 @@ class MultiCursorDeleteTest(unittest.TestCase): 6 | c2.delete() 7 | assert next(c1f) == B('eeee') 8 | 9 | + def _trace(self, op, *args): 10 | + bits = [] 11 | + for arg in args: 12 | + if isinstance(arg, bytes): 13 | + bits.append(arg.encode('hex')) 14 | + elif isinstance(arg, bool): 15 | + bits.append(bytes(int(arg))) 16 | + self.fp.write('%s %s %s\n' % (self.idx, op, ' '.join(bits))) 17 | + 18 | def test_monster(self): 19 | # Generate predictable sequence of sizes. 20 | rand = random.Random() 21 | rand.seed(0) 22 | 23 | + self.fp = open('trace.out', 'w') 24 | + self._counter = 0 25 | + self.idx = 0 26 | + 27 | txn = self.env.begin(write=True) 28 | keys = [] 29 | for i in range(20000): 30 | key = B('%06x' % i) 31 | val = B('x' * rand.randint(76, 350)) 32 | + self._trace('put', key, val) 33 | assert txn.put(key, val) 34 | keys.append(key) 35 | 36 | + 37 | + iter_id = self._counter 38 | + self._counter += 1 39 | + self._trace('iter', iter_id, '0', False) 40 | + 41 | deleted = 0 42 | for key in txn.cursor().iternext(values=False): 43 | + self._trace('fetch', iter_id) 44 | + self._trace('yield', iter_id, key, '') 45 | + self._trace('delete', key) 46 | assert txn.delete(key), key 47 | deleted += 1 48 | 49 | -------------------------------------------------------------------------------- /misc/tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | ; py25_c disabled because TRAVISCI RAGE 3 | envlist = pypy_cffi,py26_cffi,py27_cffi,py30_cffi,py31_cffi,py32_cffi,py33_cffi,py26_c,py27_c,py33_c 4 | skipsdist = True 5 | 6 | [testenv] 7 | install_command = pip install {opts} {packages} 8 | commands = 9 | pip install -e . 
10 | python -mpytest ./tests 11 | 12 | [testenv:pypy_cffi] 13 | basepython = pypy 14 | deps = 15 | pytest 16 | 17 | [testenv:py26_cffi] 18 | basepython = python2.6 19 | deps = 20 | pytest 21 | cffi 22 | setenv = LMDB_FORCE_CFFI=1 23 | 24 | [testenv:py27_cffi] 25 | basepython = python2.7 26 | deps = 27 | pytest 28 | cffi 29 | setenv = LMDB_FORCE_CFFI=1 30 | 31 | [testenv:py30_cffi] 32 | basepython = python3.0 33 | deps = 34 | pytest 35 | cffi 36 | setenv = LMDB_FORCE_CFFI=1 37 | 38 | [testenv:py31_cffi] 39 | basepython = python3.1 40 | deps = 41 | pytest 42 | cffi 43 | setenv = LMDB_FORCE_CFFI=1 44 | 45 | [testenv:py32_cffi] 46 | basepython = python3.2 47 | deps = 48 | pytest 49 | cffi 50 | setenv = LMDB_FORCE_CFFI=1 51 | 52 | [testenv:py33_cffi] 53 | basepython = python3.3 54 | deps = 55 | pytest 56 | cffi 57 | setenv = LMDB_FORCE_CFFI=1 58 | 59 | [testenv:py34_cffi] 60 | basepython = python3.4 61 | deps = 62 | pytest 63 | cffi 64 | setenv = LMDB_FORCE_CFFI=1 65 | 66 | [testenv:py25_c] 67 | deps = 68 | pytest 69 | setenv = PIP_INSECURE=1 70 | basepython = python2.5 71 | 72 | [testenv:py26_c] 73 | deps = 74 | pytest 75 | basepython = python2.6 76 | 77 | [testenv:py27_c] 78 | deps = 79 | pytest 80 | basepython = python2.7 81 | 82 | [testenv:py33_c] 83 | deps = 84 | pytest 85 | basepython = python3.3 86 | 87 | ;[testenv:py34_c] 88 | ;basepython = python3.4 89 | -------------------------------------------------------------------------------- /misc/windows_build.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 21 | # 22 | 23 | from __future__ import absolute_import 24 | import glob 25 | import os 26 | import shutil 27 | import subprocess 28 | import sys 29 | import tempfile 30 | 31 | INTERPS = ( 32 | ('Python26', False), 33 | ('Python26-64', False), 34 | ('Python27', False), 35 | ('Python27-64', False), 36 | #('Python31', False), 37 | #('Python31-64', False), 38 | #('Python32', False), 39 | ('Python32-64', False), 40 | ('Python33', False), 41 | ('Python33-64', False), 42 | ('Python34', False), 43 | ('Python34-64', False), 44 | ('Python35', False), 45 | ('Python35-64', False), 46 | ('Python36', False), 47 | ('Python36-64', False), 48 | ) 49 | 50 | 51 | def interp_path(interp): 52 | return r'C:\%s\Python' % (interp,) 53 | 54 | def pip_path(interp): 55 | return os.path.join(os.path.dirname(interp), 56 | 'scripts', 'pip.exe') 57 | 58 | def interp_has_module(path, module): 59 | return run_or_false(path, '-c', 'import ' + module) 60 | 61 | 62 | def run(*args): 63 | if os.path.exists('build'): 64 | shutil.rmtree('build') 65 | try: 66 | subprocess.check_call(args) 67 | except: 68 | print '!!! 
COMMAND WAS:', args 69 | raise 70 | 71 | 72 | def run_or_false(*args): 73 | try: 74 | run(*args) 75 | except subprocess.CalledProcessError: 76 | return False 77 | return True 78 | 79 | 80 | def main(): 81 | run('git', 'clean', '-dfx', 'dist') 82 | for interp, is_cffi in INTERPS: 83 | path = interp_path(interp) 84 | run('git', 'clean', '-dfx', 'build', 'temp', 'lmdb') 85 | run(pip_path(path), 'install', '-e', '.') 86 | if is_cffi: 87 | os.environ['LMDB_FORCE_CFFI'] = '1' 88 | os.environ.pop('LMDB_FORCE_CPYTHON', '') 89 | else: 90 | os.environ['LMDB_FORCE_CPYTHON'] = '1' 91 | os.environ.pop('LMDB_FORCE_CFFI', '') 92 | if os.path.exists('lmdb\\cpython.pyd'): 93 | os.unlink('lmdb\\cpython.pyd') 94 | #run(path, '-mpy.test') 95 | run(path, 'setup.py', 'bdist_egg') 96 | run(path, 'setup.py', 'bdist_wheel') 97 | run(sys.executable, '-m', 'twine', 'upload', 98 | '--skip-existing', *glob.glob('dist/*')) 99 | 100 | if __name__ == '__main__': 101 | main() 102 | -------------------------------------------------------------------------------- /misc/windows_setup.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 21 | # 22 | 23 | from __future__ import absolute_import 24 | import os 25 | import urllib 26 | 27 | from windows_build import interp_has_module 28 | from windows_build import interp_path 29 | from windows_build import INTERPS 30 | from windows_build import run 31 | from windows_build import run_or_false 32 | 33 | EZSETUP_URL = ('https://bitbucket.org/pypa/setuptools' 34 | '/raw/bootstrap/ez_setup.py') 35 | 36 | 37 | def ezsetup_path(): 38 | path = os.path.join(os.environ['TEMP'], 'ez_setup.py') 39 | if not os.path.exists(path): 40 | fp = urllib.urlopen(EZSETUP_URL) 41 | with open(path, 'wb') as fp2: 42 | fp2.write(fp.read()) 43 | fp.close() 44 | return path 45 | 46 | 47 | def easy_install_path(interp): 48 | return os.path.join(os.path.dirname(interp), 49 | 'scripts', 'easy_install.exe') 50 | 51 | 52 | def main(): 53 | for interp, is_cffi in INTERPS: 54 | path = interp_path(interp) 55 | run_or_false(path, '-m', 'ensurepip') 56 | if not interp_has_module(path, 'easy_install'): 57 | run(path, ezsetup_path()) 58 | for pkg in 'pip', 'cffi', 'pytest', 'wheel': 59 | modname = 'py.test' if pkg == 'pytest' else pkg 60 | if not interp_has_module(path, modname): 61 | run(easy_install_path(path), pkg) 62 | 63 | 64 | if __name__ == '__main__': 65 | main() 66 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2013 The py-lmdb authors, all rights reserved. 
3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 21 | # 22 | 23 | from __future__ import absolute_import 24 | from __future__ import with_statement 25 | 26 | import os 27 | import sys 28 | import platform 29 | 30 | from setuptools import Extension 31 | from setuptools import setup 32 | 33 | try: 34 | import memsink 35 | except ImportError: 36 | memsink = None 37 | 38 | 39 | if hasattr(platform, 'python_implementation'): 40 | use_cpython = platform.python_implementation() == 'CPython' 41 | else: 42 | use_cpython = True 43 | 44 | if os.getenv('LMDB_FORCE_CFFI') is not None: 45 | use_cpython = False 46 | 47 | if sys.version[:3] < '2.5': 48 | sys.stderr.write('Error: py-lmdb requires at least CPython 2.5\n') 49 | raise SystemExit(1) 50 | 51 | if sys.version[:3] in ('3.0', '3.1', '3.2'): 52 | use_cpython = False 53 | 54 | 55 | # 56 | # Figure out which LMDB implementation to use. 57 | # 58 | 59 | if os.getenv('LMDB_INCLUDEDIR'): 60 | extra_include_dirs = [os.getenv('LMDB_INCLUDEDIR')] 61 | else: 62 | extra_include_dirs = [] 63 | 64 | if os.getenv('LMDB_LIBDIR'): 65 | extra_library_dirs = [os.getenv('LMDB_LIBDIR')] 66 | else: 67 | extra_library_dirs = [] 68 | 69 | extra_include_dirs += ['lib/py-lmdb'] 70 | 71 | if os.getenv('LMDB_FORCE_SYSTEM') is not None: 72 | print('py-lmdb: Using system version of liblmdb.') 73 | extra_sources = [] 74 | extra_include_dirs += [] 75 | libraries = ['lmdb'] 76 | else: 77 | print('py-lmdb: Using bundled liblmdb; override with LMDB_FORCE_SYSTEM=1.') 78 | extra_sources = ['lib/mdb.c', 'lib/midl.c'] 79 | extra_include_dirs += ['lib'] 80 | libraries = [] 81 | 82 | 83 | # distutils perplexingly forces NDEBUG for package code! 84 | extra_compile_args = ['-UNDEBUG'] 85 | 86 | # Disable some Clang/GCC warnings. 87 | if not os.getenv('LMDB_MAINTAINER'): 88 | extra_compile_args += ['-w'] 89 | 90 | 91 | # Microsoft Visual Studio 9 ships with neither inttypes.h, stdint.h, or a sane 92 | # definition for ssize_t, so here we add lib/win32 to the search path, which 93 | # contains emulation header files provided by a third party. We force-include 94 | # Python.h everywhere since it has a portable definition of ssize_t, which 95 | # inttypes.h and stdint.h lack, and to avoid having to modify the LMDB source 96 | # code. Advapi32 is needed for LMDB's use of Windows security APIs. 97 | p = sys.version.find('MSC v.') 98 | msvc_ver = int(sys.version[p+6:p+10]) if p != -1 else None 99 | 100 | if sys.platform.startswith('win'): 101 | # If running on Visual Studio<=2010 we must provide . Newer 102 | # versions provide it out of the box. 
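# MSC v.1600 corresponds to Visual Studio 2010; the check below adds the
# bundled win32-stdint headers only for older compilers that lack stdint.h.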
103 | if msvc_ver and not msvc_ver >= 1600: 104 | extra_include_dirs += ['lib\\win32-stdint'] 105 | extra_include_dirs += ['lib\\win32'] 106 | extra_compile_args += [r'/FIPython.h'] 107 | libraries += ['Advapi32'] 108 | 109 | 110 | # Capture setup.py configuration for later use by cffi, otherwise the 111 | # configuration may differ, forcing a recompile (and therefore likely compile 112 | # errors). This happens even when `use_cpython` since user might want to 113 | # LMDB_FORCE_CFFI=1 during testing. 114 | with open('lmdb/_config.py', 'w') as fp: 115 | fp.write('CONFIG = dict(%r)\n\n' % (( 116 | ('extra_compile_args', extra_compile_args), 117 | ('extra_sources', extra_sources), 118 | ('extra_library_dirs', extra_library_dirs), 119 | ('extra_include_dirs', extra_include_dirs), 120 | ('libraries', libraries), 121 | ),)) 122 | 123 | 124 | if use_cpython: 125 | print('py-lmdb: Using CPython extension; override with LMDB_FORCE_CFFI=1.') 126 | install_requires = [] 127 | if memsink: 128 | extra_compile_args += ['-DHAVE_MEMSINK', 129 | '-I' + os.path.dirname(memsink.__file__)] 130 | ext_modules = [Extension( 131 | name='cpython', 132 | sources=['lmdb/cpython.c'] + extra_sources, 133 | extra_compile_args=extra_compile_args, 134 | libraries=libraries, 135 | include_dirs=extra_include_dirs, 136 | library_dirs=extra_library_dirs 137 | )] 138 | else: 139 | print('Using cffi extension.') 140 | install_requires = ['cffi>=0.8'] 141 | try: 142 | import lmdb.cffi 143 | ext_modules = [lmdb.cffi._ffi.verifier.get_extension()] 144 | except ImportError: 145 | sys.stderr.write('Could not import lmdb; ensure cffi is installed!\n') 146 | ext_modules = [] 147 | 148 | def grep_version(): 149 | path = os.path.join(os.path.dirname(__file__), 'lmdb/__init__.py') 150 | with open(path) as fp: 151 | for line in fp: 152 | if line.startswith('__version__'): 153 | return eval(line.split()[-1]) 154 | 155 | setup( 156 | name = 'lmdb', 157 | version = grep_version(), 158 | description = "Universal Python binding for the LMDB 'Lightning' Database", 159 | author = 'David Wilson', 160 | license = 'OpenLDAP BSD', 161 | url = 'http://github.com/dw/py-lmdb/', 162 | packages = ['lmdb'], 163 | classifiers = [ 164 | "Programming Language :: Python", 165 | "Programming Language :: Python :: Implementation :: CPython", 166 | "Programming Language :: Python :: Implementation :: PyPy", 167 | "Programming Language :: Python :: 2", 168 | "Programming Language :: Python :: 2.6", 169 | "Programming Language :: Python :: 2.7", 170 | "Programming Language :: Python :: 3", 171 | "Programming Language :: Python :: 3.0", 172 | "Programming Language :: Python :: 3.1", 173 | "Programming Language :: Python :: 3.2", 174 | "Programming Language :: Python :: 3.3", 175 | "Topic :: Database", 176 | "Topic :: Database :: Database Engines/Servers", 177 | ], 178 | ext_package = 'lmdb', 179 | ext_modules = ext_modules, 180 | install_requires = install_requires, 181 | zip_safe = False 182 | ) 183 | -------------------------------------------------------------------------------- /tests/crash_test.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2013 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 
7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 21 | # 22 | 23 | # This is not a test suite! More like a collection of triggers for previously 24 | # observed crashes. Want to contribute to py-lmdb? Please write a test suite! 25 | # 26 | # what happens when empty keys/ values passed to various funcs 27 | # incorrect types 28 | # try to break cpython arg parsing - too many/few/incorrect args 29 | # Various efforts to cause Python-level leaks. 30 | # 31 | 32 | from __future__ import absolute_import 33 | from __future__ import with_statement 34 | 35 | import itertools 36 | import os 37 | import random 38 | import unittest 39 | 40 | import lmdb 41 | import testlib 42 | 43 | from testlib import B 44 | from testlib import O 45 | 46 | 47 | try: 48 | next(iter([1])) 49 | except NameError: # Python2.5. 50 | def next(it): 51 | return it.next() 52 | 53 | 54 | class CrashTest(unittest.TestCase): 55 | def tearDown(self): 56 | testlib.cleanup() 57 | 58 | # Various efforts to cause segfaults. 59 | 60 | def setUp(self): 61 | self.path, self.env = testlib.temp_env() 62 | with self.env.begin(write=True) as txn: 63 | txn.put(B('dave'), B('')) 64 | txn.put(B('dave2'), B('')) 65 | 66 | def testOldCrash(self): 67 | txn = self.env.begin() 68 | dir(iter(txn.cursor())) 69 | 70 | def testCloseWithTxn(self): 71 | txn = self.env.begin(write=True) 72 | self.env.close() 73 | self.assertRaises(Exception, (lambda: list(txn.cursor()))) 74 | 75 | def testDoubleClose(self): 76 | self.env.close() 77 | self.env.close() 78 | 79 | def testDbDoubleClose(self): 80 | db = self.env.open_db(key=B('dave3')) 81 | #db.close() 82 | #db.close() 83 | 84 | def testTxnCloseActiveIter(self): 85 | with self.env.begin() as txn: 86 | it = txn.cursor().iternext() 87 | self.assertRaises(Exception, (lambda: list(it))) 88 | 89 | def testDbCloseActiveIter(self): 90 | db = self.env.open_db(key=B('dave3')) 91 | with self.env.begin(write=True) as txn: 92 | txn.put(B('a'), B('b'), db=db) 93 | it = txn.cursor(db=db).iternext() 94 | self.assertRaises(Exception, (lambda: list(it))) 95 | 96 | 97 | class IteratorTest(unittest.TestCase): 98 | def tearDown(self): 99 | testlib.cleanup() 100 | 101 | def setUp(self): 102 | self.path, self.env = testlib.temp_env() 103 | self.txn = self.env.begin(write=True) 104 | self.c = self.txn.cursor() 105 | 106 | def testEmpty(self): 107 | self.assertEqual([], list(self.c)) 108 | self.assertEqual([], list(self.c.iternext())) 109 | self.assertEqual([], list(self.c.iterprev())) 110 | 111 | def testFilled(self): 112 | testlib.putData(self.txn) 113 | self.assertEqual(testlib.ITEMS, list(self.c)) 114 | self.assertEqual(testlib.ITEMS, list(self.c)) 115 | self.assertEqual(testlib.ITEMS, list(self.c.iternext())) 116 | self.assertEqual(testlib.ITEMS[::-1], list(self.txn.cursor().iterprev())) 117 | self.assertEqual(testlib.ITEMS[::-1], list(self.c.iterprev())) 118 | self.assertEqual(testlib.ITEMS, list(self.c)) 119 | 120 | def testFilledSkipForward(self): 121 | testlib.putData(self.txn) 122 | self.c.set_range(B('b')) 123 | self.assertEqual(testlib.ITEMS[1:], 
list(self.c)) 124 | 125 | def testFilledSkipReverse(self): 126 | testlib.putData(self.txn) 127 | self.c.set_range(B('b')) 128 | self.assertEqual(testlib.REV_ITEMS[-2:], list(self.c.iterprev())) 129 | 130 | def testFilledSkipEof(self): 131 | testlib.putData(self.txn) 132 | self.assertEqual(False, self.c.set_range(B('z'))) 133 | self.assertEqual(testlib.REV_ITEMS, list(self.c.iterprev())) 134 | 135 | 136 | class BigReverseTest(unittest.TestCase): 137 | def tearDown(self): 138 | testlib.cleanup() 139 | 140 | # Test for issue with MDB_LAST+MDB_PREV skipping chunks of database. 141 | def test_big_reverse(self): 142 | path, env = testlib.temp_env() 143 | txn = env.begin(write=True) 144 | keys = [B('%05d' % i) for i in range(0xffff)] 145 | for k in keys: 146 | txn.put(k, k, append=True) 147 | assert list(txn.cursor().iterprev(values=False)) == list(reversed(keys)) 148 | 149 | 150 | class MultiCursorDeleteTest(unittest.TestCase): 151 | def tearDown(self): 152 | testlib.cleanup() 153 | 154 | def setUp(self): 155 | self.path, self.env = testlib.temp_env() 156 | 157 | def test1(self): 158 | """Ensure MDB_NEXT is ignored on `c1' when it was previously positioned 159 | on the key that `c2' just deleted.""" 160 | txn = self.env.begin(write=True) 161 | cur = txn.cursor() 162 | while cur.first(): 163 | cur.delete() 164 | 165 | for i in range(1, 10): 166 | cur.put(O(ord('a') + i) * i, B('')) 167 | 168 | c1 = txn.cursor() 169 | c1f = c1.iternext(values=False) 170 | while next(c1f) != B('ddd'): 171 | pass 172 | c2 = txn.cursor() 173 | assert c2.set_key(B('ddd')) 174 | c2.delete() 175 | assert next(c1f) == B('eeee') 176 | 177 | def test_monster(self): 178 | # Generate predictable sequence of sizes. 179 | rand = random.Random() 180 | rand.seed(0) 181 | 182 | txn = self.env.begin(write=True) 183 | keys = [] 184 | for i in range(20000): 185 | key = B('%06x' % i) 186 | val = B('x' * rand.randint(76, 350)) 187 | assert txn.put(key, val) 188 | keys.append(key) 189 | 190 | deleted = 0 191 | for key in txn.cursor().iternext(values=False): 192 | assert txn.delete(key), key 193 | deleted += 1 194 | 195 | assert deleted == len(keys), deleted 196 | 197 | 198 | class TxnFullTest(unittest.TestCase): 199 | def tearDown(self): 200 | testlib.cleanup() 201 | 202 | def test_17bf75b12eb94d9903cd62329048b146d5313bad(self): 203 | """ 204 | me_txn0 previously cached MDB_TXN_ERROR permanently. Fixed by 205 | 17bf75b12eb94d9903cd62329048b146d5313bad. 
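The loop below fills a tiny map (4096*9 bytes) until MapFullError is raised,
then verifies that a subsequent write transaction still succeeds rather than
failing with MDB_BAD_TXN.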
206 | """ 207 | path, env = testlib.temp_env(map_size=4096*9, sync=False, max_spare_txns=0) 208 | for i in itertools.count(): 209 | try: 210 | with env.begin(write=True) as txn: 211 | txn.put(B(str(i)), B(str(i))) 212 | except lmdb.MapFullError: 213 | break 214 | 215 | # Should not crash with MDB_BAD_TXN: 216 | with env.begin(write=True) as txn: 217 | txn.delete(B('1')) 218 | 219 | 220 | class EmptyIterTest(unittest.TestCase): 221 | def tearDown(self): 222 | testlib.cleanup() 223 | 224 | def test_python3_iternext_segfault(self): 225 | # https://github.com/dw/py-lmdb/issues/105 226 | _, env = testlib.temp_env() 227 | txn = env.begin() 228 | cur = txn.cursor() 229 | ite = cur.iternext() 230 | nex = getattr(ite, 'next', 231 | getattr(ite, '__next__', None)) 232 | assert nex is not None 233 | self.assertRaises(StopIteration, nex) 234 | 235 | 236 | if __name__ == '__main__': 237 | unittest.main() 238 | -------------------------------------------------------------------------------- /tests/cursor_test.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2013 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 
21 | # 22 | 23 | # test delete(dupdata) 24 | 25 | from __future__ import absolute_import 26 | from __future__ import with_statement 27 | import unittest 28 | 29 | import testlib 30 | from testlib import B 31 | from testlib import BT 32 | 33 | 34 | class ContextManagerTest(unittest.TestCase): 35 | def tearDown(self): 36 | testlib.cleanup() 37 | 38 | def test_ok(self): 39 | path, env = testlib.temp_env() 40 | txn = env.begin(write=True) 41 | with txn.cursor() as curs: 42 | curs.put(B('foo'), B('123')) 43 | self.assertRaises(Exception, lambda: curs.get(B('foo'))) 44 | 45 | def test_crash(self): 46 | path, env = testlib.temp_env() 47 | txn = env.begin(write=True) 48 | 49 | try: 50 | with txn.cursor() as curs: 51 | curs.put(123, 123) 52 | except: 53 | pass 54 | self.assertRaises(Exception, lambda: curs.get(B('foo'))) 55 | 56 | 57 | class CursorTestBase(unittest.TestCase): 58 | def tearDown(self): 59 | testlib.cleanup() 60 | 61 | def setUp(self): 62 | self.path, self.env = testlib.temp_env() 63 | self.txn = self.env.begin(write=True) 64 | self.c = self.txn.cursor() 65 | 66 | 67 | class CursorTest(CursorTestBase): 68 | def testKeyValueItemEmpty(self): 69 | self.assertEqual(B(''), self.c.key()) 70 | self.assertEqual(B(''), self.c.value()) 71 | self.assertEqual(BT('', ''), self.c.item()) 72 | 73 | def testFirstLastEmpty(self): 74 | self.assertEqual(False, self.c.first()) 75 | self.assertEqual(False, self.c.last()) 76 | 77 | def testFirstFilled(self): 78 | testlib.putData(self.txn) 79 | self.assertEqual(True, self.c.first()) 80 | self.assertEqual(testlib.ITEMS[0], self.c.item()) 81 | 82 | def testLastFilled(self): 83 | testlib.putData(self.txn) 84 | self.assertEqual(True, self.c.last()) 85 | self.assertEqual(testlib.ITEMS[-1], self.c.item()) 86 | 87 | def testSetKey(self): 88 | self.assertRaises(Exception, (lambda: self.c.set_key(B('')))) 89 | self.assertEqual(False, self.c.set_key(B('missing'))) 90 | testlib.putData(self.txn) 91 | self.assertEqual(True, self.c.set_key(B('b'))) 92 | self.assertEqual(False, self.c.set_key(B('ba'))) 93 | 94 | def testSetRange(self): 95 | self.assertEqual(False, self.c.set_range(B('x'))) 96 | testlib.putData(self.txn) 97 | self.assertEqual(False, self.c.set_range(B('x'))) 98 | self.assertEqual(True, self.c.set_range(B('a'))) 99 | self.assertEqual(B('a'), self.c.key()) 100 | self.assertEqual(True, self.c.set_range(B('ba'))) 101 | self.assertEqual(B('baa'), self.c.key()) 102 | self.c.set_range(B('')) 103 | self.assertEqual(B('a'), self.c.key()) 104 | 105 | def testDeleteEmpty(self): 106 | self.assertEqual(False, self.c.delete()) 107 | 108 | def testDeleteFirst(self): 109 | testlib.putData(self.txn) 110 | self.assertEqual(False, self.c.delete()) 111 | self.c.first() 112 | self.assertEqual(BT('a', ''), self.c.item()) 113 | self.assertEqual(True, self.c.delete()) 114 | self.assertEqual(BT('b', ''), self.c.item()) 115 | self.assertEqual(True, self.c.delete()) 116 | self.assertEqual(BT('baa', ''), self.c.item()) 117 | self.assertEqual(True, self.c.delete()) 118 | self.assertEqual(BT('d', ''), self.c.item()) 119 | self.assertEqual(True, self.c.delete()) 120 | self.assertEqual(BT('', ''), self.c.item()) 121 | self.assertEqual(False, self.c.delete()) 122 | self.assertEqual(BT('', ''), self.c.item()) 123 | 124 | def testDeleteLast(self): 125 | testlib.putData(self.txn) 126 | self.assertEqual(True, self.c.last()) 127 | self.assertEqual(BT('d', ''), self.c.item()) 128 | self.assertEqual(True, self.c.delete()) 129 | self.assertEqual(BT('', ''), self.c.item()) 130 | 
self.assertEqual(False, self.c.delete()) 131 | self.assertEqual(BT('', ''), self.c.item()) 132 | 133 | def testCount(self): 134 | self.assertRaises(Exception, (lambda: self.c.count())) 135 | testlib.putData(self.txn) 136 | self.c.first() 137 | # TODO: complete dup key support. 138 | #self.assertEqual(1, self.c.count()) 139 | 140 | def testPut(self): 141 | pass 142 | 143 | 144 | class PutmultiTest(CursorTestBase): 145 | def test_empty_seq(self): 146 | consumed, added = self.c.putmulti(()) 147 | assert consumed == added == 0 148 | 149 | def test_2list(self): 150 | l = [BT('a', ''), BT('a', '')] 151 | consumed, added = self.c.putmulti(l) 152 | assert consumed == added == 2 153 | 154 | li = iter(l) 155 | consumed, added = self.c.putmulti(li) 156 | assert consumed == added == 2 157 | 158 | def test_2list_preserve(self): 159 | l = [BT('a', ''), BT('a', '')] 160 | consumed, added = self.c.putmulti(l, overwrite=False) 161 | assert consumed == 2 162 | assert added == 1 163 | 164 | assert self.c.set_key(B('a')) 165 | assert self.c.delete() 166 | 167 | li = iter(l) 168 | consumed, added = self.c.putmulti(li, overwrite=False) 169 | assert consumed == 2 170 | assert added == 1 171 | 172 | def test_bad_seq1(self): 173 | self.assertRaises(Exception, 174 | lambda: self.c.putmulti(range(2))) 175 | 176 | 177 | class ReplaceTest(CursorTestBase): 178 | def test_replace(self): 179 | assert None is self.c.replace(B('a'), B('')) 180 | assert B('') == self.c.replace(B('a'), B('x')) 181 | assert B('x') == self.c.replace(B('a'), B('y')) 182 | 183 | 184 | class ContextManagerTest(CursorTestBase): 185 | def test_enter(self): 186 | with self.c as c: 187 | assert c is self.c 188 | c.put(B('a'), B('a')) 189 | assert c.get(B('a')) == B('a') 190 | self.assertRaises(Exception, 191 | lambda: c.get(B('a'))) 192 | 193 | def test_exit_success(self): 194 | with self.txn.cursor() as c: 195 | c.put(B('a'), B('a')) 196 | self.assertRaises(Exception, 197 | lambda: c.get(B('a'))) 198 | 199 | def test_exit_failure(self): 200 | try: 201 | with self.txn.cursor() as c: 202 | c.put(B('a'), B('a')) 203 | raise ValueError 204 | except ValueError: 205 | pass 206 | self.assertRaises(Exception, 207 | lambda: c.get(B('a'))) 208 | 209 | def test_close(self): 210 | self.c.close() 211 | self.assertRaises(Exception, 212 | lambda: c.get(B('a'))) 213 | 214 | def test_double_close(self): 215 | self.c.close() 216 | self.c.close() 217 | self.assertRaises(Exception, 218 | lambda: self.c.put(B('a'), B('a'))) 219 | 220 | 221 | if __name__ == '__main__': 222 | unittest.main() 223 | -------------------------------------------------------------------------------- /tests/iteration_test.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # 3 | # Copyright 2013 The py-lmdb authors, all rights reserved. 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted only as authorized by the OpenLDAP 7 | # Public License. 8 | # 9 | # A copy of this license is available in the file LICENSE in the 10 | # top-level directory of the distribution or, alternatively, at 11 | # . 12 | # 13 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 14 | # 15 | # Individual files and/or contributed packages may be copyright by 16 | # other parties and/or subject to additional restrictions. 17 | # 18 | # This work also contains materials derived from public sources. 19 | # 20 | # Additional information about OpenLDAP can be obtained at 21 | # . 
22 | # 23 | 24 | # test delete(dupdata) 25 | 26 | from __future__ import absolute_import 27 | from __future__ import with_statement 28 | import unittest 29 | 30 | import testlib 31 | from testlib import B 32 | from testlib import BT 33 | from testlib import KEYS, ITEMS, KEYS2, ITEMS2 34 | from testlib import putData, putBigData 35 | 36 | 37 | class IterationTestBase(unittest.TestCase): 38 | def tearDown(self): 39 | testlib.cleanup() 40 | 41 | def setUp(self): 42 | self.path, self.env = testlib.temp_env() # creates 10 databases 43 | self.txn = self.env.begin(write=True) 44 | putData(self.txn) 45 | self.c = self.txn.cursor() 46 | self.empty_entry = (B(''), B('')) 47 | 48 | def matchList(self, ls_a, ls_b): 49 | return all(map(lambda x, y: x == y, ls_a, ls_b)) 50 | 51 | 52 | class IterationTestBase2(unittest.TestCase): 53 | """ This puts more data than its predecessor""" 54 | 55 | def tearDown(self): 56 | testlib.cleanup() 57 | 58 | def setUp(self): 59 | self.path, self.env = testlib.temp_env() # creates 10 databases 60 | self.txn = self.env.begin(write=True) 61 | putBigData(self.txn) # HERE! 62 | self.c = self.txn.cursor() 63 | self.empty_entry = ('', '') 64 | 65 | def matchList(self, ls_a, ls_b): 66 | return all(map(lambda x, y: x == y, ls_a, ls_b)) 67 | 68 | 69 | class IterationTest(IterationTestBase): 70 | def testFromStart(self): 71 | # From start 72 | self.c.first() 73 | self.assertEqual(self.c.key(), KEYS[0]) # start of db 74 | test_list = [i for i in iter(self.c)] 75 | self.assertEqual(self.matchList(test_list, ITEMS), True) 76 | self.assertEqual(self.c.item(), self.empty_entry) # end of db 77 | 78 | def testFromStartWithIternext(self): 79 | # From start with iternext 80 | self.c.first() 81 | self.assertEqual(self.c.key(), KEYS[0]) # start of db 82 | test_list = [i for i in self.c.iternext()] 83 | # remaining elements in db 84 | self.assertEqual(self.matchList(test_list, ITEMS), True) 85 | self.assertEqual(self.c.item(), self.empty_entry) # end of db 86 | 87 | def testFromStartWithNext(self): 88 | # From start with next 89 | self.c.first() 90 | self.assertEqual(self.c.key(), KEYS[0]) # start of db 91 | test_list = [] 92 | while 1: 93 | test_list.append(self.c.item()) 94 | if not self.c.next(): 95 | break 96 | self.assertEqual(self.matchList(test_list, ITEMS), True) 97 | 98 | def testFromExistentKeySetKey(self): 99 | self.c.first() 100 | self.c.set_key(KEYS[1]) 101 | self.assertEqual(self.c.key(), KEYS[1]) 102 | test_list = [i for i in self.c.iternext()] 103 | self.assertEqual(self.matchList(test_list, ITEMS[1:]), True) 104 | 105 | def testFromExistentKeySetRange(self): 106 | self.c.first() 107 | self.c.set_range(KEYS[1]) 108 | self.assertEqual(self.c.key(), KEYS[1]) 109 | test_list = [i for i in self.c.iternext()] 110 | self.assertEqual(self.matchList(test_list, ITEMS[1:]), True) 111 | 112 | def testFromNonExistentKeySetRange(self): 113 | self.c.first() 114 | self.c.set_range(B('c')) 115 | self.assertEqual(self.c.key(), B('d')) 116 | test_list = [i for i in self.c.iternext()] 117 | test_items = [i for i in ITEMS if i[0] > B('c')] 118 | self.assertEqual(self.matchList(test_list, test_items), True) 119 | 120 | def testFromLastKey(self): 121 | self.c.last() 122 | self.assertEqual(self.c.key(), KEYS[-1]) 123 | test_list = [i for i in self.c.iternext()] 124 | self.assertEqual(self.matchList(test_list, ITEMS[-1:]), True) 125 | 126 | def testFromNonExistentKeyPastEnd(self): 127 | self.c.last() 128 | self.assertEqual(self.c.key(), KEYS[-1]) 129 | # next() fails, leaving iterator in an 
unpositioned state. 130 | self.c.next() 131 | self.assertEqual(self.c.item(), self.empty_entry) 132 | # iternext() from an unpositioned state proceeds from start of DB. 133 | test_list = list(self.c.iternext()) 134 | self.assertEqual(test_list, ITEMS) 135 | 136 | 137 | class ReverseIterationTest(IterationTestBase): 138 | def testFromStartRev(self): 139 | # From start 140 | self.c.first() 141 | self.assertEqual(self.c.key(), KEYS[0]) # start of db 142 | test_list = [i for i in self.c.iterprev()] 143 | self.assertEqual(self.matchList(test_list, ITEMS[:1][::-1]), True) 144 | self.assertEqual(self.c.item(), self.empty_entry) # very start of db 145 | 146 | def testFromExistentKeySetKeyRev(self): 147 | self.c.first() 148 | self.c.set_key(KEYS[2]) 149 | self.assertEqual(self.c.key(), KEYS[2]) 150 | test_list = [i for i in self.c.iterprev()] 151 | self.assertEqual(self.matchList(test_list, ITEMS[:3][::-1]), True) 152 | 153 | def testFromExistentKeySetRangeRev(self): 154 | self.c.first() 155 | self.c.set_range(KEYS[2]) 156 | self.assertEqual(self.c.key(), KEYS[2]) 157 | test_list = [i for i in self.c.iterprev()] 158 | self.assertEqual(self.matchList(test_list, ITEMS[:3][::-1]), True) 159 | 160 | def testFromNonExistentKeySetRangeRev(self): 161 | self.c.first() 162 | self.c.set_range(B('c')) 163 | self.assertEqual(self.c.key(), B('d')) 164 | test_list = [i for i in self.c.iterprev()] 165 | test_items = [i for i in ITEMS if i[0] <= B('d')] 166 | test_items = test_items[::-1] 167 | self.assertEqual(self.matchList(test_list, test_items), True) 168 | 169 | def testFromLastKeyRev(self): 170 | self.c.last() 171 | self.assertEqual(self.c.key(), KEYS[-1]) 172 | test_list = [i for i in self.c.iterprev()] 173 | self.assertEqual(self.matchList(test_list, ITEMS[::-1]), True) 174 | 175 | def testFromLastKeyWithPrevRev(self): 176 | self.c.last() 177 | self.assertEqual(self.c.key(), KEYS[-1]) # end of db 178 | test_list = [] 179 | while 1: 180 | test_list.append(self.c.item()) 181 | if not self.c.prev(): 182 | break 183 | self.assertEqual(self.matchList(test_list, ITEMS[::-1]), True) 184 | 185 | def testFromNonExistentKeyPastEndRev(self): 186 | self.c.first() 187 | self.assertEqual(self.c.key(), KEYS[0]) 188 | # prev() fails, leaving iterator in an unpositioned state. 189 | self.c.prev() 190 | self.assertEqual(self.c.item(), self.empty_entry) 191 | # iterprev() from an unpositioned state proceeds from end of DB. 
192 | test_list = list(self.c.iterprev()) 193 | self.assertEqual(test_list, ITEMS[::-1]) 194 | 195 | class IterationTestWithDupsBase(unittest.TestCase): 196 | def tearDown(self): 197 | testlib.cleanup() 198 | 199 | def setUp(self): 200 | self.path, self.env = testlib.temp_env() 201 | db = self.env.open_db(B('db1'), dupsort=True) 202 | self.txn = self.env.begin(db, write=True) 203 | for _ in range(2): 204 | putData(self.txn) 205 | self.c = self.txn.cursor() 206 | self.empty_entry = ('', '') 207 | 208 | def matchList(self, ls_a, ls_b): 209 | return all(map(lambda x, y: x == y, ls_a, ls_b)) 210 | 211 | 212 | class IterationTestWithDups(IterationTestWithDupsBase): 213 | pass 214 | 215 | 216 | class SeekIterationTest(IterationTestBase2): 217 | def testForwardIterationSeek(self): 218 | self.c.first() 219 | test_list = [] 220 | for i in self.c.iternext(): 221 | test_list.append(i) 222 | # skips d and e 223 | if self.c.key() == B('baa'): 224 | self.c.set_key(B('e')) 225 | test_item = [i for i in ITEMS2 if i[0] not in (B('d'), B('e'))] 226 | self.assertEqual(test_list, test_item) 227 | 228 | def testPutDuringIteration(self): 229 | self.c.first() 230 | test_list = [] 231 | c = self.txn.cursor() 232 | for i in c.iternext(): 233 | test_list.append(i) 234 | # adds 'i' upon seeing 'e' 235 | if c.key() == B('e'): 236 | self.c.put(B('i'), B('')) 237 | test_item = ITEMS2 + [(B('i'), B(''))] 238 | self.assertEqual(test_list, test_item) 239 | 240 | def testDeleteDuringIteration(self): 241 | self.c.first() 242 | test_list = [] 243 | for i in self.c.iternext(): 244 | # deletes 'e' upon seeing it 245 | if self.c.key() == B('e'): 246 | # Causes 'e' to be deleted, and advances cursor to next 247 | # element. 248 | self.c.delete() 249 | i = self.c.item() 250 | test_list.append(i) 251 | 252 | test_item = [i for i in ITEMS2 if i[0] != B('e')] 253 | self.assertEqual(test_list, test_item) 254 | 255 | 256 | if __name__ == '__main__': 257 | unittest.main() 258 | -------------------------------------------------------------------------------- /tests/package_test.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2013 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 21 | # 22 | 23 | from __future__ import absolute_import 24 | import unittest 25 | 26 | import lmdb 27 | 28 | 29 | class PackageExportsTest(unittest.TestCase): 30 | """ 31 | Ensure the list of exported names matches a predefined list. Designed to 32 | ensure future interface changes to cffi.py and cpython.c don't break 33 | consistency of "from lmdb import *". 
34 | """ 35 | def test_exports(self): 36 | assert sorted(lmdb.__all__) == [ 37 | 'BadDbiError', 38 | 'BadRslotError', 39 | 'BadTxnError', 40 | 'BadValsizeError', 41 | 'CorruptedError', 42 | 'Cursor', 43 | 'CursorFullError', 44 | 'DbsFullError', 45 | 'DiskError', 46 | 'Environment', 47 | 'Error', 48 | 'IncompatibleError', 49 | 'InvalidError', 50 | 'InvalidParameterError', 51 | 'KeyExistsError', 52 | 'LockError', 53 | 'MapFullError', 54 | 'MapResizedError', 55 | 'MemoryError', 56 | 'NotFoundError', 57 | 'PageFullError', 58 | 'PageNotFoundError', 59 | 'PanicError', 60 | 'ReadersFullError', 61 | 'ReadonlyError', 62 | 'TlsFullError', 63 | 'Transaction', 64 | 'TxnFullError', 65 | 'VersionMismatchError', 66 | 'enable_drop_gil', 67 | 'version', 68 | ] 69 | -------------------------------------------------------------------------------- /tests/testlib.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2013 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 21 | # 22 | 23 | from __future__ import absolute_import 24 | import atexit 25 | import gc 26 | import os 27 | import shutil 28 | import stat 29 | import sys 30 | import tempfile 31 | import traceback 32 | 33 | try: 34 | import __builtin__ 35 | except ImportError: 36 | import builtins as __builtin__ 37 | 38 | import lmdb 39 | 40 | 41 | _cleanups = [] 42 | 43 | def cleanup(): 44 | while _cleanups: 45 | func = _cleanups.pop() 46 | try: 47 | func() 48 | except Exception: 49 | traceback.print_exc() 50 | 51 | atexit.register(cleanup) 52 | 53 | 54 | def temp_dir(create=True): 55 | path = tempfile.mkdtemp(prefix='lmdb_test') 56 | assert path is not None, 'tempfile.mkdtemp failed' 57 | if not create: 58 | os.rmdir(path) 59 | _cleanups.append(lambda: shutil.rmtree(path, ignore_errors=True)) 60 | if hasattr(path, 'decode'): 61 | path = path.decode(sys.getfilesystemencoding()) 62 | return path 63 | 64 | 65 | def temp_file(create=True): 66 | fd, path = tempfile.mkstemp(prefix='lmdb_test') 67 | assert path is not None, 'tempfile.mkstemp failed' 68 | os.close(fd) 69 | if not create: 70 | os.unlink(path) 71 | _cleanups.append(lambda: os.path.exists(path) and os.unlink(path)) 72 | pathlock = path + '-lock' 73 | _cleanups.append(lambda: os.path.exists(pathlock) and os.unlink(pathlock)) 74 | if hasattr(path, 'decode'): 75 | path = path.decode(sys.getfilesystemencoding()) 76 | return path 77 | 78 | 79 | def temp_env(path=None, max_dbs=10, **kwargs): 80 | if not path: 81 | path = temp_dir() 82 | env = lmdb.open(path, max_dbs=max_dbs, **kwargs) 83 | _cleanups.append(env.close) 84 | return path, env 85 | 86 | 87 | def path_mode(path): 88 | return stat.S_IMODE(os.stat(path).st_mode) 89 | 90 | 91 | def debug_collect(): 92 | if hasattr(gc, 'set_debug') and hasattr(gc, 'get_debug'): 93 | old = gc.get_debug() 94 | gc.set_debug(gc.DEBUG_LEAK) 
95 | gc.collect() 96 | gc.set_debug(old) 97 | else: 98 | for x in range(10): 99 | # PyPy doesn't collect objects with __del__ on first attempt. 100 | gc.collect() 101 | 102 | 103 | # Handle moronic Python >=3.0 <3.3. 104 | UnicodeType = getattr(__builtin__, 'unicode', str) 105 | BytesType = getattr(__builtin__, 'bytes', str) 106 | 107 | 108 | try: 109 | INT_TYPES = (int, long) 110 | except NameError: 111 | INT_TYPES = (int,) 112 | 113 | # B(ascii 'string') -> bytes 114 | try: 115 | bytes('') # Python>=2.6, alias for str(). 116 | B = lambda s: s 117 | except TypeError: # Python3.x, requires encoding parameter. 118 | B = lambda s: bytes(s, 'ascii') 119 | except NameError: # Python<=2.5. 120 | B = lambda s: s 121 | 122 | # BL('s1', 's2') -> ['bytes1', 'bytes2'] 123 | BL = lambda *args: list(map(B, args)) 124 | # TS('s1', 's2') -> ('bytes1', 'bytes2') 125 | BT = lambda *args: tuple(B(s) for s in args) 126 | # O(int) -> length-1 bytes 127 | O = lambda arg: B(chr(arg)) 128 | # OCT(s) -> parse string as octal 129 | OCT = lambda s: int(s, 8) 130 | 131 | 132 | KEYS = BL('a', 'b', 'baa', 'd') 133 | ITEMS = [(k, B('')) for k in KEYS] 134 | REV_ITEMS = ITEMS[::-1] 135 | VALUES = [B('') for k in KEYS] 136 | 137 | KEYS2 = BL('a', 'b', 'baa', 'd', 'e', 'f', 'g', 'h') 138 | ITEMS2 = [(k, B('')) for k in KEYS2] 139 | REV_ITEMS2 = ITEMS2[::-1] 140 | VALUES2 = [B('') for k in KEYS2] 141 | 142 | def putData(t, db=None): 143 | for k, v in ITEMS: 144 | if db: 145 | t.put(k, v, db=db) 146 | else: 147 | t.put(k, v) 148 | 149 | def putBigData(t, db=None): 150 | for k, v in ITEMS2: 151 | if db: 152 | t.put(k, v, db=db) 153 | else: 154 | t.put(k, v) 155 | -------------------------------------------------------------------------------- /tests/tool_test.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2013 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 21 | # 22 | 23 | from __future__ import absolute_import 24 | import unittest 25 | 26 | import lmdb 27 | import lmdb.tool 28 | 29 | 30 | class ToolTest(unittest.TestCase): 31 | def test_ok(self): 32 | # For now, simply ensure the module can be compiled (3.x compat). 33 | pass 34 | 35 | 36 | if __name__ == '__main__': 37 | unittest.main() 38 | -------------------------------------------------------------------------------- /tests/txn_test.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2013 The py-lmdb authors, all rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted only as authorized by the OpenLDAP 6 | # Public License. 7 | # 8 | # A copy of this license is available in the file LICENSE in the 9 | # top-level directory of the distribution or, alternatively, at 10 | # . 
11 | # 12 | # OpenLDAP is a registered trademark of the OpenLDAP Foundation. 13 | # 14 | # Individual files and/or contributed packages may be copyright by 15 | # other parties and/or subject to additional restrictions. 16 | # 17 | # This work also contains materials derived from public sources. 18 | # 19 | # Additional information about OpenLDAP can be obtained at 20 | # . 21 | # 22 | 23 | from __future__ import absolute_import 24 | from __future__ import with_statement 25 | import struct 26 | import unittest 27 | import weakref 28 | 29 | import testlib 30 | from testlib import B 31 | from testlib import BT 32 | from testlib import OCT 33 | from testlib import INT_TYPES 34 | from testlib import BytesType 35 | from testlib import UnicodeType 36 | 37 | import lmdb 38 | 39 | 40 | UINT_0001 = struct.pack('I', 1) 41 | UINT_0002 = struct.pack('I', 2) 42 | ULONG_0001 = struct.pack('L', 1) # L != size_t 43 | ULONG_0002 = struct.pack('L', 2) # L != size_t 44 | 45 | 46 | class InitTest(unittest.TestCase): 47 | def tearDown(self): 48 | testlib.cleanup() 49 | 50 | def test_closed(self): 51 | _, env = testlib.temp_env() 52 | env.close() 53 | self.assertRaises(Exception, 54 | lambda: lmdb.Transaction(env)) 55 | 56 | def test_readonly(self): 57 | _, env = testlib.temp_env() 58 | txn = lmdb.Transaction(env) 59 | # Read txn can't write. 60 | self.assertRaises(lmdb.ReadonlyError, 61 | lambda: txn.put(B('a'), B(''))) 62 | txn.abort() 63 | 64 | def test_begin_write(self): 65 | _, env = testlib.temp_env() 66 | txn = lmdb.Transaction(env, write=True) 67 | # Write txn can write. 68 | assert txn.put(B('a'), B('')) 69 | txn.commit() 70 | 71 | def test_bind_db(self): 72 | _, env = testlib.temp_env() 73 | main = env.open_db(None) 74 | sub = env.open_db(B('db1')) 75 | 76 | txn = lmdb.Transaction(env, write=True, db=sub) 77 | assert txn.put(B('b'), B('')) # -> sub 78 | assert txn.put(B('a'), B(''), db=main) # -> main 79 | txn.commit() 80 | 81 | txn = lmdb.Transaction(env) 82 | assert txn.get(B('a')) == B('') 83 | assert txn.get(B('b')) is None 84 | assert txn.get(B('a'), db=sub) is None 85 | assert txn.get(B('b'), db=sub) == B('') 86 | txn.abort() 87 | 88 | def test_bind_db_methods(self): 89 | _, env = testlib.temp_env() 90 | maindb = env.open_db(None) 91 | db1 = env.open_db(B('d1')) 92 | txn = lmdb.Transaction(env, write=True, db=db1) 93 | assert txn.put(B('a'), B('d1')) 94 | assert txn.get(B('a'), db=db1) == B('d1') 95 | assert txn.get(B('a'), db=maindb) is None 96 | assert txn.replace(B('a'), B('d11')) == B('d1') 97 | assert txn.pop(B('a')) == B('d11') 98 | assert txn.put(B('a'), B('main'), db=maindb, overwrite=False) 99 | assert not txn.delete(B('a')) 100 | txn.abort() 101 | 102 | def test_parent_readonly(self): 103 | _, env = testlib.temp_env() 104 | parent = lmdb.Transaction(env) 105 | # Nonsensical. 
106 | self.assertRaises(lmdb.InvalidParameterError, 107 | lambda: lmdb.Transaction(env, parent=parent)) 108 | 109 | def test_parent(self): 110 | _, env = testlib.temp_env() 111 | parent = lmdb.Transaction(env, write=True) 112 | parent.put(B('a'), B('a')) 113 | 114 | child = lmdb.Transaction(env, write=True, parent=parent) 115 | assert child.get(B('a')) == B('a') 116 | assert child.put(B('a'), B('b')) 117 | child.abort() 118 | 119 | # put() should have rolled back 120 | assert parent.get(B('a')) == B('a') 121 | 122 | child = lmdb.Transaction(env, write=True, parent=parent) 123 | assert child.put(B('a'), B('b')) 124 | child.commit() 125 | 126 | # put() should be visible 127 | assert parent.get(B('a')) == B('b') 128 | 129 | def test_buffers(self): 130 | _, env = testlib.temp_env() 131 | txn = lmdb.Transaction(env, write=True, buffers=True) 132 | assert txn.put(B('a'), B('a')) 133 | b = txn.get(B('a')) 134 | assert b is not None 135 | assert len(b) == 1 136 | assert not isinstance(b, type(B(''))) 137 | txn.commit() 138 | 139 | txn = lmdb.Transaction(env, buffers=False) 140 | b = txn.get(B('a')) 141 | assert b is not None 142 | assert len(b) == 1 143 | assert isinstance(b, type(B(''))) 144 | txn.abort() 145 | 146 | 147 | class ContextManagerTest(unittest.TestCase): 148 | def tearDown(self): 149 | testlib.cleanup() 150 | 151 | def test_ok(self): 152 | path, env = testlib.temp_env() 153 | txn = env.begin(write=True) 154 | with txn as txn_: 155 | assert txn is txn_ 156 | txn.put(B('foo'), B('123')) 157 | 158 | self.assertRaises(Exception, lambda: txn.get(B('foo'))) 159 | with env.begin() as txn: 160 | assert txn.get(B('foo')) == B('123') 161 | 162 | def test_crash(self): 163 | path, env = testlib.temp_env() 164 | txn = env.begin(write=True) 165 | 166 | try: 167 | with txn as txn_: 168 | txn.put(B('foo'), B('123')) 169 | txn.put(123, 123) 170 | except: 171 | pass 172 | 173 | self.assertRaises(Exception, lambda: txn.get(B('foo'))) 174 | with env.begin() as txn: 175 | assert txn.get(B('foo')) is None 176 | 177 | 178 | class IdTest(unittest.TestCase): 179 | def tearDown(self): 180 | testlib.cleanup() 181 | 182 | def test_readonly_new(self): 183 | _, env = testlib.temp_env() 184 | with env.begin() as txn: 185 | assert txn.id() == 0 186 | 187 | def test_write_new(self): 188 | _, env = testlib.temp_env() 189 | with env.begin(write=True) as txn: 190 | assert txn.id() == 1 191 | 192 | def test_readonly_after_write(self): 193 | _, env = testlib.temp_env() 194 | with env.begin(write=True) as txn: 195 | txn.put(B('a'), B('a')) 196 | with env.begin() as txn: 197 | assert txn.id() == 1 198 | 199 | def test_invalid_txn(self): 200 | _, env = testlib.temp_env() 201 | txn = env.begin() 202 | txn.abort() 203 | self.assertRaises(Exception, lambda: txn.id()) 204 | 205 | 206 | class StatTest(unittest.TestCase): 207 | def tearDown(self): 208 | testlib.cleanup() 209 | 210 | def test_stat(self): 211 | _, env = testlib.temp_env() 212 | db1 = env.open_db(B('db1')) 213 | db2 = env.open_db(B('db2')) 214 | 215 | txn = lmdb.Transaction(env) 216 | for db in db1, db2: 217 | stat = txn.stat(db) 218 | for k in 'psize', 'depth', 'branch_pages', 'overflow_pages',\ 219 | 'entries': 220 | assert isinstance(stat[k], INT_TYPES), k 221 | assert stat[k] >= 0 222 | assert stat['entries'] == 0 223 | 224 | txn = lmdb.Transaction(env, write=True) 225 | txn.put(B('a'), B('b'), db=db1) 226 | txn.commit() 227 | 228 | txn = lmdb.Transaction(env) 229 | stat = txn.stat(db1) 230 | assert stat['entries'] == 1 231 | 232 | stat = txn.stat(db2) 233 | 
assert stat['entries'] == 0 234 | 235 | txn.abort() 236 | self.assertRaises(Exception, 237 | lambda: env.stat(db1)) 238 | env.close() 239 | self.assertRaises(Exception, 240 | lambda: env.stat(db1)) 241 | 242 | 243 | class DropTest(unittest.TestCase): 244 | def tearDown(self): 245 | testlib.cleanup() 246 | 247 | def test_empty(self): 248 | _, env = testlib.temp_env() 249 | db1 = env.open_db(B('db1')) 250 | txn = env.begin(write=True) 251 | txn.put(B('a'), B('a'), db=db1) 252 | assert txn.get(B('a'), db=db1) == B('a') 253 | txn.drop(db1, False) 254 | assert txn.get(B('a')) is None 255 | txn.drop(db1, False) # should succeed. 256 | assert txn.get(B('a')) is None 257 | 258 | def test_delete(self): 259 | _, env = testlib.temp_env() 260 | db1 = env.open_db(B('db1')) 261 | txn = env.begin(write=True) 262 | txn.put(B('a'), B('a'), db=db1) 263 | txn.drop(db1) 264 | self.assertRaises(lmdb.InvalidParameterError, 265 | lambda: txn.get(B('a'), db=db1)) 266 | self.assertRaises(lmdb.InvalidParameterError, 267 | lambda: txn.drop(db1)) 268 | 269 | 270 | class CommitTest(unittest.TestCase): 271 | def tearDown(self): 272 | testlib.cleanup() 273 | 274 | def test_bad_txn(self): 275 | _, env = testlib.temp_env() 276 | txn = env.begin() 277 | txn.abort() 278 | self.assertRaises(Exception, 279 | lambda: txn.commit()) 280 | 281 | def test_bad_env(self): 282 | _, env = testlib.temp_env() 283 | txn = env.begin() 284 | env.close() 285 | self.assertRaises(Exception, 286 | lambda: txn.commit()) 287 | 288 | def test_commit_ro(self): 289 | _, env = testlib.temp_env() 290 | txn = env.begin() 291 | txn.commit() 292 | self.assertRaises(Exception, 293 | lambda: txn.commit()) 294 | 295 | def test_commit_rw(self): 296 | _, env = testlib.temp_env() 297 | txn = env.begin(write=True) 298 | assert txn.put(B('a'), B('a')) 299 | txn.commit() 300 | self.assertRaises(Exception, 301 | lambda: txn.commit()) 302 | txn = env.begin() 303 | assert txn.get(B('a')) == B('a') 304 | txn.abort() 305 | 306 | 307 | class AbortTest(unittest.TestCase): 308 | def tearDown(self): 309 | testlib.cleanup() 310 | 311 | def test_abort_ro(self): 312 | _, env = testlib.temp_env() 313 | txn = env.begin() 314 | assert txn.get(B('a')) is None 315 | txn.abort() 316 | self.assertRaises(Exception, 317 | lambda: txn.get(B('a'))) 318 | env.close() 319 | self.assertRaises(Exception, 320 | lambda: txn.get(B('a'))) 321 | 322 | def test_abort_rw(self): 323 | _, env = testlib.temp_env() 324 | txn = env.begin(write=True) 325 | assert txn.put(B('a'), B('a')) 326 | txn.abort() 327 | txn = env.begin() 328 | assert txn.get(B('a')) is None 329 | 330 | 331 | class GetTest(unittest.TestCase): 332 | def tearDown(self): 333 | testlib.cleanup() 334 | 335 | def test_bad_txn(self): 336 | _, env = testlib.temp_env() 337 | txn = env.begin() 338 | txn.abort() 339 | self.assertRaises(Exception, 340 | lambda: txn.get(B('a'))) 341 | 342 | def test_bad_env(self): 343 | _, env = testlib.temp_env() 344 | txn = env.begin() 345 | env.close() 346 | self.assertRaises(Exception, 347 | lambda: txn.get(B('a'))) 348 | 349 | def test_missing(self): 350 | _, env = testlib.temp_env() 351 | txn = env.begin() 352 | assert txn.get(B('a')) is None 353 | assert txn.get(B('a'), default='default') is 'default' 354 | 355 | def test_empty_key(self): 356 | _, env = testlib.temp_env() 357 | txn = env.begin() 358 | self.assertRaises(lmdb.BadValsizeError, 359 | lambda: txn.get(B(''))) 360 | 361 | def test_db(self): 362 | _, env = testlib.temp_env() 363 | maindb = env.open_db(None) 364 | db1 = env.open_db(B('db1')) 
365 | 366 | txn = env.begin() 367 | assert txn.get(B('a'), db=db1) is None 368 | txn.abort() 369 | 370 | txn = env.begin(write=True) 371 | txn.put(B('a'), B('a'), db=db1) 372 | txn.commit() 373 | 374 | txn = env.begin() 375 | assert txn.get(B('a')) is None 376 | txn.abort() 377 | 378 | txn = env.begin(db=db1) 379 | assert txn.get(B('a')) == B('a') 380 | assert txn.get(B('a'), db=maindb) is None 381 | 382 | def test_buffers_no(self): 383 | _, env = testlib.temp_env() 384 | txn = env.begin(write=True) 385 | assert txn.put(B('a'), B('a')) 386 | assert type(txn.get(B('a'))) is BytesType 387 | 388 | def test_buffers_yes(self): 389 | _, env = testlib.temp_env() 390 | txn = env.begin(write=True, buffers=True) 391 | assert txn.put(B('a'), B('a')) 392 | assert type(txn.get(B('a'))) is not BytesType 393 | 394 | def test_dupsort(self): 395 | _, env = testlib.temp_env() 396 | db1 = env.open_db(B('db1'), dupsort=True) 397 | txn = env.begin(write=True, db=db1) 398 | assert txn.put(B('a'), B('a')) 399 | assert txn.put(B('a'), B('b')) 400 | assert txn.get(B('a')) == B('a') 401 | 402 | def test_integerkey(self): 403 | _, env = testlib.temp_env() 404 | db1 = env.open_db(B('db1'), integerkey=True) 405 | txn = env.begin(write=True, db=db1) 406 | assert txn.put(UINT_0001, B('a')) 407 | assert txn.put(UINT_0002, B('b')) 408 | assert txn.get(UINT_0001) == B('a') 409 | assert txn.get(UINT_0002) == B('b') 410 | 411 | def test_integerdup(self): 412 | _, env = testlib.temp_env() 413 | db1 = env.open_db(B('db1'), dupsort=True, integerdup=True) 414 | txn = env.begin(write=True, db=db1) 415 | assert txn.put(UINT_0001, UINT_0002) 416 | assert txn.put(UINT_0001, UINT_0001) 417 | assert txn.get(UINT_0001) == UINT_0001 418 | 419 | def test_dupfixed(self): 420 | _, env = testlib.temp_env() 421 | db1 = env.open_db(B('db1'), dupsort=True, dupfixed=True) 422 | txn = env.begin(write=True, db=db1) 423 | assert txn.put(B('a'), B('a')) 424 | assert txn.put(B('a'), B('b')) 425 | assert txn.get(B('a')) == B('a') 426 | 427 | 428 | class PutTest(unittest.TestCase): 429 | def tearDown(self): 430 | testlib.cleanup() 431 | 432 | def test_bad_txn(self): 433 | _, env = testlib.temp_env() 434 | txn = env.begin(write=True) 435 | txn.abort() 436 | self.assertRaises(Exception, 437 | lambda: txn.put(B('a'), B('a'))) 438 | 439 | def test_bad_env(self): 440 | _, env = testlib.temp_env() 441 | txn = env.begin(write=True) 442 | env.close() 443 | self.assertRaises(Exception, 444 | lambda: txn.put(B('a'), B('a'))) 445 | 446 | def test_ro_txn(self): 447 | _, env = testlib.temp_env() 448 | txn = env.begin() 449 | self.assertRaises(lmdb.ReadonlyError, 450 | lambda: txn.put(B('a'), B('a'))) 451 | 452 | def test_empty_key_value(self): 453 | _, env = testlib.temp_env() 454 | txn = env.begin(write=True) 455 | self.assertRaises(lmdb.BadValsizeError, 456 | lambda: txn.put(B(''), B('a'))) 457 | 458 | def test_dupsort(self): 459 | _, env = testlib.temp_env() 460 | 461 | def test_dupdata_no_dupsort(self): 462 | _, env = testlib.temp_env() 463 | txn = env.begin(write=True) 464 | assert txn.put(B('a'), B('a'), dupdata=True) 465 | assert txn.put(B('a'), B('b'), dupdata=True) 466 | txn.get(B('a')) 467 | 468 | 469 | class ReplaceTest(unittest.TestCase): 470 | def tearDown(self): 471 | testlib.cleanup() 472 | 473 | def test_bad_txn(self): 474 | _, env = testlib.temp_env() 475 | txn = env.begin(write=True) 476 | txn.abort() 477 | self.assertRaises(Exception, 478 | lambda: txn.replace(B('a'), B('a'))) 479 | 480 | def test_bad_env(self): 481 | _, env = testlib.temp_env() 
482 | txn = env.begin(write=True) 483 | env.close() 484 | self.assertRaises(Exception, 485 | lambda: txn.replace(B('a'), B('a'))) 486 | 487 | def test_ro_txn(self): 488 | _, env = testlib.temp_env() 489 | txn = env.begin() 490 | self.assertRaises(lmdb.ReadonlyError, 491 | lambda: txn.replace(B('a'), B('a'))) 492 | 493 | def test_empty_key_value(self): 494 | _, env = testlib.temp_env() 495 | txn = env.begin(write=True) 496 | self.assertRaises(lmdb.BadValsizeError, 497 | lambda: txn.replace(B(''), B('a'))) 498 | 499 | def test_dupsort_noexist(self): 500 | _, env = testlib.temp_env() 501 | db = env.open_db(B('db1'), dupsort=True) 502 | txn = env.begin(write=True, db=db) 503 | assert None == txn.replace(B('a'), B('x')) 504 | assert B('x') == txn.replace(B('a'), B('y')) 505 | assert B('y') == txn.replace(B('a'), B('z')) 506 | cur = txn.cursor() 507 | assert cur.set_key(B('a')) 508 | assert [B('z')] == list(cur.iternext_dup()) 509 | 510 | def test_dupdata_no_dupsort(self): 511 | _, env = testlib.temp_env() 512 | txn = env.begin(write=True) 513 | assert txn.put(B('a'), B('a'), dupdata=True) 514 | assert txn.put(B('a'), B('b'), dupdata=True) 515 | txn.get(B('a')) 516 | 517 | 518 | class LeakTest(unittest.TestCase): 519 | def tearDown(self): 520 | testlib.cleanup() 521 | 522 | def test_open_close(self): 523 | temp_dir = testlib.temp_dir() 524 | env = lmdb.open(temp_dir) 525 | with env.begin() as txn: 526 | pass 527 | env.close() 528 | r1 = weakref.ref(env) 529 | r2 = weakref.ref(txn) 530 | env = None 531 | txn = None 532 | testlib.debug_collect() 533 | assert r1() is None 534 | assert r2() is None 535 | 536 | 537 | if __name__ == '__main__': 538 | unittest.main() 539 | --------------------------------------------------------------------------------
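
Editor's note: cursor_test.py and iteration_test.py above exercise the Cursor API — positioning with first()/last()/set_key()/set_range(), forward and reverse iteration, bulk inserts via putmulti(), and use of a cursor as a context manager. The following is a minimal, hedged sketch of that usage pattern, not part of the test suite; it assumes the lmdb package is importable and writes to a throwaway temporary directory (the tests themselves use testlib.temp_env() for this).

```python
import tempfile

import lmdb

# Scratch environment; illustrative only.
path = tempfile.mkdtemp(prefix='lmdb_example')
env = lmdb.open(path, max_dbs=10)

with env.begin(write=True) as txn:
    # Cursors are context managers: the cursor is closed on exit,
    # mirroring ContextManagerTest in cursor_test.py.
    with txn.cursor() as curs:
        # putmulti() consumes an iterable of (key, value) byte pairs and
        # returns (consumed, added), as covered by PutmultiTest.
        curs.putmulti([(b'a', b''), (b'b', b''), (b'baa', b''), (b'd', b'')])

        curs.first()                 # position on the first key
        assert curs.key() == b'a'

        curs.set_range(b'ba')        # first key >= b'ba'
        assert curs.key() == b'baa'

        # iternext() yields (key, value) pairs starting at the current
        # position, as in IterationTest.testFromExistentKeySetRange.
        remaining = list(curs.iternext())
        assert [k for k, _ in remaining] == [b'baa', b'd']

env.close()
```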
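Likewise, txn_test.py covers transaction behaviour: read-only versus write transactions, the context-manager protocol (commit on clean exit, abort when an exception escapes), get() defaults, and nested child transactions. The sketch below restates those semantics under the same assumptions as the previous example (lmdb installed, scratch directory); it is illustrative, not a substitute for the tests.

```python
import tempfile

import lmdb

env = lmdb.open(tempfile.mkdtemp(prefix='lmdb_txn_example'))

# Context manager: commits on clean exit, aborts if an exception escapes
# (see ContextManagerTest in txn_test.py).
with env.begin(write=True) as txn:
    txn.put(b'foo', b'123')

with env.begin() as txn:             # read-only transaction
    assert txn.get(b'foo') == b'123'
    assert txn.get(b'missing', default=b'fallback') == b'fallback'

# Nested (child) write transaction, as in InitTest.test_parent:
parent = lmdb.Transaction(env, write=True)
child = lmdb.Transaction(env, write=True, parent=parent)
child.put(b'foo', b'456')
child.abort()                        # child's changes are rolled back
assert parent.get(b'foo') == b'123'
parent.abort()

env.close()
```

Aborting the child leaves the parent's view untouched, while committing it would have made the change visible to the parent — the same pair of outcomes InitTest.test_parent asserts.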