├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE.md ├── MANIFEST.in ├── README.rst ├── docs ├── Makefile ├── _static │ └── .empty ├── api │ ├── backup.rst │ ├── database.rst │ ├── index.rst │ ├── interfaces.rst │ └── options.rst ├── changelog.rst ├── conf.py ├── index.rst ├── installation.rst └── tutorial │ └── index.rst ├── rocksdb ├── __init__.py ├── _rocksdb.pyx ├── backup.pxd ├── cache.pxd ├── comparator.pxd ├── cpp │ ├── comparator_wrapper.hpp │ ├── filter_policy_wrapper.hpp │ ├── memtable_factories.hpp │ ├── merge_operator_wrapper.hpp │ ├── slice_transform_wrapper.hpp │ ├── utils.hpp │ └── write_batch_iter_helper.hpp ├── db.pxd ├── env.pxd ├── errors.py ├── filter_policy.pxd ├── interfaces.py ├── iterator.pxd ├── logger.pxd ├── memtablerep.pxd ├── merge_operator.pxd ├── merge_operators.py ├── options.pxd ├── slice_.pxd ├── slice_transform.pxd ├── snapshot.pxd ├── status.pxd ├── std_memory.pxd ├── table_factory.pxd ├── tests │ ├── __init__.py │ ├── test_db.py │ ├── test_memtable.py │ └── test_options.py └── universal_compaction.pxd ├── setup.cfg ├── setup.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | docs/_build 3 | .pytest_cache 4 | .eggs/ 5 | .tox/ 6 | *.egg-info/ 7 | *.pyc 8 | *.so 9 | __pycache__ 10 | rocksdb/_rocksdb.cpp 11 | 12 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: trusty 3 | language: generic 4 | services: 5 | - docker 6 | 7 | cache: 8 | directories: 9 | - ~/.cache/pip 10 | 11 | install: 12 | docker build . -t ci-image; 13 | script: 14 | docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src ci-image:latest tox -e ${TOXENV} ; 15 | env: 16 | - TOXENV=py27 17 | - TOXENV=py36 18 | - TOXENV=docs 19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | ENV SRC /home/tester/src 3 | ENV DEBIAN_FRONTEND noninteractive 4 | 5 | RUN apt-get update -y && apt-get install -qy \ 6 | locales \ 7 | git \ 8 | wget \ 9 | python \ 10 | python3 \ 11 | python-dev \ 12 | python3-dev \ 13 | python-pip \ 14 | librocksdb-dev \ 15 | libsnappy-dev \ 16 | zlib1g-dev \ 17 | libbz2-dev \ 18 | liblz4-dev \ 19 | && rm -rf /var/lib/apt/lists/* 20 | 21 | #NOTE(sileht): really no utf-8 in 2017 !? 22 | ENV LANG en_US.UTF-8 23 | RUN update-locale 24 | RUN locale-gen $LANG 25 | 26 | #NOTE(sileht): Upgrade python dev tools 27 | RUN pip install -U pip tox virtualenv 28 | 29 | RUN groupadd --gid 2000 tester 30 | RUN useradd --uid 2000 --gid 2000 --create-home --shell /bin/bash tester 31 | USER tester 32 | 33 | WORKDIR $SRC 34 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Stephan Hofmockel 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 
9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, this 11 | list of conditions and the following disclaimer in the documentation and/or 12 | other materials provided with the distribution. 13 | 14 | * Neither the name of the author nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 22 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 25 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include rocksdb/cpp/*.hpp 2 | recursive-include rocksdb *.pxd 3 | recursive-include rocksdb *.pyx 4 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | |conda-badge| 2 | 3 | .. |conda-badge| image:: https://anaconda.org/conda-forge/python-rocksdb/badges/installer/conda.svg 4 | :target: https://anaconda.org/conda-forge/python-rocksdb 5 | 6 | Note 7 | ========= 8 | The original pyrocksdb (https://pypi.python.org/pypi/pyrocksdb/0.4) has not been updated for a long time. I have updated pyrocksdb to support the latest rocksdb. Please open issues on GitHub if you have any problems. 9 | 10 | News (2019/04/18) 11 | ========= 12 | Currently I am refactoring the code, and more features like TTL are coming soon. Installation with cmake will also become much easier. 13 | 14 | News (2019/04/19) 15 | ========= 16 | I have created a new branch (https://github.com/twmht/python-rocksdb/tree/pybind11) which already provides the basic functions (`put`, `get` and `delete`), and its installation is much easier! You can try it if you encounter any installation issues with the current version of `python-rocksdb`. 17 | 18 | The branch is under development and will be released to PyPI after I migrate most of the existing features. 19 | 20 | pyrocksdb 21 | ========= 22 | 23 | Python bindings for RocksDB. 24 | See http://python-rocksdb.readthedocs.io/en/latest/ for a more comprehensive install and usage description. 25 | 26 | 27 | Quick Install 28 | ------------- 29 | 30 | Quick install for Debian/Ubuntu-like Linux distributions. 31 | 32 | .. code-block:: bash 33 | 34 | $ apt-get install build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev 35 | $ git clone https://github.com/facebook/rocksdb.git 36 | $ cd rocksdb 37 | $ mkdir build && cd build 38 | $ cmake .. 39 | $ make 40 | $ cd ..
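$ # the exports below let the compiler and linker find the rocksdb headers and the library built above; skip them if librocksdb is already installed system-wide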
41 | $ export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}${CPLUS_INCLUDE_PATH:+:}`pwd`/include/ 42 | $ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}${LD_LIBRARY_PATH:+:}`pwd`/build/ 43 | $ export LIBRARY_PATH=${LIBRARY_PATH}${LIBRARY_PATH:+:}`pwd`/build/ 44 | 45 | $ apt-get install python-virtualenv python-dev 46 | $ virtualenv pyrocks_test 47 | $ cd pyrocks_test 48 | $ . bin/activate 49 | $ pip install python-rocksdb 50 | 51 | 52 | Quick Usage Guide 53 | ----------------- 54 | 55 | .. code-block:: pycon 56 | 57 | >>> import rocksdb 58 | >>> db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True)) 59 | >>> db.put(b'a', b'data') 60 | >>> print(db.get(b'a')) 61 | b'data' 62 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/rocksdb.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/rocksdb.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/rocksdb" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/rocksdb" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /docs/_static/.empty: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twmht/python-rocksdb/75eb0ad0841f7272d9605ceac999cb155a65954c/docs/_static/.empty -------------------------------------------------------------------------------- /docs/api/backup.rst: -------------------------------------------------------------------------------- 1 | Backup and Restore 2 | ****************** 3 | 4 | BackupEngine 5 | ============ 6 | 7 | .. py:class:: rocksdb.BackupEngine 8 | 9 | .. 
py:method:: __init__(backup_dir) 10 | 11 | Creates a object to manage backup of a single database. 12 | 13 | :param unicode backup_dir: Where to keep the backup files. 14 | Has to be different than db.db_name. 15 | For example db.db_name + '/backups'. 16 | 17 | .. py:method:: create_backup(db, flush_before_backup=False) 18 | 19 | Triggers the creation of a backup. 20 | 21 | :param db: Database object to backup. 22 | :type db: :py:class:`rocksdb.DB` 23 | 24 | :param bool flush_before_backup: If ``True`` the current memtable is flushed. 25 | 26 | .. py:method:: restore_backup(backup_id, db_dir, wal_dir) 27 | 28 | Restores the backup from the given id. 29 | 30 | :param int backup_id: id of the backup to restore. 31 | :param unicode db_dir: Target directory to restore backup. 32 | :param unicode wal_dir: Target directory to restore backuped WAL files. 33 | 34 | .. py:method:: restore_latest_backup(db_dir, wal_dir) 35 | 36 | Restores the latest backup. 37 | 38 | :param unicode db_dir: see :py:meth:`restore_backup` 39 | :param unicode wal_dir: see :py:meth:`restore_backup` 40 | 41 | .. py:method:: stop_backup() 42 | 43 | Can be called from another thread to stop the current backup process. 44 | 45 | .. py:method:: purge_old_backups(num_backups_to_keep) 46 | 47 | Deletes all backups (oldest first) until "num_backups_to_keep" are left. 48 | 49 | :param int num_backups_to_keep: Number of backupfiles to keep. 50 | 51 | .. py:method:: delete_backup(backup_id) 52 | 53 | :param int backup_id: Delete the backup with the given id. 54 | 55 | .. py:method:: get_backup_info() 56 | 57 | Returns information about all backups. 58 | 59 | It returns a list of dict's where each dict as the following keys. 60 | 61 | ``backup_id`` 62 | (int): id of this backup. 63 | 64 | ``timestamp`` 65 | (int): Seconds since epoch, when the backup was created. 66 | 67 | ``size`` 68 | (int): Size in bytes of the backup. 69 | -------------------------------------------------------------------------------- /docs/api/database.rst: -------------------------------------------------------------------------------- 1 | Database interactions 2 | ********************* 3 | 4 | Database object 5 | =============== 6 | 7 | .. py:class:: rocksdb.DB 8 | 9 | .. py:method:: __init__(db_name, Options opts, read_only=False) 10 | 11 | :param unicode db_name: Name of the database to open 12 | :param opts: Options for this specific database 13 | :type opts: :py:class:`rocksdb.Options` 14 | :param bool read_only: If ``True`` the database is opened read-only. 15 | All DB calls which modify data will raise an 16 | Exception. 17 | 18 | 19 | .. py:method:: put(key, value, sync=False, disable_wal=False) 20 | 21 | Set the database entry for "key" to "value". 22 | 23 | :param bytes key: Name for this entry 24 | :param bytes value: Data for this entry 25 | :param bool sync: 26 | If ``True``, the write will be flushed from the operating system 27 | buffer cache (by calling WritableFile::Sync()) before the write 28 | is considered complete. If this flag is true, writes will be 29 | slower. 30 | 31 | If this flag is ``False``, and the machine crashes, some recent 32 | writes may be lost. Note that if it is just the process that 33 | crashes (i.e., the machine does not reboot), no writes will be 34 | lost even if ``sync == False``. 35 | 36 | In other words, a DB write with ``sync == False`` has similar 37 | crash semantics as the "write()" system call. 
A DB write 38 | with ``sync == True`` has similar crash semantics to a "write()" 39 | system call followed by "fdatasync()". 40 | 41 | :param bool disable_wal: 42 | If ``True``, writes will not first go to the write ahead log, 43 | and the write may got lost after a crash. 44 | 45 | .. py:method:: delete(key, sync=False, disable_wal=False) 46 | 47 | Remove the database entry for "key". 48 | 49 | :param bytes key: Name to delete 50 | :param sync: See :py:meth:`rocksdb.DB.put` 51 | :param disable_wal: See :py:meth:`rocksdb.DB.put` 52 | :raises rocksdb.errors.NotFound: If the key did not exists 53 | 54 | .. py:method:: merge(key, value, sync=False, disable_wal=False) 55 | 56 | Merge the database entry for "key" with "value". 57 | The semantics of this operation is determined by the user provided 58 | merge_operator when opening DB. 59 | 60 | See :py:meth:`rocksdb.DB.put` for the parameters 61 | 62 | :raises: 63 | :py:exc:`rocksdb.errors.NotSupported` if this is called and 64 | no :py:attr:`rocksdb.Options.merge_operator` was set at creation 65 | 66 | 67 | .. py:method:: write(batch, sync=False, disable_wal=False) 68 | 69 | Apply the specified updates to the database. 70 | 71 | :param rocksdb.WriteBatch batch: Batch to apply 72 | :param sync: See :py:meth:`rocksdb.DB.put` 73 | :param disable_wal: See :py:meth:`rocksdb.DB.put` 74 | 75 | .. py:method:: get(key, verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all") 76 | 77 | :param bytes key: Name to get 78 | 79 | :param bool verify_checksums: 80 | If ``True``, all data read from underlying storage will be 81 | verified against corresponding checksums. 82 | 83 | :param bool fill_cache: 84 | Should the "data block", "index block" or "filter block" 85 | read for this iteration be cached in memory? 86 | Callers may wish to set this field to ``False`` for bulk scans. 87 | 88 | :param snapshot: 89 | If not ``None``, read as of the supplied snapshot 90 | (which must belong to the DB that is being read and which must 91 | not have been released). Is it ``None`` a implicit snapshot of the 92 | state at the beginning of this read operation is used 93 | :type snapshot: :py:class:`rocksdb.Snapshot` 94 | 95 | :param string read_tier: 96 | Specify if this read request should process data that ALREADY 97 | resides on a particular cache. If the required data is not 98 | found at the specified cache, 99 | then :py:exc:`rocksdb.errors.Incomplete` is raised. 100 | 101 | | Use ``all`` if a fetch from disk is allowed. 102 | | Use ``cache`` if only data from cache is allowed. 103 | 104 | :returns: ``None`` if not found, else the value for this key 105 | 106 | .. py:method:: multi_get(keys, verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all") 107 | 108 | :param keys: Keys to fetch 109 | :type keys: list of bytes 110 | 111 | For the other params see :py:meth:`rocksdb.DB.get` 112 | 113 | :returns: 114 | A ``dict`` where the value is either ``bytes`` or ``None`` if not found 115 | 116 | :raises: If the fetch for a single key fails 117 | 118 | .. note:: 119 | keys will not be "de-duplicated". 120 | Duplicate keys will return duplicate values in order. 121 | 122 | .. py:method:: key_may_exist(key, fetch=False, verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all") 123 | 124 | If the key definitely does not exist in the database, then this method 125 | returns ``False``, else ``True``. If the caller wants to obtain value 126 | when the key is found in memory, fetch should be set to ``True``. 
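A minimal sketch of how this might look (assuming ``db`` was opened as in the quick usage guide; the key is illustrative only)::

    exists, value = db.key_may_exist(b'a', fetch=True)
    if exists and value is not None:
        pass  # value was already available in memory
    elif exists:
        pass  # key probably exists, but a regular db.get(b'a') is needed for the value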
127 | This check is potentially lighter-weight than invoking DB::get(). 128 | One way to make this lighter weight is to avoid doing any IOs. 129 | 130 | :param bytes key: Key to check 131 | :param bool fetch: Obtain also the value if found 132 | 133 | For the other params see :py:meth:`rocksdb.DB.get` 134 | 135 | :returns: 136 | * ``(True, None)`` if key is found but value not in memory 137 | * ``(True, None)`` if key is found and ``fetch=False`` 138 | * ``(True, )`` if key is found and value in memory and ``fetch=True`` 139 | * ``(False, None)`` if key is not found 140 | 141 | .. py:method:: iterkeys(verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all") 142 | 143 | Iterate over the keys 144 | 145 | For other params see :py:meth:`rocksdb.DB.get` 146 | 147 | :returns: 148 | A iterator object which is not valid yet. 149 | Call first one of the seek methods of the iterator to position it 150 | 151 | :rtype: :py:class:`rocksdb.BaseIterator` 152 | 153 | .. py:method:: itervalues(verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all") 154 | 155 | Iterate over the values 156 | 157 | For other params see :py:meth:`rocksdb.DB.get` 158 | 159 | :returns: 160 | A iterator object which is not valid yet. 161 | Call first one of the seek methods of the iterator to position it 162 | 163 | :rtype: :py:class:`rocksdb.BaseIterator` 164 | 165 | .. py:method:: iteritems(verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all") 166 | 167 | Iterate over the items 168 | 169 | For other params see :py:meth:`rocksdb.DB.get` 170 | 171 | :returns: 172 | A iterator object which is not valid yet. 173 | Call first one of the seek methods of the iterator to position it 174 | 175 | :rtype: :py:class:`rocksdb.BaseIterator` 176 | 177 | .. py:method:: snapshot() 178 | 179 | Return a handle to the current DB state. 180 | Iterators created with this handle will all observe a stable snapshot 181 | of the current DB state. 182 | 183 | :rtype: :py:class:`rocksdb.Snapshot` 184 | 185 | 186 | .. py:method:: get_property(prop) 187 | 188 | DB implementations can export properties about their state 189 | via this method. If "property" is a valid property understood by this 190 | DB implementation, a byte string with its value is returned. 191 | Otherwise ``None`` 192 | 193 | Valid property names include: 194 | 195 | * ``b"rocksdb.num-files-at-level"``: return the number of files at level , 196 | where is an ASCII representation of a level number (e.g. "0"). 197 | 198 | * ``b"rocksdb.stats"``: returns a multi-line byte string that describes statistics 199 | about the internal operation of the DB. 200 | 201 | * ``b"rocksdb.sstables"``: returns a multi-line byte string that describes all 202 | of the sstables that make up the db contents. 203 | 204 | * ``b"rocksdb.num-immutable-mem-table"``: Number of immutable mem tables. 205 | 206 | * ``b"rocksdb.mem-table-flush-pending"``: Returns ``1`` if mem table flush is pending, otherwise ``0``. 207 | 208 | * ``b"rocksdb.compaction-pending"``: Returns ``1`` if a compaction is pending, otherweise ``0``. 209 | 210 | * ``b"rocksdb.background-errors"``: Returns accumulated background errors encountered. 211 | 212 | * ``b"rocksdb.cur-size-active-mem-table"``: Returns current size of the active memtable. 213 | 214 | .. py:method:: get_live_files_metadata() 215 | 216 | Returns a list of all table files. 217 | 218 | It returns a list of dict's were each dict has the following keys. 
219 | 220 | ``name`` 221 | Name of the file 222 | 223 | ``level`` 224 | Level at which this file resides 225 | 226 | ``size`` 227 | File size in bytes 228 | 229 | ``smallestkey`` 230 | Smallest user defined key in the file 231 | 232 | ``largestkey`` 233 | Largest user defined key in the file 234 | 235 | ``smallest_seqno`` 236 | smallest seqno in file 237 | 238 | ``largest_seqno`` 239 | largest seqno in file 240 | 241 | .. py:method:: compact_range(begin=None, end=None, ** options) 242 | 243 | Compact the underlying storage for the key range [begin,end]. 244 | The actual compaction interval might be superset of [begin, end]. 245 | In particular, deleted and overwritten versions are discarded, 246 | and the data is rearranged to reduce the cost of operations 247 | needed to access the data. 248 | 249 | This operation should typically only be invoked by users who understand 250 | the underlying implementation. 251 | 252 | ``begin == None`` is treated as a key before all keys in the database. 253 | ``end == None`` is treated as a key after all keys in the database. 254 | Therefore the following call will compact the entire database: ``db.compact_range()``. 255 | 256 | Note that after the entire database is compacted, all data are pushed 257 | down to the last level containing any data. If the total data size 258 | after compaction is reduced, that level might not be appropriate for 259 | hosting all the files. In this case, client could set change_level 260 | to ``True``, to move the files back to the minimum level capable of holding 261 | the data set or a given level (specified by non-negative target_level). 262 | 263 | :param bytes begin: Key where to start compaction. 264 | If ``None`` start at the beginning of the database. 265 | :param bytes end: Key where to end compaction. 266 | If ``None`` end at the last key of the database. 267 | :param bool change_level: If ``True``, compacted files will be moved to 268 | the minimum level capable of holding the data 269 | or given level (specified by non-negative target_level). 270 | If ``False`` you may end with a bigger level 271 | than configured. Default is ``False``. 272 | :param int target_level: If change_level is true and target_level have non-negative 273 | value, compacted files will be moved to target_level. 274 | Default is ``-1``. 275 | :param string bottommost_level_compaction: 276 | For level based compaction, we can configure if we want to 277 | skip/force bottommost level compaction. By default level based 278 | compaction will only compact the bottommost level if there is a 279 | compaction filter. It can be set to the following values. 280 | 281 | ``skip`` 282 | Skip bottommost level compaction 283 | 284 | ``if_compaction_filter`` 285 | Only compact bottommost level if there is a compaction filter. 286 | This is the default. 287 | 288 | ``force`` 289 | Always compact bottommost level 290 | 291 | .. py:attribute:: options 292 | 293 | Returns the associated :py:class:`rocksdb.Options` instance. 294 | 295 | .. note:: 296 | 297 | Changes to this object have no effect anymore. 298 | Consider this as read-only 299 | 300 | Iterator 301 | ======== 302 | 303 | .. py:class:: rocksdb.BaseIterator 304 | 305 | Base class for all iterators in this module. After creation a iterator is 306 | invalid. Call one of the seek methods first before starting iteration 307 | 308 | .. py:method:: seek_to_first() 309 | 310 | Position at the first key in the source 311 | 312 | .. 
py:method:: seek_to_last() 313 | 314 | Position at the last key in the source 315 | 316 | .. py:method:: seek(key) 317 | 318 | :param bytes key: Position at the first key in the source that at or past 319 | 320 | Methods to support the python iterator protocol 321 | 322 | .. py:method:: __iter__() 323 | .. py:method:: __next__() 324 | .. py:method:: __reversed__() 325 | 326 | Snapshot 327 | ======== 328 | 329 | .. py:class:: rocksdb.Snapshot 330 | 331 | Opaque handler for a single Snapshot. 332 | Snapshot is released if nobody holds a reference on it. 333 | Retrieved via :py:meth:`rocksdb.DB.snapshot` 334 | 335 | WriteBatch 336 | ========== 337 | 338 | .. py:class:: rocksdb.WriteBatch 339 | 340 | WriteBatch holds a collection of updates to apply atomically to a DB. 341 | 342 | The updates are applied in the order in which they are added 343 | to the WriteBatch. For example, the value of "key" will be "v3" 344 | after the following batch is written:: 345 | 346 | batch = rocksdb.WriteBatch() 347 | batch.put(b"key", b"v1") 348 | batch.delete(b"key") 349 | batch.put(b"key", b"v2") 350 | batch.put(b"key", b"v3") 351 | 352 | .. py:method:: __init__(data=None) 353 | 354 | Creates a WriteBatch. 355 | 356 | :param bytes data: 357 | A serialized version of a previous WriteBatch. As retrieved 358 | from a previous .data() call. If ``None`` a empty WriteBatch is 359 | generated 360 | 361 | .. py:method:: put(key, value) 362 | 363 | Store the mapping "key->value" in the database. 364 | 365 | :param bytes key: Name of the entry to store 366 | :param bytes value: Data of this entry 367 | 368 | .. py:method:: merge(key, value) 369 | 370 | Merge "value" with the existing value of "key" in the database. 371 | 372 | :param bytes key: Name of the entry to merge 373 | :param bytes value: Data to merge 374 | 375 | .. py:method:: delete(key) 376 | 377 | If the database contains a mapping for "key", erase it. Else do nothing. 378 | 379 | :param bytes key: Key to erase 380 | 381 | .. py:method:: clear() 382 | 383 | Clear all updates buffered in this batch. 384 | 385 | .. note:: 386 | Don't call this method if there is an outstanding iterator. 387 | Calling :py:meth:`rocksdb.WriteBatch.clear()` with outstanding 388 | iterator, leads to SEGFAULT. 389 | 390 | .. py:method:: data() 391 | 392 | Retrieve the serialized version of this batch. 393 | 394 | :rtype: ``bytes`` 395 | 396 | .. py:method:: count() 397 | 398 | Returns the number of updates in the batch 399 | 400 | :rtype: int 401 | 402 | .. py:method:: __iter__() 403 | 404 | Returns an iterator over the current contents of the write batch. 405 | 406 | If you add new items to the batch, they are not visible for this 407 | iterator. Create a new one if you need to see them. 408 | 409 | .. note:: 410 | Calling :py:meth:`rocksdb.WriteBatch.clear()` on the write batch 411 | invalidates the iterator. Using a iterator where its corresponding 412 | write batch has been cleared, leads to SEGFAULT. 413 | 414 | :rtype: :py:class:`rocksdb.WriteBatchIterator` 415 | 416 | WriteBatchIterator 417 | ================== 418 | 419 | .. py:class:: rocksdb.WriteBatchIterator 420 | 421 | .. py:method:: __iter__() 422 | 423 | Returns self. 424 | 425 | .. py:method:: __next__() 426 | 427 | Returns the next item inside the corresponding write batch. 428 | The return value is a tuple of always size three. 429 | 430 | First item (Name of the operation): 431 | 432 | * ``"Put"`` 433 | * ``"Merge"`` 434 | * ``"Delete"`` 435 | 436 | Second item (key): 437 | Key for this operation. 
438 | 439 | Third item (value): 440 | The value for this operation. Empty for ``"Delete"``. 441 | 442 | Repair DB 443 | ========= 444 | 445 | .. py:function:: repair_db(db_name, opts) 446 | 447 | :param unicode db_name: Name of the database to open 448 | :param opts: Options for this specific database 449 | :type opts: :py:class:`rocksdb.Options` 450 | 451 | If a DB cannot be opened, you may attempt to call this method to 452 | resurrect as much of the contents of the database as possible. 453 | Some data may be lost, so be careful when calling this function 454 | on a database that contains important information. 455 | 456 | 457 | Errors 458 | ====== 459 | 460 | .. py:exception:: rocksdb.errors.NotFound 461 | .. py:exception:: rocksdb.errors.Corruption 462 | .. py:exception:: rocksdb.errors.NotSupported 463 | .. py:exception:: rocksdb.errors.InvalidArgument 464 | .. py:exception:: rocksdb.errors.RocksIOError 465 | .. py:exception:: rocksdb.errors.MergeInProgress 466 | .. py:exception:: rocksdb.errors.Incomplete 467 | 468 | 469 | -------------------------------------------------------------------------------- /docs/api/index.rst: -------------------------------------------------------------------------------- 1 | Python driver for RocksDB 2 | ========================= 3 | 4 | .. py:module:: rocksdb 5 | 6 | .. toctree:: 7 | 8 | Options 9 | Database 10 | Interfaces 11 | Backup 12 | -------------------------------------------------------------------------------- /docs/api/interfaces.rst: -------------------------------------------------------------------------------- 1 | Interfaces 2 | ********** 3 | 4 | Comparator 5 | ========== 6 | 7 | .. py:class:: rocksdb.interfaces.Comparator 8 | 9 | A Comparator object provides a total order across slices that are 10 | used as keys in an sstable or a database. A Comparator implementation 11 | must be thread-safe since rocksdb may invoke its methods concurrently 12 | from multiple threads. 13 | 14 | .. py:method:: compare(a, b) 15 | 16 | Three-way comparison. 17 | 18 | :param bytes a: First field to compare 19 | :param bytes b: Second field to compare 20 | :returns: * -1 if a < b 21 | * 0 if a == b 22 | * 1 if a > b 23 | :rtype: ``int`` 24 | 25 | .. py:method:: name() 26 | 27 | The name of the comparator. Used to check for comparator 28 | mismatches (i.e., a DB created with one comparator is 29 | accessed using a different comparator). 30 | 31 | The client of this package should switch to a new name whenever 32 | the comparator implementation changes in a way that will cause 33 | the relative ordering of any two keys to change. 34 | 35 | Names starting with "rocksdb." are reserved and should not be used 36 | by any clients of this package. 37 | 38 | :rtype: ``bytes`` 39 | 40 | Merge Operator 41 | ============== 42 | 43 | Essentially, a MergeOperator specifies the SEMANTICS of a merge, which only 44 | client knows. It could be numeric addition, list append, string 45 | concatenation, edit data structure, whatever. 46 | The library, on the other hand, is concerned with the exercise of this 47 | interface, at the right time (during get, iteration, compaction...) 48 | 49 | To use merge, the client needs to provide an object implementing one of 50 | the following interfaces: 51 | 52 | * AssociativeMergeOperator - for most simple semantics (always take 53 | two values, and merge them into one value, which is then put back 54 | into rocksdb). 55 | numeric addition and string concatenation are examples. 
56 | 57 | * MergeOperator - the generic class for all the more complex operations. 58 | One method (FullMerge) to merge a Put/Delete value with a merge operand. 59 | Another method (PartialMerge) that merges two operands together. 60 | This is especially useful if your key values have a complex structure but 61 | you would still like to support client-specific incremental updates. 62 | 63 | AssociativeMergeOperator is simpler to implement. 64 | MergeOperator is simply more powerful. 65 | 66 | See this page for more details 67 | https://github.com/facebook/rocksdb/wiki/Merge-Operator 68 | 69 | AssociativeMergeOperator 70 | ------------------------ 71 | 72 | .. py:class:: rocksdb.interfaces.AssociativeMergeOperator 73 | 74 | .. py:method:: merge(key, existing_value, value) 75 | 76 | Gives the client a way to express the read -> modify -> write semantics 77 | 78 | :param bytes key: The key that's associated with this merge operation 79 | :param bytes existing_value: The current value in the db. 80 | ``None`` indicates the key does not exist 81 | before this op 82 | :param bytes value: The value to update/merge the existing_value with 83 | 84 | :returns: ``True`` and the new value on success. 85 | All values passed in will be client-specific values. 86 | So if this method returns false, it is because client 87 | specified bad data or there was internal corruption. 88 | The client should assume that this will be treated as an 89 | error by the library. 90 | 91 | :rtype: ``(bool, bytes)`` 92 | 93 | .. py:method:: name() 94 | 95 | The name of the MergeOperator. Used to check for MergeOperator mismatches. 96 | For example a DB created with one MergeOperator is accessed using a 97 | different MergeOperator. 98 | 99 | :rtype: ``bytes`` 100 | 101 | MergeOperator 102 | ------------- 103 | 104 | .. py:class:: rocksdb.interfaces.MergeOperator 105 | 106 | .. py:method:: full_merge(key, existing_value, operand_list) 107 | 108 | Gives the client a way to express the read -> modify -> write semantics 109 | 110 | :param bytes key: The key that's associated with this merge operation. 111 | Client could multiplex the merge operator based on it 112 | if the key space is partitioned and different subspaces 113 | refer to different types of data which have different 114 | merge operation semantics 115 | 116 | :param bytes existing_value: The current value in the db. 117 | ``None`` indicates the key does not exist 118 | before this op 119 | 120 | :param operand_list: The sequence of merge operations to apply. 121 | :type operand_list: list of bytes 122 | 123 | :returns: ``True`` and the new value on success. 124 | All values passed in will be client-specific values. 125 | So if this method returns false, it is because client 126 | specified bad data or there was internal corruption. 127 | The client should assume that this will be treated as an 128 | error by the library. 129 | 130 | :rtype: ``(bool, bytes)`` 131 | 132 | .. py:method:: partial_merge(key, left_operand, right_operand) 133 | 134 | This function performs merge(left_op, right_op) 135 | when both the operands are themselves merge operation types 136 | that you would have passed to a DB::Merge() call in the same order. 137 | For example DB::Merge(key,left_op), followed by DB::Merge(key,right_op)). 
138 | 139 | PartialMerge should combine them into a single merge operation that is 140 | returned together with ``True`` 141 | This new value should be constructed such that a call to 142 | DB::Merge(key, new_value) would yield the same result as a call 143 | to DB::Merge(key, left_op) followed by DB::Merge(key, right_op). 144 | 145 | If it is impossible or infeasible to combine the two operations, 146 | return ``(False, None)`` The library will internally keep track of the 147 | operations, and apply them in the correct order once a base-value 148 | (a Put/Delete/End-of-Database) is seen. 149 | 150 | :param bytes key: the key that is associated with this merge operation. 151 | :param bytes left_operand: First operand to merge 152 | :param bytes right_operand: Second operand to merge 153 | :rtype: ``(bool, bytes)`` 154 | 155 | .. note:: 156 | 157 | Presently there is no way to differentiate between error/corruption 158 | and simply "return false". For now, the client should simply return 159 | false in any case it cannot perform partial-merge, regardless of reason. 160 | If there is corruption in the data, handle it in the FullMerge() function, 161 | and return false there. 162 | 163 | .. py:method:: name() 164 | 165 | The name of the MergeOperator. Used to check for MergeOperator mismatches. 166 | For example a DB created with one MergeOperator is accessed using a 167 | different MergeOperator. 168 | 169 | :rtype: ``bytes`` 170 | 171 | FilterPolicy 172 | ============ 173 | 174 | .. py:class:: rocksdb.interfaces.FilterPolicy 175 | 176 | .. py:method:: create_filter(keys) 177 | 178 | Create a bytestring which can act as a filter for keys. 179 | 180 | :param keys: list of keys (potentially with duplicates) 181 | that are ordered according to the user supplied 182 | comparator. 183 | :type keys: list of bytes 184 | 185 | :returns: A filter that summarizes keys 186 | :rtype: ``bytes`` 187 | 188 | .. py:method:: key_may_match(key, filter) 189 | 190 | Check if the key is maybe in the filter. 191 | 192 | :param bytes key: Key for a single entry inside the database 193 | :param bytes filter: Contains the data returned by a preceding call 194 | to create_filter on this class 195 | :returns: This method must return ``True`` if the key was in the list 196 | of keys passed to create_filter(). 197 | This method may return ``True`` or ``False`` if the key was 198 | not on the list, but it should aim to return ``False`` with 199 | a high probability. 200 | :rtype: ``bool`` 201 | 202 | 203 | .. py:method:: name() 204 | 205 | Return the name of this policy. Note that if the filter encoding 206 | changes in an incompatible way, the name returned by this method 207 | must be changed. Otherwise, old incompatible filters may be 208 | passed to methods of this type. 209 | 210 | :rtype: ``bytes`` 211 | 212 | 213 | SliceTransform 214 | ============== 215 | 216 | .. py:class:: rocksdb.interfaces.SliceTransform 217 | 218 | SliceTransform is currently used to implement the 'prefix-API' of rocksdb. 219 | https://github.com/facebook/rocksdb/wiki/Proposal-for-prefix-API 220 | 221 | .. py:method:: transform(src) 222 | 223 | :param bytes src: Full key to extract the prefix from. 224 | 225 | :returns: A tuple of two interges ``(offset, size)``. 226 | Where the first integer is the offset within the ``src`` 227 | and the second the size of the prefix after the offset. 228 | Which means the prefix is generted by ``src[offset:offset+size]`` 229 | 230 | :rtype: ``(int, int)`` 231 | 232 | 233 | .. 
py:method:: in_domain(src) 234 | 235 | Decide if a prefix can be extraced from ``src``. 236 | Only if this method returns ``True`` :py:meth:`transform` will be 237 | called. 238 | 239 | :param bytes src: Full key to check. 240 | :rtype: ``bool`` 241 | 242 | .. py:method:: in_range(prefix) 243 | 244 | Checks if prefix is a valid prefix 245 | 246 | :param bytes prefix: Prefix to check. 247 | :returns: ``True`` if ``prefix`` is a valid prefix. 248 | :rtype: ``bool`` 249 | 250 | .. py:method:: name() 251 | 252 | Return the name of this transformation. 253 | 254 | :rtype: ``bytes`` 255 | -------------------------------------------------------------------------------- /docs/api/options.rst: -------------------------------------------------------------------------------- 1 | Options creation 2 | **************** 3 | 4 | Options object 5 | ============== 6 | 7 | 8 | .. py:class:: rocksdb.Options 9 | 10 | .. IMPORTANT:: 11 | 12 | The default values mentioned here, describe the values of the 13 | C++ library only. This wrapper does not set any default value 14 | itself. So as soon as the rocksdb developers change a default value 15 | this document could be outdated. So if you really depend on a default 16 | value, double check it with the according version of the C++ library. 17 | 18 | | Most recent default values should be here 19 | | https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h 20 | | https://github.com/facebook/rocksdb/blob/master/util/options.cc 21 | 22 | .. py:method:: __init__(**kwargs) 23 | 24 | All options mentioned below can also be passed as keyword-arguments in 25 | the constructor. For example:: 26 | 27 | import rocksdb 28 | 29 | opts = rocksdb.Options(create_if_missing=True) 30 | # is the same as 31 | opts = rocksdb.Options() 32 | opts.create_if_missing = True 33 | 34 | 35 | .. py:attribute:: create_if_missing 36 | 37 | If ``True``, the database will be created if it is missing. 38 | 39 | | *Type:* ``bool`` 40 | | *Default:* ``False`` 41 | 42 | .. py:attribute:: error_if_exists 43 | 44 | If ``True``, an error is raised if the database already exists. 45 | 46 | | *Type:* ``bool`` 47 | | *Default:* ``False`` 48 | 49 | 50 | .. py:attribute:: paranoid_checks 51 | 52 | If ``True``, the implementation will do aggressive checking of the 53 | data it is processing and will stop early if it detects any 54 | errors. This may have unforeseen ramifications: for example, a 55 | corruption of one DB entry may cause a large number of entries to 56 | become unreadable or for the entire DB to become unopenable. 57 | If any of the writes to the database fails (Put, Delete, Merge, Write), 58 | the database will switch to read-only mode and fail all other 59 | Write operations. 60 | 61 | | *Type:* ``bool`` 62 | | *Default:* ``True`` 63 | 64 | .. py:attribute:: write_buffer_size 65 | 66 | Amount of data to build up in memory (backed by an unsorted log 67 | on disk) before converting to a sorted on-disk file. 68 | 69 | Larger values increase performance, especially during bulk loads. 70 | Up to max_write_buffer_number write buffers may be held in memory 71 | at the same time, so you may wish to adjust this parameter to control 72 | memory usage. Also, a larger write buffer will result in a longer recovery 73 | time the next time the database is opened. 74 | 75 | | *Type:* ``int`` 76 | | *Default:* ``4194304`` 77 | 78 | .. py:attribute:: max_write_buffer_number 79 | 80 | The maximum number of write buffers that are built up in memory. 
81 | The default is 2, so that when 1 write buffer is being flushed to 82 | storage, new writes can continue to the other write buffer. 83 | 84 | | *Type:* ``int`` 85 | | *Default:* ``2`` 86 | 87 | .. py:attribute:: min_write_buffer_number_to_merge 88 | 89 | The minimum number of write buffers that will be merged together 90 | before writing to storage. If set to 1, then 91 | all write buffers are fushed to L0 as individual files and this increases 92 | read amplification because a get request has to check in all of these 93 | files. Also, an in-memory merge may result in writing lesser 94 | data to storage if there are duplicate records in each of these 95 | individual write buffers. 96 | 97 | | *Type:* ``int`` 98 | | *Default:* ``1`` 99 | 100 | .. py:attribute:: max_open_files 101 | 102 | Number of open files that can be used by the DB. You may need to 103 | increase this if your database has a large working set. Value -1 means 104 | files opened are always kept open. You can estimate number of 105 | files based on target_file_size_base and target_file_size_multiplier 106 | for level-based compaction. 107 | For universal-style compaction, you can usually set it to -1. 108 | 109 | | *Type:* ``int`` 110 | | *Default:* ``5000`` 111 | 112 | .. py:attribute:: compression 113 | 114 | Compress blocks using the specified compression algorithm. 115 | This parameter can be changed dynamically. 116 | 117 | | *Type:* Member of :py:class:`rocksdb.CompressionType` 118 | | *Default:* :py:attr:`rocksdb.CompressionType.snappy_compression` 119 | 120 | .. py:attribute:: num_levels 121 | 122 | Number of levels for this database 123 | 124 | | *Type:* ``int`` 125 | | *Default:* ``7`` 126 | 127 | 128 | .. py:attribute:: level0_file_num_compaction_trigger 129 | 130 | Number of files to trigger level-0 compaction. A value <0 means that 131 | level-0 compaction will not be triggered by number of files at all. 132 | 133 | | *Type:* ``int`` 134 | | *Default:* ``4`` 135 | 136 | .. py:attribute:: level0_slowdown_writes_trigger 137 | 138 | Soft limit on number of level-0 files. We start slowing down writes at this 139 | point. A value <0 means that no writing slow down will be triggered by 140 | number of files in level-0. 141 | 142 | | *Type:* ``int`` 143 | | *Default:* ``20`` 144 | 145 | .. py:attribute:: level0_stop_writes_trigger 146 | 147 | Maximum number of level-0 files. We stop writes at this point. 148 | 149 | | *Type:* ``int`` 150 | | *Default:* ``24`` 151 | 152 | .. py:attribute:: max_mem_compaction_level 153 | 154 | Maximum level to which a new compacted memtable is pushed if it 155 | does not create overlap. We try to push to level 2 to avoid the 156 | relatively expensive level 0=>1 compactions and to avoid some 157 | expensive manifest file operations. We do not push all the way to 158 | the largest level since that can generate a lot of wasted disk 159 | space if the same key space is being repeatedly overwritten. 160 | 161 | | *Type:* ``int`` 162 | | *Default:* ``2`` 163 | 164 | 165 | .. py:attribute:: target_file_size_base 166 | 167 | | Target file size for compaction. 168 | | target_file_size_base is per-file size for level-1. 169 | | Target file size for level L can be calculated by 170 | | target_file_size_base * (target_file_size_multiplier ^ (L-1)). 171 | 172 | For example, if target_file_size_base is 2MB and 173 | target_file_size_multiplier is 10, then each file on level-1 will 174 | be 2MB, and each file on level 2 will be 20MB, 175 | and each file on level-3 will be 200MB. 
176 | 177 | | *Type:* ``int`` 178 | | *Default:* ``2097152`` 179 | 180 | .. py:attribute:: target_file_size_multiplier 181 | 182 | | by default target_file_size_multiplier is 1, which means 183 | | by default files in different levels will have similar size. 184 | 185 | | *Type:* ``int`` 186 | | *Default:* ``1`` 187 | 188 | .. py:attribute:: max_bytes_for_level_base 189 | 190 | Control maximum total data size for a level. 191 | *max_bytes_for_level_base* is the max total for level-1. 192 | Maximum number of bytes for level L can be calculated as 193 | (*max_bytes_for_level_base*) * (*max_bytes_for_level_multiplier* ^ (L-1)) 194 | For example, if *max_bytes_for_level_base* is 20MB, and if 195 | *max_bytes_for_level_multiplier* is 10, total data size for level-1 196 | will be 20MB, total file size for level-2 will be 200MB, 197 | and total file size for level-3 will be 2GB. 198 | 199 | | *Type:* ``int`` 200 | | *Default:* ``10485760`` 201 | 202 | .. py:attribute:: max_bytes_for_level_multiplier 203 | 204 | See :py:attr:`max_bytes_for_level_base` 205 | 206 | | *Type:* ``int`` 207 | | *Default:* ``10`` 208 | 209 | .. py:attribute:: max_bytes_for_level_multiplier_additional 210 | 211 | Different max-size multipliers for different levels. 212 | These are multiplied by max_bytes_for_level_multiplier to arrive 213 | at the max-size of each level. 214 | 215 | | *Type:* ``[int]`` 216 | | *Default:* ``[1, 1, 1, 1, 1, 1, 1]`` 217 | 218 | .. py:attribute:: max_compaction_bytes 219 | 220 | We try to limit number of bytes in one compaction to be lower than this 221 | threshold. But it's not guaranteed. 222 | Value 0 will be sanitized. 223 | 224 | | *Type:* ``int`` 225 | | *Default:* ``target_file_size_base * 25`` 226 | 227 | .. py:attribute:: use_fsync 228 | 229 | If true, then every store to stable storage will issue a fsync. 230 | If false, then every store to stable storage will issue a fdatasync. 231 | This parameter should be set to true while storing data to 232 | filesystem like ext3 that can lose files after a reboot. 233 | 234 | | *Type:* ``bool`` 235 | | *Default:* ``False`` 236 | 237 | .. py:attribute:: db_log_dir 238 | 239 | This specifies the info LOG dir. 240 | If it is empty, the log files will be in the same dir as data. 241 | If it is non empty, the log files will be in the specified dir, 242 | and the db data dir's absolute path will be used as the log file 243 | name's prefix. 244 | 245 | | *Type:* ``unicode`` 246 | | *Default:* ``""`` 247 | 248 | .. py:attribute:: wal_dir 249 | 250 | This specifies the absolute dir path for write-ahead logs (WAL). 251 | If it is empty, the log files will be in the same dir as data, 252 | dbname is used as the data dir by default. 253 | If it is non empty, the log files will be in kept the specified dir. 254 | When destroying the db, all log files in wal_dir and the dir itself is deleted 255 | 256 | | *Type:* ``unicode`` 257 | | *Default:* ``""`` 258 | 259 | .. py:attribute:: delete_obsolete_files_period_micros 260 | 261 | The periodicity when obsolete files get deleted. The default 262 | value is 6 hours. The files that get out of scope by compaction 263 | process will still get automatically delete on every compaction, 264 | regardless of this setting 265 | 266 | | *Type:* ``int`` 267 | | *Default:* ``21600000000`` 268 | 269 | .. 
py:attribute:: max_background_compactions 270 | 271 | Maximum number of concurrent background jobs, submitted to 272 | the default LOW priority thread pool 273 | 274 | | *Type:* ``int`` 275 | | *Default:* ``1`` 276 | 277 | .. py:attribute:: max_background_flushes 278 | 279 | Maximum number of concurrent background memtable flush jobs, submitted to 280 | the HIGH priority thread pool. 281 | By default, all background jobs (major compaction and memtable flush) go 282 | to the LOW priority pool. If this option is set to a positive number, 283 | memtable flush jobs will be submitted to the HIGH priority pool. 284 | It is important when the same Env is shared by multiple db instances. 285 | Without a separate pool, long running major compaction jobs could 286 | potentially block memtable flush jobs of other db instances, leading to 287 | unnecessary Put stalls. 288 | 289 | | *Type:* ``int`` 290 | | *Default:* ``1`` 291 | 292 | .. py:attribute:: max_log_file_size 293 | 294 | Specify the maximal size of the info log file. If the log file 295 | is larger than `max_log_file_size`, a new info log file will 296 | be created. 297 | If max_log_file_size == 0, all logs will be written to one 298 | log file. 299 | 300 | | *Type:* ``int`` 301 | | *Default:* ``0`` 302 | 303 | .. py:attribute:: log_file_time_to_roll 304 | 305 | Time for the info log file to roll (in seconds). 306 | If specified with non-zero value, log file will be rolled 307 | if it has been active longer than `log_file_time_to_roll`. 308 | A value of ``0`` means disabled. 309 | 310 | | *Type:* ``int`` 311 | | *Default:* ``0`` 312 | 313 | .. py:attribute:: keep_log_file_num 314 | 315 | Maximal info log files to be kept. 316 | 317 | | *Type:* ``int`` 318 | | *Default:* ``1000`` 319 | 320 | .. py:attribute:: soft_rate_limit 321 | 322 | Puts are delayed 0-1 ms when any level has a compaction score that exceeds 323 | soft_rate_limit. This is ignored when == 0.0. 324 | CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not 325 | hold, RocksDB will set soft_rate_limit = hard_rate_limit. 326 | A value of ``0`` means disabled. 327 | 328 | | *Type:* ``float`` 329 | | *Default:* ``0`` 330 | 331 | .. py:attribute:: hard_rate_limit 332 | 333 | Puts are delayed 1ms at a time when any level has a compaction score that 334 | exceeds hard_rate_limit. This is ignored when <= 1.0. 335 | A value fo ``0`` means disabled. 336 | 337 | | *Type:* ``float`` 338 | | *Default:* ``0`` 339 | 340 | .. py:attribute:: rate_limit_delay_max_milliseconds 341 | 342 | Max time a put will be stalled when hard_rate_limit is enforced. If 0, then 343 | there is no limit. 344 | 345 | | *Type:* ``int`` 346 | | *Default:* ``1000`` 347 | 348 | .. py:attribute:: max_manifest_file_size 349 | 350 | manifest file is rolled over on reaching this limit. 351 | The older manifest file be deleted. 352 | The default value is MAX_INT so that roll-over does not take place. 353 | 354 | | *Type:* ``int`` 355 | | *Default:* ``(2**64) - 1`` 356 | 357 | .. py:attribute:: table_cache_numshardbits 358 | 359 | Number of shards used for table cache. 360 | 361 | | *Type:* ``int`` 362 | | *Default:* ``4`` 363 | 364 | .. py:attribute:: arena_block_size 365 | 366 | size of one block in arena memory allocation. 367 | If <= 0, a proper value is automatically calculated (usually 1/10 of 368 | writer_buffer_size). 369 | 370 | | *Type:* ``int`` 371 | | *Default:* ``0`` 372 | 373 | .. py:attribute:: disable_auto_compactions 374 | 375 | Disable automatic compactions. 
Manual compactions can still 376 | be issued on this database. 377 | 378 | | *Type:* ``bool`` 379 | | *Default:* ``False`` 380 | 381 | .. py:attribute:: wal_ttl_seconds, wal_size_limit_mb 382 | 383 | The following two fields affect how archived logs will be deleted. 384 | 385 | 1. If both set to 0, logs will be deleted asap and will not get into 386 | the archive. 387 | 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0, 388 | WAL files will be checked every 10 min and if total size is greater 389 | then wal_size_limit_mb, they will be deleted starting with the 390 | earliest until size_limit is met. All empty files will be deleted. 391 | 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then 392 | WAL files will be checked every wal_ttl_secondsi / 2 and those that 393 | are older than wal_ttl_seconds will be deleted. 394 | 4. If both are not 0, WAL files will be checked every 10 min and both 395 | checks will be performed with ttl being first. 396 | 397 | | *Type:* ``int`` 398 | | *Default:* ``0`` 399 | 400 | .. py:attribute:: manifest_preallocation_size 401 | 402 | Number of bytes to preallocate (via fallocate) the manifest 403 | files. Default is 4mb, which is reasonable to reduce random IO 404 | as well as prevent overallocation for mounts that preallocate 405 | large amounts of data (such as xfs's allocsize option). 406 | 407 | | *Type:* ``int`` 408 | | *Default:* ``4194304`` 409 | 410 | .. py:attribute:: purge_redundant_kvs_while_flush 411 | 412 | Purge duplicate/deleted keys when a memtable is flushed to storage. 413 | 414 | | *Type:* ``bool`` 415 | | *Default:* ``True`` 416 | 417 | 418 | .. py:attribute:: allow_mmap_reads 419 | 420 | Allow the OS to mmap file for reading sst tables 421 | 422 | | *Type:* ``bool`` 423 | | *Default:* ``True`` 424 | 425 | .. py:attribute:: allow_mmap_writes 426 | 427 | Allow the OS to mmap file for writing 428 | 429 | | *Type:* ``bool`` 430 | | *Default:* ``False`` 431 | 432 | .. py:attribute:: is_fd_close_on_exec 433 | 434 | Disable child process inherit open files 435 | 436 | | *Type:* ``bool`` 437 | | *Default:* ``True`` 438 | 439 | .. py:attribute:: skip_log_error_on_recovery 440 | 441 | Skip log corruption error on recovery 442 | (If client is ok with losing most recent changes) 443 | 444 | | *Type:* ``bool`` 445 | | *Default:* ``False`` 446 | 447 | .. py:attribute:: stats_dump_period_sec 448 | 449 | If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec 450 | 451 | | *Type:* ``int`` 452 | | *Default:* ``3600`` 453 | 454 | .. py:attribute:: advise_random_on_open 455 | 456 | If set true, will hint the underlying file system that the file 457 | access pattern is random, when a sst file is opened. 458 | 459 | | *Type:* ``bool`` 460 | | *Default:* ``True`` 461 | 462 | .. py:attribute:: use_adaptive_mutex 463 | 464 | Use adaptive mutex, which spins in the user space before resorting 465 | to kernel. This could reduce context switch when the mutex is not 466 | heavily contended. However, if the mutex is hot, we could end up 467 | wasting spin time. 468 | 469 | | *Type:* ``bool`` 470 | | *Default:* ``False`` 471 | 472 | .. py:attribute:: bytes_per_sync 473 | 474 | Allows OS to incrementally sync files to disk while they are being 475 | written, asynchronously, in the background. 476 | Issue one request for every bytes_per_sync written. 0 turns it off. 477 | 478 | | *Type:* ``int`` 479 | | *Default:* ``0`` 480 | 481 | 482 | .. py:attribute:: compaction_style 483 | 484 | The compaction style. 
Could be set to ``"level"`` to use level-style 485 | compaction. For universal-style compaction use ``"universal"``. For 486 | FIFO compaction use ``"fifo"``. If no compaction style use ``"none"``. 487 | 488 | | *Type:* ``string`` 489 | | *Default:* ``level`` 490 | 491 | .. py:attribute:: compaction_pri 492 | 493 | If level compaction_style = kCompactionStyleLevel, for each level, 494 | which files are prioritized to be picked to compact. 495 | 496 | | *Type:* Member of :py:class:`rocksdb.CompactionPri` 497 | | *Default:* :py:attr:`rocksdb.CompactionPri.kByCompensatedSize` 498 | 499 | .. py:attribute:: compaction_options_universal 500 | 501 | Options to use for universal-style compaction. They make only sense if 502 | :py:attr:`rocksdb.Options.compaction_style` is set to ``"universal"``. 503 | 504 | It is a dict with the following keys. 505 | 506 | * ``size_ratio``: 507 | Percentage flexibilty while comparing file size. 508 | If the candidate file(s) size is 1% smaller than the next file's size, 509 | then include next file into this candidate set. 510 | Default: ``1`` 511 | 512 | * ``min_merge_width``: 513 | The minimum number of files in a single compaction run. 514 | Default: ``2`` 515 | 516 | * ``max_merge_width``: 517 | The maximum number of files in a single compaction run. 518 | Default: ``UINT_MAX`` 519 | 520 | * ``max_size_amplification_percent``: 521 | The size amplification is defined as the amount (in percentage) of 522 | additional storage needed to store a single byte of data in the database. 523 | For example, a size amplification of 2% means that a database that 524 | contains 100 bytes of user-data may occupy upto 102 bytes of 525 | physical storage. By this definition, a fully compacted database has 526 | a size amplification of 0%. Rocksdb uses the following heuristic 527 | to calculate size amplification: it assumes that all files excluding 528 | the earliest file contribute to the size amplification. 529 | Default: ``200``, which means that a 100 byte database could require upto 530 | 300 bytes of storage. 531 | 532 | * ``compression_size_percent``: 533 | If this option is set to be -1 (the default value), all the output 534 | files will follow compression type specified. 535 | 536 | If this option is not negative, we will try to make sure compressed 537 | size is just above this value. In normal cases, at least this 538 | percentage of data will be compressed. 539 | 540 | When we are compacting to a new file, here is the criteria whether 541 | it needs to be compressed: assuming here are the list of files sorted 542 | by generation time: ``A1...An B1...Bm C1...Ct`` 543 | where ``A1`` is the newest and ``Ct`` is the oldest, and we are going 544 | to compact ``B1...Bm``, we calculate the total size of all the files 545 | as total_size, as well as the total size of ``C1...Ct`` as 546 | ``total_C``, the compaction output file will be compressed if 547 | ``total_C / total_size < this percentage``. 548 | Default: -1 549 | 550 | * ``stop_style``: 551 | The algorithm used to stop picking files into a single compaction. 552 | Can be either ``"similar_size"`` or ``"total_size"``. 553 | 554 | * ``similar_size``: Pick files of similar size. 555 | * ``total_size``: Total size of picked files is greater than next file. 556 | 557 | Default: ``"total_size"`` 558 | 559 | For setting options, just assign a dict with the fields to set. 560 | It is allowed to omit keys in this dict. Missing keys are just not set 561 | to the underlying options object. 
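For instance, several keys can be set in a single assignment. The snippet
below is only illustrative; the values simply restate the defaults listed
above. ::

    opts = rocksdb.Options()
    opts.compaction_style = "universal"
    opts.compaction_options_universal = {
        'size_ratio': 1,
        'min_merge_width': 2,
        'max_size_amplification_percent': 200,
        'compression_size_percent': -1,
        'stop_style': 'total_size',
    }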
562 | 563 | This example just changes the stop_style and leaves the other options 564 | untouched. :: 565 | 566 | opts = rocksdb.Options() 567 | opts.compaction_options_universal = {'stop_style': 'similar_size'} 568 | 569 | 570 | .. py:attribute:: max_sequential_skip_in_iterations 571 | 572 | An iteration->Next() sequentially skips over keys with the same 573 | user-key unless this option is set. This number specifies the number 574 | of keys (with the same userkey) that will be sequentially 575 | skipped before a reseek is issued. 576 | 577 | | *Type:* ``int`` 578 | | *Default:* ``8`` 579 | 580 | .. py:attribute:: memtable_factory 581 | 582 | This is a factory that provides MemTableRep objects. 583 | Right now you can assing instances of the following classes. 584 | 585 | * :py:class:`rocksdb.VectorMemtableFactory` 586 | * :py:class:`rocksdb.SkipListMemtableFactory` 587 | * :py:class:`rocksdb.HashSkipListMemtableFactory` 588 | * :py:class:`rocksdb.HashLinkListMemtableFactory` 589 | 590 | *Default:* :py:class:`rocksdb.SkipListMemtableFactory` 591 | 592 | .. py:attribute:: table_factory 593 | 594 | Factory for the files forming the persisten data storage. 595 | Sometimes they are also named SST-Files. Right now you can assign 596 | instances of the following classes. 597 | 598 | * :py:class:`rocksdb.BlockBasedTableFactory` 599 | * :py:class:`rocksdb.PlainTableFactory` 600 | * :py:class:`rocksdb.TotalOrderPlainTableFactory` 601 | 602 | *Default:* :py:class:`rocksdb.BlockBasedTableFactory` 603 | 604 | .. py:attribute:: inplace_update_support 605 | 606 | Allows thread-safe inplace updates. Requires Updates if 607 | 608 | * key exists in current memtable 609 | * new sizeof(new_value) <= sizeof(old_value) 610 | * old_value for that key is a put i.e. kTypeValue 611 | 612 | | *Type:* ``bool`` 613 | | *Default:* ``False`` 614 | 615 | .. py:attribute:: inplace_update_num_locks 616 | 617 | | Number of locks used for inplace update. 618 | | Default: 10000, if inplace_update_support = true, else 0. 619 | 620 | | *Type:* ``int`` 621 | | *Default:* ``10000`` 622 | 623 | .. py:attribute:: comparator 624 | 625 | Comparator used to define the order of keys in the table. 626 | A python comparator must implement the :py:class:`rocksdb.interfaces.Comparator` 627 | interface. 628 | 629 | *Requires*: The client must ensure that the comparator supplied 630 | here has the same name and orders keys *exactly* the same as the 631 | comparator provided to previous open calls on the same DB. 632 | 633 | *Default:* :py:class:`rocksdb.BytewiseComparator` 634 | 635 | .. py:attribute:: merge_operator 636 | 637 | The client must provide a merge operator if Merge operation 638 | needs to be accessed. Calling Merge on a DB without a merge operator 639 | would result in :py:exc:`rocksdb.errors.NotSupported`. The client must 640 | ensure that the merge operator supplied here has the same name and 641 | *exactly* the same semantics as the merge operator provided to 642 | previous open calls on the same DB. The only exception is reserved 643 | for upgrade, where a DB previously without a merge operator is 644 | introduced to Merge operation for the first time. It's necessary to 645 | specify a merge operator when openning the DB in this case. 646 | 647 | A python merge operator must implement the 648 | :py:class:`rocksdb.interfaces.MergeOperator` or 649 | :py:class:`rocksdb.interfaces.AssociativeMergeOperator` 650 | interface. 651 | 652 | *Default:* ``None`` 653 | 654 | .. 
py:attribute:: prefix_extractor
655 | 
656 | If not ``None``, use the specified function to determine the
657 | prefixes for keys. These prefixes will be placed in the filter.
658 | Depending on the workload, this can reduce the read-IO
659 | cost of scans when a prefix is passed to the calls generating an
660 | iterator (:py:meth:`rocksdb.DB.iterkeys` ...).
661 | 
662 | A python prefix_extractor must implement the
663 | :py:class:`rocksdb.interfaces.SliceTransform` interface.
664 | 
665 | For prefix filtering to work properly, "prefix_extractor" and "comparator"
666 | must be such that the following properties hold:
667 | 
668 | 1. ``key.starts_with(prefix(key))``
669 | 2. ``compare(prefix(key), key) <= 0``
670 | 3. ``If compare(k1, k2) <= 0, then compare(prefix(k1), prefix(k2)) <= 0``
671 | 4. ``prefix(prefix(key)) == prefix(key)``
672 | 
673 | *Default:* ``None``
674 | 
675 | .. py:attribute:: row_cache
676 | 
677 | A global cache for table-level rows. If ``None`` this cache is not used.
678 | Otherwise it must be an instance of :py:class:`rocksdb.LRUCache`
679 | 
680 | *Default:* ``None``
681 | 
682 | 
683 | CompactionPri
684 | ================
685 | 
686 | .. py:class:: rocksdb.CompactionPri
687 | 
688 | Defines the supported compaction priorities
689 | 
690 | .. py:attribute:: kByCompensatedSize
691 | .. py:attribute:: kOldestLargestSeqFirst
692 | .. py:attribute:: kOldestSmallestSeqFirst
693 | .. py:attribute:: kMinOverlappingRatio
694 | 
695 | CompressionTypes
696 | ================
697 | 
698 | .. py:class:: rocksdb.CompressionType
699 | 
700 | Defines the supported compression types
701 | 
702 | .. py:attribute:: no_compression
703 | .. py:attribute:: snappy_compression
704 | .. py:attribute:: zlib_compression
705 | .. py:attribute:: bzip2_compression
706 | .. py:attribute:: lz4_compression
707 | .. py:attribute:: lz4hc_compression
708 | .. py:attribute:: xpress_compression
709 | .. py:attribute:: zstd_compression
710 | .. py:attribute:: zstdnotfinal_compression
711 | .. py:attribute:: disable_compression
712 | 
713 | BytewiseComparator
714 | ==================
715 | 
716 | .. py:class:: rocksdb.BytewiseComparator
717 | 
718 | Wraps the rocksdb Bytewise Comparator; it uses lexicographic byte-wise
719 | ordering.
720 | 
721 | BloomFilterPolicy
722 | =================
723 | 
724 | .. py:class:: rocksdb.BloomFilterPolicy
725 | 
726 | Wraps the rocksdb BloomFilter Policy
727 | 
728 | .. py:method:: __init__(bits_per_key)
729 | 
730 | :param int bits_per_key:
731 | Specifies the approximate number of bits per key.
732 | A good value for bits_per_key is 10, which yields a filter with
733 | a ~1% false positive rate.
734 | 
735 | 
736 | LRUCache
737 | ========
738 | 
739 | .. py:class:: rocksdb.LRUCache
740 | 
741 | Wraps the rocksdb LRUCache
742 | 
743 | .. py:method:: __init__(capacity, shard_bits=None)
744 | 
745 | Create a new cache with a fixed size capacity (in bytes).
746 | The cache is sharded into 2^shard_bits shards, by hash of the key.
747 | The total capacity is divided and evenly assigned to each shard.
748 | 
749 | .. _table_factories_label:
750 | 
751 | TableFactories
752 | ==============
753 | 
754 | Currently RocksDB supports two types of tables: plain table and block-based table.
755 | Instances of these classes can be assigned to :py:attr:`rocksdb.Options.table_factory`
756 | 
757 | * *Block-based table:* This is the default table type that RocksDB inherited from
758 | LevelDB. It was designed for storing data on hard disk or flash devices.
759 | 760 | * *Plain table:* It is one of RocksDB's SST file format optimized 761 | for low query latency on pure-memory or really low-latency media. 762 | 763 | Tutorial of rocksdb table formats is available here: 764 | https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats 765 | 766 | .. py:class:: rocksdb.BlockBasedTableFactory 767 | 768 | Wraps BlockBasedTableFactory of RocksDB. 769 | 770 | .. py:method:: __init__(index_type='binary_search', hash_index_allow_collision=True, checksum='crc32', block_cache, block_cache_compressed, filter_policy=None, no_block_cache=False, block_size=None, block_size_deviation=None, block_restart_interval=None, whole_key_filtering=None): 771 | 772 | 773 | :param string index_type: 774 | * ``binary_search`` a space efficient index block that is optimized 775 | for binary-search-based index. 776 | * ``hash_search`` the hash index. If enabled, will do hash lookup 777 | when `Options.prefix_extractor` is provided. 778 | 779 | :param bool hash_index_allow_collision: 780 | Influence the behavior when ``hash_search`` is used. 781 | If ``False``, stores a precise prefix to block range mapping. 782 | If ``True``, does not store prefix and allows prefix hash collision 783 | (less memory consumption) 784 | 785 | :param string checksum: 786 | Use the specified checksum type. Newly created table files will be 787 | protected with this checksum type. Old table files will still be readable, 788 | even though they have different checksum type. 789 | Can be either ``crc32`` or ``xxhash``. 790 | 791 | :param block_cache: 792 | Control over blocks (user data is stored in a set of blocks, and 793 | a block is the unit of reading from disk). 794 | 795 | If ``None``, rocksdb will automatically create and use an 8MB internal cache. 796 | If not ``None`` use the specified cache for blocks. In that case it must 797 | be an instance of :py:class:`rocksdb.LRUCache` 798 | 799 | :param block_cache_compressed: 800 | If ``None``, rocksdb will not use a compressed block cache. 801 | If not ``None`` use the specified cache for compressed blocks. In that 802 | case it must be an instance of :py:class:`rocksdb.LRUCache` 803 | 804 | :param filter_policy: 805 | If not ``None`` use the specified filter policy to reduce disk reads. 806 | A python filter policy must implement the 807 | :py:class:`rocksdb.interfaces.FilterPolicy` interface. 808 | Recommended is a instance of :py:class:`rocksdb.BloomFilterPolicy` 809 | 810 | :param bool no_block_cache: 811 | Disable block cache. If this is set to true, 812 | then no block cache should be used, and the block_cache should 813 | point to ``None`` 814 | 815 | :param int block_size: 816 | If set to ``None`` the rocksdb default of ``4096`` is used. 817 | Approximate size of user data packed per block. Note that the 818 | block size specified here corresponds to uncompressed data. The 819 | actual size of the unit read from disk may be smaller if 820 | compression is enabled. This parameter can be changed dynamically. 821 | 822 | :param int block_size_deviation: 823 | If set to ``None`` the rocksdb default of ``10`` is used. 824 | This is used to close a block before it reaches the configured 825 | 'block_size'. If the percentage of free space in the current block is less 826 | than this specified number and adding a new record to the block will 827 | exceed the configured block size, then this block will be closed and the 828 | new record will be written to the next block. 
829 | 
830 | :param int block_restart_interval:
831 | If set to ``None`` the rocksdb default of ``16`` is used.
832 | Number of keys between restart points for delta encoding of keys.
833 | This parameter can be changed dynamically. Most clients should
834 | leave this parameter alone.
835 | 
836 | :param bool whole_key_filtering:
837 | If set to ``None`` the rocksdb default of ``True`` is used.
838 | If ``True``, place whole keys in the filter (not just prefixes).
839 | This must generally be true for gets to be efficient.
840 | 
841 | .. py:class:: rocksdb.PlainTableFactory
842 | 
843 | Plain Table with prefix-only seek. It wraps rocksdb PlainTableFactory.
844 | 
845 | For this factory, you need to set :py:attr:`rocksdb.Options.prefix_extractor`
846 | properly to make it work. Look-up will start with a hash lookup of the
847 | key prefix. Inside the hash bucket found, a binary search is executed to
848 | resolve hash conflicts. Finally, a linear search is used.
849 | 
850 | .. py:method:: __init__(user_key_len=0, bloom_bits_per_key=10, hash_table_ratio=0.75, index_sparseness=10, huge_page_tlb_size=0, encoding_type='plain', full_scan_mode=False, store_index_in_file=False)
851 | 
852 | :param int user_key_len:
853 | Plain table has an optimization for fixed-size keys, which can be
854 | specified via user_key_len.
855 | Alternatively, you can pass `0` if your keys have variable lengths.
856 | 
857 | :param int bloom_bits_per_key:
858 | The number of bits used for the bloom filter per prefix.
859 | You may disable it by passing `0`.
860 | 
861 | :param float hash_table_ratio:
862 | The desired utilization of the hash table used for prefix hashing.
863 | hash_table_ratio = number of prefixes / #buckets in the hash table.
864 | 
865 | :param int index_sparseness:
866 | Inside each prefix, one index record is built for every index_sparseness
867 | keys, to support binary search inside each hash bucket.
868 | For encoding type ``prefix``, the value will be used when
869 | writing to determine an interval to rewrite the full key.
870 | It is only a suggestion and is satisfied when possible.
871 | 
872 | :param int huge_page_tlb_size:
873 | If <=0, allocate hash indexes and blooms from malloc.
874 | Otherwise from huge page TLB.
875 | The user needs to reserve huge pages for it to be allocated, like:
876 | ``sysctl -w vm.nr_hugepages=20``
877 | See linux doc Documentation/vm/hugetlbpage.txt
878 | 
879 | :param string encoding_type:
880 | How to encode the keys. The value will determine how to encode keys
881 | when writing to a new SST file. This value will be stored
882 | inside the SST file and will be used when reading from the
883 | file, which makes it possible for users to choose a different
884 | encoding type when reopening a DB. Files with different
885 | encoding types can co-exist in the same DB and can be read.
886 | 
887 | * ``plain``: Always write full keys without any special encoding.
888 | * ``prefix``: Find opportunities to write the same prefix once for multiple rows.
889 | In some cases, when a key follows a previous key with the same prefix,
890 | instead of writing out the full key, it just writes out the size of the
891 | shared prefix, as well as other bytes, to save some bytes.
892 | 
893 | When using this option, the user is required to use the same prefix
894 | extractor to make sure the same prefix will be extracted from the same key.
895 | The Name() value of the prefix extractor will be stored in the file.
896 | When reopening the file, the name of the options.prefix_extractor given
897 | will be bitwise compared to the prefix extractors stored in the file.
898 | An error will be returned if the two don't match.
899 | 
900 | :param bool full_scan_mode:
901 | Mode for reading the whole file record by record, without using the index.
902 | 
903 | :param bool store_index_in_file:
904 | Compute the plain table index and bloom filter during file building
905 | and store them in the file. When reading the file, the index will be
906 | mmapped instead of recomputed.
907 | 
908 | .. _memtable_factories_label:
909 | 
910 | MemtableFactories
911 | =================
912 | 
913 | RocksDB has different classes to represent the in-memory buffer for the current
914 | operations. You have to assign instances of the following classes to
915 | :py:attr:`rocksdb.Options.memtable_factory`.
916 | This page has a comparison of the most popular ones:
917 | https://github.com/facebook/rocksdb/wiki/Hash-based-memtable-implementations
918 | 
919 | .. py:class:: rocksdb.VectorMemtableFactory
920 | 
921 | This creates MemTableReps that are backed by an std::vector.
922 | On iteration, the vector is sorted. This is useful for workloads where
923 | iteration is very rare and writes are generally not issued after reads begin.
924 | 
925 | .. py:method:: __init__(count=0)
926 | 
927 | :param int count:
928 | Passed to the constructor of the underlying std::vector of each
929 | VectorRep. On initialization, at least count bytes are reserved
930 | in the underlying array for usage.
931 | 
932 | .. py:class:: rocksdb.SkipListMemtableFactory
933 | 
934 | This uses a skip list to store keys.
935 | 
936 | .. py:method:: __init__()
937 | 
938 | .. py:class:: rocksdb.HashSkipListMemtableFactory
939 | 
940 | This class contains a fixed array of buckets, each pointing
941 | to a skiplist (null if the bucket is empty).
942 | 
943 | .. note::
944 | 
945 | :py:attr:`rocksdb.Options.prefix_extractor` must be set, otherwise
946 | rocksdb falls back to skip-list.
947 | 
948 | .. py:method:: __init__(bucket_count = 1000000, skiplist_height = 4, skiplist_branching_factor = 4)
949 | 
950 | :param int bucket_count: number of fixed array buckets
951 | :param int skiplist_height: the max height of the skiplist
952 | :param int skiplist_branching_factor:
953 | probabilistic size ratio between adjacent link lists in the skiplist
954 | 
955 | .. py:class:: rocksdb.HashLinkListMemtableFactory
956 | 
957 | This factory creates memtables backed by a hashed linked list.
958 | It contains a fixed array of buckets, each pointing to a sorted single
959 | linked list (null if the bucket is empty).
960 | 
961 | .. note::
962 | 
963 | :py:attr:`rocksdb.Options.prefix_extractor` must be set, otherwise
964 | rocksdb falls back to skip-list.
965 | 
966 | 
967 | .. py:method:: __init__(bucket_count=50000)
968 | 
969 | :param int bucket_count: number of fixed array buckets
970 | 
-------------------------------------------------------------------------------- /docs/changelog.rst: --------------------------------------------------------------------------------
1 | Changelog
2 | *********
3 | 
4 | Version 0.5
5 | -----------
6 | 
7 | 
8 | Version 0.4
9 | -----------
10 | This version works with RocksDB v3.12.
11 | 
12 | * Added :py:func:`repair_db`.
13 | * Added :py:meth:`rocksdb.Options.row_cache`
14 | * Publish to pypi.
15 | 
16 | Backward Incompatible Changes:
17 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
18 | 
19 | * Changed API of :py:meth:`rocksdb.DB.compact_range`.
20 | 
21 | * Only allow keyword arguments.
22 | * Changed ``reduce_level`` to ``change_level``. 23 | * Add new argument called ``bottommost_level_compaction``. 24 | 25 | 26 | Version 0.3 27 | ----------- 28 | This version works with RocksDB version v3.11. 29 | 30 | Backward Incompatible Changes: 31 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 32 | 33 | **Prefix Seeks:** 34 | 35 | According to this page https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes, 36 | all the prefix related parameters on ``ReadOptions`` are removed. 37 | Rocksdb realizes now if ``Options.prefix_extractor`` is set and uses then 38 | prefix-seeks automatically. This means the following changes on pyrocksdb. 39 | 40 | * DB.iterkeys, DB.itervalues, DB.iteritems have *no* ``prefix`` parameter anymore. 41 | * DB.get, DB.multi_get, DB.key_may_exist, DB.iterkeys, DB.itervalues, DB.iteritems 42 | have *no* ``prefix_seek`` parameter anymore. 43 | 44 | Which means all the iterators walk now always to the *end* of the database. 45 | So if you need to stay within a prefix, write your own code to ensure that. 46 | For DB.iterkeys and DB.iteritems ``itertools.takewhile`` is a possible solution. :: 47 | 48 | from itertools import takewhile 49 | 50 | it = self.db.iterkeys() 51 | it.seek(b'00002') 52 | print list(takewhile(lambda key: key.startswith(b'00002'), it)) 53 | 54 | it = self.db.iteritems() 55 | it.seek(b'00002') 56 | print dict(takewhile(lambda item: item[0].startswith(b'00002'), it)) 57 | 58 | **SST Table Builders:** 59 | 60 | * Removed ``NewTotalOrderPlainTableFactory``, because rocksdb drops it too. 61 | 62 | **Changed Options:** 63 | 64 | In newer versions of rocksdb a bunch of options were moved or removed. 65 | 66 | * Rename ``bloom_bits_per_prefix`` of :py:class:`rocksdb.PlainTableFactory` to ``bloom_bits_per_key`` 67 | * Removed ``Options.db_stats_log_interval``. 68 | * Removed ``Options.disable_seek_compaction`` 69 | * Moved ``Options.no_block_cache`` to ``BlockBasedTableFactory`` 70 | * Moved ``Options.block_size`` to ``BlockBasedTableFactory`` 71 | * Moved ``Options.block_size_deviation`` to ``BlockBasedTableFactory`` 72 | * Moved ``Options.block_restart_interval`` to ``BlockBasedTableFactory`` 73 | * Moved ``Options.whole_key_filtering`` to ``BlockBasedTableFactory`` 74 | * Removed ``Options.table_cache_remove_scan_count_limit`` 75 | * Removed rm_scan_count_limit from ``LRUCache`` 76 | 77 | 78 | New: 79 | ^^^^ 80 | * Make CompactRange available: :py:meth:`rocksdb.DB.compact_range` 81 | * Add init options to :py:class:`rocksdb.BlockBasedTableFactory` 82 | * Add more option to :py:class:`rocksdb.PlainTableFactory` 83 | * Add :py:class:`rocksdb.WriteBatchIterator` 84 | * add :py:attr:`rocksdb.CompressionType.lz4_compression` 85 | * add :py:attr:`rocksdb.CompressionType.lz4hc_compression` 86 | 87 | 88 | Version 0.2 89 | ----------- 90 | 91 | This version works with RocksDB version 2.8.fb. Now you have access to the more 92 | advanced options of rocksdb. Like changing the memtable or SST representation. 93 | It is also possible now to enable *Universal Style Compaction*. 94 | 95 | * Fixed `issue 3 `_. 96 | Which fixed the change of prefix_extractor from raw-pointer to smart-pointer. 97 | 98 | * Support the new :py:attr:`rocksdb.Options.verify_checksums_in_compaction` option. 99 | 100 | * Add :py:attr:`rocksdb.Options.table_factory` option. So you could use the new 101 | 'PlainTableFactories' which are optimized for in-memory-databases. 
102 | 103 | * https://github.com/facebook/rocksdb/wiki/PlainTable-Format 104 | * https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database%3F 105 | 106 | * Add :py:attr:`rocksdb.Options.memtable_factory` option. 107 | 108 | * Add options :py:attr:`rocksdb.Options.compaction_style` and 109 | :py:attr:`rocksdb.Options.compaction_options_universal` to change the 110 | compaction style. 111 | 112 | * Update documentation to the new default values 113 | 114 | * allow_mmap_reads=true 115 | * allow_mmap_writes=false 116 | * max_background_flushes=1 117 | * max_open_files=5000 118 | * paranoid_checks=true 119 | * disable_seek_compaction=true 120 | * level0_stop_writes_trigger=24 121 | * level0_slowdown_writes_trigger=20 122 | 123 | * Document new property names for :py:meth:`rocksdb.DB.get_property`. 124 | 125 | Version 0.1 126 | ----------- 127 | 128 | Initial version. Works with rocksdb version 2.7.fb. 129 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # python-rocksdb documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Dec 31 12:50:54 2013. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | #sys.path.insert(0, os.path.abspath('.')) 22 | 23 | # -- General configuration ------------------------------------------------ 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | #needs_sphinx = '1.0' 27 | 28 | # Add any Sphinx extension module names here, as strings. They can be 29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 30 | # ones. 31 | extensions = [ 32 | 'sphinx.ext.autodoc', 33 | 'sphinx.ext.todo', 34 | 'sphinx.ext.viewcode', 35 | ] 36 | 37 | # Add any paths that contain templates here, relative to this directory. 38 | templates_path = ['_templates'] 39 | 40 | # The suffix of source filenames. 41 | source_suffix = '.rst' 42 | 43 | # The encoding of source files. 44 | #source_encoding = 'utf-8-sig' 45 | 46 | # The master toctree document. 47 | master_doc = 'index' 48 | 49 | # General information about the project. 50 | project = u'python-rocksdb' 51 | copyright = u'2014, sh' 52 | 53 | # The version info for the project you're documenting, acts as replacement for 54 | # |version| and |release|, also used in various other places throughout the 55 | # built documents. 56 | # 57 | # The short X.Y version. 58 | version = '0.6' 59 | # The full version, including alpha/beta/rc tags. 60 | release = '0.6.7' 61 | 62 | # The language for content autogenerated by Sphinx. Refer to documentation 63 | # for a list of supported languages. 64 | #language = None 65 | 66 | # There are two options for replacing |today|: either, you set today to some 67 | # non-false value, then it is used: 68 | #today = '' 69 | # Else, today_fmt is used as the format for a strftime call. 
70 | #today_fmt = '%B %d, %Y' 71 | 72 | # List of patterns, relative to source directory, that match files and 73 | # directories to ignore when looking for source files. 74 | exclude_patterns = ['_build'] 75 | 76 | # The reST default role (used for this markup: `text`) to use for all 77 | # documents. 78 | #default_role = None 79 | 80 | # If true, '()' will be appended to :func: etc. cross-reference text. 81 | #add_function_parentheses = True 82 | 83 | # If true, the current module name will be prepended to all description 84 | # unit titles (such as .. function::). 85 | #add_module_names = True 86 | 87 | # If true, sectionauthor and moduleauthor directives will be shown in the 88 | # output. They are ignored by default. 89 | #show_authors = False 90 | 91 | # The name of the Pygments (syntax highlighting) style to use. 92 | pygments_style = 'sphinx' 93 | 94 | # A list of ignored prefixes for module index sorting. 95 | #modindex_common_prefix = [] 96 | 97 | # If true, keep warnings as "system message" paragraphs in the built documents. 98 | #keep_warnings = False 99 | 100 | 101 | # -- Options for HTML output ---------------------------------------------- 102 | 103 | # The theme to use for HTML and HTML Help pages. See the documentation for 104 | # a list of builtin themes. 105 | # html_theme = 'default' 106 | 107 | # Theme options are theme-specific and customize the look and feel of a theme 108 | # further. For a list of options available for each theme, see the 109 | # documentation. 110 | #html_theme_options = {} 111 | 112 | # Add any paths that contain custom themes here, relative to this directory. 113 | #html_theme_path = [] 114 | 115 | # The name for this set of Sphinx documents. If None, it defaults to 116 | # " v documentation". 117 | #html_title = None 118 | 119 | # A shorter title for the navigation bar. Default is the same as html_title. 120 | #html_short_title = None 121 | 122 | # The name of an image file (relative to this directory) to place at the top 123 | # of the sidebar. 124 | #html_logo = None 125 | 126 | # The name of an image file (within the static path) to use as favicon of the 127 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 128 | # pixels large. 129 | #html_favicon = None 130 | 131 | # Add any paths that contain custom static files (such as style sheets) here, 132 | # relative to this directory. They are copied after the builtin static files, 133 | # so a file named "default.css" will overwrite the builtin "default.css". 134 | html_static_path = ['_static'] 135 | 136 | # Add any extra paths that contain custom files (such as robots.txt or 137 | # .htaccess) here, relative to this directory. These files are copied 138 | # directly to the root of the documentation. 139 | #html_extra_path = [] 140 | 141 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 142 | # using the given strftime format. 143 | #html_last_updated_fmt = '%b %d, %Y' 144 | 145 | # If true, SmartyPants will be used to convert quotes and dashes to 146 | # typographically correct entities. 147 | #html_use_smartypants = True 148 | 149 | # Custom sidebar templates, maps document names to template names. 150 | #html_sidebars = {} 151 | 152 | # Additional templates that should be rendered to pages, maps page names to 153 | # template names. 154 | #html_additional_pages = {} 155 | 156 | # If false, no module index is generated. 157 | #html_domain_indices = True 158 | 159 | # If false, no index is generated. 
160 | #html_use_index = True 161 | 162 | # If true, the index is split into individual pages for each letter. 163 | #html_split_index = False 164 | 165 | # If true, links to the reST sources are added to the pages. 166 | #html_show_sourcelink = True 167 | 168 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 169 | #html_show_sphinx = True 170 | 171 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 172 | #html_show_copyright = True 173 | 174 | # If true, an OpenSearch description file will be output, and all pages will 175 | # contain a tag referring to it. The value of this option must be the 176 | # base URL from which the finished HTML is served. 177 | #html_use_opensearch = '' 178 | 179 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 180 | #html_file_suffix = None 181 | 182 | # Output file base name for HTML help builder. 183 | htmlhelp_basename = 'python-rocksdbdoc' 184 | 185 | 186 | # -- Options for LaTeX output --------------------------------------------- 187 | 188 | latex_elements = { 189 | # The paper size ('letterpaper' or 'a4paper'). 190 | #'papersize': 'letterpaper', 191 | 192 | # The font size ('10pt', '11pt' or '12pt'). 193 | #'pointsize': '10pt', 194 | 195 | # Additional stuff for the LaTeX preamble. 196 | #'preamble': '', 197 | } 198 | 199 | # Grouping the document tree into LaTeX files. List of tuples 200 | # (source start file, target name, title, 201 | # author, documentclass [howto, manual, or own class]). 202 | latex_documents = [ 203 | ('index', 'python-rocksdb.tex', u'python-rocksdb Documentation', 204 | u'sh', 'manual'), 205 | ] 206 | 207 | # The name of an image file (relative to this directory) to place at the top of 208 | # the title page. 209 | #latex_logo = None 210 | 211 | # For "manual" documents, if this is true, then toplevel headings are parts, 212 | # not chapters. 213 | #latex_use_parts = False 214 | 215 | # If true, show page references after internal links. 216 | #latex_show_pagerefs = False 217 | 218 | # If true, show URL addresses after external links. 219 | #latex_show_urls = False 220 | 221 | # Documents to append as an appendix to all manuals. 222 | #latex_appendices = [] 223 | 224 | # If false, no module index is generated. 225 | #latex_domain_indices = True 226 | 227 | 228 | # -- Options for manual page output --------------------------------------- 229 | 230 | # One entry per manual page. List of tuples 231 | # (source start file, name, description, authors, manual section). 232 | man_pages = [ 233 | ('index', 'python-rocksdb', u'python-rocksdb Documentation', 234 | [u'sh'], 1) 235 | ] 236 | 237 | # If true, show URL addresses after external links. 238 | #man_show_urls = False 239 | 240 | 241 | # -- Options for Texinfo output ------------------------------------------- 242 | 243 | # Grouping the document tree into Texinfo files. List of tuples 244 | # (source start file, target name, title, author, 245 | # dir menu entry, description, category) 246 | texinfo_documents = [ 247 | ('index', 'python-rocksdb', u'python-rocksdb Documentation', 248 | u'sh', 'python-rocksdb', 'One line description of project.', 249 | 'Miscellaneous'), 250 | ] 251 | 252 | # Documents to append as an appendix to all manuals. 253 | #texinfo_appendices = [] 254 | 255 | # If false, no module index is generated. 256 | #texinfo_domain_indices = True 257 | 258 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 
259 | #texinfo_show_urls = 'footnote' 260 | 261 | # If true, do not generate a @detailmenu in the "Top" node's menu. 262 | #texinfo_no_detailmenu = False 263 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to python-rocksdb's documentation! 2 | ========================================== 3 | 4 | Overview 5 | -------- 6 | Python bindings to the C++ interface of http://rocksdb.org/ using cython:: 7 | 8 | import rocksdb 9 | db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True)) 10 | db.put(b"a", b"b") 11 | print db.get(b"a") 12 | 13 | 14 | Tested with python2.7 and python3.4 and RocksDB version 5.3.0 15 | 16 | .. toctree:: 17 | :maxdepth: 2 18 | 19 | Instructions how to install 20 | Tutorial 21 | API 22 | Changelog 23 | 24 | 25 | Contributing 26 | ------------ 27 | 28 | Source can be found on `github `_. 29 | Feel free to fork and send pull-requests or create issues on the 30 | `github issue tracker `_ 31 | 32 | RoadMap/TODO 33 | ------------ 34 | 35 | No plans so far. Please submit wishes to the github issues. 36 | 37 | Indices and tables 38 | ================== 39 | 40 | * :ref:`genindex` 41 | * :ref:`modindex` 42 | * :ref:`search` 43 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | Installing 2 | ========== 3 | .. highlight:: bash 4 | 5 | With conda 6 | ********** 7 | 8 | .. code-block:: bash 9 | 10 | conda install -c conda-forge python-rocksdb 11 | 12 | With distro package and pypi 13 | **************************** 14 | 15 | This requires librocksdb-dev>=5.0 16 | 17 | .. code-block:: bash 18 | 19 | apt-get install python-virtualenv python-dev librocksdb-dev 20 | virtualenv venv 21 | source venv/bin/activate 22 | pip install python-rocksdb 23 | 24 | From source 25 | *********** 26 | 27 | Building rocksdb 28 | ---------------- 29 | 30 | Briefly describes how to build rocksdb under an ordinary debian/ubuntu. 31 | For more details consider https://github.com/facebook/rocksdb/blob/master/INSTALL.md 32 | 33 | .. code-block:: bash 34 | 35 | apt-get install build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev 36 | git clone https://github.com/facebook/rocksdb.git 37 | cd rocksdb 38 | mkdir build && cd build 39 | cmake .. 40 | make 41 | 42 | Systemwide rocksdb 43 | ^^^^^^^^^^^^^^^^^^ 44 | The following command installs the shared library in ``/usr/lib/`` and the 45 | header files in ``/usr/include/rocksdb/``:: 46 | 47 | make install-shared INSTALL_PATH=/usr 48 | 49 | To uninstall use:: 50 | 51 | make uninstall INSTALL_PATH=/usr 52 | 53 | Local rocksdb 54 | ^^^^^^^^^^^^^ 55 | If you don't like the system wide installation, or you don't have the 56 | permissions, it is possible to set the following environment variables. 57 | These varialbes are picked up by the compiler, linker and loader 58 | 59 | .. code-block:: bash 60 | 61 | export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:`pwd`/../include 62 | export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:`pwd` 63 | export LIBRARY_PATH=${LIBRARY_PATH}:`pwd` 64 | 65 | Building python-rocksdb 66 | ----------------------- 67 | 68 | .. 
code-block:: bash 69 | 70 | apt-get install python-virtualenv python-dev 71 | virtualenv venv 72 | source venv/bin/activate 73 | pip install git+git://github.com/twmht/python-rocksdb.git#egg=python-rocksdb 74 | -------------------------------------------------------------------------------- /docs/tutorial/index.rst: -------------------------------------------------------------------------------- 1 | Basic Usage of python-rocksdb 2 | ***************************** 3 | 4 | Open 5 | ==== 6 | 7 | The most basic open call is :: 8 | 9 | import rocksdb 10 | 11 | db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True)) 12 | 13 | A more production ready open can look like this :: 14 | 15 | import rocksdb 16 | 17 | opts = rocksdb.Options() 18 | opts.create_if_missing = True 19 | opts.max_open_files = 300000 20 | opts.write_buffer_size = 67108864 21 | opts.max_write_buffer_number = 3 22 | opts.target_file_size_base = 67108864 23 | 24 | opts.table_factory = rocksdb.BlockBasedTableFactory( 25 | filter_policy=rocksdb.BloomFilterPolicy(10), 26 | block_cache=rocksdb.LRUCache(2 * (1024 ** 3)), 27 | block_cache_compressed=rocksdb.LRUCache(500 * (1024 ** 2))) 28 | 29 | db = rocksdb.DB("test.db", opts) 30 | 31 | It assings a cache of 2.5G, uses a bloom filter for faster lookups and keeps 32 | more data (64 MB) in memory before writting a .sst file. 33 | 34 | About Bytes And Unicode 35 | ======================= 36 | 37 | RocksDB stores all data as uninterpreted *byte strings*. 38 | pyrocksdb behaves the same and uses nearly everywhere byte strings too. 39 | In python2 this is the ``str`` type. In python3 the ``bytes`` type. 40 | Since the default string type for string literals differs between python 2 and 3, 41 | it is strongly recommended to use an explicit ``b`` prefix for all byte string 42 | literals in both python2 and python3 code. 43 | For example ``b'this is a byte string'``. This avoids ambiguity and ensures 44 | that your code keeps working as intended if you switch between python2 and python3. 45 | 46 | The only place where you can pass unicode objects are filesytem paths like 47 | 48 | * Directory name of the database itself :py:meth:`rocksdb.DB.__init__` 49 | 50 | * :py:attr:`rocksdb.Options.wal_dir` 51 | 52 | * :py:attr:`rocksdb.Options.db_log_dir` 53 | 54 | To encode this path name, `sys.getfilesystemencoding()` encoding is used. 55 | 56 | Access 57 | ====== 58 | 59 | Store, Get, Delete is straight forward :: 60 | 61 | # Store 62 | db.put(b"key", b"value") 63 | 64 | # Get 65 | db.get(b"key") 66 | 67 | # Delete 68 | db.delete(b"key") 69 | 70 | It is also possible to gather modifications and 71 | apply them in a single operation :: 72 | 73 | batch = rocksdb.WriteBatch() 74 | batch.put(b"key", b"v1") 75 | batch.delete(b"key") 76 | batch.put(b"key", b"v2") 77 | batch.put(b"key", b"v3") 78 | 79 | db.write(batch) 80 | 81 | Fetch of multiple values at once :: 82 | 83 | db.put(b"key1", b"v1") 84 | db.put(b"key2", b"v2") 85 | 86 | ret = db.multi_get([b"key1", b"key2", b"key3"]) 87 | 88 | # prints b"v1" 89 | print ret[b"key1"] 90 | 91 | # prints None 92 | print ret[b"key3"] 93 | 94 | Iteration 95 | ========= 96 | 97 | Iterators behave slightly different than expected. Per default they are not 98 | valid. 
So you have to call one of its seek methods first :: 99 | 100 | db.put(b"key1", b"v1") 101 | db.put(b"key2", b"v2") 102 | db.put(b"key3", b"v3") 103 | 104 | it = db.iterkeys() 105 | it.seek_to_first() 106 | 107 | # prints [b'key1', b'key2', b'key3'] 108 | print list(it) 109 | 110 | it.seek_to_last() 111 | # prints [b'key3'] 112 | print list(it) 113 | 114 | it.seek(b'key2') 115 | # prints [b'key2', b'key3'] 116 | print list(it) 117 | 118 | There are also methods to iterate over values/items :: 119 | 120 | it = db.itervalues() 121 | it.seek_to_first() 122 | 123 | # prints [b'v1', b'v2', b'v3'] 124 | print list(it) 125 | 126 | it = db.iteritems() 127 | it.seek_to_first() 128 | 129 | # prints [(b'key1', b'v1'), (b'key2, b'v2'), (b'key3', b'v3')] 130 | print list(it) 131 | 132 | Reversed iteration :: 133 | 134 | it = db.iteritems() 135 | it.seek_to_last() 136 | 137 | # prints [(b'key3', b'v3'), (b'key2', b'v2'), (b'key1', b'v1')] 138 | print list(reversed(it)) 139 | 140 | SeekForPrev (Take the example from `https://github.com/facebook/rocksdb/wiki/SeekForPrev`):: 141 | 142 | db.put(b'a1', b'a1_value') 143 | db.put(b'a3', b'a3_value') 144 | db.put(b'b1', b'b1_value') 145 | db.put(b'b2', b'b2_value') 146 | db.put(b'c2', b'c2_value') 147 | db.put(b'c4', b'c4_value') 148 | 149 | it = db.iteritems() 150 | it.seek(b'a1') 151 | assertEqual(it.get(), (b'a1', b'a1_value')) 152 | it.seek(b'a3') 153 | assertEqual(it.get(), (b'a3', b'a3_value')) 154 | it.seek_for_prev(b'c4') 155 | assertEqual(it.get(), (b'c4', b'c4_value')) 156 | it.seek_for_prev(b'c3') 157 | assertEqual(it.get(), (b'c2', b'c2_value')) 158 | 159 | 160 | Snapshots 161 | ========= 162 | 163 | Snapshots are nice to get a consistent view on the database :: 164 | 165 | self.db.put(b"a", b"1") 166 | self.db.put(b"b", b"2") 167 | 168 | snapshot = self.db.snapshot() 169 | self.db.put(b"a", b"2") 170 | self.db.delete(b"b") 171 | 172 | it = self.db.iteritems() 173 | it.seek_to_first() 174 | 175 | # prints {b'a': b'2'} 176 | print dict(it) 177 | 178 | it = self.db.iteritems(snapshot=snapshot) 179 | it.seek_to_first() 180 | 181 | # prints {b'a': b'1', b'b': b'2'} 182 | print dict(it) 183 | 184 | 185 | MergeOperator 186 | ============= 187 | 188 | Merge operators are useful for efficient read-modify-write operations. 189 | For more details see `Merge Operator `_ 190 | 191 | A python merge operator must either implement the 192 | :py:class:`rocksdb.interfaces.AssociativeMergeOperator` or 193 | :py:class:`rocksdb.interfaces.MergeOperator` interface. 
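The full :py:class:`rocksdb.interfaces.MergeOperator` interface is useful when
a single associative step is not enough: ``full_merge`` receives the existing
value together with the complete operand list, while ``partial_merge`` may
combine two neighbouring operands. A minimal, illustrative sketch (the method
signatures are assumed to follow the interfaces module; the "last value wins"
semantics are only an example) ::

    class LastValueOperator(rocksdb.interfaces.MergeOperator):
        def full_merge(self, key, existing_value, operand_list):
            # order matters here: the newest operand simply wins
            return (True, operand_list[-1])

        def partial_merge(self, key, left_operand, right_operand):
            # of two neighbouring operands the newer (right) one wins
            return (True, right_operand)

        def name(self):
            return b'LastValueOperator'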
194 | 195 | The following example python merge operator implements a counter :: 196 | 197 | class AssocCounter(rocksdb.interfaces.AssociativeMergeOperator): 198 | def merge(self, key, existing_value, value): 199 | if existing_value: 200 | s = int(existing_value) + int(value) 201 | return (True, str(s).encode('ascii')) 202 | return (True, value) 203 | 204 | def name(self): 205 | return b'AssocCounter' 206 | 207 | 208 | opts = rocksdb.Options() 209 | opts.create_if_missing = True 210 | opts.merge_operator = AssocCounter() 211 | db = rocksdb.DB('test.db', opts) 212 | 213 | db.merge(b"a", b"1") 214 | db.merge(b"a", b"1") 215 | 216 | # prints b'2' 217 | print db.get(b"a") 218 | 219 | We provide a set of default operators ``rocksdb.merge_operators.UintAddOperator`` and ``rocksdb.merge_operators.StringAppendOperator``:: 220 | 221 | from rocksdb.merge_operators import UintAddOperator, StringAppendOperator 222 | opts = rocksdb.Options() 223 | opts.create_if_missing = True 224 | # you should also play with StringAppendOperator 225 | opts.merge_operator = UintAddOperator() 226 | db = rocksdb.DB('/tmp/test', opts) 227 | self.db.put(b'a', struct.pack('Q', 5566)) 228 | for x in range(1000): 229 | self.db.merge(b"a", struct.pack('Q', x)) 230 | self.assertEqual(5566 + sum(range(1000)), struct.unpack('Q', self.db.get(b'a'))[0]) 231 | 232 | 233 | 234 | PrefixExtractor 235 | =============== 236 | 237 | According to `Prefix API `_ 238 | a prefix_extractor can reduce IO for scans within a prefix range. 239 | A python prefix extractor must implement the :py:class:`rocksdb.interfaces.SliceTransform` interface. 240 | 241 | The following example presents a prefix extractor of a static size. 242 | So always the first 5 bytes are used as the prefix :: 243 | 244 | class StaticPrefix(rocksdb.interfaces.SliceTransform): 245 | def name(self): 246 | return b'static' 247 | 248 | def transform(self, src): 249 | return (0, 5) 250 | 251 | def in_domain(self, src): 252 | return len(src) >= 5 253 | 254 | def in_range(self, dst): 255 | return len(dst) == 5 256 | 257 | opts = rocksdb.Options() 258 | opts.create_if_missing=True 259 | opts.prefix_extractor = StaticPrefix() 260 | 261 | db = rocksdb.DB('test.db', opts) 262 | 263 | db.put(b'00001.x', b'x') 264 | db.put(b'00001.y', b'y') 265 | db.put(b'00001.z', b'z') 266 | 267 | db.put(b'00002.x', b'x') 268 | db.put(b'00002.y', b'y') 269 | db.put(b'00002.z', b'z') 270 | 271 | db.put(b'00003.x', b'x') 272 | db.put(b'00003.y', b'y') 273 | db.put(b'00003.z', b'z') 274 | 275 | prefix = b'00002' 276 | 277 | it = db.iteritems() 278 | it.seek(prefix) 279 | 280 | # prints {b'00002.z': b'z', b'00002.y': b'y', b'00002.x': b'x'} 281 | print dict(itertools.takewhile(lambda item: item[0].startswith(prefix), it)) 282 | 283 | 284 | Backup And Restore 285 | ================== 286 | 287 | Backup and Restore is done with a separate :py:class:`rocksdb.BackupEngine` object. 288 | 289 | A backup can only be created on a living database object. :: 290 | 291 | import rocksdb 292 | 293 | db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True)) 294 | db.put(b'a', b'v1') 295 | db.put(b'b', b'v2') 296 | db.put(b'c', b'v3') 297 | 298 | Backup is created like this. 299 | You can choose any path for the backup destination except the db path itself. 300 | If ``flush_before_backup`` is ``True`` the current memtable is flushed to disk 301 | before backup. 
:: 302 | 303 | backup = rocksdb.BackupEngine("test.db/backups") 304 | backup.create_backup(db, flush_before_backup=True) 305 | 306 | Restore is done like this. 307 | The two arguments are the db_dir and wal_dir, which are mostly the same. :: 308 | 309 | backup = rocksdb.BackupEngine("test.db/backups") 310 | backup.restore_latest_backup("test.db", "test.db") 311 | 312 | 313 | Change Memtable Or SST Implementations 314 | ====================================== 315 | 316 | As noted here :ref:`memtable_factories_label`, RocksDB offers different implementations for the memtable 317 | representation. Per default :py:class:`rocksdb.SkipListMemtableFactory` is used, 318 | but changing it to a different one is veary easy. 319 | 320 | Here is an example for HashSkipList-MemtableFactory. 321 | Keep in mind: To use the hashed based MemtableFactories you must set 322 | :py:attr:`rocksdb.Options.prefix_extractor`. 323 | In this example all keys have a static prefix of len 5. :: 324 | 325 | class StaticPrefix(rocksdb.interfaces.SliceTransform): 326 | def name(self): 327 | return b'static' 328 | 329 | def transform(self, src): 330 | return (0, 5) 331 | 332 | def in_domain(self, src): 333 | return len(src) >= 5 334 | 335 | def in_range(self, dst): 336 | return len(dst) == 5 337 | 338 | 339 | opts = rocksdb.Options() 340 | opts.prefix_extractor = StaticPrefix() 341 | opts.allow_concurrent_memtable_write = False 342 | opts.memtable_factory = rocksdb.HashSkipListMemtableFactory() 343 | opts.create_if_missing = True 344 | 345 | db = rocksdb.DB("test.db", opts) 346 | db.put(b'00001.x', b'x') 347 | db.put(b'00001.y', b'y') 348 | db.put(b'00002.x', b'x') 349 | 350 | For initial bulk loads the Vector-MemtableFactory makes sense. :: 351 | 352 | opts = rocksdb.Options() 353 | opts.allow_concurrent_memtable_write = False 354 | opts.memtable_factory = rocksdb.VectorMemtableFactory() 355 | opts.create_if_missing = True 356 | 357 | db = rocksdb.DB("test.db", opts) 358 | 359 | As noted here :ref:`table_factories_label`, it is also possible to change the 360 | representation of the final data files. 361 | Here is an example how to use a 'PlainTable'. :: 362 | 363 | opts = rocksdb.Options() 364 | opts.table_factory = rocksdb.PlainTableFactory() 365 | opts.create_if_missing = True 366 | 367 | db = rocksdb.DB("test.db", opts) 368 | 369 | Change Compaction Style 370 | ======================= 371 | 372 | RocksDB has a compaction algorithm called *universal*. This style typically 373 | results in lower write amplification but higher space amplification than 374 | Level Style Compaction. See here for more details, 375 | https://github.com/facebook/rocksdb/wiki/Rocksdb-Architecture-Guide#multi-threaded-compactions 376 | 377 | Here is an example to switch to *universal style compaction*. :: 378 | 379 | opts = rocksdb.Options() 380 | opts.compaction_style = "universal" 381 | opts.compaction_options_universal = {"min_merge_width": 3} 382 | 383 | See here for more options on *universal style compaction*, 384 | :py:attr:`rocksdb.Options.compaction_options_universal` 385 | 386 | Iterate Over WriteBatch 387 | ======================= 388 | 389 | In same cases you need to know, what operations happened on a WriteBatch. 390 | The pyrocksdb WriteBatch supports the iterator protocol, see this example. 
:: 391 | 392 | batch = rocksdb.WriteBatch() 393 | batch.put(b"key1", b"v1") 394 | batch.delete(b'a') 395 | batch.merge(b'xxx', b'value') 396 | 397 | for op, key, value in batch: 398 | print op, key, value 399 | 400 | # prints the following three lines 401 | # Put key1 v1 402 | # Delete a 403 | # Merge xxx value 404 | -------------------------------------------------------------------------------- /rocksdb/__init__.py: -------------------------------------------------------------------------------- 1 | from ._rocksdb import * 2 | -------------------------------------------------------------------------------- /rocksdb/backup.pxd: -------------------------------------------------------------------------------- 1 | from libcpp cimport bool as cpp_bool 2 | from libcpp.string cimport string 3 | from libcpp.vector cimport vector 4 | from libc.stdint cimport uint32_t 5 | from libc.stdint cimport int64_t 6 | from libc.stdint cimport uint64_t 7 | 8 | from status cimport Status 9 | from db cimport DB 10 | from env cimport Env 11 | 12 | cdef extern from "rocksdb/utilities/backupable_db.h" namespace "rocksdb": 13 | ctypedef uint32_t BackupID 14 | 15 | cdef cppclass BackupableDBOptions: 16 | BackupableDBOptions(const string& backup_dir) 17 | 18 | cdef struct BackupInfo: 19 | BackupID backup_id 20 | int64_t timestamp 21 | uint64_t size 22 | 23 | cdef cppclass BackupEngine: 24 | Status CreateNewBackup(DB*, cpp_bool) nogil except+ 25 | Status PurgeOldBackups(uint32_t) nogil except+ 26 | Status DeleteBackup(BackupID) nogil except+ 27 | void StopBackup() nogil except+ 28 | void GetBackupInfo(vector[BackupInfo]*) nogil except+ 29 | Status RestoreDBFromBackup(BackupID, string&, string&) nogil except+ 30 | Status RestoreDBFromLatestBackup(string&, string&) nogil except+ 31 | 32 | cdef Status BackupEngine_Open "rocksdb::BackupEngine::Open"( 33 | Env*, 34 | BackupableDBOptions&, 35 | BackupEngine**) 36 | -------------------------------------------------------------------------------- /rocksdb/cache.pxd: -------------------------------------------------------------------------------- 1 | from std_memory cimport shared_ptr 2 | 3 | cdef extern from "rocksdb/cache.h" namespace "rocksdb": 4 | cdef cppclass Cache: 5 | pass 6 | 7 | cdef extern shared_ptr[Cache] NewLRUCache(size_t) 8 | cdef extern shared_ptr[Cache] NewLRUCache(size_t, int) 9 | -------------------------------------------------------------------------------- /rocksdb/comparator.pxd: -------------------------------------------------------------------------------- 1 | from libcpp.string cimport string 2 | from slice_ cimport Slice 3 | from logger cimport Logger 4 | from std_memory cimport shared_ptr 5 | 6 | cdef extern from "rocksdb/comparator.h" namespace "rocksdb": 7 | cdef cppclass Comparator: 8 | const char* Name() 9 | int Compare(const Slice&, const Slice&) const 10 | 11 | cdef extern const Comparator* BytewiseComparator() nogil except + 12 | 13 | ctypedef int (*compare_func)( 14 | void*, 15 | Logger*, 16 | string&, 17 | const Slice&, 18 | const Slice&) 19 | 20 | cdef extern from "cpp/comparator_wrapper.hpp" namespace "py_rocks": 21 | cdef cppclass ComparatorWrapper: 22 | ComparatorWrapper(string, void*, compare_func) nogil except + 23 | void set_info_log(shared_ptr[Logger]) nogil except+ 24 | -------------------------------------------------------------------------------- /rocksdb/cpp/comparator_wrapper.hpp: -------------------------------------------------------------------------------- 1 | #include "rocksdb/comparator.h" 2 | #include "rocksdb/env.h" 
3 | #include 4 | 5 | using std::string; 6 | using rocksdb::Comparator; 7 | using rocksdb::Slice; 8 | using rocksdb::Logger; 9 | 10 | namespace py_rocks { 11 | class ComparatorWrapper: public Comparator { 12 | public: 13 | typedef int (*compare_func)( 14 | void*, 15 | Logger*, 16 | string&, 17 | const Slice&, 18 | const Slice&); 19 | 20 | ComparatorWrapper( 21 | string name, 22 | void* compare_context, 23 | compare_func compare_callback): 24 | name(name), 25 | compare_context(compare_context), 26 | compare_callback(compare_callback) 27 | {} 28 | 29 | virtual int Compare(const Slice& a, const Slice& b) const { 30 | string error_msg; 31 | int val; 32 | 33 | val = this->compare_callback( 34 | this->compare_context, 35 | this->info_log.get(), 36 | error_msg, 37 | a, 38 | b); 39 | 40 | if (error_msg.size()) { 41 | throw std::runtime_error(error_msg.c_str()); 42 | } 43 | return val; 44 | } 45 | 46 | virtual const char* Name() const { 47 | return this->name.c_str(); 48 | } 49 | 50 | virtual void FindShortestSeparator(string*, const Slice&) const {} 51 | virtual void FindShortSuccessor(string*) const {} 52 | 53 | void set_info_log(std::shared_ptr info_log) { 54 | this->info_log = info_log; 55 | } 56 | 57 | private: 58 | string name; 59 | void* compare_context; 60 | compare_func compare_callback; 61 | std::shared_ptr info_log; 62 | }; 63 | } 64 | -------------------------------------------------------------------------------- /rocksdb/cpp/filter_policy_wrapper.hpp: -------------------------------------------------------------------------------- 1 | #include "rocksdb/filter_policy.h" 2 | #include "rocksdb/env.h" 3 | #include 4 | 5 | using std::string; 6 | using rocksdb::FilterPolicy; 7 | using rocksdb::Slice; 8 | using rocksdb::Logger; 9 | 10 | namespace py_rocks { 11 | class FilterPolicyWrapper: public FilterPolicy { 12 | public: 13 | typedef void (*create_filter_func)( 14 | void* ctx, 15 | Logger*, 16 | string&, 17 | const Slice* keys, 18 | int n, 19 | string* dst); 20 | 21 | typedef bool (*key_may_match_func)( 22 | void* ctx, 23 | Logger*, 24 | string&, 25 | const Slice& key, 26 | const Slice& filter); 27 | 28 | FilterPolicyWrapper( 29 | string name, 30 | void* ctx, 31 | create_filter_func create_filter_callback, 32 | key_may_match_func key_may_match_callback): 33 | name(name), 34 | ctx(ctx), 35 | create_filter_callback(create_filter_callback), 36 | key_may_match_callback(key_may_match_callback) 37 | {} 38 | 39 | virtual void 40 | CreateFilter(const Slice* keys, int n, std::string* dst) const { 41 | string error_msg; 42 | 43 | this->create_filter_callback( 44 | this->ctx, 45 | this->info_log.get(), 46 | error_msg, 47 | keys, 48 | n, 49 | dst); 50 | 51 | if (error_msg.size()) { 52 | throw std::runtime_error(error_msg.c_str()); 53 | } 54 | } 55 | 56 | virtual bool 57 | KeyMayMatch(const Slice& key, const Slice& filter) const { 58 | string error_msg; 59 | bool val; 60 | 61 | val = this->key_may_match_callback( 62 | this->ctx, 63 | this->info_log.get(), 64 | error_msg, 65 | key, 66 | filter); 67 | 68 | if (error_msg.size()) { 69 | throw std::runtime_error(error_msg.c_str()); 70 | } 71 | return val; 72 | } 73 | 74 | virtual const char* Name() const { 75 | return this->name.c_str(); 76 | } 77 | 78 | void set_info_log(std::shared_ptr info_log) { 79 | this->info_log = info_log; 80 | } 81 | 82 | private: 83 | string name; 84 | void* ctx; 85 | create_filter_func create_filter_callback; 86 | key_may_match_func key_may_match_callback; 87 | std::shared_ptr info_log; 88 | }; 89 | } 90 | 
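The wrapper above is what a Python-defined filter policy ultimately runs through. Below is a minimal sketch of the Python side, assuming the rocksdb.interfaces.FilterPolicy contract shown later in this listing; the class name, toy filter format and database path are made up for illustration, and in practice the built-in bloom filter (NewBloomFilterPolicy in filter_policy.pxd) is usually what you want.

import rocksdb

class NullJoinedFilter(rocksdb.interfaces.FilterPolicy):
    # Toy policy: the "filter" is simply the keys joined with NUL bytes,
    # so key_may_match never reports a false negative for these keys.
    def name(self):
        return b'null-joined-filter'

    def create_filter(self, keys):
        return b'\x00'.join(keys)

    def key_may_match(self, key, filter_):
        return key in filter_.split(b'\x00')

opts = rocksdb.Options(create_if_missing=True)
opts.table_factory = rocksdb.BlockBasedTableFactory(
    filter_policy=NullJoinedFilter(),
    block_cache=rocksdb.LRUCache(2 * 1024 * 1024))

db = rocksdb.DB('/tmp/filter_example', opts)
db.put(b'spam', b'eggs')
assert db.get(b'spam') == b'eggs'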
-------------------------------------------------------------------------------- /rocksdb/cpp/memtable_factories.hpp: -------------------------------------------------------------------------------- 1 | #include "rocksdb/memtablerep.h" 2 | 3 | using rocksdb::MemTableRepFactory; 4 | using rocksdb::VectorRepFactory; 5 | using rocksdb::SkipListFactory; 6 | 7 | namespace py_rocks { 8 | MemTableRepFactory* NewVectorRepFactory(size_t count = 0) { 9 | return new VectorRepFactory(count); 10 | } 11 | 12 | MemTableRepFactory* NewSkipListFactory() { 13 | return new SkipListFactory(); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /rocksdb/cpp/merge_operator_wrapper.hpp: -------------------------------------------------------------------------------- 1 | #include "rocksdb/merge_operator.h" 2 | 3 | using std::string; 4 | using std::deque; 5 | using rocksdb::Slice; 6 | using rocksdb::Logger; 7 | using rocksdb::MergeOperator; 8 | using rocksdb::AssociativeMergeOperator; 9 | 10 | namespace py_rocks { 11 | class AssociativeMergeOperatorWrapper: public AssociativeMergeOperator { 12 | public: 13 | typedef bool (*merge_func)( 14 | void*, 15 | const Slice& key, 16 | const Slice* existing_value, 17 | const Slice& value, 18 | std::string* new_value, 19 | Logger* logger); 20 | 21 | 22 | AssociativeMergeOperatorWrapper( 23 | string name, 24 | void* merge_context, 25 | merge_func merge_callback): 26 | name(name), 27 | merge_context(merge_context), 28 | merge_callback(merge_callback) 29 | {} 30 | 31 | virtual bool Merge( 32 | const Slice& key, 33 | const Slice* existing_value, 34 | const Slice& value, 35 | std::string* new_value, 36 | Logger* logger) const 37 | { 38 | return this->merge_callback( 39 | this->merge_context, 40 | key, 41 | existing_value, 42 | value, 43 | new_value, 44 | logger); 45 | } 46 | 47 | virtual const char* Name() const { 48 | return this->name.c_str(); 49 | } 50 | 51 | private: 52 | string name; 53 | void* merge_context; 54 | merge_func merge_callback; 55 | }; 56 | 57 | class MergeOperatorWrapper: public MergeOperator { 58 | public: 59 | typedef bool (*full_merge_func)( 60 | void* ctx, 61 | const Slice& key, 62 | const Slice* existing_value, 63 | const deque& operand_list, 64 | string* new_value, 65 | Logger* logger); 66 | 67 | typedef bool (*partial_merge_func)( 68 | void* ctx, 69 | const Slice& key, 70 | const Slice& left_op, 71 | const Slice& right_op, 72 | string* new_value, 73 | Logger* logger); 74 | 75 | MergeOperatorWrapper( 76 | string name, 77 | void* full_merge_context, 78 | void* partial_merge_context, 79 | full_merge_func full_merge_callback, 80 | partial_merge_func partial_merge_callback): 81 | name(name), 82 | full_merge_context(full_merge_context), 83 | partial_merge_context(partial_merge_context), 84 | full_merge_callback(full_merge_callback), 85 | partial_merge_callback(partial_merge_callback) 86 | {} 87 | 88 | virtual bool FullMerge( 89 | const Slice& key, 90 | const Slice* existing_value, 91 | const deque& operand_list, 92 | string* new_value, 93 | Logger* logger) const 94 | { 95 | return this->full_merge_callback( 96 | this->full_merge_context, 97 | key, 98 | existing_value, 99 | operand_list, 100 | new_value, 101 | logger); 102 | } 103 | 104 | virtual bool PartialMerge ( 105 | const Slice& key, 106 | const Slice& left_operand, 107 | const Slice& right_operand, 108 | string* new_value, 109 | Logger* logger) const 110 | { 111 | return this->partial_merge_callback( 112 | this->partial_merge_context, 113 | key, 114 | 
left_operand, 115 | right_operand, 116 | new_value, 117 | logger); 118 | } 119 | 120 | virtual const char* Name() const { 121 | return this->name.c_str(); 122 | } 123 | 124 | private: 125 | string name; 126 | void* full_merge_context; 127 | void* partial_merge_context; 128 | full_merge_func full_merge_callback; 129 | partial_merge_func partial_merge_callback; 130 | 131 | }; 132 | } 133 | -------------------------------------------------------------------------------- /rocksdb/cpp/slice_transform_wrapper.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "rocksdb/slice_transform.h" 3 | #include "rocksdb/env.h" 4 | #include 5 | 6 | using std::string; 7 | using rocksdb::SliceTransform; 8 | using rocksdb::Slice; 9 | using rocksdb::Logger; 10 | 11 | namespace py_rocks { 12 | class SliceTransformWrapper: public SliceTransform { 13 | public: 14 | typedef Slice (*transform_func)( 15 | void*, 16 | Logger*, 17 | string&, 18 | const Slice&); 19 | 20 | typedef bool (*in_domain_func)( 21 | void*, 22 | Logger*, 23 | string&, 24 | const Slice&); 25 | 26 | typedef bool (*in_range_func)( 27 | void*, 28 | Logger*, 29 | string&, 30 | const Slice&); 31 | 32 | SliceTransformWrapper( 33 | string name, 34 | void* ctx, 35 | transform_func transfrom_callback, 36 | in_domain_func in_domain_callback, 37 | in_range_func in_range_callback): 38 | name(name), 39 | ctx(ctx), 40 | transfrom_callback(transfrom_callback), 41 | in_domain_callback(in_domain_callback), 42 | in_range_callback(in_range_callback) 43 | {} 44 | 45 | virtual const char* Name() const { 46 | return this->name.c_str(); 47 | } 48 | 49 | virtual Slice Transform(const Slice& src) const { 50 | string error_msg; 51 | Slice val; 52 | 53 | val = this->transfrom_callback( 54 | this->ctx, 55 | this->info_log.get(), 56 | error_msg, 57 | src); 58 | 59 | if (error_msg.size()) { 60 | throw std::runtime_error(error_msg.c_str()); 61 | } 62 | return val; 63 | } 64 | 65 | virtual bool InDomain(const Slice& src) const { 66 | string error_msg; 67 | bool val; 68 | 69 | val = this->in_domain_callback( 70 | this->ctx, 71 | this->info_log.get(), 72 | error_msg, 73 | src); 74 | 75 | if (error_msg.size()) { 76 | throw std::runtime_error(error_msg.c_str()); 77 | } 78 | return val; 79 | } 80 | 81 | virtual bool InRange(const Slice& dst) const { 82 | string error_msg; 83 | bool val; 84 | 85 | val = this->in_range_callback( 86 | this->ctx, 87 | this->info_log.get(), 88 | error_msg, 89 | dst); 90 | 91 | if (error_msg.size()) { 92 | throw std::runtime_error(error_msg.c_str()); 93 | } 94 | return val; 95 | } 96 | 97 | void set_info_log(std::shared_ptr info_log) { 98 | this->info_log = info_log; 99 | } 100 | 101 | private: 102 | string name; 103 | void* ctx; 104 | transform_func transfrom_callback; 105 | in_domain_func in_domain_callback; 106 | in_range_func in_range_callback; 107 | std::shared_ptr info_log; 108 | }; 109 | } 110 | -------------------------------------------------------------------------------- /rocksdb/cpp/utils.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace py_rocks { 4 | template 5 | const T* vector_data(std::vector& v) { 6 | return v.data(); 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /rocksdb/cpp/write_batch_iter_helper.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include "rocksdb/write_batch.h" 5 | 6 | 
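// NOTE (annotation, not part of the original header): this helper backs
// WriteBatch iteration on the Python side. get_batch_items() at the bottom
// replays a serialized batch through RecordItemsHandler, which records every
// Put/Merge/Delete as a BatchItem; the Cython layer then exposes those items
// as ("Put" | "Merge" | "Delete", key, value) tuples, which is what the
// `for op, key, value in batch` loop shown earlier iterates over.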
namespace py_rocks { 7 | 8 | class RecordItemsHandler: public rocksdb::WriteBatch::Handler { 9 | public: 10 | enum Optype {PutRecord, MergeRecord, DeleteRecord}; 11 | 12 | class BatchItem { 13 | public: 14 | BatchItem( 15 | const Optype& op, 16 | uint32_t column_family_id, 17 | const rocksdb::Slice& key, 18 | const rocksdb::Slice& value): 19 | op(op), 20 | column_family_id(column_family_id), 21 | key(key), 22 | value(value) 23 | {} 24 | 25 | const Optype op; 26 | uint32_t column_family_id; 27 | const rocksdb::Slice key; 28 | const rocksdb::Slice value; 29 | }; 30 | 31 | typedef std::vector BatchItems; 32 | 33 | public: 34 | /* Items is filled during iteration. */ 35 | RecordItemsHandler(BatchItems* items): items(items) {} 36 | 37 | virtual rocksdb::Status PutCF( 38 | uint32_t column_family_id, const Slice& key, const Slice& value) { 39 | this->items->emplace_back(PutRecord, column_family_id, key, value); 40 | return rocksdb::Status::OK(); 41 | } 42 | 43 | virtual rocksdb::Status MergeCF( 44 | uint32_t column_family_id, const Slice& key, const Slice& value) { 45 | this->items->emplace_back(MergeRecord, column_family_id, key, value); 46 | return rocksdb::Status::OK(); 47 | } 48 | 49 | virtual rocksdb::Status DeleteCF( 50 | uint32_t column_family_id, const Slice& key) { 51 | this->items->emplace_back(DeleteRecord, column_family_id, key, rocksdb::Slice()); 52 | return rocksdb::Status::OK(); 53 | } 54 | 55 | private: 56 | BatchItems* items; 57 | }; 58 | 59 | rocksdb::Status 60 | get_batch_items(const rocksdb::WriteBatch* batch, RecordItemsHandler::BatchItems* items) { 61 | RecordItemsHandler handler(items); 62 | return batch->Iterate(&handler); 63 | } 64 | 65 | } 66 | -------------------------------------------------------------------------------- /rocksdb/db.pxd: -------------------------------------------------------------------------------- 1 | cimport options 2 | from libc.stdint cimport uint64_t, uint32_t 3 | from status cimport Status 4 | from libcpp cimport bool as cpp_bool 5 | from libcpp.string cimport string 6 | from libcpp.vector cimport vector 7 | from slice_ cimport Slice 8 | from snapshot cimport Snapshot 9 | from iterator cimport Iterator 10 | 11 | cdef extern from "rocksdb/write_batch.h" namespace "rocksdb": 12 | cdef cppclass WriteBatch: 13 | WriteBatch() nogil except+ 14 | WriteBatch(string) nogil except+ 15 | void Put(const Slice&, const Slice&) nogil except+ 16 | void Put(ColumnFamilyHandle*, const Slice&, const Slice&) nogil except+ 17 | void Merge(const Slice&, const Slice&) nogil except+ 18 | void Merge(ColumnFamilyHandle*, const Slice&, const Slice&) nogil except+ 19 | void Delete(const Slice&) nogil except+ 20 | void Delete(ColumnFamilyHandle*, const Slice&) nogil except+ 21 | void PutLogData(const Slice&) nogil except+ 22 | void Clear() nogil except+ 23 | const string& Data() nogil except+ 24 | int Count() nogil except+ 25 | 26 | cdef extern from "cpp/write_batch_iter_helper.hpp" namespace "py_rocks": 27 | cdef enum BatchItemOp "RecordItemsHandler::Optype": 28 | BatchItemOpPut "py_rocks::RecordItemsHandler::Optype::PutRecord" 29 | BatchItemOpMerge "py_rocks::RecordItemsHandler::Optype::MergeRecord" 30 | BatchItemOpDelte "py_rocks::RecordItemsHandler::Optype::DeleteRecord" 31 | 32 | cdef cppclass BatchItem "py_rocks::RecordItemsHandler::BatchItem": 33 | BatchItemOp op 34 | uint32_t column_family_id 35 | Slice key 36 | Slice value 37 | 38 | Status get_batch_items(WriteBatch* batch, vector[BatchItem]* items) 39 | 40 | 41 | cdef extern from "rocksdb/db.h" namespace 
"rocksdb": 42 | ctypedef uint64_t SequenceNumber 43 | string kDefaultColumnFamilyName 44 | 45 | cdef struct LiveFileMetaData: 46 | string name 47 | int level 48 | uint64_t size 49 | string smallestkey 50 | string largestkey 51 | SequenceNumber smallest_seqno 52 | SequenceNumber largest_seqno 53 | 54 | cdef cppclass Range: 55 | Range(const Slice&, const Slice&) 56 | 57 | cdef cppclass DB: 58 | Status Put( 59 | const options.WriteOptions&, 60 | ColumnFamilyHandle*, 61 | const Slice&, 62 | const Slice&) nogil except+ 63 | 64 | Status Delete( 65 | const options.WriteOptions&, 66 | ColumnFamilyHandle*, 67 | const Slice&) nogil except+ 68 | 69 | Status Merge( 70 | const options.WriteOptions&, 71 | ColumnFamilyHandle*, 72 | const Slice&, 73 | const Slice&) nogil except+ 74 | 75 | Status Write( 76 | const options.WriteOptions&, 77 | WriteBatch*) nogil except+ 78 | 79 | Status Get( 80 | const options.ReadOptions&, 81 | ColumnFamilyHandle*, 82 | const Slice&, 83 | string*) nogil except+ 84 | 85 | vector[Status] MultiGet( 86 | const options.ReadOptions&, 87 | const vector[ColumnFamilyHandle*]&, 88 | const vector[Slice]&, 89 | vector[string]*) nogil except+ 90 | 91 | cpp_bool KeyMayExist( 92 | const options.ReadOptions&, 93 | ColumnFamilyHandle*, 94 | Slice&, 95 | string*, 96 | cpp_bool*) nogil except+ 97 | 98 | cpp_bool KeyMayExist( 99 | const options.ReadOptions&, 100 | ColumnFamilyHandle*, 101 | Slice&, 102 | string*) nogil except+ 103 | 104 | Iterator* NewIterator( 105 | const options.ReadOptions&, 106 | ColumnFamilyHandle*) nogil except+ 107 | 108 | void NewIterators( 109 | const options.ReadOptions&, 110 | vector[ColumnFamilyHandle*]&, 111 | vector[Iterator*]*) nogil except+ 112 | 113 | const Snapshot* GetSnapshot() nogil except+ 114 | 115 | void ReleaseSnapshot(const Snapshot*) nogil except+ 116 | 117 | cpp_bool GetProperty( 118 | ColumnFamilyHandle*, 119 | const Slice&, 120 | string*) nogil except+ 121 | 122 | void GetApproximateSizes( 123 | ColumnFamilyHandle*, 124 | const Range* 125 | int, 126 | uint64_t*) nogil except+ 127 | 128 | Status CompactRange( 129 | const options.CompactRangeOptions&, 130 | ColumnFamilyHandle*, 131 | const Slice*, 132 | const Slice*) nogil except+ 133 | 134 | Status CreateColumnFamily( 135 | const options.ColumnFamilyOptions&, 136 | const string&, 137 | ColumnFamilyHandle**) nogil except+ 138 | 139 | Status DropColumnFamily( 140 | ColumnFamilyHandle*) nogil except+ 141 | 142 | int NumberLevels(ColumnFamilyHandle*) nogil except+ 143 | int MaxMemCompactionLevel(ColumnFamilyHandle*) nogil except+ 144 | int Level0StopWriteTrigger(ColumnFamilyHandle*) nogil except+ 145 | const string& GetName() nogil except+ 146 | const options.Options& GetOptions(ColumnFamilyHandle*) nogil except+ 147 | Status Flush(const options.FlushOptions&, ColumnFamilyHandle*) nogil except+ 148 | Status DisableFileDeletions() nogil except+ 149 | Status EnableFileDeletions() nogil except+ 150 | 151 | # TODO: Status GetSortedWalFiles(VectorLogPtr& files) 152 | # TODO: SequenceNumber GetLatestSequenceNumber() 153 | # TODO: Status GetUpdatesSince( 154 | # SequenceNumber seq_number, 155 | # unique_ptr[TransactionLogIterator]*) 156 | 157 | Status DeleteFile(string) nogil except+ 158 | void GetLiveFilesMetaData(vector[LiveFileMetaData]*) nogil except+ 159 | ColumnFamilyHandle* DefaultColumnFamily() 160 | 161 | 162 | cdef Status DB_Open "rocksdb::DB::Open"( 163 | const options.Options&, 164 | const string&, 165 | DB**) nogil except+ 166 | 167 | cdef Status DB_Open_ColumnFamilies "rocksdb::DB::Open"( 168 | 
const options.Options&, 169 | const string&, 170 | const vector[ColumnFamilyDescriptor]&, 171 | vector[ColumnFamilyHandle*]*, 172 | DB**) nogil except+ 173 | 174 | cdef Status DB_OpenForReadOnly "rocksdb::DB::OpenForReadOnly"( 175 | const options.Options&, 176 | const string&, 177 | DB**, 178 | cpp_bool) nogil except+ 179 | 180 | cdef Status DB_OpenForReadOnly_ColumnFamilies "rocksdb::DB::OpenForReadOnly"( 181 | const options.Options&, 182 | const string&, 183 | const vector[ColumnFamilyDescriptor]&, 184 | vector[ColumnFamilyHandle*]*, 185 | DB**, 186 | cpp_bool) nogil except+ 187 | 188 | cdef Status RepairDB(const string& dbname, const options.Options&) 189 | 190 | cdef Status ListColumnFamilies "rocksdb::DB::ListColumnFamilies" ( 191 | const options.Options&, 192 | const string&, 193 | vector[string]*) nogil except+ 194 | 195 | cdef cppclass ColumnFamilyHandle: 196 | const string& GetName() nogil except+ 197 | int GetID() nogil except+ 198 | 199 | cdef cppclass ColumnFamilyDescriptor: 200 | ColumnFamilyDescriptor() nogil except+ 201 | ColumnFamilyDescriptor( 202 | const string&, 203 | const options.ColumnFamilyOptions&) nogil except+ 204 | string name 205 | options.ColumnFamilyOptions options 206 | -------------------------------------------------------------------------------- /rocksdb/env.pxd: -------------------------------------------------------------------------------- 1 | cdef extern from "rocksdb/env.h" namespace "rocksdb": 2 | cdef cppclass Env: 3 | Env() 4 | 5 | cdef Env* Env_Default "rocksdb::Env::Default"() 6 | -------------------------------------------------------------------------------- /rocksdb/errors.py: -------------------------------------------------------------------------------- 1 | class NotFound(Exception): 2 | pass 3 | 4 | class Corruption(Exception): 5 | pass 6 | 7 | class NotSupported(Exception): 8 | pass 9 | 10 | class InvalidArgument(Exception): 11 | pass 12 | 13 | class RocksIOError(Exception): 14 | pass 15 | 16 | class MergeInProgress(Exception): 17 | pass 18 | 19 | class Incomplete(Exception): 20 | pass 21 | -------------------------------------------------------------------------------- /rocksdb/filter_policy.pxd: -------------------------------------------------------------------------------- 1 | from libcpp cimport bool as cpp_bool 2 | from libcpp.string cimport string 3 | from libc.string cimport const_char 4 | from slice_ cimport Slice 5 | from std_memory cimport shared_ptr 6 | from logger cimport Logger 7 | 8 | cdef extern from "rocksdb/filter_policy.h" namespace "rocksdb": 9 | cdef cppclass FilterPolicy: 10 | void CreateFilter(const Slice*, int, string*) nogil except+ 11 | cpp_bool KeyMayMatch(const Slice&, const Slice&) nogil except+ 12 | const_char* Name() nogil except+ 13 | 14 | cdef extern const FilterPolicy* NewBloomFilterPolicy(int) nogil except+ 15 | 16 | ctypedef void (*create_filter_func)( 17 | void*, 18 | Logger*, 19 | string&, 20 | const Slice*, 21 | int, 22 | string*) 23 | 24 | ctypedef cpp_bool (*key_may_match_func)( 25 | void*, 26 | Logger*, 27 | string&, 28 | const Slice&, 29 | const Slice&) 30 | 31 | cdef extern from "cpp/filter_policy_wrapper.hpp" namespace "py_rocks": 32 | cdef cppclass FilterPolicyWrapper: 33 | FilterPolicyWrapper( 34 | string, 35 | void*, 36 | create_filter_func, 37 | key_may_match_func) nogil except+ 38 | 39 | void set_info_log(shared_ptr[Logger]) nogil except+ 40 | -------------------------------------------------------------------------------- /rocksdb/interfaces.py: 
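interfaces.py (below) defines the abstract base classes that user code subclasses to plug pure-Python objects into the C++ wrappers above. A minimal sketch of a custom comparator wired into an Options object, assuming that contract; the class name, key format and path are illustrative only, keys arrive as bytes, and compare() must return a negative, zero or positive integer.

import rocksdb

class IntComparator(rocksdb.interfaces.Comparator):
    # Order keys by their integer value instead of bytewise.
    def name(self):
        return b'int-comparator'

    def compare(self, a, b):
        return int(a) - int(b)

opts = rocksdb.Options(create_if_missing=True)
opts.comparator = IntComparator()
db = rocksdb.DB('/tmp/comparator_example', opts)
for i in (9, 10, 100):
    db.put(str(i).encode('ascii'), b'x')

it = db.iterkeys()
it.seek_to_first()
# numeric order; the default bytewise comparator would give [b'10', b'100', b'9']
assert list(it) == [b'9', b'10', b'100']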
-------------------------------------------------------------------------------- 1 | from abc import ABCMeta 2 | from abc import abstractmethod 3 | 4 | 5 | class Comparator: 6 | __metaclass__ = ABCMeta 7 | 8 | @abstractmethod 9 | def compare(self, a, b): 10 | pass 11 | 12 | @abstractmethod 13 | def name(self): 14 | pass 15 | 16 | 17 | class AssociativeMergeOperator: 18 | __metaclass__ = ABCMeta 19 | 20 | @abstractmethod 21 | def merge(self, key, existing_value, value): 22 | pass 23 | 24 | @abstractmethod 25 | def name(self): 26 | pass 27 | 28 | 29 | class MergeOperator: 30 | __metaclass__ = ABCMeta 31 | 32 | @abstractmethod 33 | def full_merge(self, key, existing_value, operand_list): 34 | pass 35 | 36 | @abstractmethod 37 | def partial_merge(self, key, left_operand, right_operand): 38 | pass 39 | 40 | @abstractmethod 41 | def name(self): 42 | pass 43 | 44 | 45 | class FilterPolicy: 46 | __metaclass__ = ABCMeta 47 | 48 | @abstractmethod 49 | def name(self): 50 | pass 51 | 52 | @abstractmethod 53 | def create_filter(self, keys): 54 | pass 55 | 56 | @abstractmethod 57 | def key_may_match(self, key, filter_): 58 | pass 59 | 60 | class SliceTransform: 61 | __metaclass__ = ABCMeta 62 | 63 | @abstractmethod 64 | def name(self): 65 | pass 66 | 67 | @abstractmethod 68 | def transform(self, src): 69 | pass 70 | 71 | @abstractmethod 72 | def in_domain(self, src): 73 | pass 74 | 75 | @abstractmethod 76 | def in_range(self, dst): 77 | pass 78 | -------------------------------------------------------------------------------- /rocksdb/iterator.pxd: -------------------------------------------------------------------------------- 1 | from libcpp cimport bool as cpp_bool 2 | from slice_ cimport Slice 3 | from status cimport Status 4 | 5 | cdef extern from "rocksdb/iterator.h" namespace "rocksdb": 6 | cdef cppclass Iterator: 7 | cpp_bool Valid() nogil except+ 8 | void SeekToFirst() nogil except+ 9 | void SeekToLast() nogil except+ 10 | void Seek(const Slice&) nogil except+ 11 | void Next() nogil except+ 12 | void Prev() nogil except+ 13 | void SeekForPrev(const Slice&) nogil except+ 14 | Slice key() nogil except+ 15 | Slice value() nogil except+ 16 | Status status() nogil except+ 17 | -------------------------------------------------------------------------------- /rocksdb/logger.pxd: -------------------------------------------------------------------------------- 1 | cdef extern from "rocksdb/env.h" namespace "rocksdb": 2 | cdef cppclass Logger: 3 | pass 4 | 5 | void Log(Logger*, const char*, ...) 
nogil except+ 6 | -------------------------------------------------------------------------------- /rocksdb/memtablerep.pxd: -------------------------------------------------------------------------------- 1 | from libc.stdint cimport int32_t 2 | 3 | cdef extern from "rocksdb/memtablerep.h" namespace "rocksdb": 4 | cdef cppclass MemTableRepFactory: 5 | MemTableRepFactory() 6 | 7 | cdef MemTableRepFactory* NewHashSkipListRepFactory(size_t, int32_t, int32_t) 8 | cdef MemTableRepFactory* NewHashLinkListRepFactory(size_t) 9 | 10 | cdef extern from "cpp/memtable_factories.hpp" namespace "py_rocks": 11 | cdef MemTableRepFactory* NewVectorRepFactory(size_t) 12 | cdef MemTableRepFactory* NewSkipListFactory() 13 | -------------------------------------------------------------------------------- /rocksdb/merge_operator.pxd: -------------------------------------------------------------------------------- 1 | from libcpp.string cimport string 2 | from libcpp cimport bool as cpp_bool 3 | from libcpp.deque cimport deque 4 | from slice_ cimport Slice 5 | from logger cimport Logger 6 | from std_memory cimport shared_ptr 7 | 8 | cdef extern from "rocksdb/merge_operator.h" namespace "rocksdb": 9 | cdef cppclass MergeOperator: 10 | pass 11 | 12 | ctypedef cpp_bool (*merge_func)( 13 | void*, 14 | const Slice&, 15 | const Slice*, 16 | const Slice&, 17 | string*, 18 | Logger*) 19 | 20 | ctypedef cpp_bool (*full_merge_func)( 21 | void* ctx, 22 | const Slice& key, 23 | const Slice* existing_value, 24 | const deque[string]& operand_list, 25 | string* new_value, 26 | Logger* logger) 27 | 28 | ctypedef cpp_bool (*partial_merge_func)( 29 | void* ctx, 30 | const Slice& key, 31 | const Slice& left_op, 32 | const Slice& right_op, 33 | string* new_value, 34 | Logger* logger) 35 | 36 | cdef extern from "cpp/merge_operator_wrapper.hpp" namespace "py_rocks": 37 | cdef cppclass AssociativeMergeOperatorWrapper: 38 | AssociativeMergeOperatorWrapper(string, void*, merge_func) nogil except+ 39 | 40 | cdef cppclass MergeOperatorWrapper: 41 | MergeOperatorWrapper( 42 | string, 43 | void*, 44 | void*, 45 | full_merge_func, 46 | partial_merge_func) nogil except+ 47 | -------------------------------------------------------------------------------- /rocksdb/merge_operators.py: -------------------------------------------------------------------------------- 1 | import struct as py_struct 2 | from rocksdb.interfaces import AssociativeMergeOperator 3 | 4 | class UintAddOperator(AssociativeMergeOperator): 5 | def merge(self, key, existing_value, value): 6 | if existing_value: 7 | s = py_struct.unpack('Q', existing_value)[0] + py_struct.unpack('Q', value)[0] 8 | return (True, py_struct.pack('Q', s)) 9 | return (True, value) 10 | 11 | def name(self): 12 | return b'uint64add' 13 | 14 | class StringAppendOperator(AssociativeMergeOperator): 15 | def merge(self, key, existing_value, value): 16 | if existing_value: 17 | s = existing_value + b',' + value 18 | return (True, s) 19 | return (True, value) 20 | 21 | def name(self): 22 | return b'StringAppendOperator' 23 | -------------------------------------------------------------------------------- /rocksdb/options.pxd: -------------------------------------------------------------------------------- 1 | from libcpp cimport bool as cpp_bool 2 | from libcpp.string cimport string 3 | from libcpp.vector cimport vector 4 | from libc.stdint cimport uint64_t 5 | from libc.stdint cimport uint32_t 6 | from std_memory cimport shared_ptr 7 | from comparator cimport Comparator 8 | from merge_operator cimport 
MergeOperator 9 | from logger cimport Logger 10 | from slice_ cimport Slice 11 | from snapshot cimport Snapshot 12 | from slice_transform cimport SliceTransform 13 | from table_factory cimport TableFactory 14 | from memtablerep cimport MemTableRepFactory 15 | from universal_compaction cimport CompactionOptionsUniversal 16 | from cache cimport Cache 17 | 18 | cdef extern from "rocksdb/options.h" namespace "rocksdb": 19 | cdef cppclass CompressionOptions: 20 | int window_bits; 21 | int level; 22 | int strategy; 23 | uint32_t max_dict_bytes 24 | CompressionOptions() except + 25 | CompressionOptions(int, int, int, int) except + 26 | 27 | ctypedef enum CompactionStyle: 28 | kCompactionStyleLevel 29 | kCompactionStyleUniversal 30 | kCompactionStyleFIFO 31 | kCompactionStyleNone 32 | 33 | ctypedef enum CompressionType: 34 | kNoCompression 35 | kSnappyCompression 36 | kZlibCompression 37 | kBZip2Compression 38 | kLZ4Compression 39 | kLZ4HCCompression 40 | kXpressCompression 41 | kZSTD 42 | kZSTDNotFinalCompression 43 | kDisableCompressionOption 44 | 45 | ctypedef enum ReadTier: 46 | kReadAllTier 47 | kBlockCacheTier 48 | 49 | ctypedef enum CompactionPri: 50 | kByCompensatedSize 51 | kOldestLargestSeqFirst 52 | kOldestSmallestSeqFirst 53 | kMinOverlappingRatio 54 | 55 | # This needs to be in _rocksdb.pxd so it will export into python 56 | #cpdef enum AccessHint "rocksdb::DBOptions::AccessHint": 57 | # NONE, 58 | # NORMAL, 59 | # SEQUENTIAL, 60 | # WILLNEED 61 | 62 | cdef cppclass DBOptions: 63 | cpp_bool create_if_missing 64 | cpp_bool create_missing_column_families 65 | cpp_bool error_if_exists 66 | cpp_bool paranoid_checks 67 | # TODO: env 68 | shared_ptr[Logger] info_log 69 | int max_open_files 70 | int max_file_opening_threads 71 | # TODO: statistics 72 | cpp_bool use_fsync 73 | string db_log_dir 74 | string wal_dir 75 | uint64_t delete_obsolete_files_period_micros 76 | int max_background_jobs 77 | int max_background_compactions 78 | uint32_t max_subcompactions 79 | int max_background_flushes 80 | size_t max_log_file_size 81 | size_t log_file_time_to_roll 82 | size_t keep_log_file_num 83 | size_t recycle_log_file_num 84 | uint64_t max_manifest_file_size 85 | int table_cache_numshardbits 86 | uint64_t WAL_ttl_seconds 87 | uint64_t WAL_size_limit_MB 88 | size_t manifest_preallocation_size 89 | cpp_bool allow_mmap_reads 90 | cpp_bool allow_mmap_writes 91 | cpp_bool use_direct_reads 92 | cpp_bool use_direct_io_for_flush_and_compaction 93 | cpp_bool allow_fallocate 94 | cpp_bool is_fd_close_on_exec 95 | cpp_bool skip_log_error_on_recovery 96 | unsigned int stats_dump_period_sec 97 | cpp_bool advise_random_on_open 98 | size_t db_write_buffer_size 99 | # AccessHint access_hint_on_compaction_start 100 | cpp_bool use_adaptive_mutex 101 | uint64_t bytes_per_sync 102 | cpp_bool allow_concurrent_memtable_write 103 | cpp_bool enable_write_thread_adaptive_yield 104 | shared_ptr[Cache] row_cache 105 | 106 | cdef cppclass ColumnFamilyOptions: 107 | ColumnFamilyOptions() 108 | ColumnFamilyOptions(const Options& options) 109 | const Comparator* comparator 110 | shared_ptr[MergeOperator] merge_operator 111 | # TODO: compaction_filter 112 | # TODO: compaction_filter_factory 113 | size_t write_buffer_size 114 | int max_write_buffer_number 115 | int min_write_buffer_number_to_merge 116 | CompressionType compression 117 | CompactionPri compaction_pri 118 | # TODO: compression_per_level 119 | shared_ptr[SliceTransform] prefix_extractor 120 | int num_levels 121 | int level0_file_num_compaction_trigger 122 | int 
level0_slowdown_writes_trigger 123 | int level0_stop_writes_trigger 124 | int max_mem_compaction_level 125 | uint64_t target_file_size_base 126 | int target_file_size_multiplier 127 | uint64_t max_bytes_for_level_base 128 | double max_bytes_for_level_multiplier 129 | vector[int] max_bytes_for_level_multiplier_additional 130 | int expanded_compaction_factor 131 | int source_compaction_factor 132 | int max_grandparent_overlap_factor 133 | cpp_bool disableDataSync 134 | double soft_rate_limit 135 | double hard_rate_limit 136 | unsigned int rate_limit_delay_max_milliseconds 137 | size_t arena_block_size 138 | # TODO: PrepareForBulkLoad() 139 | cpp_bool disable_auto_compactions 140 | cpp_bool purge_redundant_kvs_while_flush 141 | cpp_bool allow_os_buffer 142 | cpp_bool verify_checksums_in_compaction 143 | CompactionStyle compaction_style 144 | CompactionOptionsUniversal compaction_options_universal 145 | cpp_bool filter_deletes 146 | uint64_t max_sequential_skip_in_iterations 147 | shared_ptr[MemTableRepFactory] memtable_factory 148 | shared_ptr[TableFactory] table_factory 149 | # TODO: table_properties_collectors 150 | cpp_bool inplace_update_support 151 | size_t inplace_update_num_locks 152 | # TODO: remove options source_compaction_factor, max_grandparent_overlap_bytes and expanded_compaction_factor from document 153 | uint64_t max_compaction_bytes 154 | CompressionOptions compression_opts 155 | 156 | cdef cppclass Options(DBOptions, ColumnFamilyOptions): 157 | pass 158 | 159 | cdef cppclass WriteOptions: 160 | cpp_bool sync 161 | cpp_bool disableWAL 162 | 163 | cdef cppclass ReadOptions: 164 | cpp_bool verify_checksums 165 | cpp_bool fill_cache 166 | const Snapshot* snapshot 167 | ReadTier read_tier 168 | 169 | cdef cppclass FlushOptions: 170 | cpp_bool wait 171 | 172 | ctypedef enum BottommostLevelCompaction: 173 | blc_skip "rocksdb::BottommostLevelCompaction::kSkip" 174 | blc_is_filter "rocksdb::BottommostLevelCompaction::kIfHaveCompactionFilter" 175 | blc_force "rocksdb::BottommostLevelCompaction::kForce" 176 | 177 | cdef cppclass CompactRangeOptions: 178 | cpp_bool change_level 179 | int target_level 180 | uint32_t target_path_id 181 | BottommostLevelCompaction bottommost_level_compaction 182 | -------------------------------------------------------------------------------- /rocksdb/slice_.pxd: -------------------------------------------------------------------------------- 1 | from libcpp.string cimport string 2 | from libcpp cimport bool as cpp_bool 3 | 4 | cdef extern from "rocksdb/slice.h" namespace "rocksdb": 5 | cdef cppclass Slice: 6 | Slice() nogil 7 | Slice(const char*, size_t) nogil 8 | Slice(const string&) nogil 9 | Slice(const char*) nogil 10 | 11 | const char* data() nogil 12 | size_t size() nogil 13 | cpp_bool empty() nogil 14 | char operator[](int) nogil 15 | void clear() nogil 16 | void remove_prefix(size_t) nogil 17 | string ToString() nogil 18 | string ToString(cpp_bool) nogil 19 | int compare(const Slice&) nogil 20 | cpp_bool starts_with(const Slice&) nogil 21 | -------------------------------------------------------------------------------- /rocksdb/slice_transform.pxd: -------------------------------------------------------------------------------- 1 | from slice_ cimport Slice 2 | from libcpp.string cimport string 3 | from libcpp cimport bool as cpp_bool 4 | from logger cimport Logger 5 | from std_memory cimport shared_ptr 6 | 7 | cdef extern from "rocksdb/slice_transform.h" namespace "rocksdb": 8 | cdef cppclass SliceTransform: 9 | pass 10 | 11 | ctypedef Slice 
(*transform_func)( 12 | void*, 13 | Logger*, 14 | string&, 15 | const Slice&) 16 | 17 | ctypedef cpp_bool (*in_domain_func)( 18 | void*, 19 | Logger*, 20 | string&, 21 | const Slice&) 22 | 23 | ctypedef cpp_bool (*in_range_func)( 24 | void*, 25 | Logger*, 26 | string&, 27 | const Slice&) 28 | 29 | cdef extern from "cpp/slice_transform_wrapper.hpp" namespace "py_rocks": 30 | cdef cppclass SliceTransformWrapper: 31 | SliceTransformWrapper( 32 | string name, 33 | void*, 34 | transform_func, 35 | in_domain_func, 36 | in_range_func) nogil except+ 37 | void set_info_log(shared_ptr[Logger]) nogil except+ 38 | -------------------------------------------------------------------------------- /rocksdb/snapshot.pxd: -------------------------------------------------------------------------------- 1 | cdef extern from "rocksdb/db.h" namespace "rocksdb": 2 | cdef cppclass Snapshot: 3 | pass 4 | -------------------------------------------------------------------------------- /rocksdb/status.pxd: -------------------------------------------------------------------------------- 1 | from libcpp cimport bool as cpp_bool 2 | from libcpp.string cimport string 3 | 4 | cdef extern from "rocksdb/status.h" namespace "rocksdb": 5 | cdef cppclass Status: 6 | Status() 7 | cpp_bool ok() nogil 8 | cpp_bool IsNotFound() nogil const 9 | cpp_bool IsCorruption() nogil const 10 | cpp_bool IsNotSupported() nogil const 11 | cpp_bool IsInvalidArgument() nogil const 12 | cpp_bool IsIOError() nogil const 13 | cpp_bool IsMergeInProgress() nogil const 14 | cpp_bool IsIncomplete() nogil const 15 | string ToString() nogil except+ 16 | -------------------------------------------------------------------------------- /rocksdb/std_memory.pxd: -------------------------------------------------------------------------------- 1 | cdef extern from "" namespace "std": 2 | cdef cppclass shared_ptr[T]: 3 | shared_ptr() nogil except+ 4 | shared_ptr(T*) nogil except+ 5 | void reset() nogil except+ 6 | void reset(T*) nogil except+ 7 | T* get() nogil except+ 8 | -------------------------------------------------------------------------------- /rocksdb/table_factory.pxd: -------------------------------------------------------------------------------- 1 | from libc.stdint cimport uint32_t 2 | from libcpp cimport bool as cpp_bool 3 | from std_memory cimport shared_ptr 4 | 5 | from cache cimport Cache 6 | from filter_policy cimport FilterPolicy 7 | 8 | cdef extern from "rocksdb/table.h" namespace "rocksdb": 9 | cdef cppclass TableFactory: 10 | TableFactory() 11 | 12 | ctypedef enum BlockBasedTableIndexType: 13 | kBinarySearch "rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch" 14 | kHashSearch "rocksdb::BlockBasedTableOptions::IndexType::kHashSearch" 15 | 16 | ctypedef enum ChecksumType: 17 | kCRC32c 18 | kxxHash 19 | 20 | cdef cppclass BlockBasedTableOptions: 21 | BlockBasedTableOptions() 22 | BlockBasedTableIndexType index_type 23 | cpp_bool hash_index_allow_collision 24 | ChecksumType checksum 25 | cpp_bool no_block_cache 26 | size_t block_size 27 | int block_size_deviation 28 | int block_restart_interval 29 | cpp_bool whole_key_filtering 30 | shared_ptr[Cache] block_cache 31 | shared_ptr[Cache] block_cache_compressed 32 | shared_ptr[FilterPolicy] filter_policy 33 | 34 | cdef TableFactory* NewBlockBasedTableFactory(const BlockBasedTableOptions&) 35 | 36 | ctypedef enum EncodingType: 37 | kPlain 38 | kPrefix 39 | 40 | cdef cppclass PlainTableOptions: 41 | uint32_t user_key_len 42 | int bloom_bits_per_key 43 | double hash_table_ratio 44 | size_t 
index_sparseness 45 | size_t huge_page_tlb_size 46 | EncodingType encoding_type 47 | cpp_bool full_scan_mode 48 | cpp_bool store_index_in_file 49 | 50 | cdef TableFactory* NewPlainTableFactory(const PlainTableOptions&) 51 | -------------------------------------------------------------------------------- /rocksdb/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twmht/python-rocksdb/75eb0ad0841f7272d9605ceac999cb155a65954c/rocksdb/tests/__init__.py -------------------------------------------------------------------------------- /rocksdb/tests/test_db.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import shutil 4 | import gc 5 | import unittest 6 | import rocksdb 7 | from itertools import takewhile 8 | import struct 9 | import tempfile 10 | from rocksdb.merge_operators import UintAddOperator, StringAppendOperator 11 | 12 | def int_to_bytes(ob): 13 | return str(ob).encode('ascii') 14 | 15 | class TestHelper(unittest.TestCase): 16 | 17 | def setUp(self): 18 | self.db_loc = tempfile.mkdtemp() 19 | self.addCleanup(self._close_db) 20 | 21 | def _close_db(self): 22 | del self.db 23 | gc.collect() 24 | if os.path.exists(self.db_loc): 25 | shutil.rmtree(self.db_loc) 26 | 27 | 28 | class TestDB(TestHelper): 29 | def setUp(self): 30 | TestHelper.setUp(self) 31 | opts = rocksdb.Options(create_if_missing=True) 32 | self.db = rocksdb.DB(os.path.join(self.db_loc, "test"), opts) 33 | 34 | def test_options_used_twice(self): 35 | if sys.version_info[0] == 3: 36 | assertRaisesRegex = self.assertRaisesRegex 37 | else: 38 | assertRaisesRegex = self.assertRaisesRegexp 39 | expected = "Options object is already used by another DB" 40 | with assertRaisesRegex(Exception, expected): 41 | rocksdb.DB(os.path.join(self.db_loc, "test2"), self.db.options) 42 | 43 | def test_unicode_path(self): 44 | name = os.path.join(self.db_loc, b'M\xc3\xbcnchen'.decode('utf8')) 45 | rocksdb.DB(name, rocksdb.Options(create_if_missing=True)) 46 | self.addCleanup(shutil.rmtree, name) 47 | self.assertTrue(os.path.isdir(name)) 48 | 49 | def test_get_none(self): 50 | self.assertIsNone(self.db.get(b'xxx')) 51 | 52 | def test_put_get(self): 53 | self.db.put(b"a", b"b") 54 | self.assertEqual(b"b", self.db.get(b"a")) 55 | 56 | def test_multi_get(self): 57 | self.db.put(b"a", b"1") 58 | self.db.put(b"b", b"2") 59 | self.db.put(b"c", b"3") 60 | 61 | ret = self.db.multi_get([b'a', b'b', b'c']) 62 | ref = {b'a': b'1', b'c': b'3', b'b': b'2'} 63 | self.assertEqual(ref, ret) 64 | 65 | def test_delete(self): 66 | self.db.put(b"a", b"b") 67 | self.assertEqual(b"b", self.db.get(b"a")) 68 | self.db.delete(b"a") 69 | self.assertIsNone(self.db.get(b"a")) 70 | 71 | def test_write_batch(self): 72 | batch = rocksdb.WriteBatch() 73 | batch.put(b"key", b"v1") 74 | batch.delete(b"key") 75 | batch.put(b"key", b"v2") 76 | batch.put(b"key", b"v3") 77 | batch.put(b"a", b"b") 78 | 79 | self.db.write(batch) 80 | ref = {b'a': b'b', b'key': b'v3'} 81 | ret = self.db.multi_get([b'key', b'a']) 82 | self.assertEqual(ref, ret) 83 | 84 | def test_write_batch_iter(self): 85 | batch = rocksdb.WriteBatch() 86 | self.assertEqual([], list(batch)) 87 | 88 | batch.put(b"key1", b"v1") 89 | batch.put(b"key2", b"v2") 90 | batch.put(b"key3", b"v3") 91 | batch.delete(b'a') 92 | batch.delete(b'key1') 93 | batch.merge(b'xxx', b'value') 94 | 95 | it = iter(batch) 96 | del batch 97 | ref = [ 98 | ('Put', b'key1', b'v1'), 99 | ('Put', 
b'key2', b'v2'), 100 | ('Put', b'key3', b'v3'), 101 | ('Delete', b'a', b''), 102 | ('Delete', b'key1', b''), 103 | ('Merge', b'xxx', b'value') 104 | ] 105 | self.assertEqual(ref, list(it)) 106 | 107 | 108 | def test_key_may_exists(self): 109 | self.db.put(b"a", b'1') 110 | 111 | self.assertEqual((False, None), self.db.key_may_exist(b"x")) 112 | self.assertEqual((False, None), self.db.key_may_exist(b'x', True)) 113 | self.assertEqual((True, None), self.db.key_may_exist(b'a')) 114 | self.assertEqual((True, b'1'), self.db.key_may_exist(b'a', True)) 115 | 116 | def test_seek_for_prev(self): 117 | self.db.put(b'a1', b'a1_value') 118 | self.db.put(b'a3', b'a3_value') 119 | self.db.put(b'b1', b'b1_value') 120 | self.db.put(b'b2', b'b2_value') 121 | self.db.put(b'c2', b'c2_value') 122 | self.db.put(b'c4', b'c4_value') 123 | 124 | self.assertEqual(self.db.get(b'a1'), b'a1_value') 125 | 126 | it = self.db.iterkeys() 127 | 128 | it.seek(b'a1') 129 | self.assertEqual(it.get(), b'a1') 130 | it.seek(b'a3') 131 | self.assertEqual(it.get(), b'a3') 132 | it.seek_for_prev(b'c4') 133 | self.assertEqual(it.get(), b'c4') 134 | it.seek_for_prev(b'c3') 135 | self.assertEqual(it.get(), b'c2') 136 | 137 | it = self.db.itervalues() 138 | it.seek(b'a1') 139 | self.assertEqual(it.get(), b'a1_value') 140 | it.seek(b'a3') 141 | self.assertEqual(it.get(), b'a3_value') 142 | it.seek_for_prev(b'c4') 143 | self.assertEqual(it.get(), b'c4_value') 144 | it.seek_for_prev(b'c3') 145 | self.assertEqual(it.get(), b'c2_value') 146 | 147 | it = self.db.iteritems() 148 | it.seek(b'a1') 149 | self.assertEqual(it.get(), (b'a1', b'a1_value')) 150 | it.seek(b'a3') 151 | self.assertEqual(it.get(), (b'a3', b'a3_value')) 152 | it.seek_for_prev(b'c4') 153 | self.assertEqual(it.get(), (b'c4', b'c4_value')) 154 | it.seek_for_prev(b'c3') 155 | self.assertEqual(it.get(), (b'c2', b'c2_value')) 156 | 157 | reverse_it = reversed(it) 158 | it.seek_for_prev(b'c3') 159 | self.assertEqual(it.get(), (b'c2', b'c2_value')) 160 | 161 | 162 | def test_iter_keys(self): 163 | for x in range(300): 164 | self.db.put(int_to_bytes(x), int_to_bytes(x)) 165 | 166 | it = self.db.iterkeys() 167 | 168 | self.assertEqual([], list(it)) 169 | 170 | it.seek_to_last() 171 | self.assertEqual([b'99'], list(it)) 172 | 173 | ref = sorted([int_to_bytes(x) for x in range(300)]) 174 | it.seek_to_first() 175 | self.assertEqual(ref, list(it)) 176 | 177 | it.seek(b'90') 178 | ref = [ 179 | b'90', 180 | b'91', 181 | b'92', 182 | b'93', 183 | b'94', 184 | b'95', 185 | b'96', 186 | b'97', 187 | b'98', 188 | b'99' 189 | ] 190 | self.assertEqual(ref, list(it)) 191 | 192 | def test_iter_values(self): 193 | for x in range(300): 194 | self.db.put(int_to_bytes(x), int_to_bytes(x * 1000)) 195 | 196 | it = self.db.itervalues() 197 | 198 | self.assertEqual([], list(it)) 199 | 200 | it.seek_to_last() 201 | self.assertEqual([b'99000'], list(it)) 202 | 203 | ref = sorted([int_to_bytes(x) for x in range(300)]) 204 | ref = [int_to_bytes(int(x) * 1000) for x in ref] 205 | it.seek_to_first() 206 | self.assertEqual(ref, list(it)) 207 | 208 | it.seek(b'90') 209 | ref = [int_to_bytes(x * 1000) for x in range(90, 100)] 210 | self.assertEqual(ref, list(it)) 211 | 212 | def test_iter_items(self): 213 | for x in range(300): 214 | self.db.put(int_to_bytes(x), int_to_bytes(x * 1000)) 215 | 216 | it = self.db.iteritems() 217 | 218 | self.assertEqual([], list(it)) 219 | 220 | it.seek_to_last() 221 | self.assertEqual([(b'99', b'99000')], list(it)) 222 | 223 | ref = sorted([int_to_bytes(x) for x in range(300)]) 
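# (annotation) keys here are ASCII digit strings compared bytewise, so sorted()
# on the raw bytes matches the database order: b'99' is the largest key, which
# is why seek_to_last() above yields (b'99', b'99000') rather than (b'299', ...).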
224 | ref = [(x, int_to_bytes(int(x) * 1000)) for x in ref] 225 | it.seek_to_first() 226 | self.assertEqual(ref, list(it)) 227 | 228 | it.seek(b'90') 229 | ref = [(int_to_bytes(x), int_to_bytes(x * 1000)) for x in range(90, 100)] 230 | self.assertEqual(ref, list(it)) 231 | 232 | def test_reverse_iter(self): 233 | for x in range(100): 234 | self.db.put(int_to_bytes(x), int_to_bytes(x * 1000)) 235 | 236 | it = self.db.iteritems() 237 | it.seek_to_last() 238 | 239 | ref = reversed(sorted([int_to_bytes(x) for x in range(100)])) 240 | ref = [(x, int_to_bytes(int(x) * 1000)) for x in ref] 241 | 242 | self.assertEqual(ref, list(reversed(it))) 243 | 244 | def test_snapshot(self): 245 | self.db.put(b"a", b"1") 246 | self.db.put(b"b", b"2") 247 | 248 | snapshot = self.db.snapshot() 249 | self.db.put(b"a", b"2") 250 | self.db.delete(b"b") 251 | 252 | it = self.db.iteritems() 253 | it.seek_to_first() 254 | self.assertEqual({b'a': b'2'}, dict(it)) 255 | 256 | it = self.db.iteritems(snapshot=snapshot) 257 | it.seek_to_first() 258 | self.assertEqual({b'a': b'1', b'b': b'2'}, dict(it)) 259 | 260 | def test_get_property(self): 261 | for x in range(300): 262 | x = int_to_bytes(x) 263 | self.db.put(x, x) 264 | 265 | self.assertIsNotNone(self.db.get_property(b'rocksdb.stats')) 266 | self.assertIsNotNone(self.db.get_property(b'rocksdb.sstables')) 267 | self.assertIsNotNone(self.db.get_property(b'rocksdb.num-files-at-level0')) 268 | self.assertIsNone(self.db.get_property(b'does not exsits')) 269 | 270 | def test_compact_range(self): 271 | for x in range(10000): 272 | x = int_to_bytes(x) 273 | self.db.put(x, x) 274 | 275 | self.db.compact_range() 276 | 277 | 278 | class AssocCounter(rocksdb.interfaces.AssociativeMergeOperator): 279 | def merge(self, key, existing_value, value): 280 | if existing_value: 281 | return (True, int_to_bytes(int(existing_value) + int(value))) 282 | return (True, value) 283 | 284 | def name(self): 285 | return b'AssocCounter' 286 | 287 | 288 | class TestUint64Merge(TestHelper): 289 | def setUp(self): 290 | TestHelper.setUp(self) 291 | opts = rocksdb.Options() 292 | opts.create_if_missing = True 293 | opts.merge_operator = UintAddOperator() 294 | self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) 295 | 296 | def test_merge(self): 297 | self.db.put(b'a', struct.pack('Q', 5566)) 298 | for x in range(1000): 299 | self.db.merge(b"a", struct.pack('Q', x)) 300 | self.assertEqual(5566 + sum(range(1000)), struct.unpack('Q', self.db.get(b'a'))[0]) 301 | 302 | 303 | # class TestPutMerge(TestHelper): 304 | # def setUp(self): 305 | # TestHelper.setUp(self) 306 | # opts = rocksdb.Options() 307 | # opts.create_if_missing = True 308 | # opts.merge_operator = "put" 309 | # self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) 310 | 311 | # def test_merge(self): 312 | # self.db.put(b'a', b'ccc') 313 | # self.db.merge(b'a', b'ddd') 314 | # self.assertEqual(self.db.get(b'a'), 'ddd') 315 | 316 | # class TestPutV1Merge(TestHelper): 317 | # def setUp(self): 318 | # TestHelper.setUp(self) 319 | # opts = rocksdb.Options() 320 | # opts.create_if_missing = True 321 | # opts.merge_operator = "put_v1" 322 | # self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) 323 | 324 | # def test_merge(self): 325 | # self.db.put(b'a', b'ccc') 326 | # self.db.merge(b'a', b'ddd') 327 | # self.assertEqual(self.db.get(b'a'), 'ddd') 328 | 329 | class TestStringAppendOperatorMerge(TestHelper): 330 | def setUp(self): 331 | TestHelper.setUp(self) 332 | opts = rocksdb.Options() 333 | opts.create_if_missing = 
True 334 | opts.merge_operator = StringAppendOperator() 335 | self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) 336 | 337 | # NOTE(sileht): Raise "Corruption: Error: Could not perform merge." on PY3 338 | #@unittest.skipIf(sys.version_info[0] == 3, 339 | # "Unexpected behavior on PY3") 340 | def test_merge(self): 341 | self.db.put(b'a', b'ccc') 342 | self.db.merge(b'a', b'ddd') 343 | self.assertEqual(self.db.get(b'a'), b'ccc,ddd') 344 | 345 | # class TestStringMaxOperatorMerge(TestHelper): 346 | # def setUp(self): 347 | # TestHelper.setUp(self) 348 | # opts = rocksdb.Options() 349 | # opts.create_if_missing = True 350 | # opts.merge_operator = "max" 351 | # self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) 352 | 353 | # def test_merge(self): 354 | # self.db.put(b'a', int_to_bytes(55)) 355 | # self.db.merge(b'a', int_to_bytes(56)) 356 | # self.assertEqual(int(self.db.get(b'a')), 56) 357 | 358 | 359 | class TestAssocMerge(TestHelper): 360 | def setUp(self): 361 | TestHelper.setUp(self) 362 | opts = rocksdb.Options() 363 | opts.create_if_missing = True 364 | opts.merge_operator = AssocCounter() 365 | self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) 366 | 367 | def test_merge(self): 368 | for x in range(1000): 369 | self.db.merge(b"a", int_to_bytes(x)) 370 | self.assertEqual(sum(range(1000)), int(self.db.get(b'a'))) 371 | 372 | 373 | class FullCounter(rocksdb.interfaces.MergeOperator): 374 | def name(self): 375 | return b'fullcounter' 376 | 377 | def full_merge(self, key, existing_value, operand_list): 378 | ret = sum([int(x) for x in operand_list]) 379 | if existing_value: 380 | ret += int(existing_value) 381 | 382 | return (True, int_to_bytes(ret)) 383 | 384 | def partial_merge(self, key, left, right): 385 | return (True, int_to_bytes(int(left) + int(right))) 386 | 387 | 388 | class TestFullMerge(TestHelper): 389 | def setUp(self): 390 | TestHelper.setUp(self) 391 | opts = rocksdb.Options() 392 | opts.create_if_missing = True 393 | opts.merge_operator = FullCounter() 394 | self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) 395 | 396 | def test_merge(self): 397 | for x in range(1000): 398 | self.db.merge(b"a", int_to_bytes(x)) 399 | self.assertEqual(sum(range(1000)), int(self.db.get(b'a'))) 400 | 401 | 402 | class SimpleComparator(rocksdb.interfaces.Comparator): 403 | def name(self): 404 | return b'mycompare' 405 | 406 | def compare(self, a, b): 407 | a = int(a) 408 | b = int(b) 409 | if a < b: 410 | return -1 411 | if a == b: 412 | return 0 413 | if a > b: 414 | return 1 415 | 416 | 417 | class TestComparator(TestHelper): 418 | def setUp(self): 419 | TestHelper.setUp(self) 420 | opts = rocksdb.Options() 421 | opts.create_if_missing = True 422 | opts.comparator = SimpleComparator() 423 | self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) 424 | 425 | def test_compare(self): 426 | for x in range(1000): 427 | self.db.put(int_to_bytes(x), int_to_bytes(x)) 428 | 429 | self.assertEqual(b'300', self.db.get(b'300')) 430 | 431 | class StaticPrefix(rocksdb.interfaces.SliceTransform): 432 | def name(self): 433 | return b'static' 434 | 435 | def transform(self, src): 436 | return (0, 5) 437 | 438 | def in_domain(self, src): 439 | return len(src) >= 5 440 | 441 | def in_range(self, dst): 442 | return len(dst) == 5 443 | 444 | class TestPrefixExtractor(TestHelper): 445 | def setUp(self): 446 | TestHelper.setUp(self) 447 | opts = rocksdb.Options(create_if_missing=True) 448 | opts.prefix_extractor = StaticPrefix() 449 | self.db = 
rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) 450 | 451 | def _fill_db(self): 452 | for x in range(3000): 453 | keyx = hex(x)[2:].zfill(5).encode('utf8') + b'.x' 454 | keyy = hex(x)[2:].zfill(5).encode('utf8') + b'.y' 455 | keyz = hex(x)[2:].zfill(5).encode('utf8') + b'.z' 456 | self.db.put(keyx, b'x') 457 | self.db.put(keyy, b'y') 458 | self.db.put(keyz, b'z') 459 | 460 | 461 | def test_prefix_iterkeys(self): 462 | self._fill_db() 463 | self.assertEqual(b'x', self.db.get(b'00001.x')) 464 | self.assertEqual(b'y', self.db.get(b'00001.y')) 465 | self.assertEqual(b'z', self.db.get(b'00001.z')) 466 | 467 | it = self.db.iterkeys() 468 | it.seek(b'00002') 469 | 470 | ref = [b'00002.x', b'00002.y', b'00002.z'] 471 | ret = takewhile(lambda key: key.startswith(b'00002'), it) 472 | self.assertEqual(ref, list(ret)) 473 | 474 | def test_prefix_iteritems(self): 475 | self._fill_db() 476 | 477 | it = self.db.iteritems() 478 | it.seek(b'00002') 479 | 480 | ref = {b'00002.z': b'z', b'00002.y': b'y', b'00002.x': b'x'} 481 | ret = takewhile(lambda item: item[0].startswith(b'00002'), it) 482 | self.assertEqual(ref, dict(ret)) 483 | 484 | class TestDBColumnFamilies(TestHelper): 485 | def setUp(self): 486 | TestHelper.setUp(self) 487 | opts = rocksdb.Options(create_if_missing=True) 488 | self.db = rocksdb.DB( 489 | os.path.join(self.db_loc, 'test'), 490 | opts, 491 | ) 492 | 493 | self.cf_a = self.db.create_column_family(b'A', rocksdb.ColumnFamilyOptions()) 494 | self.cf_b = self.db.create_column_family(b'B', rocksdb.ColumnFamilyOptions()) 495 | 496 | def test_column_families(self): 497 | families = self.db.column_families 498 | names = [handle.name for handle in families] 499 | self.assertEqual([b'default', b'A', b'B'], names) 500 | for name in names: 501 | self.assertIn(self.db.get_column_family(name), families) 502 | 503 | self.assertEqual( 504 | names, 505 | rocksdb.list_column_families( 506 | os.path.join(self.db_loc, 'test'), 507 | rocksdb.Options(), 508 | ) 509 | ) 510 | 511 | def test_get_none(self): 512 | self.assertIsNone(self.db.get(b'k')) 513 | self.assertIsNone(self.db.get((self.cf_a, b'k'))) 514 | self.assertIsNone(self.db.get((self.cf_b, b'k'))) 515 | 516 | def test_put_get(self): 517 | key = (self.cf_a, b'k') 518 | self.db.put(key, b"v") 519 | self.assertEqual(b"v", self.db.get(key)) 520 | self.assertIsNone(self.db.get(b"k")) 521 | self.assertIsNone(self.db.get((self.cf_b, b"k"))) 522 | 523 | def test_multi_get(self): 524 | data = [ 525 | (b'a', b'1default'), 526 | (b'b', b'2default'), 527 | (b'c', b'3default'), 528 | ((self.cf_a, b'a'), b'1a'), 529 | ((self.cf_a, b'b'), b'2a'), 530 | ((self.cf_a, b'c'), b'3a'), 531 | ((self.cf_b, b'a'), b'1b'), 532 | ((self.cf_b, b'b'), b'2b'), 533 | ((self.cf_b, b'c'), b'3b'), 534 | ] 535 | for value in data: 536 | self.db.put(*value) 537 | 538 | multi_get_lookup = [value[0] for value in data] 539 | 540 | ret = self.db.multi_get(multi_get_lookup) 541 | ref = {value[0]: value[1] for value in data} 542 | self.assertEqual(ref, ret) 543 | 544 | def test_delete(self): 545 | self.db.put((self.cf_a, b"a"), b"b") 546 | self.assertEqual(b"b", self.db.get((self.cf_a, b"a"))) 547 | self.db.delete((self.cf_a, b"a")) 548 | self.assertIsNone(self.db.get((self.cf_a, b"a"))) 549 | 550 | def test_write_batch(self): 551 | cfa = self.db.get_column_family(b"A") 552 | batch = rocksdb.WriteBatch() 553 | batch.put((cfa, b"key"), b"v1") 554 | batch.delete((self.cf_a, b"key")) 555 | batch.put((cfa, b"key"), b"v2") 556 | batch.put((cfa, b"key"), b"v3") 557 | batch.put((cfa, 
b"a"), b"1") 558 | batch.put((cfa, b"b"), b"2") 559 | 560 | self.db.write(batch) 561 | query = [(cfa, b"key"), (cfa, b"a"), (cfa, b"b")] 562 | ret = self.db.multi_get(query) 563 | 564 | self.assertEqual(b"v3", ret[query[0]]) 565 | self.assertEqual(b"1", ret[query[1]]) 566 | self.assertEqual(b"2", ret[query[2]]) 567 | 568 | def test_key_may_exists(self): 569 | self.db.put((self.cf_a, b"a"), b'1') 570 | 571 | self.assertEqual( 572 | (False, None), 573 | self.db.key_may_exist((self.cf_a, b"x")) 574 | ) 575 | self.assertEqual( 576 | (False, None), 577 | self.db.key_may_exist((self.cf_a, b'x'), fetch=True) 578 | ) 579 | self.assertEqual( 580 | (True, None), 581 | self.db.key_may_exist((self.cf_a, b'a')) 582 | ) 583 | self.assertEqual( 584 | (True, b'1'), 585 | self.db.key_may_exist((self.cf_a, b'a'), fetch=True) 586 | ) 587 | 588 | def test_iter_keys(self): 589 | for x in range(300): 590 | self.db.put((self.cf_a, int_to_bytes(x)), int_to_bytes(x)) 591 | 592 | it = self.db.iterkeys(self.cf_a) 593 | self.assertEqual([], list(it)) 594 | 595 | it.seek_to_last() 596 | self.assertEqual([(self.cf_a, b'99')], list(it)) 597 | 598 | ref = sorted([(self.cf_a, int_to_bytes(x)) for x in range(300)]) 599 | it.seek_to_first() 600 | self.assertEqual(ref, list(it)) 601 | 602 | it.seek(b'90') 603 | ref = sorted([(self.cf_a, int_to_bytes(x)) for x in range(90, 100)]) 604 | self.assertEqual(ref, list(it)) 605 | 606 | def test_iter_values(self): 607 | for x in range(300): 608 | self.db.put((self.cf_b, int_to_bytes(x)), int_to_bytes(x * 1000)) 609 | 610 | it = self.db.itervalues(self.cf_b) 611 | self.assertEqual([], list(it)) 612 | 613 | it.seek_to_last() 614 | self.assertEqual([b'99000'], list(it)) 615 | 616 | ref = sorted([int_to_bytes(x) for x in range(300)]) 617 | ref = [int_to_bytes(int(x) * 1000) for x in ref] 618 | it.seek_to_first() 619 | self.assertEqual(ref, list(it)) 620 | 621 | it.seek(b'90') 622 | ref = [int_to_bytes(x * 1000) for x in range(90, 100)] 623 | self.assertEqual(ref, list(it)) 624 | 625 | def test_iter_items(self): 626 | for x in range(300): 627 | self.db.put((self.cf_b, int_to_bytes(x)), int_to_bytes(x * 1000)) 628 | 629 | it = self.db.iteritems(self.cf_b) 630 | self.assertEqual([], list(it)) 631 | 632 | it.seek_to_last() 633 | self.assertEqual([((self.cf_b, b'99'), b'99000')], list(it)) 634 | 635 | ref = sorted([int_to_bytes(x) for x in range(300)]) 636 | ref = [((self.cf_b, x), int_to_bytes(int(x) * 1000)) for x in ref] 637 | it.seek_to_first() 638 | self.assertEqual(ref, list(it)) 639 | 640 | it.seek(b'90') 641 | ref = [((self.cf_b, int_to_bytes(x)), int_to_bytes(x * 1000)) for x in range(90, 100)] 642 | self.assertEqual(ref, list(it)) 643 | 644 | def test_reverse_iter(self): 645 | for x in range(100): 646 | self.db.put((self.cf_a, int_to_bytes(x)), int_to_bytes(x * 1000)) 647 | 648 | it = self.db.iteritems(self.cf_a) 649 | it.seek_to_last() 650 | 651 | ref = reversed(sorted([(self.cf_a, int_to_bytes(x)) for x in range(100)])) 652 | ref = [(x, int_to_bytes(int(x[1]) * 1000)) for x in ref] 653 | 654 | self.assertEqual(ref, list(reversed(it))) 655 | 656 | def test_snapshot(self): 657 | cfa = self.db.get_column_family(b'A') 658 | self.db.put((cfa, b"a"), b"1") 659 | self.db.put((cfa, b"b"), b"2") 660 | 661 | snapshot = self.db.snapshot() 662 | self.db.put((cfa, b"a"), b"2") 663 | self.db.delete((cfa, b"b")) 664 | 665 | it = self.db.iteritems(cfa) 666 | it.seek_to_first() 667 | self.assertEqual({(cfa, b'a'): b'2'}, dict(it)) 668 | 669 | it = self.db.iteritems(cfa, snapshot=snapshot) 670 | 
it.seek_to_first() 671 | self.assertEqual({(cfa, b'a'): b'1', (cfa, b'b'): b'2'}, dict(it)) 672 | 673 | def test_get_property(self): 674 | for x in range(300): 675 | x = int_to_bytes(x) 676 | self.db.put((self.cf_a, x), x) 677 | 678 | self.assertEqual(b"300", 679 | self.db.get_property(b'rocksdb.estimate-num-keys', 680 | self.cf_a)) 681 | self.assertIsNone(self.db.get_property(b'does not exist', 682 | self.cf_a)) 683 | 684 | def test_compact_range(self): 685 | for x in range(10000): 686 | x = int_to_bytes(x) 687 | self.db.put((self.cf_b, x), x) 688 | 689 | self.db.compact_range(column_family=self.cf_b) 690 | 691 | -------------------------------------------------------------------------------- /rocksdb/tests/test_memtable.py: -------------------------------------------------------------------------------- 1 | # tests for the pluggable memtable factories 2 | import rocksdb 3 | import pytest 4 | import shutil 5 | import os 6 | import tempfile 7 | 8 | def test_open_skiplist_memtable_factory(): 9 | opts = rocksdb.Options() 10 | opts.memtable_factory = rocksdb.SkipListMemtableFactory() 11 | opts.create_if_missing = True 12 | 13 | loc = tempfile.mkdtemp() 14 | try: 15 | test_db = rocksdb.DB(os.path.join(loc, "test"), opts) 16 | finally: 17 | shutil.rmtree(loc) 18 | 19 | 20 | def test_open_vector_memtable_factory(): 21 | opts = rocksdb.Options() 22 | opts.allow_concurrent_memtable_write = False 23 | opts.memtable_factory = rocksdb.VectorMemtableFactory() 24 | opts.create_if_missing = True 25 | loc = tempfile.mkdtemp() 26 | try: 27 | test_db = rocksdb.DB(os.path.join(loc, "test"), opts) 28 | finally: 29 | shutil.rmtree(loc) 30 | -------------------------------------------------------------------------------- /rocksdb/tests/test_options.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | import rocksdb 4 | 5 | class TestFilterPolicy(rocksdb.interfaces.FilterPolicy): 6 | def create_filter(self, keys): 7 | return b'nix' 8 | 9 | def key_may_match(self, key, fil): 10 | return True 11 | 12 | def name(self): 13 | return b'testfilter' 14 | 15 | class TestMergeOperator(rocksdb.interfaces.MergeOperator): 16 | def full_merge(self, *args, **kwargs): 17 | return (False, None) 18 | 19 | def partial_merge(self, *args, **kwargs): 20 | return (False, None) 21 | 22 | def name(self): 23 | return b'testmergeop' 24 | 25 | class TestOptions(unittest.TestCase): 26 | # def test_default_merge_operator(self): 27 | # opts = rocksdb.Options() 28 | # self.assertEqual(True, opts.paranoid_checks) 29 | # opts.paranoid_checks = False 30 | # self.assertEqual(False, opts.paranoid_checks) 31 | 32 | # self.assertIsNone(opts.merge_operator) 33 | # opts.merge_operator = "uint64add" 34 | # self.assertIsNotNone(opts.merge_operator) 35 | # self.assertEqual(opts.merge_operator, "uint64add") 36 | # with self.assertRaises(TypeError): 37 | # opts.merge_operator = "not an operator" 38 | 39 | # FIXME: the Travis tests should run against the latest version of rocksdb 40 | # def test_compaction_pri(self): 41 | # opts = rocksdb.Options() 42 | # default compaction_pri 43 | # self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.by_compensated_size) 44 | # self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.min_overlapping_ratio) 45 | # opts.compaction_pri = rocksdb.CompactionPri.by_compensated_size 46 | # self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.by_compensated_size) 47 | # opts.compaction_pri = rocksdb.CompactionPri.oldest_largest_seq_first 48 | #
self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.oldest_largest_seq_first) 49 | # opts.compaction_pri = rocksdb.CompactionPri.min_overlapping_ratio 50 | # self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.min_overlapping_ratio) 51 | 52 | def test_enable_write_thread_adaptive_yield(self): 53 | opts = rocksdb.Options() 54 | self.assertEqual(opts.enable_write_thread_adaptive_yield, True) 55 | opts.enable_write_thread_adaptive_yield = False 56 | self.assertEqual(opts.enable_write_thread_adaptive_yield, False) 57 | 58 | def test_allow_concurrent_memtable_write(self): 59 | opts = rocksdb.Options() 60 | self.assertEqual(opts.allow_concurrent_memtable_write, True) 61 | opts.allow_concurrent_memtable_write = False 62 | self.assertEqual(opts.allow_concurrent_memtable_write, False) 63 | 64 | def test_compression_opts(self): 65 | opts = rocksdb.Options() 66 | compression_opts = opts.compression_opts 67 | # default value 68 | self.assertEqual(isinstance(compression_opts, dict), True) 69 | self.assertEqual(compression_opts['window_bits'], -14) 70 | # This no longer matches the default in recent rocksdb releases 71 | # self.assertEqual(compression_opts['level'], -1) 72 | self.assertEqual(compression_opts['strategy'], 0) 73 | self.assertEqual(compression_opts['max_dict_bytes'], 0) 74 | 75 | with self.assertRaises(TypeError): 76 | opts.compression_opts = [1, 2] # assigning a non-dict should raise TypeError 77 | 78 | opts.compression_opts = {'window_bits': 1, 'level': 2, 'strategy': 3, 'max_dict_bytes': 4} 79 | compression_opts = opts.compression_opts 80 | self.assertEqual(compression_opts['window_bits'], 1) 81 | self.assertEqual(compression_opts['level'], 2) 82 | self.assertEqual(compression_opts['strategy'], 3) 83 | self.assertEqual(compression_opts['max_dict_bytes'], 4) 84 | 85 | def test_simple(self): 86 | opts = rocksdb.Options() 87 | self.assertEqual(True, opts.paranoid_checks) 88 | opts.paranoid_checks = False 89 | self.assertEqual(False, opts.paranoid_checks) 90 | 91 | self.assertIsNone(opts.merge_operator) 92 | ob = TestMergeOperator() 93 | opts.merge_operator = ob 94 | self.assertEqual(opts.merge_operator, ob) 95 | 96 | self.assertIsInstance( 97 | opts.comparator, 98 | rocksdb.BytewiseComparator) 99 | 100 | self.assertIn(opts.compression, 101 | (rocksdb.CompressionType.no_compression, 102 | rocksdb.CompressionType.snappy_compression)) 103 | 104 | opts.compression = rocksdb.CompressionType.zstd_compression 105 | self.assertEqual(rocksdb.CompressionType.zstd_compression, opts.compression) 106 | 107 | def test_block_options(self): 108 | rocksdb.BlockBasedTableFactory( 109 | block_size=4096, 110 | filter_policy=TestFilterPolicy(), 111 | block_cache=rocksdb.LRUCache(100)) 112 | 113 | def test_unicode_path(self): 114 | name = b'/tmp/M\xc3\xbcnchen'.decode('utf8') 115 | opts = rocksdb.Options() 116 | opts.db_log_dir = name 117 | opts.wal_dir = name 118 | 119 | self.assertEqual(name, opts.db_log_dir) 120 | self.assertEqual(name, opts.wal_dir) 121 | 122 | def test_table_factory(self): 123 | opts = rocksdb.Options() 124 | self.assertIsNone(opts.table_factory) 125 | 126 | opts.table_factory = rocksdb.BlockBasedTableFactory() 127 | opts.table_factory = rocksdb.PlainTableFactory() 128 | 129 | def test_compaction_style(self): 130 | opts = rocksdb.Options() 131 | self.assertEqual('level', opts.compaction_style) 132 | 133 | opts.compaction_style = 'universal' 134 | self.assertEqual('universal', opts.compaction_style) 135 | 136 | opts.compaction_style = 'level' 137 | self.assertEqual('level', opts.compaction_style) 138 | 139 | if sys.version_info[0] == 3: 140 |
assertRaisesRegex = self.assertRaisesRegex 141 | else: 142 | assertRaisesRegex = self.assertRaisesRegexp 143 | 144 | with assertRaisesRegex(Exception, 'Unknown compaction style'): 145 | opts.compaction_style = 'foo' 146 | 147 | def test_compaction_opts_universal(self): 148 | opts = rocksdb.Options() 149 | uopts = opts.compaction_options_universal 150 | self.assertEqual(-1, uopts['compression_size_percent']) 151 | self.assertEqual(200, uopts['max_size_amplification_percent']) 152 | self.assertEqual('total_size', uopts['stop_style']) 153 | self.assertEqual(1, uopts['size_ratio']) 154 | self.assertEqual(2, uopts['min_merge_width']) 155 | self.assertGreaterEqual(4294967295, uopts['max_merge_width']) 156 | 157 | new_opts = {'stop_style': 'similar_size', 'max_merge_width': 30} 158 | opts.compaction_options_universal = new_opts 159 | uopts = opts.compaction_options_universal 160 | 161 | self.assertEqual(-1, uopts['compression_size_percent']) 162 | self.assertEqual(200, uopts['max_size_amplification_percent']) 163 | self.assertEqual('similar_size', uopts['stop_style']) 164 | self.assertEqual(1, uopts['size_ratio']) 165 | self.assertEqual(2, uopts['min_merge_width']) 166 | self.assertEqual(30, uopts['max_merge_width']) 167 | 168 | def test_row_cache(self): 169 | opts = rocksdb.Options() 170 | self.assertIsNone(opts.row_cache) 171 | opts.row_cache = cache = rocksdb.LRUCache(2*1024*1024) 172 | self.assertEqual(cache, opts.row_cache) 173 | -------------------------------------------------------------------------------- /rocksdb/universal_compaction.pxd: -------------------------------------------------------------------------------- 1 | cdef extern from "rocksdb/universal_compaction.h" namespace "rocksdb": 2 | 3 | ctypedef enum CompactionStopStyle: 4 | kCompactionStopStyleSimilarSize 5 | kCompactionStopStyleTotalSize 6 | 7 | cdef cppclass CompactionOptionsUniversal: 8 | CompactionOptionsUniversal() 9 | 10 | unsigned int size_ratio 11 | unsigned int min_merge_width 12 | unsigned int max_merge_width 13 | unsigned int max_size_amplification_percent 14 | int compression_size_percent 15 | CompactionStopStyle stop_style 16 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [build_sphinx] 2 | source-dir = docs 3 | build-dir = docs/_build 4 | all_files = 1 5 | 6 | [upload_sphinx] 7 | upload-dir = docs/_build/html 8 | 9 | [aliases] 10 | test=pytest 11 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import platform 2 | from setuptools import setup 3 | from setuptools import find_packages 4 | from setuptools import Extension 5 | 6 | 7 | extra_compile_args = [ 8 | '-std=c++11', 9 | '-O3', 10 | '-Wall', 11 | '-Wextra', 12 | '-Wconversion', 13 | '-fno-strict-aliasing', 14 | '-fno-rtti', 15 | ] 16 | 17 | if platform.system() == 'Darwin': 18 | extra_compile_args += ['-mmacosx-version-min=10.7', '-stdlib=libc++'] 19 | 20 | 21 | setup( 22 | name="python-rocksdb", 23 | version='0.7.0', 24 | description="Python bindings for RocksDB", 25 | keywords='rocksdb', 26 | author='Ming Hsuan Tu', 27 | author_email="qrnnis2623891@gmail.com", 28 | url="https://github.com/twmht/python-rocksdb", 29 | license='BSD License', 30 | setup_requires=['setuptools>=25', 'Cython>=0.20'], 31 | install_requires=['setuptools>=25'], 32 | package_dir={'rocksdb': 'rocksdb'}, 33 | 
packages=find_packages('.'), 34 | ext_modules=[Extension( 35 | 'rocksdb._rocksdb', 36 | ['rocksdb/_rocksdb.pyx'], 37 | extra_compile_args=extra_compile_args, 38 | language='c++', 39 | libraries=['rocksdb', 'snappy', 'bz2', 'z', 'lz4'], 40 | )], 41 | extras_require={ 42 | "doc": ['sphinx_rtd_theme', 'sphinx'], 43 | "test": ['pytest'], 44 | }, 45 | include_package_data=True, 46 | zip_safe=False, 47 | ) 48 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py27,py35,py36 3 | minversion = 2.0 4 | skipsdist = True 5 | 6 | [testenv] 7 | skip_install = True 8 | deps = 9 | -e 10 | .[test] 11 | commands = pytest {posargs:rocksdb/tests} 12 | 13 | [testenv:docs] 14 | deps = .[doc] 15 | commands = python setup.py build_sphinx -W 16 | 17 | [pytest] 18 | addopts = --verbose 19 | norecursedirs = .tox 20 | --------------------------------------------------------------------------------
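
A minimal usage sketch of the column-family API exercised by rocksdb/tests/test_db.py above; the temporary path, the column family name and the keys are illustrative only, and the cleanup mirrors rocksdb/tests/test_memtable.py.

    import os
    import shutil
    import tempfile

    import rocksdb

    loc = tempfile.mkdtemp()
    try:
        # Create a database and add a column family next to the implicit b'default' one.
        db = rocksdb.DB(os.path.join(loc, 'example'), rocksdb.Options(create_if_missing=True))
        cf_logs = db.create_column_family(b'logs', rocksdb.ColumnFamilyOptions())

        # Keys in a non-default column family are addressed as (handle, key) tuples.
        db.put((cf_logs, b'2019-04-18'), b'refactoring started')
        print(db.get((cf_logs, b'2019-04-18')))  # b'refactoring started'
        print(db.get(b'2019-04-18'))             # None, the default column family is separate

        # Iterators can be restricted to a single column family; items come back
        # as ((handle, key), value) pairs, as in test_iter_items above.
        it = db.iteritems(cf_logs)
        it.seek_to_first()
        print(dict(it))
    finally:
        shutil.rmtree(loc)

The same (handle, key) addressing applies to delete, multi_get, key_may_exist, WriteBatch and the other iterators, as the TestDBColumnFamilies cases above show.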