├── .flake8 ├── .github ├── dependabot.yml └── workflows │ └── validate.yaml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTORS ├── LICENSE ├── MANIFEST.in ├── README.md ├── black.toml ├── docs ├── Makefile ├── _static │ ├── ContextHierarchy.png │ ├── RDFlib-500.png │ ├── RDFlib.ico │ ├── RDFlib.png │ ├── RDFlib.svg │ ├── datatype_hierarchy.png │ ├── headerbg.png │ ├── logo-rdflib.png │ ├── plugins-diagram.svg │ └── pyramid.css ├── _themes │ └── armstrong │ │ ├── LICENSE │ │ ├── README │ │ ├── layout.html │ │ ├── static │ │ └── rtd.css_t │ │ ├── theme-old.conf │ │ └── theme.conf ├── conf.py ├── docs.rst ├── gettingstarted.rst ├── index.rst ├── sphinx-requirements.txt └── universal_rdf_store_interface.rst ├── examples ├── __init__.py └── leveldb_example.py ├── pyproject.toml ├── pytest.ini ├── rdflib_leveldb ├── __init__.py └── leveldbstore.py ├── requirements.dev.txt ├── requirements.txt ├── run_tests.py ├── setup.cfg ├── setup.py └── test ├── __init__.py ├── context_case.py ├── graph_case.py ├── sp2b ├── 10ktriples.n3 ├── 1ktriples.n3 ├── 25ktriples.n3 ├── 2ktriples.n3 ├── 3ktriples.n3 ├── 500triples.n3 ├── 50ktriples.n3 └── 5ktriples.n3 ├── test_conjunctivegraph.py ├── test_extended.py ├── test_functionality.py ├── test_graph.py ├── test_graph_and_context.py ├── test_store.py ├── test_store_performance1.py ├── test_store_performance2.py └── test_usecase.py /.flake8: -------------------------------------------------------------------------------- 1 | # https://flake8.pycqa.org/en/latest/user/configuration.html 2 | [flake8] 3 | extend-ignore = 4 | # E501: line too long 5 | # Disabled so that black can control line length. 
6 | E501,W503 7 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | open-pull-requests-limit: 10 8 | ignore: 9 | - dependency-name: sphinx 10 | versions: 11 | - 3.4.3 12 | - 3.5.2 13 | -------------------------------------------------------------------------------- /.github/workflows/validate.yaml: -------------------------------------------------------------------------------- 1 | name: Validate 2 | 3 | on: [push, pull_request, workflow_dispatch] 4 | 5 | env: 6 | FORCE_COLOR: 1 7 | XDG_CACHE_HOME: ${{ github.workspace }}/cache 8 | 9 | jobs: 10 | validate: 11 | runs-on: ${{ matrix.os }} 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | python-version: ["3.7", "3.8", "3.9"] 16 | os: [ 17 | ubuntu-latest, 18 | macos-latest, 19 | windows-latest 20 | ] 21 | steps: 22 | - uses: actions/checkout@v2 23 | 24 | - name: Set up Python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v2 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | 29 | # - uses: actions/setup-java@v2 30 | # with: 31 | # distribution: "temurin" 32 | # java-version: "17" 33 | 34 | - name: Get pip cache dir 35 | id: pip-cache 36 | shell: bash 37 | run: | 38 | python -m ensurepip --upgrade 39 | echo "::set-output name=dir::$(pip cache dir)" 40 | - name: Cache pip 41 | uses: actions/cache@v2 42 | with: 43 | path: ${{ steps.pip-cache.outputs.dir }} 44 | key: ${{ matrix.os }}-pip-${{ matrix.python-version }}-v1-${{ 45 | hashFiles('**/setup.py', '**/requirements*.txt') }} 46 | restore-keys: | 47 | ${{ matrix.os }}-pip-${{ matrix.python-version }}-v1- 48 | # - name: Cache xdg 49 | # uses: actions/cache@v2 50 | # with: 51 | # path: ${{ env.XDG_CACHE_HOME }} 52 | # key: ${{ matrix.os }}-xdg-v1-${{ hashFiles('**/with-fuseki.sh') }} 53 | 
# restore-keys: | 54 | # ${{ matrix.os }}-xdg-v1- 55 | - name: Install dependencies 56 | shell: bash 57 | run: | 58 | if [ "${{ matrix.os }}" == "ubuntu-latest" ] 59 | then 60 | sudo apt-get install -y libleveldb-dev 61 | python -m pip install --default-timeout 60 -r requirements.txt 62 | sudo apt-get install -y libdb-dev 63 | python -m pip install --default-timeout 60 -r requirements.dev.txt 64 | elif [ "${{ matrix.os }}" == "windows-latest" ] 65 | then 66 | python -m pip install --default-timeout 60 -r requirements.txt 67 | python -m pip install --default-timeout 60 -r requirements.dev.txt 68 | elif [ "${{ matrix.os }}" == "macos-latest" ] 69 | then 70 | python -m pip install --default-timeout 60 -r requirements.txt 71 | brew install berkeley-db@4 72 | export BERKELEYDB_DIR=$(brew --prefix berkeley-db@4) 73 | python -m pip install --user --upgrade berkeleydb 74 | python -m pip install --default-timeout 60 -r requirements.dev.txt 75 | fi 76 | python setup.py install 77 | - name: Validate 78 | shell: bash 79 | run: | 80 | black --config black.toml --check ./rdflib_leveldb || true 81 | flake8 --exit-zero rdflib_leveldb 82 | pytest -ra --cov 83 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | rdflib-leveldb.sublime-project 2 | /docs/_build/ 3 | rdflib-leveldb.sublime-workspace 4 | coverage/ 5 | /.hgtags 6 | /.hgignore 7 | build/ 8 | /docs/draft/ 9 | *~ 10 | test_reports/*latest.ttl 11 | # PyCharm 12 | .idea/ 13 | prepare_changelog.sh 14 | #### vimdiff <(curl --silent -L https://github.com/github/gitignore/raw/master/Python.gitignore) .gitignore 15 | # Byte-compiled / optimized / DLL files 16 | __pycache__/ 17 | *.py[cod] 18 | *$py.class 19 | 20 | # C extensions 21 | *.so 22 | 23 | # Distribution / packaging 24 | .Python 25 | build/ 26 | develop-eggs/ 27 | dist/ 28 | downloads/ 29 | eggs/ 30 | .eggs/ 31 | lib/ 32 | lib64/ 33 | parts/ 34 | 
sdist/ 35 | var/ 36 | wheels/ 37 | share/python-wheels/ 38 | *.egg-info/ 39 | .installed.cfg 40 | *.egg 41 | MANIFEST 42 | 43 | # PyInstaller 44 | # Usually these files are written by a python script from a template 45 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 46 | *.manifest 47 | *.spec 48 | 49 | # Installer logs 50 | pip-log.txt 51 | pip-delete-this-directory.txt 52 | 53 | # Unit test / coverage reports 54 | htmlcov/ 55 | .tox/ 56 | .nox/ 57 | .coverage 58 | .coverage.* 59 | .cache 60 | nosetests.xml 61 | coverage.xml 62 | *.cover 63 | *.py,cover 64 | .hypothesis/ 65 | .pytest_cache/ 66 | cover/ 67 | 68 | # Translations 69 | *.mo 70 | *.pot 71 | 72 | # Django stuff: 73 | *.log 74 | local_settings.py 75 | db.sqlite3 76 | db.sqlite3-journal 77 | 78 | # Flask stuff: 79 | instance/ 80 | .webassets-cache 81 | 82 | # Scrapy stuff: 83 | .scrapy 84 | 85 | # Sphinx documentation 86 | docs/_build/ 87 | 88 | # PyBuilder 89 | .pybuilder/ 90 | target/ 91 | 92 | # Jupyter Notebook 93 | .ipynb_checkpoints 94 | 95 | # IPython 96 | profile_default/ 97 | ipython_config.py 98 | 99 | # pyenv 100 | # For a library or package, you might want to ignore these files since the code is 101 | # intended to run in multiple environments; otherwise, check them in: 102 | # .python-version 103 | 104 | # pipenv 105 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 106 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 107 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 108 | # install all needed dependencies. 109 | #Pipfile.lock 110 | 111 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 112 | __pypackages__/ 113 | 114 | # Celery stuff 115 | celerybeat-schedule 116 | celerybeat.pid 117 | 118 | # SageMath parsed files 119 | *.sage.py 120 | 121 | # Environments 122 | .env 123 | .venv 124 | env/ 125 | venv/ 126 | ENV/ 127 | env.bak/ 128 | venv.bak/ 129 | 130 | # Spyder project settings 131 | .spyderproject 132 | .spyproject 133 | 134 | # Rope project settings 135 | .ropeproject 136 | 137 | # mkdocs documentation 138 | /site 139 | 140 | # mypy 141 | .mypy_cache/ 142 | .dmypy.json 143 | dmypy.json 144 | 145 | # Pyre type checker 146 | .pyre/ 147 | 148 | # pytype static type analyzer 149 | .pytype/ 150 | 151 | # Cython debug symbols 152 | cython_debug/ 153 | 154 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | 2021/11/16 RELEASE 0.2 2 | ====================== 3 | - Migrated to Python 3, dropped support for Python 2. 4 | - Removed LRU implementation (leveldb uses its own) 5 | - Migrated the remaining Python-coded LFU cache to `__init__.py` 6 | - Removed the (unused) Cython Picklr extension (Python3 7 | uses cPickle if available) 8 | - Added some additional tests 9 | - Added some documentation 10 | - Cargo-culted RDFLib's devops config files (e.g. `drone.yml`, `black.toml`) 11 | - Cloned RDFLib's `berkeleydb_example.py` as `leveldb_example.py` 12 | 13 | 2021/11/16 RELEASE 0.1 14 | ====================== 15 | 16 | Based on Drew Perttula's original `TokyoCabinet Store` contribution. 17 | And then a Kyoto Cabinet version by Graham Higgins. 18 | And this one by Gunnar Grimnes. 
19 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | # See https://github.com/RDFLib/rdflib/graphs/contributors 2 | 3 | Drew Perttula 4 | Gunnar AAstrand Grimnes 5 | Graham Higgins 6 | Vincent Octo 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2002-2021, RDFLib Team 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include CONTRIBUTORS 3 | include CHANGELOG.md 4 | include README.md 5 | recursive-include rdflib_leveldb *.py 6 | recursive-include examples *.py 7 | graft test 8 | graft docs 9 | prune docs/_build 10 | global-exclude *.pyc *$py.class 11 | include setup.* 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # A leveldb-backed persistence plugin store for RDFLib 2 | 3 | [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) ![Validation: install and test](https://github.com/RDFLib/rdflib-leveldb/actions/workflows/validate.yaml/badge.svg) [action](https://github.com/RDFLib/rdflib-leveldb/actions/workflows/validate.yaml) 4 | 5 | An adaptation of RDFLib BerkeleyDB Store’s key-value approach, using LevelDB as a back-end. 6 | 7 | Implemented by Gunnar Grimnes, based on an original contribution by Drew Perttula. 8 | 9 | Migrated to Python 3 / RDFLib=>6 and adjusted to use the [plyvel](https://pypi.org/project/plyvel/) / [plyvel-wheels](https://github.com/AustEcon/plyvel-wheels) Python-LevelDB interface by Graham Higgins. 
10 | 11 | 12 | ## Installation options 13 | 14 | ### Install with pip from github repos 15 | 16 | ```bash 17 | pip install git+https://github.com/RDFLib/rdflib-leveldb#egg=rdflib_leveldb` 18 | ``` 19 | 20 | ### Install by cloning github repos, then pip install 21 | 22 | ```bash 23 | git clone https://github.com/RDFLib/rdflib-leveldb.git 24 | cd rdflib-leveldb 25 | pip install . 26 | # Optionally 27 | pip install -r requirements.dev.txt 28 | ./run_tests.py 29 | ``` 30 | 31 | ### Install by cloning github repos, then `python setup.py install` 32 | 33 | ```bash 34 | git clone https://github.com/RDFLib/rdflib-leveldb.git 35 | cd rdflib-leveldb 36 | python setup.py install 37 | # Optionally 38 | pip install -r requirements.dev.txt 39 | ./run_tests.py 40 | ``` 41 | 42 | ### Example usage: 43 | 44 | ```python 45 | from rdflib import plugin, Graph, URIRef 46 | from rdflib.store import Store 47 | import tempfile 48 | import os 49 | 50 | 51 | def example(): 52 | path = os.path.join(tempfile.gettempdir(), "testleveldb") 53 | store = plugin.get("LevelDB", Store)(identifier=URIRef("rdflib_leveldb_test")) 54 | 55 | g = Graph(store) 56 | g.open(path, create=True) 57 | 58 | # Parse in an RDF file hosted on the Internet 59 | g.parse("http://www.w3.org/People/Berners-Lee/card") 60 | 61 | # Loop through each triple in the graph (subj, pred, obj) 62 | for subj, pred, obj in g: 63 | # Check if there is at least one triple in the Graph 64 | if (subj, pred, obj) not in g: 65 | raise Exception("It better be!") 66 | assert len(g) == 86, len(g) 67 | g.close() 68 | 69 | g.destroy(configuration=path) 70 | ``` 71 | 72 | ## A note on install dependencies as required/resolved by setup.py / pip: 73 | 74 | ### Linux 75 | 76 | The implementation of the rdflib-leveldb “LevelDB” Store depends on: 77 | 78 | 1. The C++ [leveldb library](https://github.com/google/leveldb/) 79 | 2. The [Plyvel](https://pypi.org/project/plyvel/) Python-to-leveldb interface. 
80 | 81 | The leveldb library is installed using the appropriate package manager. 82 | 83 | `sudo apt install libleveldb-dev` 84 | 85 | ### Windows / MacOS 86 | 87 | The implementation of the rdflib-leveldb “LevelDB” Store depends on a 88 | Python wheels package [plyvel-wheels](https://github.com/AustEcon/plyvel-wheels) 89 | which includes platform-specific binaries for the leveldb library. 90 | 91 | The task of installing a platform-specific `Plyvel` wrapper is handled with: 92 | 93 | `pip install -r requirements.txt` (for standard use of this RDFLib Store) 94 | 95 | or 96 | 97 | `pip install -r requirements.dev.txt` (for module development) 98 | 99 | or just 100 | 101 | `python setup.py install` 102 | -------------------------------------------------------------------------------- /black.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | required-version = "21.11b1" 3 | line-length = "88" 4 | skip-string-normalization = true 5 | target-version = ['py38'] 6 | include = '\.pyi?$' 7 | exclude = ''' 8 | ( 9 | /( 10 | \.eggs # exclude a few common directories in the 11 | | \.git # root of the project 12 | | \.hg 13 | | \.mypy_cache 14 | | \.pytest_cache 15 | | \.tox 16 | | \.venv 17 | | \.github 18 | | _build 19 | | htmlcov 20 | | benchmarks 21 | | examples # No need to Black examples 22 | | test # Tests are a mess, don't black them 23 | | test_reports 24 | | rdflib_leveldb.egg-info 25 | | buck-out 26 | | build 27 | | dist 28 | | venv 29 | )/ 30 | ) 31 | ''' 32 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | 9 | # Internal variables. 
10 | PAPEROPT_a4 = -D latex_paper_size=a4 11 | PAPEROPT_letter = -D latex_paper_size=letter 12 | ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 13 | 14 | .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest 15 | 16 | help: 17 | @echo "Please use \`make ' where is one of" 18 | @echo " html to make standalone HTML files" 19 | @echo " dirhtml to make HTML files named index.html in directories" 20 | @echo " pickle to make pickle files" 21 | @echo " json to make JSON files" 22 | @echo " htmlhelp to make HTML files and a HTML help project" 23 | @echo " qthelp to make HTML files and a qthelp project" 24 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 25 | @echo " changes to make an overview of all changed/added/deprecated items" 26 | @echo " linkcheck to check all external links for integrity" 27 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 28 | 29 | clean: 30 | -rm -rf _build/* 31 | 32 | html: 33 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html 34 | @echo 35 | @echo "Build finished. The HTML pages are in _build/html." 36 | 37 | dirhtml: 38 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml 39 | @echo 40 | @echo "Build finished. The HTML pages are in _build/dirhtml." 41 | 42 | pickle: 43 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle 44 | @echo 45 | @echo "Build finished; now you can process the pickle files." 46 | 47 | json: 48 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json 49 | @echo 50 | @echo "Build finished; now you can process the JSON files." 51 | 52 | htmlhelp: 53 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp 54 | @echo 55 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 56 | ".hhp project file in _build/htmlhelp." 
57 | 58 | qthelp: 59 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp 60 | @echo 61 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 62 | ".qhcp project file in _build/qthelp, like this:" 63 | @echo "# qcollectiongenerator _build/qthelp/rdflib.qhcp" 64 | @echo "To view the help file:" 65 | @echo "# assistant -collectionFile _build/qthelp/rdflib.qhc" 66 | 67 | latex: 68 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex 69 | @echo 70 | @echo "Build finished; the LaTeX files are in _build/latex." 71 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 72 | "run these through (pdf)latex." 73 | 74 | changes: 75 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes 76 | @echo 77 | @echo "The overview file is in _build/changes." 78 | 79 | linkcheck: 80 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck 81 | @echo 82 | @echo "Link check complete; look for any errors in the above output " \ 83 | "or in _build/linkcheck/output.txt." 84 | 85 | doctest: 86 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest 87 | @echo "Testing of doctests in the sources finished, look at the " \ 88 | "results in _build/doctest/output.txt." 
89 | -------------------------------------------------------------------------------- /docs/_static/ContextHierarchy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RDFLib/rdflib-leveldb/a0f3386c71e6b1cfbd09c257e29400f5fde43ed3/docs/_static/ContextHierarchy.png -------------------------------------------------------------------------------- /docs/_static/RDFlib-500.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RDFLib/rdflib-leveldb/a0f3386c71e6b1cfbd09c257e29400f5fde43ed3/docs/_static/RDFlib-500.png -------------------------------------------------------------------------------- /docs/_static/RDFlib.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RDFLib/rdflib-leveldb/a0f3386c71e6b1cfbd09c257e29400f5fde43ed3/docs/_static/RDFlib.ico -------------------------------------------------------------------------------- /docs/_static/RDFlib.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RDFLib/rdflib-leveldb/a0f3386c71e6b1cfbd09c257e29400f5fde43ed3/docs/_static/RDFlib.png -------------------------------------------------------------------------------- /docs/_static/RDFlib.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 11 | 12 | 16 | 20 | 29 | 30 | 33 | 34 | 35 | 36 | 39 | 40 | 41 | 42 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /docs/_static/datatype_hierarchy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RDFLib/rdflib-leveldb/a0f3386c71e6b1cfbd09c257e29400f5fde43ed3/docs/_static/datatype_hierarchy.png -------------------------------------------------------------------------------- 
/docs/_static/headerbg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RDFLib/rdflib-leveldb/a0f3386c71e6b1cfbd09c257e29400f5fde43ed3/docs/_static/headerbg.png -------------------------------------------------------------------------------- /docs/_static/logo-rdflib.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RDFLib/rdflib-leveldb/a0f3386c71e6b1cfbd09c257e29400f5fde43ed3/docs/_static/logo-rdflib.png -------------------------------------------------------------------------------- /docs/_static/pyramid.css: -------------------------------------------------------------------------------- 1 | /* 2 | * pylons.css_t 3 | * ~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- pylons theme. 6 | * 7 | * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | @import url("basic.css"); 13 | 14 | /* -- page layout ----------------------------------------------------------- */ 15 | 16 | body { 17 | font-family: "Nobile", sans-serif; 18 | font-size: 100%; 19 | background-color: #393939; 20 | color: #ffffff; 21 | margin: 0; 22 | padding: 0; 23 | } 24 | 25 | div.documentwrapper { 26 | float: left; 27 | width: 100%; 28 | } 29 | 30 | div.bodywrapper { 31 | margin: 0 0 0 230px; 32 | } 33 | 34 | hr { 35 | border: 1px solid #B1B4B6; 36 | } 37 | 38 | div.document { 39 | background-color: #eee; 40 | } 41 | 42 | div.header { 43 | width:100%; 44 | background: #f4ad32 url(headerbg.png) repeat-x 0 top; 45 | border-bottom: 2px solid #ffffff; 46 | } 47 | 48 | div.logo { 49 | text-align: center; 50 | padding-top: 10px; 51 | } 52 | 53 | div.body { 54 | background-color: #ffffff; 55 | color: #3E4349; 56 | padding: 0 30px 30px 30px; 57 | font-size: 1em; 58 | border: 2px solid #ddd; 59 | border-right-style: none; 60 | overflow: auto; 61 | } 62 | 63 | div.footer { 64 | color: #ffffff; 
65 | width: 100%; 66 | padding: 13px 0; 67 | text-align: center; 68 | font-size: 75%; 69 | background: transparent; 70 | clear:both; 71 | } 72 | 73 | div.footer a { 74 | color: #ffffff; 75 | text-decoration: none; 76 | } 77 | 78 | div.footer a:hover { 79 | color: #e88f00; 80 | text-decoration: underline; 81 | } 82 | 83 | div.related { 84 | line-height: 30px; 85 | color: #373839; 86 | font-size: 0.8em; 87 | background-color: #eee; 88 | } 89 | 90 | div.related a { 91 | color: #1b61d6; 92 | } 93 | 94 | div.related ul { 95 | padding-left: 240px; 96 | } 97 | 98 | div.sphinxsidebar { 99 | font-size: 0.75em; 100 | line-height: 1.5em; 101 | } 102 | 103 | div.sphinxsidebarwrapper{ 104 | padding: 10px 0; 105 | } 106 | 107 | div.sphinxsidebar h3, 108 | div.sphinxsidebar h4 { 109 | font-family: "Neuton", sans-serif; 110 | color: #373839; 111 | font-size: 1.4em; 112 | font-weight: normal; 113 | margin: 0; 114 | padding: 5px 10px; 115 | border-bottom: 2px solid #ddd; 116 | } 117 | 118 | div.sphinxsidebar h4{ 119 | font-size: 1.3em; 120 | } 121 | 122 | div.sphinxsidebar h3 a { 123 | color: #000000; 124 | } 125 | 126 | 127 | div.sphinxsidebar p { 128 | color: #888; 129 | padding: 5px 20px; 130 | } 131 | 132 | div.sphinxsidebar p.topless { 133 | } 134 | 135 | div.sphinxsidebar ul { 136 | margin: 10px 20px; 137 | padding: 0; 138 | color: #373839; 139 | } 140 | 141 | div.sphinxsidebar a { 142 | color: #444; 143 | } 144 | 145 | div.sphinxsidebar input { 146 | border: 1px solid #ccc; 147 | font-family: sans-serif; 148 | font-size: 1em; 149 | } 150 | 151 | div.sphinxsidebar input[type=text]{ 152 | margin-left: 20px; 153 | } 154 | 155 | /* -- sidebars -------------------------------------------------------------- */ 156 | 157 | div.sidebar { 158 | margin: 0 0 0.5em 1em; 159 | border: 2px solid #c6d880; 160 | background-color: #e6efc2; 161 | width: 40%; 162 | float: right; 163 | border-right-style: none; 164 | border-left-style: none; 165 | padding: 10px 20px; 166 | } 167 | 168 | 
p.sidebar-title { 169 | font-weight: bold; 170 | } 171 | 172 | /* -- body styles ----------------------------------------------------------- */ 173 | 174 | a, a .pre { 175 | color: #1b61d6; 176 | text-decoration: none; 177 | } 178 | 179 | a:hover, a:hover .pre { 180 | text-decoration: underline; 181 | } 182 | 183 | div.body h1, 184 | div.body h2, 185 | div.body h3, 186 | div.body h4, 187 | div.body h5, 188 | div.body h6 { 189 | font-family: "Neuton", sans-serif; 190 | background-color: #ffffff; 191 | font-weight: normal; 192 | color: #373839; 193 | margin: 30px 0px 10px 0px; 194 | padding: 5px 0; 195 | } 196 | 197 | div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; } 198 | div.body h2 { font-size: 150%; background-color: #ffffff; } 199 | div.body h3 { font-size: 120%; background-color: #ffffff; } 200 | div.body h4 { font-size: 110%; background-color: #ffffff; } 201 | div.body h5 { font-size: 100%; background-color: #ffffff; } 202 | div.body h6 { font-size: 100%; background-color: #ffffff; } 203 | 204 | a.headerlink { 205 | color: #1b61d6; 206 | font-size: 0.8em; 207 | padding: 0 4px 0 4px; 208 | text-decoration: none; 209 | } 210 | 211 | a.headerlink:hover { 212 | text-decoration: underline; 213 | } 214 | 215 | div.body p, div.body dd, div.body li { 216 | line-height: 1.5em; 217 | } 218 | 219 | div.admonition p.admonition-title + p { 220 | display: inline; 221 | } 222 | 223 | div.highlight{ 224 | background-color: white; 225 | } 226 | 227 | div.note { 228 | border: 2px solid #7a9eec; 229 | border-right-style: none; 230 | border-left-style: none; 231 | padding: 10px 20px 10px 60px; 232 | background: #e1ecfe url(dialog-note.png) no-repeat 10px 8px; 233 | } 234 | 235 | div.seealso { 236 | background: #fff6bf url(dialog-seealso.png) no-repeat 10px 8px; 237 | border: 2px solid #ffd324; 238 | border-left-style: none; 239 | border-right-style: none; 240 | padding: 10px 20px 10px 60px; 241 | } 242 | 243 | div.topic { 244 | background: #eeeeee; 245 
| border: 2px solid #C6C9CB; 246 | padding: 10px 20px; 247 | border-right-style: none; 248 | border-left-style: none; 249 | } 250 | 251 | div.warning { 252 | background: #fbe3e4 url(dialog-warning.png) no-repeat 10px 8px; 253 | border: 2px solid #fbc2c4; 254 | border-right-style: none; 255 | border-left-style: none; 256 | padding: 10px 20px 10px 60px; 257 | } 258 | 259 | p.admonition-title { 260 | display: none; 261 | } 262 | 263 | p.admonition-title:after { 264 | content: ":"; 265 | } 266 | 267 | pre { 268 | padding: 10px; 269 | background-color: #fafafa; 270 | color: #222; 271 | line-height: 1.2em; 272 | border: 2px solid #C6C9CB; 273 | font-size: 1.1em; 274 | margin: 1.5em 0 1.5em 0; 275 | border-right-style: none; 276 | border-left-style: none; 277 | } 278 | 279 | tt { 280 | background-color: transparent; 281 | color: #222; 282 | font-size: 1.1em; 283 | font-family: monospace; 284 | } 285 | 286 | .viewcode-back { 287 | font-family: "Nobile", sans-serif; 288 | } 289 | 290 | div.viewcode-block:target { 291 | background-color: #fff6bf; 292 | border: 2px solid #ffd324; 293 | border-left-style: none; 294 | border-right-style: none; 295 | padding: 10px 20px; 296 | } 297 | 298 | table.highlighttable { 299 | width: 100%; 300 | } 301 | 302 | table.highlighttable td { 303 | padding: 0; 304 | } 305 | 306 | a em.std-term { 307 | color: #007f00; 308 | } 309 | 310 | a:hover em.std-term { 311 | text-decoration: underline; 312 | } 313 | 314 | .download { 315 | font-family: "Nobile", sans-serif; 316 | font-weight: normal; 317 | font-style: normal; 318 | } 319 | 320 | tt.xref { 321 | font-weight: normal; 322 | font-style: normal; 323 | } -------------------------------------------------------------------------------- /docs/_themes/armstrong/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2011 Bay Citizen & Texas Tribune 2 | 3 | Original ReadTheDocs.org code 4 | Copyright (c) 2010 Charles Leifer, Eric Holscher, Bobby 
Grace 5 | 6 | Permission is hereby granted, free of charge, to any person 7 | obtaining a copy of this software and associated documentation 8 | files (the "Software"), to deal in the Software without 9 | restriction, including without limitation the rights to use, 10 | copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | copies of the Software, and to permit persons to whom the 12 | Software is furnished to do so, subject to the following 13 | conditions: 14 | 15 | The above copyright notice and this permission notice shall be 16 | included in all copies or substantial portions of the Software. 17 | 18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 19 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 20 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 21 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 22 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 23 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 24 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 25 | OTHER DEALINGS IN THE SOFTWARE. 26 | 27 | -------------------------------------------------------------------------------- /docs/_themes/armstrong/README: -------------------------------------------------------------------------------- 1 | This is the Armstrong Sphinx theme from https://github.com/armstrong/armstrong_sphinx 2 | 3 | Used under BSD license. 
4 | -------------------------------------------------------------------------------- /docs/_themes/armstrong/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "basic/layout.html" %} 2 | 3 | {% set script_files = script_files + [pathto("_static/searchtools.js", 1)] %} 4 | 5 | {% block htmltitle %} 6 | {{ super() }} 7 | 8 | 9 | 10 | {% endblock %} 11 | 12 | {% block footer %} 13 | 31 | 32 | 33 | {% if theme_analytics_code %} 34 | 35 | 46 | {% endif %} 47 | 48 | {% endblock %} 49 | -------------------------------------------------------------------------------- /docs/_themes/armstrong/static/rtd.css_t: -------------------------------------------------------------------------------- 1 | /* 2 | * rtd.css 3 | * ~~~~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- sphinxdoc theme. Originally created by 6 | * Armin Ronacher for Werkzeug. 7 | * 8 | * Customized for ReadTheDocs by Eric Pierce & Eric Holscher 9 | * 10 | * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. 11 | * :license: BSD, see LICENSE for details. 
12 | * 13 | */ 14 | 15 | /* RTD colors 16 | * light blue: {{ theme_light_color }} 17 | * medium blue: {{ theme_medium_color }} 18 | * dark blue: {{ theme_dark_color }} 19 | * dark grey: {{ theme_grey_color }} 20 | * 21 | * medium blue hover: {{ theme_medium_color_hover }}; 22 | * green highlight: {{ theme_green_highlight }} 23 | * light blue (project bar): {{ theme_light_color }} 24 | */ 25 | 26 | @import url("basic.css"); 27 | 28 | /* PAGE LAYOUT -------------------------------------------------------------- */ 29 | 30 | body { 31 | font: 100%/1.5 "ff-meta-web-pro-1","ff-meta-web-pro-2",Arial,"Helvetica Neue",sans-serif; 32 | text-align: center; 33 | color: black; 34 | background-color: {{ theme_background }}; 35 | padding: 0; 36 | margin: 0; 37 | } 38 | 39 | div.document { 40 | text-align: left; 41 | background-color: {{ theme_light_color }}; 42 | } 43 | 44 | div.bodywrapper { 45 | background-color: {{ theme_white }}; 46 | border-left: 1px solid {{ theme_lighter_gray }}; 47 | border-bottom: 1px solid {{ theme_lighter_gray }}; 48 | margin: 0 0 0 16em; 49 | } 50 | 51 | div.body { 52 | margin: 0; 53 | padding: 0.5em 1.3em; 54 | max-width: 55em; 55 | min-width: 20em; 56 | } 57 | 58 | div.related { 59 | font-size: 1em; 60 | background-color: {{ theme_background }}; 61 | } 62 | 63 | div.documentwrapper { 64 | float: left; 65 | width: 100%; 66 | background-color: {{ theme_light_color }}; 67 | } 68 | 69 | p.logo { 70 | padding-top: 30px; 71 | } 72 | 73 | /* HEADINGS --------------------------------------------------------------- */ 74 | 75 | h1 { 76 | margin: 0; 77 | padding: 0.7em 0 0.3em 0; 78 | font-size: 1.5em; 79 | line-height: 1.15; 80 | color: {{ theme_h1 }}; 81 | clear: both; 82 | } 83 | 84 | h2 { 85 | margin: 2em 0 0.2em 0; 86 | font-size: 1.35em; 87 | padding: 0; 88 | color: {{ theme_h2 }}; 89 | } 90 | 91 | h3 { 92 | margin: 1em 0 -0.3em 0; 93 | font-size: 1.2em; 94 | color: {{ theme_h3 }}; 95 | } 96 | 97 | div.body h1 a, div.body h2 a, div.body h3 a, div.body 
h4 a, div.body h5 a, div.body h6 a { 98 | color: black; 99 | } 100 | 101 | h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { 102 | display: none; 103 | margin: 0 0 0 0.3em; 104 | padding: 0 0.2em 0 0.2em; 105 | color: {{ theme_gray_a }} !important; 106 | } 107 | 108 | h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, 109 | h5:hover a.anchor, h6:hover a.anchor { 110 | display: inline; 111 | } 112 | 113 | h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, 114 | h5 a.anchor:hover, h6 a.anchor:hover { 115 | color: {{ theme_gray_7 }}; 116 | background-color: {{ theme_dirty_white }}; 117 | } 118 | 119 | 120 | /* LINKS ------------------------------------------------------------------ */ 121 | 122 | /* Normal links get a pseudo-underline */ 123 | a { 124 | color: {{ theme_link_color }}; 125 | text-decoration: none; 126 | border-bottom: 1px solid {{ theme_link_color_decoration }}; 127 | } 128 | 129 | /* Links in sidebar, TOC, index trees and tables have no underline */ 130 | .sphinxsidebar a, 131 | .toctree-wrapper a, 132 | .indextable a, 133 | #indices-and-tables a { 134 | color: {{ theme_dark_gray }}; 135 | text-decoration: none; 136 | border-bottom: none; 137 | } 138 | 139 | /* Most links get an underline-effect when hovered */ 140 | a:hover, 141 | div.toctree-wrapper a:hover, 142 | .indextable a:hover, 143 | #indices-and-tables a:hover { 144 | color: {{ theme_black }}; 145 | text-decoration: none; 146 | border-bottom: 1px solid {{ theme_black }}; 147 | } 148 | 149 | /* Footer links */ 150 | div.footer a { 151 | color: {{ theme_background_text_link }}; 152 | text-decoration: none; 153 | border: none; 154 | } 155 | div.footer a:hover { 156 | color: {{ theme_medium_color_link_hover }}; 157 | text-decoration: underline; 158 | border: none; 159 | } 160 | 161 | /* Permalink anchor (subtle grey with a red hover) */ 162 | div.body a.headerlink { 163 | color: {{ theme_lighter_gray }}; 164 | 
font-size: 1em; 165 | margin-left: 6px; 166 | padding: 0 4px 0 4px; 167 | text-decoration: none; 168 | border: none; 169 | } 170 | div.body a.headerlink:hover { 171 | color: {{ theme_negative_text }}; 172 | border: none; 173 | } 174 | 175 | 176 | /* NAVIGATION BAR --------------------------------------------------------- */ 177 | 178 | div.related ul { 179 | height: 2.5em; 180 | } 181 | 182 | div.related ul li { 183 | margin: 0; 184 | padding: 0.65em 0; 185 | float: left; 186 | display: block; 187 | color: {{ theme_background_link_half }}; /* For the >> separators */ 188 | font-size: 0.8em; 189 | } 190 | 191 | div.related ul li.right { 192 | float: right; 193 | margin-right: 5px; 194 | color: transparent; /* Hide the | separators */ 195 | } 196 | 197 | /* "Breadcrumb" links in nav bar */ 198 | div.related ul li a { 199 | order: none; 200 | background-color: inherit; 201 | font-weight: bold; 202 | margin: 6px 0 6px 4px; 203 | line-height: 1.75em; 204 | color: {{ theme_background_link }}; 205 | text-shadow: 0 1px rgba(0, 0, 0, 0.5); 206 | padding: 0.4em 0.8em; 207 | border: none; 208 | border-radius: 3px; 209 | } 210 | /* previous / next / modules / index links look more like buttons */ 211 | div.related ul li.right a { 212 | margin: 0.375em 0; 213 | background-color: {{ theme_medium_color_hover }}; 214 | text-shadow: 0 1px rgba(0, 0, 0, 0.5); 215 | border-radius: 3px; 216 | -webkit-border-radius: 3px; 217 | -moz-border-radius: 3px; 218 | } 219 | /* All navbar links light up as buttons when hovered */ 220 | div.related ul li a:hover { 221 | background-color: {{ theme_medium_color }}; 222 | color: {{ theme_white }}; 223 | text-decoration: none; 224 | border-radius: 3px; 225 | -webkit-border-radius: 3px; 226 | -moz-border-radius: 3px; 227 | } 228 | /* Take extra precautions for tt within links */ 229 | a tt, 230 | div.related ul li a tt { 231 | background: inherit !important; 232 | color: inherit !important; 233 | } 234 | 235 | 236 | /* SIDEBAR 
---------------------------------------------------------------- */ 237 | 238 | div.sphinxsidebarwrapper { 239 | padding: 0; 240 | } 241 | 242 | div.sphinxsidebar { 243 | margin: 0; 244 | margin-left: -100%; 245 | float: left; 246 | top: 3em; 247 | left: 0; 248 | padding: 0 1em; 249 | width: 14em; 250 | font-size: 1em; 251 | text-align: left; 252 | background-color: {{ theme_light_color }}; 253 | } 254 | 255 | div.sphinxsidebar img { 256 | max-width: 12em; 257 | } 258 | 259 | div.sphinxsidebar h3, div.sphinxsidebar h4 { 260 | margin: 1.2em 0 0.3em 0; 261 | font-size: 1em; 262 | padding: 0; 263 | color: {{ theme_gray_2 }}; 264 | font-family: "ff-meta-web-pro-1", "ff-meta-web-pro-2", "Arial", "Helvetica Neue", sans-serif; 265 | } 266 | 267 | div.sphinxsidebar h3 a { 268 | color: {{ theme_grey_color }}; 269 | } 270 | 271 | div.sphinxsidebar ul, 272 | div.sphinxsidebar p { 273 | margin-top: 0; 274 | padding-left: 0; 275 | line-height: 130%; 276 | background-color: {{ theme_light_color }}; 277 | } 278 | 279 | /* No bullets for nested lists, but a little extra indentation */ 280 | div.sphinxsidebar ul ul { 281 | list-style-type: none; 282 | margin-left: 1.5em; 283 | padding: 0; 284 | } 285 | 286 | /* A little top/bottom padding to prevent adjacent links' borders 287 | * from overlapping each other */ 288 | div.sphinxsidebar ul li { 289 | padding: 1px 0; 290 | } 291 | 292 | /* A little left-padding to make these align with the ULs */ 293 | div.sphinxsidebar p.topless { 294 | padding-left: 0 0 0 1em; 295 | } 296 | 297 | /* Make these into hidden one-liners */ 298 | div.sphinxsidebar ul li, 299 | div.sphinxsidebar p.topless { 300 | white-space: nowrap; 301 | overflow: hidden; 302 | } 303 | /* ...which become visible when hovered */ 304 | div.sphinxsidebar ul li:hover, 305 | div.sphinxsidebar p.topless:hover { 306 | overflow: visible; 307 | } 308 | 309 | /* Search text box and "Go" button */ 310 | #searchbox { 311 | margin-top: 2em; 312 | margin-bottom: 1em; 313 | 
background: {{ theme_dirtier_white }}; 314 | padding: 0.5em; 315 | border-radius: 6px; 316 | -moz-border-radius: 6px; 317 | -webkit-border-radius: 6px; 318 | } 319 | #searchbox h3 { 320 | margin-top: 0; 321 | } 322 | 323 | /* Make search box and button abut and have a border */ 324 | input, 325 | div.sphinxsidebar input { 326 | border: 1px solid {{ theme_gray_9 }}; 327 | float: left; 328 | } 329 | 330 | /* Search textbox */ 331 | input[type="text"] { 332 | margin: 0; 333 | padding: 0 3px; 334 | height: 20px; 335 | width: 144px; 336 | border-top-left-radius: 3px; 337 | border-bottom-left-radius: 3px; 338 | -moz-border-radius-topleft: 3px; 339 | -moz-border-radius-bottomleft: 3px; 340 | -webkit-border-top-left-radius: 3px; 341 | -webkit-border-bottom-left-radius: 3px; 342 | } 343 | /* Search button */ 344 | input[type="submit"] { 345 | margin: 0 0 0 -1px; /* -1px prevents a double-border with textbox */ 346 | height: 22px; 347 | color: {{ theme_dark_gray }}; 348 | background-color: {{ theme_light_color }}; 349 | padding: 1px 4px; 350 | font-weight: bold; 351 | border-top-right-radius: 3px; 352 | border-bottom-right-radius: 3px; 353 | -moz-border-radius-topright: 3px; 354 | -moz-border-radius-bottomright: 3px; 355 | -webkit-border-top-right-radius: 3px; 356 | -webkit-border-bottom-right-radius: 3px; 357 | } 358 | input[type="submit"]:hover { 359 | color: {{ theme_white }}; 360 | background-color: {{ theme_green_highlight }}; 361 | } 362 | 363 | div.sphinxsidebar p.searchtip { 364 | clear: both; 365 | padding: 0.5em 0 0 0; 366 | background: {{ theme_dirtier_white }}; 367 | color: {{ theme_gray }}; 368 | font-size: 0.9em; 369 | } 370 | 371 | /* Sidebar links are unusual */ 372 | div.sphinxsidebar li a, 373 | div.sphinxsidebar p a { 374 | background: {{ theme_light_color }}; /* In case links overlap main content */ 375 | border-radius: 3px; 376 | -moz-border-radius: 3px; 377 | -webkit-border-radius: 3px; 378 | border: 1px solid transparent; /* To prevent things jumping 
around on hover */ 379 | padding: 0 5px 0 5px; 380 | } 381 | div.sphinxsidebar li a:hover, 382 | div.sphinxsidebar p a:hover { 383 | color: {{ theme_black }}; 384 | text-decoration: none; 385 | border: 1px solid {{ theme_light_gray }}; 386 | } 387 | 388 | /* Tweak any link appearing in a heading */ 389 | div.sphinxsidebar h3 a { 390 | } 391 | 392 | 393 | 394 | 395 | /* OTHER STUFF ------------------------------------------------------------ */ 396 | 397 | cite, code, tt { 398 | font-family: 'Consolas', 'Deja Vu Sans Mono', 399 | 'Bitstream Vera Sans Mono', monospace; 400 | font-size: 0.95em; 401 | letter-spacing: 0.01em; 402 | } 403 | 404 | tt { 405 | background-color: {{ theme_code_background }}; 406 | color: {{ theme_dark_gray }}; 407 | } 408 | 409 | tt.descname, tt.descclassname, tt.xref { 410 | border: 0; 411 | } 412 | 413 | hr { 414 | border: 1px solid {{ theme_ruler }}; 415 | margin: 2em; 416 | } 417 | 418 | pre, #_fontwidthtest { 419 | font-family: 'Consolas', 'Deja Vu Sans Mono', 420 | 'Bitstream Vera Sans Mono', monospace; 421 | margin: 1em 2em; 422 | font-size: 0.95em; 423 | letter-spacing: 0.015em; 424 | line-height: 120%; 425 | padding: 0.5em; 426 | border: 1px solid {{ theme_lighter_gray }}; 427 | background-color: {{ theme_code_background }}; 428 | border-radius: 6px; 429 | -moz-border-radius: 6px; 430 | -webkit-border-radius: 6px; 431 | } 432 | 433 | pre a { 434 | color: inherit; 435 | text-decoration: underline; 436 | } 437 | 438 | td.linenos pre { 439 | padding: 0.5em 0; 440 | } 441 | 442 | div.quotebar { 443 | background-color: {{ theme_almost_white }}; 444 | max-width: 250px; 445 | float: right; 446 | padding: 2px 7px; 447 | border: 1px solid {{ theme_lighter_gray }}; 448 | } 449 | 450 | div.topic { 451 | background-color: {{ theme_almost_white }}; 452 | } 453 | 454 | table { 455 | border-collapse: collapse; 456 | margin: 0 -0.5em 0 0; 457 | } 458 | 459 | table td, table th { 460 | padding: 0.2em 0.5em 0.2em 0.5em; 461 | } 462 | 463 | 464 | /* 
ADMONITIONS AND WARNINGS ------------------------------------------------- */ 465 | 466 | /* Shared by admonitions, warnings and sidebars */ 467 | div.admonition, 468 | div.warning, 469 | div.sidebar { 470 | font-size: 0.9em; 471 | margin: 2em; 472 | padding: 0; 473 | /* 474 | border-radius: 6px; 475 | -moz-border-radius: 6px; 476 | -webkit-border-radius: 6px; 477 | */ 478 | } 479 | div.admonition p, 480 | div.warning p, 481 | div.sidebar p { 482 | margin: 0.5em 1em 0.5em 1em; 483 | padding: 0; 484 | } 485 | div.admonition pre, 486 | div.warning pre, 487 | div.sidebar pre { 488 | margin: 0.4em 1em 0.4em 1em; 489 | } 490 | div.admonition p.admonition-title, 491 | div.warning p.admonition-title, 492 | div.sidebar p.sidebar-title { 493 | margin: 0; 494 | padding: 0.1em 0 0.1em 0.5em; 495 | color: white; 496 | font-weight: bold; 497 | font-size: 1.1em; 498 | text-shadow: 0 1px rgba(0, 0, 0, 0.5); 499 | } 500 | div.admonition ul, div.admonition ol, 501 | div.warning ul, div.warning ol, 502 | div.sidebar ul, div.sidebar ol { 503 | margin: 0.1em 0.5em 0.5em 3em; 504 | padding: 0; 505 | } 506 | 507 | 508 | /* Admonitions and sidebars only */ 509 | div.admonition, div.sidebar { 510 | border: 1px solid {{ theme_positive_dark }}; 511 | background-color: {{ theme_positive_light }}; 512 | } 513 | div.admonition p.admonition-title, 514 | div.sidebar p.sidebar-title { 515 | background-color: {{ theme_positive_medium }}; 516 | border-bottom: 1px solid {{ theme_positive_dark }}; 517 | } 518 | 519 | 520 | /* Warnings only */ 521 | div.warning { 522 | border: 1px solid {{ theme_negative_dark }}; 523 | background-color: {{ theme_negative_light }}; 524 | } 525 | div.warning p.admonition-title { 526 | background-color: {{ theme_negative_medium }}; 527 | border-bottom: 1px solid {{ theme_negative_dark }}; 528 | } 529 | 530 | 531 | /* Sidebars only */ 532 | div.sidebar { 533 | max-width: 200px; 534 | } 535 | 536 | 537 | 538 | div.versioninfo { 539 | margin: 1em 0 0 0; 540 | border: 1px 
solid {{ theme_lighter_gray }}; 541 | background-color: {{ theme_light_medium_color }}; 542 | padding: 8px; 543 | line-height: 1.3em; 544 | font-size: 0.9em; 545 | } 546 | 547 | .viewcode-back { 548 | font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 549 | 'Verdana', sans-serif; 550 | } 551 | 552 | div.viewcode-block:target { 553 | background-color: {{ theme_viewcode_bg }}; 554 | border-top: 1px solid {{ theme_viewcode_border }}; 555 | border-bottom: 1px solid {{ theme_viewcode_border }}; 556 | } 557 | 558 | dl { 559 | margin: 1em 0 2.5em 0; 560 | } 561 | 562 | /* Highlight target when you click an internal link */ 563 | dt:target { 564 | background: {{ theme_highlight }}; 565 | } 566 | /* Don't highlight whole divs */ 567 | div.highlight { 568 | background: transparent; 569 | } 570 | /* But do highlight spans (so search results can be highlighted) */ 571 | span.highlight { 572 | background: {{ theme_highlight }}; 573 | } 574 | 575 | div.footer { 576 | background-color: {{ theme_background }}; 577 | color: {{ theme_background_text }}; 578 | padding: 0 2em 2em 2em; 579 | clear: both; 580 | font-size: 0.8em; 581 | text-align: center; 582 | } 583 | 584 | p { 585 | margin: 0.8em 0 0.5em 0; 586 | } 587 | 588 | .section p img { 589 | margin: 1em 2em; 590 | } 591 | 592 | 593 | /* MOBILE LAYOUT -------------------------------------------------------------- */ 594 | 595 | @media screen and (max-width: 600px) { 596 | 597 | h1, h2, h3, h4, h5 { 598 | position: relative; 599 | } 600 | 601 | ul { 602 | padding-left: 1.75em; 603 | } 604 | 605 | div.bodywrapper a.headerlink, #indices-and-tables h1 a { 606 | color: {{ theme_almost_dirty_white }}; 607 | font-size: 80%; 608 | float: right; 609 | line-height: 1.8; 610 | position: absolute; 611 | right: -0.7em; 612 | visibility: inherit; 613 | } 614 | 615 | div.bodywrapper h1 a.headerlink, #indices-and-tables h1 a { 616 | line-height: 1.5; 617 | } 618 | 619 | pre { 620 | font-size: 0.7em; 621 | overflow: auto; 622 | 
word-wrap: break-word; 623 | white-space: pre-wrap; 624 | } 625 | 626 | div.related ul { 627 | height: 2.5em; 628 | padding: 0; 629 | text-align: left; 630 | } 631 | 632 | div.related ul li { 633 | clear: both; 634 | color: {{ theme_dark_color }}; 635 | padding: 0.2em 0; 636 | } 637 | 638 | div.related ul li:last-child { 639 | border-bottom: 1px dotted {{ theme_medium_color }}; 640 | padding-bottom: 0.4em; 641 | margin-bottom: 1em; 642 | width: 100%; 643 | } 644 | 645 | div.related ul li a { 646 | color: {{ theme_dark_color }}; 647 | padding-right: 0; 648 | } 649 | 650 | div.related ul li a:hover { 651 | background: inherit; 652 | color: inherit; 653 | } 654 | 655 | div.related ul li.right { 656 | clear: none; 657 | padding: 0.65em 0; 658 | margin-bottom: 0.5em; 659 | } 660 | 661 | div.related ul li.right a { 662 | color: {{ theme_white }}; 663 | padding-right: 0.8em; 664 | } 665 | 666 | div.related ul li.right a:hover { 667 | background-color: {{ theme_medium_color }}; 668 | } 669 | 670 | div.body { 671 | clear: both; 672 | min-width: 0; 673 | word-wrap: break-word; 674 | } 675 | 676 | div.bodywrapper { 677 | margin: 0 0 0 0; 678 | } 679 | 680 | div.sphinxsidebar { 681 | float: none; 682 | margin: 0; 683 | width: auto; 684 | } 685 | 686 | div.sphinxsidebar input[type="text"] { 687 | height: 2em; 688 | line-height: 2em; 689 | width: 70%; 690 | } 691 | 692 | div.sphinxsidebar input[type="submit"] { 693 | height: 2em; 694 | margin-left: 0.5em; 695 | width: 20%; 696 | } 697 | 698 | div.sphinxsidebar p.searchtip { 699 | background: inherit; 700 | margin-bottom: 1em; 701 | } 702 | 703 | div.sphinxsidebar ul li, div.sphinxsidebar p.topless { 704 | white-space: normal; 705 | } 706 | 707 | .bodywrapper img { 708 | display: block; 709 | margin-left: auto; 710 | margin-right: auto; 711 | max-width: 100%; 712 | } 713 | 714 | div.documentwrapper { 715 | float: none; 716 | } 717 | 718 | div.admonition, div.warning, pre, blockquote { 719 | margin-left: 0em; 720 | margin-right: 
0em; 721 | } 722 | 723 | .body p img { 724 | margin: 0; 725 | } 726 | 727 | #searchbox { 728 | background: transparent; 729 | } 730 | 731 | .related:not(:first-child) li { 732 | display: none; 733 | } 734 | 735 | .related:not(:first-child) li.right { 736 | display: block; 737 | } 738 | 739 | div.footer { 740 | padding: 1em; 741 | } 742 | 743 | .rtd_doc_footer .badge { 744 | float: none; 745 | margin: 1em auto; 746 | position: static; 747 | } 748 | 749 | .rtd_doc_footer .badge.revsys-inline { 750 | margin-right: auto; 751 | margin-bottom: 2em; 752 | } 753 | 754 | table.indextable { 755 | display: block; 756 | width: auto; 757 | } 758 | 759 | .indextable tr { 760 | display: block; 761 | } 762 | 763 | .indextable td { 764 | display: block; 765 | padding: 0; 766 | width: auto !important; 767 | } 768 | 769 | .indextable td dt { 770 | margin: 1em 0; 771 | } 772 | 773 | ul.search { 774 | margin-left: 0.25em; 775 | } 776 | 777 | ul.search li div.context { 778 | font-size: 90%; 779 | line-height: 1.1; 780 | margin-bottom: 1; 781 | margin-left: 0; 782 | } 783 | 784 | } 785 | -------------------------------------------------------------------------------- /docs/_themes/armstrong/theme-old.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = default 3 | stylesheet = rtd.css 4 | pygment_style = default 5 | show_sphinx = False 6 | 7 | [options] 8 | show_rtd = True 9 | 10 | white = #ffffff 11 | almost_white = #f8f8f8 12 | barely_white = #f2f2f2 13 | dirty_white = #eeeeee 14 | almost_dirty_white = #e6e6e6 15 | dirtier_white = #DAC6AF 16 | lighter_gray = #cccccc 17 | gray_a = #aaaaaa 18 | gray_9 = #999999 19 | light_gray = #888888 20 | gray_7 = #777777 21 | gray = #666666 22 | dark_gray = #444444 23 | gray_2 = #222222 24 | black = #111111 25 | light_color = #EDE4D8 26 | light_medium_color = #DDEAF0 27 | medium_color_link = #634320 28 | medium_color_link_hover = #261a0c 29 | dark_color = rgba(160, 109, 52, 1.0) 30 | 31 | h1 
= #1f3744 32 | h2 = #335C72 33 | h3 = #638fa6 34 | 35 | link_color = #335C72 36 | link_color_decoration = #99AEB9 37 | 38 | medium_color_hover = rgba(255, 255, 255, 0.25) 39 | medium_color = rgba(255, 255, 255, 0.5) 40 | green_highlight = #8ecc4c 41 | 42 | 43 | positive_dark = rgba(51, 77, 0, 1.0) 44 | positive_medium = rgba(102, 153, 0, 1.0) 45 | positive_light = rgba(102, 153, 0, 0.1) 46 | 47 | negative_dark = rgba(51, 13, 0, 1.0) 48 | negative_medium = rgba(204, 51, 0, 1.0) 49 | negative_light = rgba(204, 51, 0, 0.1) 50 | negative_text = #c60f0f 51 | 52 | ruler = #abc 53 | 54 | viewcode_bg = #f4debf 55 | viewcode_border = #ac9 56 | 57 | highlight = #ffe080 58 | 59 | code_background = rgba(0, 0, 0, 0.075) 60 | 61 | background = rgba(135, 57, 34, 1.0) 62 | background_link = rgba(212, 195, 172, 1.0) 63 | background_link_half = rgba(212, 195, 172, 0.5) 64 | background_text = rgba(212, 195, 172, 1.0) 65 | background_text_link = rgba(171, 138, 93, 1.0) 66 | -------------------------------------------------------------------------------- /docs/_themes/armstrong/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = default 3 | stylesheet = rtd.css 4 | pygment_style = default 5 | show_sphinx = False 6 | 7 | [options] 8 | show_rtd = True 9 | 10 | white = #ffffff 11 | almost_white = #f8f8f8 12 | barely_white = #f2f2f2 13 | dirty_white = #eeeeee 14 | almost_dirty_white = #e6e6e6 15 | dirtier_white = #dddddd 16 | lighter_gray = #cccccc 17 | gray_a = #aaaaaa 18 | gray_9 = #999999 19 | light_gray = #888888 20 | gray_7 = #777777 21 | gray = #666666 22 | dark_gray = #444444 23 | gray_2 = #222222 24 | black = #111111 25 | light_color = #e8ecef 26 | light_medium_color = #DDEAF0 27 | medium_color = #8ca1af 28 | medium_color_link = #86989b 29 | medium_color_link_hover = #a6b8bb 30 | dark_color = #465158 31 | 32 | h1 = #000000 33 | h2 = #465158 34 | h3 = #6c818f 35 | 36 | link_color = #444444 37 | link_color_decoration 
= #CCCCCC 38 | 39 | medium_color_hover = #697983 40 | green_highlight = #8ecc4c 41 | 42 | 43 | positive_dark = #609060 44 | positive_medium = #70a070 45 | positive_light = #e9ffe9 46 | 47 | negative_dark = #900000 48 | negative_medium = #b04040 49 | negative_light = #ffe9e9 50 | negative_text = #c60f0f 51 | 52 | ruler = #abc 53 | 54 | viewcode_bg = #f4debf 55 | viewcode_border = #ac9 56 | 57 | highlight = #ffe080 58 | 59 | code_background = #eeeeee 60 | 61 | background = #465158 62 | background_link = #ffffff 63 | background_link_half = #ffffff 64 | background_text = #eeeeee 65 | background_text_link = #86989b 66 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # rdflib documentation build configuration file, created by 4 | # sphinx-quickstart on Fri May 15 15:03:54 2009. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys 15 | import os 16 | import re 17 | 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | # sys.path.append(os.path.abspath("..")) 22 | sys.path.append(os.path.abspath("..")) 23 | 24 | # -- General configuration ----------------------------------------------------- 25 | 26 | # Add any Sphinx extension module names here, as strings. They can be extensions 27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
28 | # extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.doctest'] 29 | extensions = [ 30 | "sphinxcontrib.apidoc", 31 | "sphinx.ext.autodoc", 32 | # 'sphinx.ext.autosummary', 33 | "sphinx.ext.doctest", 34 | "sphinx.ext.intersphinx", 35 | "sphinx.ext.todo", 36 | "sphinx.ext.coverage", 37 | "sphinx.ext.ifconfig", 38 | "sphinx.ext.viewcode", 39 | ] 40 | 41 | apidoc_module_dir = "../rdflib_leveldb" 42 | apidoc_output_dir = "apidocs" 43 | autodoc_default_options = {"special-members": True} 44 | 45 | autosummary_generate = True 46 | 47 | # Add any paths that contain templates here, relative to this directory. 48 | templates_path = ["_templates"] 49 | 50 | # epydoc_mapping = { 51 | # '/_static/api/': [r'rdflib\.'], 52 | # } 53 | 54 | # The suffix of source filenames. 55 | source_suffix = ".rst" 56 | 57 | # The encoding of source files. 58 | source_encoding = "utf-8" 59 | 60 | # The master toctree document. 61 | master_doc = "index" 62 | 63 | # General information about the project. 64 | project = "rdflib_leveldb" 65 | copyright = "2009 - 2021, RDFLib Team" 66 | 67 | # The version info for the project you're documenting, acts as replacement for 68 | # |version| and |release|, also used in various other places throughout the 69 | # built documents. 70 | 71 | 72 | # Find version. We have to do this because we can't import it in Python 3 until 73 | # it's been automatically converted in the setup process. 74 | def find_version(filename): 75 | _version_re = re.compile(r'__version__ = "(.*)"') 76 | for line in open(filename): 77 | version_match = _version_re.match(line) 78 | if version_match: 79 | return version_match.group(1) 80 | 81 | 82 | # The full version, including alpha/beta/rc tags. 83 | release = find_version("../rdflib_leveldb/__init__.py") 84 | # The short X.Y version. 85 | version = re.sub(r"([0-9]+\.[0-9]+)\..*", r"\1", release) 86 | 87 | # The language for content autogenerated by Sphinx.
Refer to documentation 88 | # for a list of supported languages. 89 | # language = None 90 | 91 | # There are two options for replacing |today|: either, you set today to some 92 | # non-false value, then it is used: 93 | # today = '' 94 | # Else, today_fmt is used as the format for a strftime call. 95 | # today_fmt = '%B %d, %Y' 96 | 97 | # List of documents that shouldn't be included in the build. 98 | # unused_docs = [] 99 | 100 | # List of directories, relative to source directory, that shouldn't be searched 101 | # for source files. 102 | exclude_trees = ["_build", "draft"] 103 | 104 | # The reST default role (used for this markup: `text`) to use for all documents. 105 | default_role = "py:obj" 106 | 107 | # If true, '()' will be appended to :func: etc. cross-reference text. 108 | add_function_parentheses = True 109 | 110 | # If true, the current module name will be prepended to all description 111 | # unit titles (such as .. function::). 112 | add_module_names = True 113 | 114 | # If true, sectionauthor and moduleauthor directives will be shown in the 115 | # output. They are ignored by default. 116 | # show_authors = False 117 | 118 | # The name of the Pygments (syntax highlighting) style to use. 119 | pygments_style = "sphinx" 120 | 121 | # A list of ignored prefixes for module index sorting. 122 | # modindex_common_prefix = [] 123 | 124 | 125 | # -- Options for HTML output --------------------------------------------------- 126 | 127 | # The theme to use for HTML and HTML Help pages. Major themes that come with 128 | # Sphinx are currently 'default' and 'sphinxdoc'. 129 | html_theme = "armstrong" 130 | 131 | 132 | # Theme options are theme-specific and customize the look and feel of a theme 133 | # further. For a list of options available for each theme, see the 134 | # documentation. 135 | # html_theme_options = {} 136 | 137 | # Add any paths that contain custom themes here, relative to this directory. 
138 | html_theme_path = [ 139 | "_themes", 140 | ] 141 | 142 | # The name for this set of Sphinx documents. If None, it defaults to 143 | # " v documentation". 144 | # html_title = None 145 | 146 | # A shorter title for the navigation bar. Default is the same as html_title. 147 | # html_short_title = None 148 | 149 | # The name of an image file (relative to this directory) to place at the top 150 | # of the sidebar. 151 | # html_logo = None 152 | html_logo = "_static/RDFlib.png" 153 | 154 | # The name of an image file (within the static path) to use as favicon of the 155 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 156 | # pixels large. 157 | html_favicon = "_static/RDFlib.ico" 158 | 159 | # Add any paths that contain custom static files (such as style sheets) here, 160 | # relative to this directory. They are copied after the builtin static files, 161 | # so a file named "default.css" will overwrite the builtin "default.css". 162 | html_static_path = ["_static"] 163 | 164 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 165 | # using the given strftime format. 166 | # html_last_updated_fmt = '%b %d, %Y' 167 | 168 | # If true, SmartyPants will be used to convert quotes and dashes to 169 | # typographically correct entities. 170 | # html_use_smartypants = True 171 | 172 | # Custom sidebar templates, maps document names to template names. 173 | # html_sidebars = {} 174 | 175 | # Additional templates that should be rendered to pages, maps page names to 176 | # template names. 177 | # html_additional_pages = {} 178 | 179 | # If false, no module index is generated. 180 | # html_use_modindex = True 181 | 182 | # If false, no index is generated. 183 | # html_use_index = True 184 | 185 | # If true, the index is split into individual pages for each letter. 186 | # html_split_index = False 187 | 188 | # If true, links to the reST sources are added to the pages. 
189 | # html_show_sourcelink = True 190 | 191 | # If true, an OpenSearch description file will be output, and all pages will 192 | # contain a tag referring to it. The value of this option must be the 193 | # base URL from which the finished HTML is served. 194 | # html_use_opensearch = '' 195 | 196 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 197 | # html_file_suffix = '' 198 | 199 | # Output file base name for HTML help builder. 200 | htmlhelp_basename = "rdflibleveldbdoc" 201 | 202 | 203 | # -- Options for LaTeX output -------------------------------------------------- 204 | 205 | # The paper size ('letter' or 'a4'). 206 | # latex_paper_size = 'letter' 207 | 208 | # The font size ('10pt', '11pt' or '12pt'). 209 | # latex_font_size = '10pt' 210 | 211 | # Grouping the document tree into LaTeX files. List of tuples 212 | # (source start file, target name, title, author, documentclass [howto/manual]). 213 | # latex_documents = [ 214 | # ("index", "rdflib.tex", "rdflib Documentation", "RDFLib Team", "manual"), 215 | # ] 216 | 217 | # The name of an image file (relative to this directory) to place at the top of 218 | # the title page. 219 | # latex_logo = None 220 | 221 | # For "manual" documents, if this is true, then toplevel headings are parts, 222 | # not chapters. 223 | # latex_use_parts = False 224 | 225 | # Additional stuff for the LaTeX preamble. 226 | # latex_preamble = '' 227 | 228 | # Documents to append as an appendix to all manuals. 229 | # latex_appendices = [] 230 | 231 | # If false, no module index is generated. 232 | # latex_use_modindex = True 233 | 234 | 235 | # Example configuration for intersphinx: refer to the Python standard library. 
236 | intersphinx_mapping = { 237 | "python": ("https://docs.python.org/3.7", None), 238 | } 239 | 240 | html_experimental_html5_writer = True 241 | 242 | needs_sphinx = "4.1.2" 243 | -------------------------------------------------------------------------------- /docs/docs.rst: -------------------------------------------------------------------------------- 1 | .. _docs: 2 | 3 | ======================================== 4 | Writing Documentation for rdflib-leveldb 5 | ======================================== 6 | 7 | 8 | These docs are generated with Sphinx. 9 | 10 | Sphinx makes it very easy to pull in doc-strings from modules, 11 | classes, methods, etc. When writing doc-strings, special reST fields 12 | can be used to annotate parameters, return-types, etc. This makes for 13 | pretty API docs: 14 | 15 | http://sphinx-doc.org/domains.html?highlight=param#info-field-lists 16 | 17 | Building 18 | -------- 19 | 20 | To build you must have the ``sphinx`` package installed: 21 | 22 | .. code-block:: bash 23 | 24 | pip install sphinx 25 | 26 | See the documentation's full set of requirements in the ``sphinx-requirements.txt`` file within the :file:`docs/` directory. 27 | 28 | Once you have all the requirements installed you can run this command in the rdflib root directory: 29 | 30 | .. code-block:: bash 31 | 32 | python setup.py build_sphinx 33 | 34 | Docs will be generated in :file:`build/sphinx/html/` and API documentation, generated from doc-strings, will be placed in :file:`docs/apidocs/`. 35 | 36 | API Docs 37 | -------- 38 | 39 | API Docs are automatically generated with ``sphinx-apidoc``: 40 | 41 | .. code-block:: bash 42 | 43 | sphinx-apidoc -f -d 10 -o docs/apidocs/ rdflib_leveldb examples 44 | -------------------------------------------------------------------------------- /docs/gettingstarted.rst: -------------------------------------------------------------------------------- 1 | ..
_gettingstarted: 2 | 3 | ============================================= 4 | Getting started with the LevelDB RDFLib Store 5 | ============================================= 6 | 7 | Installation 8 | ============ 9 | 10 | rdflib-leveldb is open source and is maintained in a 11 | `GitHub `_ repository. 12 | 13 | The best way to install rdflib-leveldb is to use ``pip`` (sudo as required): 14 | 15 | .. code-block :: bash 16 | 17 | $ pip install rdflib-leveldb 18 | 19 | If you want the latest code to run, clone the master branch of the GitHub repo and use that or you can ``pip install`` 20 | directly from GitHub: 21 | 22 | .. code-block :: bash 23 | 24 | $ pip install git+https://github.com/RDFLib/rdflib-leveldb.git@master#egg=rdflib_leveldb 25 | 26 | 27 | Support 28 | ======= 29 | Usage support is available via questions tagged with ``[rdflib]`` on `StackOverflow `__ 30 | and development support, notifications and detailed discussion through the rdflib-dev group (mailing list): 31 | 32 | http://groups.google.com/group/rdflib-dev 33 | 34 | If you notice an bug or want to request an enhancement, please do so via our Issue Tracker in Github: 35 | 36 | ``_ 37 | 38 | How it all works 39 | ================ 40 | The primary interface that RDFLib exposes for working with RDF is a 41 | :class:`~rdflib.graph.Graph`. 42 | 43 | RDFLib graphs are un-sorted containers; they have ordinary ``set`` 44 | operations (e.g. :meth:`~rdflib.Graph.add` to add a triple) plus 45 | methods that search triples and return them in arbitrary order. 46 | 47 | RDFLib graphs also redefine certain built-in Python methods in order 48 | to behave in a predictable way: they `emulate container types 49 | `_ and 50 | are best thought of as a set of 3-item tuples ("triples", in RDF-speak): 51 | 52 | .. code-block:: text 53 | 54 | [ 55 | (subject0, predicate0, object0), 56 | (subject1, predicate1, object1), 57 | ... 
58 | (subjectN, predicateN, objectN) 59 | ] 60 | 61 | A tiny example 62 | ============== 63 | 64 | .. code-block:: python 65 | 66 | from rdflib import plugin, Graph, URIRef 67 | from rdflib.store import Store 68 | 69 | # Create LevelDB-backed a Graph 70 | store = plugin.get("LevelDB", Store)(identifier=URIRef("rdflib_leveldb_test")) 71 | g = Graph(store) 72 | g.open(/tmp/leveldbtest', create=True) 73 | 74 | # Parse in an RDF file hosted on the Internet 75 | g.parse("http://www.w3.org/People/Berners-Lee/card") 76 | 77 | # Loop through each triple in the graph (subj, pred, obj) 78 | for subj, pred, obj in g: 79 | # Check if there is at least one triple in the Graph 80 | if (subj, pred, obj) not in g: 81 | raise Exception("It better be!") 82 | 83 | # Print the number of "triples" in the Graph 84 | print(f"Graph g has {len(g)} statements.") 85 | # Prints: Graph g has 86 statements. 86 | 87 | # Print out the entire Graph in the RDF Turtle format 88 | print(g.serialize(format="turtle")) 89 | 90 | Here a :class:`~rdflib.graph.Graph` is created and then an RDF file online, Tim Berners-Lee's social network details, is 91 | parsed into that graph. The ``print()`` statement uses the ``len()`` function to count the number of triples in the 92 | graph. 93 | 94 | A more extensive example 95 | ======================== 96 | 97 | .. code-block:: python 98 | 99 | from rdflib import plugin, Graph, Literal, RDF, URIRef 100 | from rdflib.store import Store 101 | # rdflib knows about quite a few popular namespaces, like W3C ontologies, schema.org etc. 102 | from rdflib.namespace import FOAF , XSD 103 | 104 | # Create a LevelDB-backed Graph 105 | store = plugin.get("LevelDB", Store)(identifier=URIRef("rdflib_leveldb_test")) 106 | g = Graph(store) 107 | g.open(/tmp/leveldbtest', create=True) 108 | 109 | # Create an RDF URI node to use as the subject for multiple triples 110 | donna = URIRef("http://example.org/donna") 111 | 112 | # Add triples using store's add() method. 
113 | g.add((donna, RDF.type, FOAF.Person)) 114 | g.add((donna, FOAF.nick, Literal("donna", lang="en"))) 115 | g.add((donna, FOAF.name, Literal("Donna Fales"))) 116 | g.add((donna, FOAF.mbox, URIRef("mailto:donna@example.org"))) 117 | 118 | # Add another person 119 | ed = URIRef("http://example.org/edward") 120 | 121 | # Add triples using store's add() method. 122 | g.add((ed, RDF.type, FOAF.Person)) 123 | g.add((ed, FOAF.nick, Literal("ed", datatype=XSD.string))) 124 | g.add((ed, FOAF.name, Literal("Edward Scissorhands"))) 125 | g.add((ed, FOAF.mbox, Literal("e.scissorhands@example.org", datatype=XSD.anyURI))) 126 | 127 | # Iterate over triples in store and print them out. 128 | print("--- printing raw triples ---") 129 | for s, p, o in g: 130 | print((s, p, o)) 131 | 132 | # For each foaf:Person in the store, print out their mbox property's value. 133 | print("--- printing mboxes ---") 134 | for person in g.subjects(RDF.type, FOAF.Person): 135 | for mbox in g.objects(person, FOAF.mbox): 136 | print(mbox) 137 | 138 | # Bind the FOAF namespace to a prefix for more readable output 139 | g.bind("foaf", FOAF) 140 | 141 | # print all the data in the Notation3 format 142 | print("--- printing mboxes ---") 143 | print(g.serialize(format='n3')) 144 | 145 | 146 | A SPARQL query example 147 | ====================== 148 | 149 | .. code-block:: python 150 | 151 | from rdflib import plugin, Graph, URIRef 152 | from rdflib.store import Store 153 | 154 | # Create a LevelDB-backed Graph, parse in Internet data 155 | store = plugin.get("LevelDB", Store)(identifier=URIRef("rdflib_leveldb_test")) 156 | g = Graph(store) 157 | g.open(/tmp/leveldbtest', create=True) 158 | g.parse("http://www.w3.org/People/Berners-Lee/card") 159 | 160 | # Query the data in g using SPARQL 161 | # This query returns the 'name' of all ``foaf:Person`` instances 162 | q = """ 163 | PREFIX foaf: 164 | 165 | SELECT ?name 166 | WHERE { 167 | ?p rdf:type foaf:Person . 168 | 169 | ?p foaf:name ?name . 
170 | } 171 | """ 172 | 173 | # Apply the query to the graph and iterate through results 174 | for r in g.query(q): 175 | print(r["name"]) 176 | 177 | # prints: Timothy Berners-Lee 178 | 179 | 180 | 181 | An example 182 | ========== 183 | There are more :doc:`examples ` in the :file:`examples` folder in the source distribution. 184 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. rdflib_leveldb documentation documentation master file 2 | 3 | ======================== 4 | rdflib-leveldb |release| 5 | ======================== 6 | 7 | 8 | 9 | Getting started 10 | --------------- 11 | If you have never used the LevelDB RDFLib Store, the following will help get you started: 12 | 13 | .. toctree:: 14 | :maxdepth: 1 15 | 16 | gettingstarted 17 | Examples 18 | 19 | 20 | Reference 21 | --------- 22 | The nitty-gritty details of everything. 23 | 24 | API reference: 25 | 26 | .. toctree:: 27 | :maxdepth: 1 28 | 29 | apidocs/modules 30 | 31 | 32 | For developers 33 | -------------- 34 | .. toctree:: 35 | :maxdepth: 1 36 | 37 | docs 38 | universal_rdf_store_interface 39 | 40 | Source Code 41 | ----------- 42 | The rdflib-leveldb source code is hosted on GitHub at ``__ where you can lodge Issues and 43 | create Pull Requests to help improve this community project! 44 | 45 | The RDFlib organisation on GitHub at ``__ maintains this package and a number of other RDF 46 | and RDFlib-related packaged that you might also find useful. 47 | 48 | 49 | Further help & Contact 50 | ---------------------- 51 | 52 | If you would like more help with using rdflib_leveldb, rather than developing it, please post a question on StackOverflow using 53 | the tag ``[rdflib]``. 
A list of existing ``[rdflib]`` tagged questions is kept there at: 54 | 55 | * ``__ 56 | 57 | You might also like to join rdflib's dev mailing list: ``__ 58 | 59 | The chat is available at `gitter `_ or via matrix `#RDFLib_rdflib:gitter.im `_. 60 | 61 | 62 | 63 | Glossary 64 | -------- 65 | 66 | Here are a few RDF and Python terms referred to in this documentation. They are linked to wherever they occur. 67 | 68 | .. glossary:: 69 | 70 | functional property 71 | Properties than can only occur once for a resource, i.e. for any relation (triple, in RDF) ``x p y``, 72 | if ``p`` is functional, for any individual ``x``, there can be at most one individual ``y``. 73 | 74 | OWL 75 | The OWL 2 Web Ontology Language, informally OWL 2 or just OWL, is an ontology language for the Semantic Web 76 | with formally defined meaning. OWL 2 ontologies provide classes, properties, individuals, and data values and 77 | are stored as Semantic Web documents. OWL 2 ontologies can be used along with information written in RDF, and 78 | OWL 2 ontologies themselves are primarily exchanged as RDF documents. See the `RDF 1.1 Concepts and Abstract 79 | Syntax `_ for more info. 80 | 81 | RDF 82 | The Resource Description Framework (RDF) is a framework for representing information in the Web. RDF data is 83 | stored in graphs that are sets of subject-predicate-object triples, where the elements may be IRIs, blank nodes, 84 | or datatyped literals. See the `OWL 2 Web Ontology Language 85 | Document Overview `_ for more info. 
86 | 87 | 88 | named graph 89 | A named graph 90 | 91 | context 92 | A context 93 | 94 | configuration 95 | A configuration 96 | -------------------------------------------------------------------------------- /docs/sphinx-requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx==4.3.1 2 | sphinxcontrib-apidoc 3 | git+https://github.com/gniezen/n3pygments.git 4 | -------------------------------------------------------------------------------- /docs/universal_rdf_store_interface.rst: -------------------------------------------------------------------------------- 1 | .. _univrdfstore: 2 | 3 | =============================== 4 | A Universal RDF Store Interface 5 | =============================== 6 | 7 | This document attempts to summarize some fundamental components of an RDF store. The motivation is to outline a standard set of interfaces for providing the support needed to persist an `RDF Graph`_ in a way that is universal and not tied to any specific implementation. 8 | 9 | For the most part, the interface adheres to the core RDF model and uses terminology that is consistent with the RDF Model specifications. However, this suggested interface also extends an RDF store with additional requirements necessary to facilitate those aspects of `Notation 3`_ that go beyond the RDF model to provide a framework for `First Order Predicate Logic`_ processing and persistence. 10 | 11 | .. _RDF Graph: http://www.w3.org/TR/rdf-concepts/#dfn-rdf-graph 12 | .. _Notation 3: http://www.w3.org/2000/10/swap/Primer 13 | .. _First Order Predicate Logic: http://en.wikipedia.org/wiki/First-order_predicate_logic 14 | 15 | Terminology 16 | =========== 17 | 18 | .. topic:: **Context** 19 | 20 | A named, unordered set of statements (that could also be called a sub-graph). The :term:`named graph` `literature`__ and `ontology`__ are relevant to this concept. 
The term :term:`context` could be thought of as either the sub-graph itself or the relationship between an RDF triple and a sub-graph in which it is found (this latter is how the term context is used in the `Notation 3 Design Issues page`_). 21 | 22 | It is worth noting that the concept of logically grouping `triples`__ within an addressable 'set' or 'subgraph' is just barely beyond the scope of the RDF model. The RDF model defines a graph to be an arbitrary collection of triples and the semantics of these triples --- but doesn't give guidance on how to address such arbitrary collections in a consistent manner. Although a collection of triples can be thought of as a resource itself, the association between a triple and the collection (of which it is a part) is not covered. `Public RDF`_ is an example of an attempt to formally model this relationship - and includes one other unrelated extension: Articulated Text 23 | 24 | .. __: http://www.w3.org/2004/03/trix/ 25 | .. __: http://metacognition.info/Triclops/?xslt=Triclops.xslt&query=type(list(rdfs:Class,owl:Class,rdf:Property))&queryType=Graph&remoteGraph=http://www.w3.org/2004/03/trix/rdfg-1/ 26 | .. __: http://www.w3.org/TR/rdf-concepts/#section-triples 27 | .. _Notation 3 Design Issues page: http://www.w3.org/DesignIssues/Notation3.html 28 | .. _Public RDF: http://laurentszyster.be/blog/public-rdf/ 29 | 30 | .. topic:: **Conjunctive Graph** 31 | 32 | This refers to the 'top-level' Graph. It is the aggregation of all the contexts within it and is also the appropriate, absolute boundary for `closed world assumptions`__ / models. This distinction is the low-hanging fruit of RDF along the path to the semantic web and most of its value is in (corporate/enterprise) real-world problems: 33 | 34 | .. pull-quote:: 35 | 36 | There are at least two situations where the closed world assumption is used. The first is where it is assumed that a knowledge base contains all relevant facts. This is common in corporate databases. 
That is, the information it contains is assumed to be complete 37 | 38 | From a store perspective, closed world assumptions also provide the benefit of better query response times, due to the explicit closed world boundaries. Closed world boundaries can be made transparent by federated queries that assume each :class:`ConjunctiveGraph` is a section of a larger, unbounded universe. So a closed world assumption does not preclude you from an open world assumption. 39 | 40 | For the sake of persistence, Conjunctive Graphs must be distinguished by identifiers (which may not necessarily be RDF `identifiers`__ or may be an RDF identifier normalized - SHA1/MD5 perhaps - for database naming purposes) that could be referenced to indicate conjunctive queries (queries made across the entire conjunctive graph) or appear as nodes in asserted statements. In this latter case, such statements could be interpreted as being made about the entire 'known' universe. For example: 41 | 42 | .. code-block:: xml 43 | 44 | rdf:type :ConjunctiveGraph 45 | rdf:type log:Truth 46 | :persistedBy :MySQL 47 | 48 | .. __: http://cs.wwc.edu/~aabyan/Logic/CWA.html 49 | .. __: http://www.w3.org/2002/07/rdf-identifer-terminology/ 50 | 51 | .. topic:: **Quoted Statement** 52 | 53 | A statement that isn't asserted but is referred to in some manner. Most often, this happens when we want to make a statement about another statement (or set of statements) without necessarily saying these quoted statements (are true). For example: 54 | 55 | .. code-block:: text 56 | 57 | Chimezie said "higher-order statements are complicated" 58 | 59 | Which can be written (in N3) as: 60 | 61 | .. code-block:: n3 62 | 63 | :chimezie :said {:higherOrderStatements rdf:type :complicated} 64 | 65 | .. topic:: **Formula** 66 | 67 | A context whose statements are quoted or hypothetical. 68 | 69 | Context quoting can be thought of as very similar to `reification`__. 
The main difference is that quoted statements are not asserted or considered as statements of truth about the universe and can be referenced as a group: a hypothetical RDF Graph 70 | 71 | .. __: http://www.w3.org/TR/rdf-mt/#Reif 72 | 73 | .. topic:: **Universal Quantifiers / Variables** 74 | 75 | (relevant references): 76 | 77 | * OWL `Definition`__ of `SWRL`__. 78 | * SWRL/RuleML `Variable`__ 79 | 80 | .. __: http://www.w3.org/Submission/SWRL/swrl.owl 81 | .. __: http://www.w3.org/Submission/SWRL/ 82 | .. __: http://www.w3.org/Submission/SWRL/#owls_Variable 83 | 84 | .. topic:: **Terms** 85 | 86 | Terms are the kinds of objects that can appear in a quoted/asserted triple. 87 | 88 | This includes those that are core to RDF: 89 | 90 | * Blank Nodes 91 | * URI References 92 | * Literals (which consist of a literal value, datatype and language tag) 93 | 94 | Those that extend the RDF model into N3: 95 | 96 | * Formulae 97 | * Universal Quantifications (Variables) 98 | 99 | And those that are primarily for matching against 'Nodes' in the underlying Graph: 100 | 101 | * REGEX Expressions 102 | * Date Ranges 103 | * Numerical Ranges 104 | 105 | .. topic:: **Nodes** 106 | 107 | Nodes are a subset of the Terms that the underlying store actually persists. The set of such Terms depends on whether or not the store is formula-aware. Stores that aren't formula-aware would only persist those terms core to the RDF Model, and those that are formula-aware would be able to persist the N3 extensions as well. However, utility terms that only serve the purpose for matching nodes by term-patterns probably will only be terms and not nodes. 108 | 109 | The set of nodes of an RDF graph is the set of subjects and objects of triples in the graph. 110 | 111 | .. topic:: **Context-aware** 112 | 113 | An RDF store capable of storing statements within contexts is considered context-aware. 
Essentially, such a store is able to partition the RDF model it represents into individual, named, and addressable sub-graphs. 114 | 115 | .. topic:: **Formula-aware** 116 | 117 | An RDF store capable of distinguishing between statements that are asserted and statements that are quoted is considered formula-aware. 118 | 119 | Such a store is responsible for maintaining this separation and ensuring that queries against the entire model (the aggregation of all the contexts - specified by not limiting a 'query' to a specifically name context) do not include quoted statements. Also, it is responsible for distinguishing universal quantifiers (variables). 120 | 121 | .. note:: These 2 additional concepts (formulae and variables) must be thought of as core extensions and distinguishable from the other terms of a triple (for the sake of the persistence rountrip - at the very least). It's worth noting that the 'scope' of universal quantifiers (variables) and existential quantifiers (BNodes) is the formula (or context - to be specific) in which their statements reside. Beyond this, a Formula-aware store behaves the same as a Context-aware store. 122 | 123 | .. topic:: **Conjunctive Query** 124 | 125 | Any query that doesn't limit the store to search within a named context only. Such a query expects a context-aware store to search the entire asserted universe (the conjunctive graph). A formula-aware store is expected not to include quoted statements when matching such a query. 126 | 127 | .. topic:: **N3 Round Trip** 128 | 129 | This refers to the requirements on a formula-aware RDF store's persistence mechanism necessary for it to be properly populated by a N3 parser and rendered as syntax by a N3 serializer. 130 | 131 | .. topic:: **Transactional Store** 132 | 133 | An RDF store capable of providing transactional integrity to the RDF operations performed on it. 
134 | 135 | Interpreting Syntax 136 | =================== 137 | 138 | The following Notation 3 `document`__: 139 | 140 | .. code-block:: n3 141 | 142 | {?x a :N3Programmer} => {?x :has [a :Migraine]} 143 | 144 | Could cause the following statements to be asserted in the store: 145 | 146 | .. code-block:: n3 147 | 148 | _:a log:implies _:b 149 | 150 | This statement would be asserted in the partition associated with quoted statements (in a formula named ``_:a``) 151 | 152 | .. code-block:: n3 153 | 154 | ?x rdf:type :N3Programmer 155 | 156 | Finally, these statements would be asserted in the same partition (in a formula named _:b) 157 | 158 | .. code-block:: n3 159 | 160 | ?x :has _:c 161 | 162 | _:c rdf:type :Migranie 163 | 164 | .. __: http://metacognition.info/Triclops/?xslt=Triclops.xslt&query=log:N3Document&queryType=Triple&remoteGraph=http://www.w3.org/2000/10/swap/log# 165 | 166 | Formulae and Variables as Terms 167 | =============================== 168 | Formulae and variables are distinguishable from URI references, Literals, and BNodes by the following syntax: 169 | 170 | .. code-block:: text 171 | 172 | { .. } - Formula ?x - Variable 173 | 174 | They must also be distinguishable in persistence to ensure they can be round-tripped. 175 | 176 | .. note:: There are a number of other issues regarding the `persisting of N3 terms `_. 177 | 178 | Database Management 179 | =================== 180 | 181 | An RDF store should provide standard interfaces for the management of database connections. Such interfaces are standard to most database management systems (Oracle, MySQL, Berkeley DB, Postgres, etc..) 182 | 183 | The following methods are defined to provide this capability (see below for description of the :term:`configuration` string): 184 | 185 | .. automethod:: rdflib.store.Store.open 186 | 187 | .. automethod:: rdflib.store.Store.close 188 | 189 | .. 
automethod:: rdflib.store.Store.destroy 190 | 191 | The *configuration* string is understood by the store implementation and represents all the parameters needed to locate an individual instance of a store. This could be similar to an ODBC string or in fact be an ODBC string, if the connection protocol to the underlying database is ODBC. The :meth:`open` function needs to fail intelligently in order to clearly express that a store (identified by the given configuration string) already exists or that there is no store (at the location specified by the configuration string) depending on the value of :keyword:`create`. 192 | 193 | Triple Interfaces 194 | ================= 195 | An RDF store could provide a standard set of interfaces for the manipulation, management, and/or retrieval of its contained triples (asserted or quoted): 196 | 197 | .. automethod:: rdflib.store.Store.add 198 | 199 | .. automethod:: rdflib.store.Store.remove 200 | 201 | .. automethod:: rdflib.store.Store.triples 202 | 203 | .. note:: The :meth:`triples` method can be thought of as the primary mechanism for producing triples with nodes that match the corresponding terms in the *(s, p, o)* term pattern provided. The term pattern ``(None, None, None)`` matches all nodes. 204 | 205 | .. automethod:: rdflib.store.Store.__len__ 206 | 207 | 208 | Formula / Context Interfaces 209 | ============================ 210 | 211 | These interfaces work on contexts and formulae (for stores that are formula-aware) interchangeably. 212 | 213 | .. automethod:: rdflib.graph.ConjunctiveGraph.contexts 214 | 215 | .. automethod:: rdflib.graph.ConjunctiveGraph.remove_context 216 | 217 | Interface Test Cases 218 | ==================== 219 | 220 | Basic 221 | ------------------------- 222 | 223 | Tests parsing, triple patterns, triple pattern removes, size, contextual removes 224 | 225 | Source Graph 226 | ^^^^^^^^^^^^^ 227 | 228 | .. code-block:: n3 229 | 230 | @prefix rdf: . 231 | @prefix rdfs: . 232 | @prefix : . 
233 | {:a :b :c; a :foo} => {:a :d :c} . 234 | _:foo a rdfs:Class . 235 | :a :d :c. 236 | 237 | Test code 238 | ^^^^^^^^^ 239 | 240 | .. code-block:: python 241 | 242 | implies = URIRef("http://www.w3.org/2000/10/swap/log#implies") 243 | a = URIRef('http://test/a') 244 | b = URIRef('http://test/b') 245 | c = URIRef('http://test/c') 246 | d = URIRef('http://test/d') 247 | for s,p,o in g.triples((None,implies,None)): 248 | formulaA = s 249 | formulaB = o 250 | 251 | #contexts test 252 | assert len(list(g.contexts()))==3 253 | 254 | #contexts (with triple) test 255 | assert len(list(g.contexts((a,d,c))))==2 256 | 257 | #triples test cases 258 | assert type(list(g.triples((None,RDF.type,RDFS.Class)))[0][0]) == BNode 259 | assert len(list(g.triples((None,implies,None))))==1 260 | assert len(list(g.triples((None,RDF.type,None))))==3 261 | assert len(list(g.triples((None,RDF.type,None),formulaA)))==1 262 | assert len(list(g.triples((None,None,None),formulaA)))==2 263 | assert len(list(g.triples((None,None,None),formulaB)))==1 264 | assert len(list(g.triples((None,None,None))))==5 265 | assert len(list(g.triples((None,URIRef('http://test/d'),None),formulaB)))==1 266 | assert len(list(g.triples((None,URIRef('http://test/d'),None))))==1 267 | 268 | #Remove test cases 269 | g.remove((None,implies,None)) 270 | assert len(list(g.triples((None,implies,None))))==0 271 | assert len(list(g.triples((None,None,None),formulaA)))==2 272 | assert len(list(g.triples((None,None,None),formulaB)))==1 273 | g.remove((None,b,None),formulaA) 274 | assert len(list(g.triples((None,None,None),formulaA)))==1 275 | g.remove((None,RDF.type,None),formulaA) 276 | assert len(list(g.triples((None,None,None),formulaA)))==0 277 | g.remove((None,RDF.type,RDFS.Class)) 278 | 279 | #remove_context tests 280 | formulaBContext=Context(g,formulaB) 281 | g.remove_context(formulaB) 282 | assert len(list(g.triples((None,RDF.type,None))))==2 283 | assert len(g)==3 assert len(formulaBContext)==0 284 | 
g.remove((None,None,None)) 285 | assert len(g)==0 286 | 287 | 288 | Formula and Variables Test 289 | -------------------------- 290 | 291 | Source Graph 292 | ^^^^^^^^^^^^ 293 | 294 | .. code-block:: n3 295 | 296 | @prefix rdf: . 297 | @prefix rdfs: . 298 | @prefix : . 299 | {?x a rdfs:Class} => {?x a :Klass}. 300 | 301 | Test Code 302 | ^^^^^^^^^ 303 | 304 | .. code-block:: python 305 | 306 | implies = URIRef("http://www.w3.org/2000/10/swap/log#implies") 307 | klass = URIRef('http://test/Klass') 308 | for s,p,o in g.triples((None,implies,None)): 309 | formulaA = s 310 | formulaB = o 311 | assert type(formulaA) == Formula 312 | assert type(formulaB) == Formula 313 | for s,p,o in g.triples((None,RDF.type,RDFS.Class)),formulaA): 314 | assert type(s) == Variable 315 | for s,p,o in g.triples((None,RDF.type,klass)),formulaB): 316 | assert type(s) == Variable 317 | 318 | Transactional Tests 319 | ------------------- 320 | 321 | To be instantiated. 322 | 323 | Additional Terms to Model 324 | ========================= 325 | These are a list of additional kinds of RDF terms (all of which are special Literals) 326 | 327 | * RegExLiteral - a REGEX string which can be used in any term slot in order to match by applying the Regular Expression to statements in the underlying graph. 328 | * Date (could provide some utility functions for date manipulation / serialization, etc..) 329 | * DateRange 330 | 331 | Namespace Management Interfaces 332 | =============================== 333 | 334 | The following namespace management interfaces (defined in Graph) could be implemented in the RDF store. Currently, they exist as stub methods of :class:`~rdflib.store.Store` and are defined in the store subclasses (e.g. :class:`~rdflib.store.IOMemory`, :class:`~rdflib.store.AbstractSQLStore`): 335 | 336 | .. automethod:: rdflib.store.Store.bind 337 | 338 | .. automethod:: rdflib.store.Store.prefix 339 | 340 | .. automethod:: rdflib.store.Store.namespace 341 | 342 | .. 
automethod:: rdflib.store.Store.namespaces 343 | 344 | Open issues 345 | =========== 346 | Does the Store interface need to have an identifier property or can we keep that at the Graph level? 347 | 348 | The Store implementation needs a mechanism to distinguish between triples (quoted or asserted) in ConjunctiveGraphs (which are mutually exclusive universes in systems that make closed world assumptions - and queried separately). This is the separation that the store identifier provides. This is different from the name of a context within a ConjunctiveGraph (or the default context of a conjunctive graph). I tried to diagram the logical separation of ConjunctiveGraphs, SubGraphs and QuotedGraphs in this diagram 349 | 350 | .. image:: _static/ContextHierarchy.png 351 | 352 | An identifier of ``None`` can be used to indicate the store (aka `all contexts`) in methods such as :meth:`triples`, :meth:`__len__`, etc. This works as long as we're only dealing with one Conjunctive Graph at a time -- which may not always be the case. 353 | 354 | Is there any value in persisting terms that lie outside N3 (RegExLiteral,Date,etc..)? 355 | 356 | Potentially, not sure yet. 357 | 358 | Should a conjunctive query always return quads instead of triples? It would seem so, since knowing the context that produced a triple match is an essential aspect of query construction / optimization. Or if having the triples function yield/produce different length tuples is problematic, could an additional - and slightly redundant - interface be introduced?: 359 | 360 | .. automethod:: rdflib.graph.ConjunctiveGraph.quads 361 | 362 | Stores that weren't context-aware could simply return ``None`` as the 4th item in the produced/yielded tuples or simply not support this interface. 
363 | 364 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RDFLib/rdflib-leveldb/a0f3386c71e6b1cfbd09c257e29400f5fde43ed3/examples/__init__.py -------------------------------------------------------------------------------- /examples/leveldb_example.py: -------------------------------------------------------------------------------- 1 | """ 2 | leveldb (https://github.com/google/leveldb) in use as a persistent Graph store. 3 | via the Plyvel LevlDB-Python interface (https://github.com/wbolster/plyvel) 4 | 5 | The store is named and referenced as "LevelDB". 6 | 7 | Example 1: simple actions 8 | 9 | * creating a ConjunctiveGraph using the LevelDB Store 10 | * adding triples to it 11 | * counting them 12 | * closing the store, emptying the graph 13 | * re-opening the store using the same DB files 14 | * getting the same count of triples as before 15 | 16 | Example 2: larger data 17 | 18 | * loads multiple graphs downloaded from GitHub into a LevelDB-baked graph stored in the folder gsq_vocabs. 
19 | * does not delete the DB at the end so you can see it on disk 20 | """ 21 | import os 22 | from rdflib import plugin, ConjunctiveGraph, Namespace, Literal, URIRef 23 | from rdflib.store import Store, NO_STORE, VALID_STORE 24 | from tempfile import mktemp 25 | 26 | 27 | def example_1(): 28 | """Creates a ConjunctiveGraph and performs some BerkeleyDB tasks with it""" 29 | 30 | # Declare we are using a LevelDB Store 31 | store = plugin.get("LevelDB", Store)( 32 | identifier=URIRef("rdflib_leveldb_test") 33 | ) 34 | graph = ConjunctiveGraph(store) 35 | path = mktemp(prefix="testleveldb") 36 | 37 | # Open previously created store, or create it if it doesn't exist yet 38 | # (always doesn't exist in this example as using temp file location) 39 | rt = graph.open(path, create=False) 40 | 41 | if rt == NO_STORE: 42 | # There is no underlying BerkeleyDB infrastructure, so create it 43 | print("Creating new DB") 44 | graph.open(path, create=True) 45 | else: 46 | print("Using existing DB") 47 | assert rt == VALID_STORE, "The underlying store is corrupt" 48 | 49 | print("Triples in graph before add:", len(graph)) 50 | print("(will always be 0 when using temp file for DB)") 51 | 52 | # Now we'll add some triples to the graph & commit the changes 53 | EG = Namespace("http://example.net/test/") 54 | graph.bind("eg", EG) 55 | 56 | graph.add((EG["pic:1"], EG.name, Literal("Jane & Bob"))) 57 | graph.add((EG["pic:2"], EG.name, Literal("Squirrel in Tree"))) 58 | 59 | graph.commit() 60 | 61 | print("Triples in graph after add:", len(graph)) 62 | print("(should be 2)") 63 | 64 | # display the graph in Turtle 65 | print(graph.serialize()) 66 | 67 | # close when done, otherwise BerkeleyDB will leak lock entries. 
68 | graph.close() 69 | 70 | graph = None 71 | 72 | # reopen the graph 73 | graph = ConjunctiveGraph("LevelDB") 74 | 75 | graph.open(path, create=False) 76 | 77 | print("Triples still in graph:", len(graph)) 78 | print("(should still be 2)") 79 | 80 | graph.close() 81 | 82 | # Clean up the temp folder to remove the BerkeleyDB database files... 83 | for f in os.listdir(path): 84 | os.unlink(path + "/" + f) 85 | os.rmdir(path) 86 | 87 | 88 | def example_2(): 89 | """Loads a number of SKOS vocabularies from GitHub into a BerkeleyDB-backed graph stored in the local folder 90 | 'gsq_vocabs' 91 | 92 | Should print out the number of triples after each load, e.g.: 93 | 177 94 | 248 95 | 289 96 | 379 97 | 421 98 | 628 99 | 764 100 | 813 101 | 965 102 | 1381 103 | 9666 104 | 9719 105 | ... 106 | """ 107 | from urllib.request import urlopen, Request 108 | from urllib.error import HTTPError 109 | import json 110 | import base64 111 | 112 | store = plugin.get("LevelDB", Store)( 113 | identifier=URIRef("rdflib_leveldb_test") 114 | ) 115 | g = ConjunctiveGraph(store) 116 | 117 | g.open("gsg_vocabs", create=True) 118 | 119 | # gsq_vocabs = "https://api.github.com/repos/geological-survey-of-queensland/vocabularies/git/trees/master" 120 | gsq_vocabs = "https://api.github.com/repos/geological-survey-of-queensland/vocabularies/git/trees/cd7244d39337c1f4ef164b1cf1ea1f540a7277db" 121 | try: 122 | res = urlopen( 123 | Request(gsq_vocabs, headers={"Accept": "application/json"}) 124 | ) 125 | except HTTPError as e: 126 | return e.code, str(e), None 127 | 128 | data = res.read() 129 | encoding = res.info().get_content_charset("utf-8") 130 | j = json.loads(data.decode(encoding)) 131 | for v in j["tree"]: 132 | # process the element in GitHub result if it's a Turtle file 133 | if v["path"].endswith(".ttl"): 134 | # for each file, call it by URL, decode it and parse it into the graph 135 | r = urlopen(v["url"]) 136 | content = json.loads(r.read().decode())["content"] 137 | 
g.parse(data=base64.b64decode(content).decode(), format="turtle") 138 | print(len(g)) 139 | 140 | print("loading complete") 141 | 142 | 143 | if __name__ == "__main__": 144 | example_1() 145 | example_2() 146 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=40.6.0", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | # Not necessary for packaging but every self-respecting Python 6 | # package should a) use black and b) fix the WRONG default. 7 | [tool.black] 8 | line-length = 79 9 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = 3 | --doctest-modules 4 | --ignore=test/test_store_performance1.py 5 | --ignore=test/test_store_performance2.py 6 | --ignore-glob=docs/*.py 7 | doctest_optionflags = ALLOW_UNICODE 8 | log_cli=true 9 | log_level=DEBUG 10 | filterwarnings = 11 | # The below warning is a consequence of how pytest doctest detects mocks and how DefinedNamespace behaves when an undefined attribute is being accessed. 12 | ignore:Code. pytest_mock_example_attribute_that_shouldnt_exist is not defined in namespace .*:UserWarning 13 | # The below warning is a consequence of how pytest detects fixtures and how DefinedNamespace behaves when an undefined attribute is being accessed. 14 | ignore:Code. 
_pytestfixturefunction is not defined in namespace .*:UserWarning 15 | -------------------------------------------------------------------------------- /rdflib_leveldb/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | """ 4 | __all__ = [ 5 | "__title__", 6 | "__summary__", 7 | "__uri__", 8 | "__version__", 9 | "__author__", 10 | "__email__", 11 | "__license__", 12 | "__copyright__", 13 | ] 14 | 15 | 16 | __title__ = "rdflib-leveldb" 17 | 18 | __summary__ = ( 19 | "An adaptation of RDFLib BerkeleyDB Store’s key-value approach," 20 | "using LevelDB as a back-end. Implemented by Gunnar Grimnes, " 21 | "based on an original contribution by Drew Perttula. " 22 | "Migrated to Python 3 by Graham Higgins." 23 | ) 24 | 25 | __uri__ = "https://github.com/RDFLib/rdflib-leveldb" 26 | 27 | __version__ = "0.2" 28 | 29 | __author__ = "Graham Higgins" 30 | __email__ = "gjhiggins@gmail.com" 31 | 32 | __license__ = "BSD" 33 | __copyright__ = "Copyright 2021 {}".format(__author__) 34 | -------------------------------------------------------------------------------- /rdflib_leveldb/leveldbstore.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | An adaptation of the BerkeleyDB Store's key-value approach to use LevelDB 4 | as a back-end. 5 | 6 | Based on an original contribution by Drew Perttula: `TokyoCabinet Store 7 | `_. 8 | 9 | and then a Kyoto Cabinet version by Graham Higgins 10 | 11 | this one by Gunnar Grimnes 12 | 13 | Subsequently updated to Python3 by Graham Higgins 14 | 15 | berkeleydb uses the default API get and put, so has to handle 16 | string-to-bytes conversion in the args provided to every call 17 | on get/put. 
By using a store-specific _get/_put which takes an 18 | additional "dbname" argument, not only can store-specific 19 | differences in get/put call be coded for but it is also offers 20 | the opportunity to do the string-bytes conversion at the point 21 | of db API and so the calls can be expunged of conversion cruft. 22 | 23 | The cost is a difference of model: 24 | 25 | Berkeleydb: 26 | 27 | # def namespace(self, prefix): 28 | # prefix = prefix.encode("utf-8") 29 | # ns = self.__namespace.get(prefix, None) 30 | # if ns is not None: 31 | # return URIRef(ns.decode("utf-8")) 32 | # return None 33 | vs. 34 | 35 | # def namespace(self, prefix): 36 | # ns = _get(self.__namespace, prefix) 37 | # if ns is not None: 38 | # return URIRef(ns) 39 | # return None 40 | 41 | There is also a difference in the API w.r.t. accessing a range. 42 | BerkeleyDB takes a cursor-based approach: 43 | 44 | # index = self.__indicies[0] 45 | # cursor = index.cursor() 46 | # current = cursor.set_range(prefix) 47 | # count = 0 48 | # while current: 49 | # key, value = current 50 | # if key.startswith(prefix): 51 | # count += 1 52 | # # Hack to stop 2to3 converting this to next(cursor) 53 | # current = getattr(cursor, "next")() 54 | # else: 55 | # break 56 | # cursor.close() 57 | # return count 58 | 59 | whereas Plyvel offers an interator: 60 | 61 | # return len([key for key in self.__indices[0].iterator( 62 | # start=prefix, include_value=False) 63 | # if key.startswith(prefix)]) 64 | 65 | """ 66 | import os 67 | import logging 68 | from functools import lru_cache 69 | from rdflib.store import Store, VALID_STORE, NO_STORE 70 | from rdflib.term import URIRef 71 | from urllib.request import pathname2url 72 | 73 | try: 74 | from plyvel import DB as LevelDB 75 | 76 | has_wrapper = True 77 | except ImportError: # pragma: NO COVER 78 | has_wrapper = False 79 | 80 | logging.basicConfig(level=logging.ERROR, format="%(message)s") 81 | logger = logging.getLogger(__name__) 82 | 
logger.setLevel(logging.DEBUG) 83 | 84 | 85 | class NoopMethods(object): 86 | def __getattr__(self, methodName): 87 | return lambda *args: None 88 | 89 | 90 | __all__ = ["LevelDB"] 91 | 92 | 93 | class LevelDBStore(Store): 94 | """\ 95 | A store that allows for on-disk persistent using LevelDB, a fast 96 | key/value DB. 97 | 98 | This store allows for quads as well as triples. See examples of use 99 | in both the `examples.leveldb_example` and `test.test_leveldb_store` 100 | files. 101 | 102 | **NOTE on installation**: 103 | 104 | To use this store, you must have leveldb installed on your system 105 | separately to Python (`brew install leveldb` on a Mac) and also have 106 | the Plyvel leveldb Python wrapper installed (`pip install plyvel`). 107 | 108 | Windows users should use the Plyvel-wheels distribution which includes 109 | Windows-specifc leveldb library binaries: (`pip install plyvel-wheels`). 110 | 111 | """ 112 | 113 | context_aware = True 114 | formula_aware = True 115 | transaction_aware = False 116 | graph_aware = True 117 | db_env = None 118 | should_create = True 119 | 120 | def __init__(self, configuration=None, identifier=None): 121 | if not has_wrapper: 122 | raise ImportError("Unable to import plyvel, store is unusable.") 123 | self.__open = False 124 | self._terms = 0 125 | self.__identifier = identifier 126 | super(LevelDBStore, self).__init__(configuration) 127 | self._loads = self.node_pickler.loads 128 | self._dumps = self.node_pickler.dumps 129 | 130 | def __get_identifier(self): 131 | return self.__identifier 132 | 133 | identifier = property(__get_identifier) 134 | 135 | def is_open(self): 136 | return self.__open 137 | 138 | def open(self, path, create=False): 139 | if not has_wrapper: 140 | return NO_STORE 141 | 142 | self.should_create = create 143 | self.path = path 144 | 145 | if self.__identifier is None: 146 | self.__identifier = URIRef(pathname2url(os.path.abspath(path))) 147 | 148 | # Create a prefixed database 149 | dbpathname = 
os.path.abspath(self.path) 150 | # Help the user to avoid writing over an existing leveldb database 151 | if self.should_create is True: 152 | if os.path.exists(dbpathname): 153 | raise Exception( 154 | f"Database file {dbpathname} aready exists, please move or delete it." 155 | ) 156 | else: 157 | self.db = LevelDB( 158 | dbpathname, create_if_missing=True, error_if_exists=True 159 | ) 160 | else: 161 | if not os.path.exists(dbpathname): 162 | return NO_STORE 163 | else: 164 | self.db = LevelDB( 165 | dbpathname, create_if_missing=False, error_if_exists=False 166 | ) 167 | 168 | # create and open the DBs 169 | self.__indices = [ 170 | None, 171 | ] * 3 172 | self.__indices_info = [ 173 | None, 174 | ] * 3 175 | for i in range(0, 3): 176 | index_name = to_key_func(i)( 177 | ( 178 | "s".encode("latin-1"), 179 | "p".encode("latin-1"), 180 | "o".encode("latin-1"), 181 | ), 182 | "c".encode("latin-1"), 183 | ) 184 | index = self.db.prefixed_db(index_name) 185 | self.__indices[i] = index 186 | self.__indices_info[i] = (index, to_key_func(i), from_key_func(i)) 187 | 188 | lookup = {} 189 | for i in range(0, 8): 190 | results = [] 191 | for start in range(0, 3): 192 | score = 1 193 | len = 0 194 | for j in range(start, start + 3): 195 | if i & (1 << (j % 3)): 196 | score = score << 1 197 | len += 1 198 | else: 199 | break 200 | tie_break = 2 - start 201 | results.append(((score, tie_break), start, len)) 202 | 203 | results.sort() 204 | score, start, len = results[-1] 205 | 206 | def get_prefix_func(start, end): 207 | def get_prefix(triple, context): 208 | if context is None: 209 | yield "" 210 | else: 211 | yield context 212 | i = start 213 | while i < end: 214 | yield triple[i % 3] 215 | i += 1 216 | yield "" 217 | 218 | return get_prefix 219 | 220 | lookup[i] = ( 221 | self.__indices[start], 222 | get_prefix_func(start, start + len), 223 | from_key_func(start), 224 | results_from_key_func(start, self._from_string), 225 | ) 226 | 227 | self.__lookup_dict = lookup 228 | 
self.__contexts = self.db.prefixed_db(b"contexts") 229 | self.__namespace = self.db.prefixed_db(b"namespace") 230 | self.__prefix = self.db.prefixed_db(b"prefix") 231 | self.__k2i = self.db.prefixed_db(b"k2i") 232 | self.__i2k = self.db.prefixed_db(b"i2k") 233 | 234 | try: 235 | self._terms = int(self.__k2i.get(b"__terms__")) 236 | assert isinstance(self._terms, int) 237 | except TypeError: 238 | pass # new store, no problem 239 | 240 | self.__open = True 241 | 242 | return VALID_STORE 243 | 244 | def dumpdb(self): 245 | from pprint import pformat 246 | 247 | dbs = { 248 | "self.__indices": self.__indices, 249 | "self.__indices_info": self.__indices_info, 250 | "self.__lookup_dict": self.__lookup_dict, 251 | "self.__contexts": self.__contexts, 252 | "self.__namespace": self.__namespace, 253 | "self.__prefix": self.__prefix, 254 | "self.__k2i": self.__k2i, 255 | "self.__i2k": self.__i2k, 256 | } 257 | logger.debug("\n**** Dumping database:\n") 258 | for k, v in dbs.items(): 259 | if isinstance(v, (list, dict)): 260 | logger.debug(f"{k} {type(v)}:\n{pformat(v, indent=4)}") 261 | else: 262 | logger.debug(f"db: {k} {type(v)}") 263 | for (key, val) in list(v.iterator()): 264 | logger.debug(f"\t{key}: {val}") 265 | 266 | def close(self, commit_pending_transaction=False): 267 | self.__open = False 268 | # Closing the database also closes the prefixed databases 269 | self.db.close() 270 | 271 | def destroy(self, configuration=""): 272 | assert self.__open is False, "The Store must be closed." 273 | import os 274 | 275 | path = configuration or self.path 276 | if os.path.exists(path): 277 | import shutil 278 | 279 | shutil.rmtree(path) 280 | 281 | def add(self, triple, context, quoted=False): 282 | """ 283 | Add a triple to the store of triples. 284 | """ 285 | (subject, predicate, object) = triple 286 | assert self.__open, "The Store must be open." 
287 | assert context != self, "Can not add triple directly to store" 288 | # Add the triple to the Store, triggering TripleAdded events 289 | Store.add(self, (subject, predicate, object), context, quoted) 290 | 291 | _to_string = self._to_string 292 | 293 | s = _to_string(subject) 294 | p = _to_string(predicate) 295 | o = _to_string(object) 296 | c = _to_string(context) 297 | 298 | cspo, cpos, cosp = self.__indices 299 | 300 | value = cspo.get(f"{c}^{s}^{p}^{o}^".encode()) 301 | 302 | if value is None: 303 | self.__contexts.put(c.encode(), b"") 304 | 305 | contexts_value = cspo.get( 306 | f"{''}^{s}^{p}^{o}^".encode() 307 | ) or "".encode("latin-1") 308 | 309 | contexts = set(contexts_value.split("^".encode("latin-1"))) 310 | contexts.add(c.encode()) 311 | 312 | contexts_value = "^".encode("latin-1").join(contexts) 313 | assert contexts_value is not None 314 | 315 | cspo.put(f"{c}^{s}^{p}^{o}^".encode(), b"") 316 | cpos.put(f"{c}^{p}^{o}^{s}^".encode(), b"") 317 | cosp.put(f"{c}^{o}^{s}^{p}^".encode(), b"") 318 | if not quoted: 319 | cspo.put(f"^{s}^{p}^{o}^".encode(), contexts_value) 320 | cpos.put(f"^{p}^{o}^{s}^".encode(), contexts_value) 321 | cosp.put(f"^{o}^{s}^{p}^".encode(), contexts_value) 322 | 323 | # self.__needs_sync = True 324 | 325 | else: 326 | pass # already have this triple, ignoring") 327 | 328 | def __remove(self, spo, c, quoted=False): 329 | s, p, o = spo 330 | cspo, cpos, cosp = self.__indices 331 | contexts_value = ( 332 | cspo.get( 333 | "^".encode("latin-1").join( 334 | ["".encode("latin-1"), s, p, o, "".encode("latin-1")] 335 | ), 336 | ) 337 | or "".encode("latin-1") 338 | ) 339 | contexts = set(contexts_value.split("^".encode("latin-1"))) 340 | contexts.discard(c) 341 | contexts_value = "^".encode("latin-1").join(contexts) 342 | for i, _to_key, _from_key in self.__indices_info: 343 | i.delete(_to_key((s, p, o), c)) 344 | if not quoted: 345 | if contexts_value: 346 | for i, _to_key, _from_key in self.__indices_info: 347 | i.put( 348 | 
_to_key((s, p, o), "".encode("latin-1")), 349 | contexts_value, 350 | ) 351 | 352 | else: 353 | for i, _to_key, _from_key in self.__indices_info: 354 | try: 355 | i.delete(_to_key((s, p, o), "".encode("latin-1"))) 356 | except Exception: 357 | pass # FIXME okay to ignore these? 358 | 359 | def remove(self, spo, context): 360 | subject, predicate, object = spo 361 | assert self.__open, "The Store must be open." 362 | # Add the triple to the Store, triggering TripleRemoved events 363 | Store.remove(self, (subject, predicate, object), context) 364 | _to_string = self._to_string 365 | 366 | if context is not None: 367 | if context == self: 368 | context = None 369 | 370 | if ( 371 | subject is not None 372 | and predicate is not None 373 | and object is not None 374 | and context is not None 375 | ): 376 | s = _to_string(subject) 377 | p = _to_string(predicate) 378 | o = _to_string(object) 379 | c = _to_string(context) 380 | value = self.__indices[0].get(f"{c}^{s}^{p}^{o}^".encode()) 381 | if value is not None: 382 | self.__remove((s.encode(), p.encode(), o.encode()), c.encode()) 383 | 384 | # self.__needs_sync = True 385 | 386 | else: 387 | cspo, cpos, cosp = self.__indices 388 | index, prefix, from_key, results_from_key = self.__lookup( 389 | (subject, predicate, object), context 390 | ) 391 | for key in index.iterator(start=prefix, include_value=False): 392 | if key.startswith(prefix): 393 | c, s, p, o = from_key(key) 394 | if context is None: 395 | contexts_value = index.get(key) or "".encode("latin-1") 396 | # remove triple from all non quoted contexts 397 | contexts = set( 398 | contexts_value.split("^".encode("latin-1")) 399 | ) 400 | # and from the conjunctive index 401 | contexts.add("".encode("latin-1")) 402 | for c in contexts: 403 | for i, _to_key, _ in self.__indices_info: 404 | i.delete(_to_key((s, p, o), c)) 405 | else: 406 | self.__remove((s, p, o), c) 407 | else: 408 | break 409 | 410 | if context is not None: 411 | if subject is None and predicate is 
None and object is None: 412 | # TODO: also if context becomes empty and not just on 413 | # remove((None, None, None), c) 414 | try: 415 | self.__contexts.delete(_to_string(context).encode()) 416 | except Exception as e: # pragma: NO COVER 417 | print( 418 | "%s, Failed to delete %s" % (e, context) 419 | ) # pragma: NO COVER 420 | pass # pragma: NO COVER 421 | 422 | # self.__needs_sync = needs_sync 423 | 424 | def triples(self, spo, context=None): 425 | """A generator over all the triples matching""" 426 | assert self.__open, "The Store must be open." 427 | 428 | subject, predicate, object = spo 429 | 430 | if context is not None: 431 | if context == self: 432 | context = None 433 | 434 | # _from_string = self._from_string ## UNUSED 435 | index, prefix, from_key, results_from_key = self.__lookup( 436 | (subject, predicate, object), context 437 | ) 438 | 439 | for key, value in index.iterator(start=prefix, include_value=True): 440 | if key.startswith(prefix): 441 | yield results_from_key(key, subject, predicate, object, value) 442 | else: 443 | break 444 | 445 | def __len__(self, context=None): 446 | assert self.__open, "The Store must be open." 
447 | if context is not None: 448 | if context == self: 449 | context = None 450 | 451 | if context is None: 452 | prefix = "^".encode("latin-1") 453 | else: 454 | prefix = f"{self._to_string(context)}^".encode() 455 | 456 | return len( 457 | [ 458 | key 459 | for key in self.__indices[0].iterator( 460 | start=prefix, include_value=False 461 | ) 462 | if key.startswith(prefix) 463 | ] 464 | ) 465 | 466 | def bind(self, prefix, namespace): 467 | prefix = prefix.encode("utf-8") 468 | namespace = namespace.encode("utf-8") 469 | bound_prefix = self.__prefix.get(namespace) 470 | if bound_prefix: 471 | self.__namespace.delete(bound_prefix) 472 | self.__prefix.put(namespace, prefix) 473 | self.__namespace.put(prefix, namespace) 474 | 475 | def namespace(self, prefix): 476 | prefix = prefix.encode("utf-8") 477 | ns = self.__namespace.get(prefix, None) 478 | if ns is not None: 479 | return URIRef(ns.decode("utf-8")) 480 | return None 481 | 482 | def prefix(self, namespace): 483 | namespace = namespace.encode("utf-8") 484 | prefix = self.__prefix.get(namespace, None) 485 | if prefix is not None: 486 | return prefix.decode("utf-8") 487 | return None 488 | 489 | def namespaces(self): 490 | for prefix, namespace in [ 491 | (k.decode(), v.decode()) 492 | for k, v in self.__namespace.iterator(include_value=True) 493 | ]: 494 | yield prefix, URIRef(namespace) 495 | 496 | @lru_cache(maxsize=5000) 497 | def __get_context(self, ident): 498 | logger.debug(f"get context {ident}") 499 | return self.__contexts.get(ident, {}) 500 | # return self.db_env.get(ident, {}) 501 | 502 | def __set_context(self, ident, g): 503 | logger.debug(f"set context {ident} for {g}") 504 | self.__contexts.put(ident.encode(), g) 505 | # self.db_env[ident] = g 506 | 507 | def contexts(self, triple=None): 508 | _from_string = self._from_string 509 | _to_string = self._to_string 510 | 511 | if triple: 512 | s, p, o = triple 513 | s = _to_string(s) 514 | p = _to_string(p) 515 | o = _to_string(o) 516 | contexts = 
self.__indices[0].get(f"^{s}^{p}^{o}^".encode()) 517 | 518 | if contexts: 519 | for c in contexts.split("^".encode("latin-1")): 520 | if c: 521 | yield _from_string(c) 522 | 523 | else: 524 | for k in self.__contexts.iterator(include_value=False): 525 | yield _from_string(k) 526 | 527 | @lru_cache(maxsize=5000) 528 | def add_graph(self, graph): 529 | self.__contexts.put(self._to_string(graph).encode(), b"") 530 | 531 | def remove_graph(self, graph): 532 | self.remove((None, None, None), graph) 533 | 534 | @lru_cache(maxsize=5000) 535 | def _from_string(self, i): 536 | """ 537 | rdflib term from index number (as a string) 538 | """ 539 | k = self.__i2k.get(str(int(i)).encode()) 540 | if k is not None: 541 | val = self._loads(k) 542 | return val 543 | else: 544 | raise Exception(f"Key for {i} is None") 545 | 546 | @lru_cache(maxsize=5000) 547 | def _to_string(self, term): 548 | """ 549 | index number (as a string) from rdflib term 550 | """ 551 | k = self._dumps(term) 552 | i = self.__k2i.get(k) 553 | 554 | if i is None: # (from BdbApi) 555 | # Does not yet exist, increment refcounter and create 556 | self._terms += 1 557 | i = str(self._terms) 558 | self.__i2k.put(i.encode(), k) 559 | self.__k2i.put(k, i.encode()) 560 | self.__k2i.put(b"__terms__", str(self._terms).encode()) 561 | else: 562 | i = i.decode() 563 | return i 564 | 565 | def __lookup(self, spo, context): 566 | subject, predicate, object = spo 567 | _to_string = self._to_string 568 | if context is not None: 569 | context = _to_string(context) 570 | i = 0 571 | if subject is not None: 572 | i += 1 573 | subject = _to_string(subject) 574 | if predicate is not None: 575 | i += 2 576 | predicate = _to_string(predicate) 577 | if object is not None: 578 | i += 4 579 | object = _to_string(object) 580 | index, prefix_func, from_key, results_from_key = self.__lookup_dict[i] 581 | # DEBUG 582 | try: 583 | prefix = "^".join( 584 | prefix_func((subject, predicate, object), context) 585 | ).encode("utf-8") 586 | 
except Exception as e: 587 | raise Exception( 588 | "{}: {} {} - {} {} - {} {} - {} {}".format( 589 | e, 590 | subject, 591 | type(subject), 592 | predicate, 593 | type(predicate), 594 | object, 595 | type(object), 596 | context, 597 | type(context), 598 | ) 599 | ) 600 | return index, prefix, from_key, results_from_key 601 | 602 | 603 | def to_key_func(i): 604 | def to_key(triple, context): 605 | "Takes a string; returns key" 606 | return "^".encode("latin-1").join( 607 | ( 608 | context, 609 | triple[i % 3], 610 | triple[(i + 1) % 3], 611 | triple[(i + 2) % 3], 612 | "".encode("latin-1"), 613 | ) 614 | ) # "" to tac on the trailing ^ 615 | 616 | return to_key 617 | 618 | 619 | def from_key_func(i): 620 | def from_key(key): 621 | "Takes a key; returns string" 622 | parts = key.split("^".encode("latin-1")) 623 | return ( 624 | parts[0], 625 | parts[(3 - i + 0) % 3 + 1], 626 | parts[(3 - i + 1) % 3 + 1], 627 | parts[(3 - i + 2) % 3 + 1], 628 | ) 629 | 630 | return from_key 631 | 632 | 633 | def results_from_key_func(i, from_string): 634 | def from_key(key, subject, predicate, object, contexts_value): 635 | "Takes a key and subject, predicate, object; returns tuple for yield" 636 | parts = key.split("^".encode("latin-1")) 637 | if subject is None: 638 | # TODO: i & 1: # dis assemble and/or measure to see which is faster 639 | # subject is None or i & 1 640 | s = from_string(parts[(3 - i + 0) % 3 + 1]) 641 | else: 642 | s = subject 643 | if predicate is None: # i & 2: 644 | p = from_string(parts[(3 - i + 1) % 3 + 1]) 645 | else: 646 | p = predicate 647 | if object is None: # i & 4: 648 | o = from_string(parts[(3 - i + 2) % 3 + 1]) 649 | else: 650 | o = object 651 | return ( 652 | (s, p, o), 653 | ( 654 | from_string(c) 655 | for c in contexts_value.split("^".encode("latin-1")) 656 | if c 657 | ), 658 | ) 659 | 660 | return from_key 661 | 662 | 663 | def readable_index(i): 664 | s, p, o = "?" 
* 3 665 | if i & 1: 666 | s = "s" 667 | if i & 2: 668 | p = "p" 669 | if i & 4: 670 | o = "o" 671 | return f"{s},{p},{o}" 672 | 673 | 674 | # # To facilitate TDD :) 675 | # # ==================== 676 | # storename = "LevelDB" 677 | # storetest = True 678 | # configString = tempfile.mktemp(prefix='leveldbstoretest') 679 | 680 | 681 | # @unittest.skip("WIP") 682 | # class LevelDBTDD(unittest.TestCase): 683 | # def setUp(self): 684 | # from rdflib import Graph 685 | # store = "LevelDB" 686 | # self.graph = Graph(store=store) 687 | # self.path = configString 688 | # self.graph.open(self.path, create=True) 689 | 690 | # def tearDown(self): 691 | # self.graph.close() 692 | # self.graph.destroy(self.path) 693 | 694 | # def test_namespaces(self): 695 | # self.graph.bind("dc", "http://http://purl.org/dc/elements/1.1/") 696 | # self.graph.bind("foaf", "http://xmlns.com/foaf/0.1/") 697 | # self.assertTrue(len(list(self.graph.namespaces())) == 6) 698 | # self.assertIn( 699 | # ('foaf', URIRef(u'http://xmlns.com/foaf/0.1/')), 700 | # list(self.graph.namespaces())) 701 | 702 | 703 | # if __name__ == '__main__': 704 | # unittest.main() 705 | -------------------------------------------------------------------------------- /requirements.dev.txt: -------------------------------------------------------------------------------- 1 | berkeleydb; sys_platform == "linux" or sys_platform == "darwin" 2 | black==21.12b0 3 | coverage 4 | doctest-ignore-unicode==0.1.2 5 | flake8 6 | flake8-black 7 | html5lib 8 | mypy 9 | pytest 10 | pytest-cov 11 | pytest-subtests 12 | sphinx 13 | sphinxcontrib-apidoc 14 | types-setuptools 15 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/RDFLib/rdflib#egg=rdflib 2 | plyvel-wheels; platform_system != "linux" 3 | plyvel; sys_platform == "linux" 
-------------------------------------------------------------------------------- /run_tests.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Testing with pytest 4 | ================= 5 | 6 | This test runner uses pytest for test discovery and running. It uses the argument 7 | spec of pytest, but with some options pre-set. To begin with, make sure you have 8 | pytest installed, e.g.: 9 | 10 | $ pip install pytest 11 | 12 | To run the tests, use: 13 | 14 | $ ./run_tests.py 15 | 16 | For more details check . 17 | 18 | Coverage 19 | ======== 20 | 21 | If ``pytest-cov`` is placed in $PYTHONPATH, it can be used to create coverage 22 | information if the "--cov" option is supplied. 23 | 24 | See for details. 25 | 26 | """ 27 | 28 | import json 29 | import sys 30 | 31 | if __name__ == "__main__": 32 | try: 33 | import pytest 34 | except ImportError: 35 | print( 36 | """\ 37 | Requires pytest. Try: 38 | 39 | $ pip install pytest 40 | 41 | Exiting. 
""", 42 | file=sys.stderr, 43 | ) 44 | exit(1) 45 | 46 | finalArgs = sys.argv[1:] 47 | print("Running pytest with:", json.dumps(finalArgs)) 48 | sys.exit(pytest.main(args=finalArgs)) 49 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | license_files = LICENSE 3 | 4 | [options.package_data] 5 | rdflib_leveldb = py.typed 6 | 7 | [flake8] 8 | ignore = W806 9 | max-line-length = 88 10 | exclude = host,extras,transform,results,pyMicrodata 11 | 12 | [coverage:run] 13 | branch = True 14 | source = rdflib_leveldb 15 | 16 | [coverage:report] 17 | # Regexes for lines to exclude from consideration 18 | exclude_lines = 19 | # Have to re-enable the standard pragma 20 | pragma: no cover 21 | 22 | # Don't complain if non-runnable code isn't run: 23 | if 0: 24 | if __name__ == .__main__.: 25 | if __name__==.__main__.: 26 | 27 | [mypy] 28 | python_version = 3.8 29 | warn_unused_configs = True 30 | ignore_missing_imports = True 31 | disallow_subclassing_any = False 32 | warn_unreachable = True 33 | 34 | [tool:pytest] 35 | addopts = 36 | --doctest-modules 37 | --ignore-glob=test/pending/*.py 38 | --ignore-glob=docs/*.py 39 | doctest_optionflags = ALLOW_UNICODE 40 | log_cli=true 41 | log_level=DEBUG 42 | filterwarnings = 43 | # The below warning is a consequence of how pytest doctest detects mocks and how DefinedNamespace behaves when an undefined attribute is being accessed. 44 | ignore:Code. pytest_mock_example_attribute_that_shouldnt_exist is not defined in namespace .*:UserWarning 45 | # The below warning is a consequence of how pytest detects fixtures and how DefinedNamespace behaves when an undefined attribute is being accessed. 46 | ignore:Code. 
_pytestfixturefunction is not defined in namespace .*:UserWarning 47 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import os 4 | import re 5 | import sys 6 | import codecs 7 | from setuptools import setup, find_packages 8 | 9 | kwargs = {} 10 | kwargs["install_requires"] = [ 11 | "setuptools", 12 | "rdflib>=6.0", 13 | "importlib-metadata; python_version < '3.8.0'", 14 | ] + (["plyvel"] if sys.platform == "linux" else ["plyvel-wheels"]) 15 | 16 | kwargs["dependency_links"] = [ 17 | "git+https://github.com/RDFLib/rdflib.git#egg=rdflib", 18 | ] 19 | 20 | kwargs["tests_require"] = [ 21 | "pytest", 22 | "pytest-cov", 23 | "pytest-subtests", 24 | ] + ([] if sys.platform.startswith("win") else ["berkeleydb"]) 25 | 26 | kwargs["extras_require"] = { 27 | "tests": kwargs["tests_require"], 28 | "docs": ["sphinx < 5", "sphinxcontrib-apidoc"], 29 | } 30 | 31 | 32 | def find_version(filename): 33 | _version_re = re.compile(r'__version__ = "(.*)"') 34 | for line in open(filename): 35 | version_match = _version_re.match(line) 36 | if version_match: 37 | return version_match.group(1) 38 | 39 | 40 | def open_local(paths, mode="r", encoding="utf8"): 41 | path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths) 42 | return codecs.open(path, mode, encoding) 43 | 44 | 45 | # long_description=""" 46 | # An adaptation of RDFLib BerkeleyDB Store’s key-value approach, using Leveldb as a back-end. 47 | 48 | # Based on an original contribution by Drew Perttula. 
49 | # """ 50 | with open_local(["README.md"], encoding="utf-8") as readme: 51 | long_description = readme.read() 52 | 53 | version = find_version("rdflib_leveldb/__init__.py") 54 | 55 | packages = find_packages(exclude=("examples*", "test*")) 56 | 57 | if os.environ.get("READTHEDOCS", None): 58 | # if building docs for RTD 59 | # install examples, to get docstrings 60 | packages.append("examples") 61 | 62 | setup( 63 | name="rdflib-leveldb", 64 | version=version, 65 | description="rdflib extension adding Leveldb as back-end store", 66 | author="RDFLib team", 67 | maintainer="Graham Higgins", 68 | maintainer_email="gjhiggins@gmail.com", 69 | url="https://github.com/RDFLib/rdflib-leveldb", 70 | # license="bsd-3-clause", 71 | license="BSD", 72 | platforms=["any"], 73 | python_requires=">=3.7", 74 | classifiers=[ 75 | "Programming Language :: Python", 76 | "Programming Language :: Python :: 3", 77 | "Programming Language :: Python :: 3.7", 78 | "Programming Language :: Python :: 3.8", 79 | "Programming Language :: Python :: 3.9", 80 | "License :: OSI Approved :: BSD License", 81 | "Topic :: Software Development :: Libraries :: Python Modules", 82 | "Operating System :: OS Independent", 83 | "Natural Language :: English", 84 | ], 85 | long_description=long_description, 86 | long_description_content_type="text/markdown", 87 | packages=packages, 88 | entry_points={ 89 | "rdf.plugins.store": [ 90 | "LevelDB = rdflib_leveldb.leveldbstore:LevelDBStore", 91 | ], 92 | }, 93 | **kwargs, 94 | ) 95 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- 1 | # from rdflib import plugin 2 | 3 | # from rdflib import store 4 | 5 | # plugin.register("LevelDB", store.Store, "rdflib_leveldb.LevelDB", "LevelDB") -------------------------------------------------------------------------------- /test/context_case.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import unittest 3 | from rdflib import BNode 4 | from rdflib import ConjunctiveGraph 5 | from rdflib import Graph 6 | from rdflib import URIRef 7 | 8 | # logging.getLogger('sqlalchemy.engine').setLevel(logging.WARN) 9 | 10 | michel = URIRef("urn:michel") 11 | tarek = URIRef("urn:tarek") 12 | bob = URIRef("urn:bob") 13 | likes = URIRef("urn:likes") 14 | hates = URIRef("urn:hates") 15 | pizza = URIRef("urn:pizza") 16 | cheese = URIRef("urn:cheese") 17 | c1 = URIRef("urn:context-1") 18 | c2 = URIRef("urn:context-2") 19 | 20 | 21 | class ContextTestCase(unittest.TestCase): 22 | storetest = True 23 | store_name = "LevelDB" 24 | create = True 25 | identifier = URIRef("http://rdflib.net") 26 | 27 | def setUp(self): 28 | self.graph = ConjunctiveGraph(store=self.store_name) 29 | self.graph.open(self.path, create=self.create) 30 | 31 | def tearDown(self): 32 | self.graph.close() 33 | self.graph.destroy(self.path) 34 | 35 | def get_context(self, identifier): 36 | assert isinstance(identifier, URIRef) or isinstance( 37 | identifier, BNode 38 | ), type(identifier) 39 | return Graph( 40 | store=self.graph.store, 41 | identifier=identifier, 42 | namespace_manager=self, 43 | ) 44 | 45 | def addStuff(self): 46 | graph = Graph(self.graph.store, c1) 47 | 48 | graph.add((tarek, likes, pizza)) 49 | graph.add((tarek, likes, cheese)) 50 | graph.add((michel, likes, pizza)) 51 | graph.add((michel, likes, cheese)) 52 | graph.add((bob, likes, cheese)) 53 | graph.add((bob, hates, pizza)) 54 | graph.add((bob, hates, michel)) # gasp! 
55 | 56 | def removeStuff(self): 57 | graph = Graph(self.graph.store, c1) 58 | 59 | graph.remove((tarek, likes, pizza)) 60 | graph.remove((tarek, likes, cheese)) 61 | graph.remove((michel, likes, pizza)) 62 | graph.remove((michel, likes, cheese)) 63 | graph.remove((bob, likes, cheese)) 64 | graph.remove((bob, hates, pizza)) 65 | graph.remove((bob, hates, michel)) # gasp! 66 | 67 | def addStuffInMultipleContexts(self): 68 | triple = (pizza, hates, tarek) # revenge! 69 | 70 | # add to default context 71 | self.graph.add(triple) 72 | # add to context 1 73 | graph = Graph(self.graph.store, c1) 74 | graph.add(triple) 75 | # add to context 2 76 | graph = Graph(self.graph.store, c2) 77 | graph.add(triple) 78 | 79 | def testConjunction(self): 80 | self.addStuffInMultipleContexts() 81 | triple = (pizza, likes, pizza) 82 | # add to context 1 83 | graph = Graph(self.graph.store, c1) 84 | graph.add(triple) 85 | self.assertEqual(len(self.graph), len(graph)) 86 | 87 | def testAdd(self): 88 | self.addStuff() 89 | 90 | def testRemove(self): 91 | self.addStuff() 92 | self.removeStuff() 93 | 94 | def testLenInOneContext(self): 95 | # make sure context is empty 96 | 97 | self.graph.remove_context(self.get_context(c1)) 98 | graph = Graph(self.graph.store, c1) 99 | oldLen = len(self.graph) 100 | 101 | for i in range(0, 10): 102 | graph.add((BNode(), hates, hates)) 103 | self.assertEqual(len(graph), oldLen + 10) 104 | self.assertEqual(len(self.get_context(c1)), oldLen + 10) 105 | self.graph.remove_context(self.get_context(c1)) 106 | self.assertEqual(len(self.graph), oldLen) 107 | self.assertEqual(len(graph), 0) 108 | 109 | def testLenInMultipleContexts(self): 110 | oldLen = len(self.graph) 111 | self.addStuffInMultipleContexts() 112 | 113 | # addStuffInMultipleContexts is adding the same triple to 114 | # three different contexts. 
So it's only + 1 115 | self.assertEqual(len(self.graph), oldLen + 1) 116 | 117 | graph = Graph(self.graph.store, c1) 118 | self.assertEqual(len(graph), oldLen + 1) 119 | 120 | def testRemoveInMultipleContexts(self): 121 | triple = (pizza, hates, tarek) # revenge! 122 | 123 | self.addStuffInMultipleContexts() 124 | 125 | # triple should be still in store after removing it from c1 + c2 126 | self.assertTrue(triple in self.graph) 127 | graph = Graph(self.graph.store, c1) 128 | graph.remove(triple) 129 | self.assertTrue(triple in self.graph) 130 | graph = Graph(self.graph.store, c2) 131 | graph.remove(triple) 132 | self.assertTrue(triple in self.graph) 133 | self.graph.remove(triple) 134 | # now gone! 135 | self.assertTrue(triple not in self.graph) 136 | 137 | # add again and see if remove without context removes all triples! 138 | self.addStuffInMultipleContexts() 139 | self.graph.remove(triple) 140 | self.assertTrue(triple not in self.graph) 141 | 142 | def testContexts(self): 143 | triple = (pizza, hates, tarek) # revenge! 
144 | 145 | self.addStuffInMultipleContexts() 146 | 147 | def cid(c): 148 | if not isinstance(c, str): 149 | return c.identifier 150 | return c 151 | 152 | contextList = list(map(cid, list(self.graph.contexts()))) 153 | self.assertTrue(c1 in contextList) 154 | self.assertTrue(c2 in contextList) 155 | 156 | contextList = list(map(cid, self.graph.contexts(triple))) 157 | self.assertTrue(c1 in contextList, (c1, list(contextList))) 158 | self.assertTrue(c2 in contextList, (c2, list(contextList))) 159 | 160 | def testRemoveContext(self): 161 | 162 | self.addStuffInMultipleContexts() 163 | self.assertEqual(len(Graph(self.graph.store, c1)), 1) 164 | self.assertEqual(len(self.get_context(c1)), 1) 165 | 166 | self.graph.remove_context(self.get_context(c1)) 167 | self.assertTrue(c1 not in self.graph.contexts()) 168 | 169 | def testRemoveAny(self): 170 | Any = None 171 | self.addStuffInMultipleContexts() 172 | self.graph.remove((Any, Any, Any)) 173 | self.assertEqual(len(self.graph), 0) 174 | 175 | def testTriples(self): 176 | asserte = self.assertEqual 177 | triples = self.graph.triples 178 | graph = self.graph 179 | c1graph = Graph(self.graph.store, c1) 180 | c1triples = c1graph.triples 181 | Any = None 182 | 183 | self.addStuff() 184 | 185 | # unbound subjects with context 186 | asserte(len(list(c1triples((Any, likes, pizza)))), 2) 187 | asserte(len(list(c1triples((Any, hates, pizza)))), 1) 188 | asserte(len(list(c1triples((Any, likes, cheese)))), 3) 189 | asserte(len(list(c1triples((Any, hates, cheese)))), 0) 190 | 191 | # unbound subjects without context, same results! 
192 | asserte(len(list(triples((Any, likes, pizza)))), 2) 193 | asserte(len(list(triples((Any, hates, pizza)))), 1) 194 | asserte(len(list(triples((Any, likes, cheese)))), 3) 195 | asserte(len(list(triples((Any, hates, cheese)))), 0) 196 | 197 | # unbound objects with context 198 | asserte(len(list(c1triples((michel, likes, Any)))), 2) 199 | asserte(len(list(c1triples((tarek, likes, Any)))), 2) 200 | asserte(len(list(c1triples((bob, hates, Any)))), 2) 201 | asserte(len(list(c1triples((bob, likes, Any)))), 1) 202 | 203 | # unbound objects without context, same results! 204 | asserte(len(list(triples((michel, likes, Any)))), 2) 205 | asserte(len(list(triples((tarek, likes, Any)))), 2) 206 | asserte(len(list(triples((bob, hates, Any)))), 2) 207 | asserte(len(list(triples((bob, likes, Any)))), 1) 208 | 209 | # unbound predicates with context 210 | asserte(len(list(c1triples((michel, Any, cheese)))), 1) 211 | asserte(len(list(c1triples((tarek, Any, cheese)))), 1) 212 | asserte(len(list(c1triples((bob, Any, pizza)))), 1) 213 | asserte(len(list(c1triples((bob, Any, michel)))), 1) 214 | 215 | # unbound predicates without context, same results! 216 | asserte(len(list(triples((michel, Any, cheese)))), 1) 217 | asserte(len(list(triples((tarek, Any, cheese)))), 1) 218 | asserte(len(list(triples((bob, Any, pizza)))), 1) 219 | asserte(len(list(triples((bob, Any, michel)))), 1) 220 | 221 | # unbound subject, objects with context 222 | asserte(len(list(c1triples((Any, hates, Any)))), 2) 223 | asserte(len(list(c1triples((Any, likes, Any)))), 5) 224 | 225 | # unbound subject, objects without context, same results! 
226 | asserte(len(list(triples((Any, hates, Any)))), 2) 227 | asserte(len(list(triples((Any, likes, Any)))), 5) 228 | 229 | # unbound predicates, objects with context 230 | asserte(len(list(c1triples((michel, Any, Any)))), 2) 231 | asserte(len(list(c1triples((bob, Any, Any)))), 3) 232 | asserte(len(list(c1triples((tarek, Any, Any)))), 2) 233 | 234 | # unbound predicates, objects without context, same results! 235 | asserte(len(list(triples((michel, Any, Any)))), 2) 236 | asserte(len(list(triples((bob, Any, Any)))), 3) 237 | asserte(len(list(triples((tarek, Any, Any)))), 2) 238 | 239 | # unbound subjects, predicates with context 240 | asserte(len(list(c1triples((Any, Any, pizza)))), 3) 241 | asserte(len(list(c1triples((Any, Any, cheese)))), 3) 242 | asserte(len(list(c1triples((Any, Any, michel)))), 1) 243 | 244 | # unbound subjects, predicates without context, same results! 245 | asserte(len(list(triples((Any, Any, pizza)))), 3) 246 | asserte(len(list(triples((Any, Any, cheese)))), 3) 247 | asserte(len(list(triples((Any, Any, michel)))), 1) 248 | 249 | # all unbound with context 250 | asserte(len(list(c1triples((Any, Any, Any)))), 7) 251 | # all unbound without context, same result! 
252 | asserte(len(list(triples((Any, Any, Any)))), 7) 253 | 254 | for c in [graph, self.get_context(c1)]: 255 | # unbound subjects 256 | asserte(set(c.subjects(likes, pizza)), set((michel, tarek))) 257 | asserte(set(c.subjects(hates, pizza)), set((bob,))) 258 | asserte(set(c.subjects(likes, cheese)), set([tarek, bob, michel])) 259 | asserte(set(c.subjects(hates, cheese)), set()) 260 | 261 | # unbound objects 262 | asserte(set(c.objects(michel, likes)), set([cheese, pizza])) 263 | asserte(set(c.objects(tarek, likes)), set([cheese, pizza])) 264 | asserte(set(c.objects(bob, hates)), set([michel, pizza])) 265 | asserte(set(c.objects(bob, likes)), set([cheese])) 266 | 267 | # unbound predicates 268 | asserte(set(c.predicates(michel, cheese)), set([likes])) 269 | asserte(set(c.predicates(tarek, cheese)), set([likes])) 270 | asserte(set(c.predicates(bob, pizza)), set([hates])) 271 | asserte(set(c.predicates(bob, michel)), set([hates])) 272 | 273 | asserte( 274 | set(c.subject_objects(hates)), 275 | set([(bob, pizza), (bob, michel)]), 276 | ) 277 | asserte( 278 | set(c.subject_objects(likes)), 279 | set( 280 | [ 281 | (tarek, cheese), 282 | (michel, cheese), 283 | (michel, pizza), 284 | (bob, cheese), 285 | (tarek, pizza), 286 | ] 287 | ), 288 | ) 289 | 290 | asserte( 291 | set(c.predicate_objects(michel)), 292 | set([(likes, cheese), (likes, pizza)]), 293 | ) 294 | asserte( 295 | set(c.predicate_objects(bob)), 296 | set([(likes, cheese), (hates, pizza), (hates, michel)]), 297 | ) 298 | asserte( 299 | set(c.predicate_objects(tarek)), 300 | set([(likes, cheese), (likes, pizza)]), 301 | ) 302 | 303 | asserte( 304 | set(c.subject_predicates(pizza)), 305 | set([(bob, hates), (tarek, likes), (michel, likes)]), 306 | ) 307 | asserte( 308 | set(c.subject_predicates(cheese)), 309 | set([(bob, likes), (tarek, likes), (michel, likes)]), 310 | ) 311 | asserte(set(c.subject_predicates(michel)), set([(bob, hates)])) 312 | 313 | asserte( 314 | set(c), 315 | set( 316 | [ 317 | (bob, 
hates, michel), 318 | (bob, likes, cheese), 319 | (tarek, likes, pizza), 320 | (michel, likes, pizza), 321 | (michel, likes, cheese), 322 | (bob, hates, pizza), 323 | (tarek, likes, cheese), 324 | ] 325 | ), 326 | ) 327 | 328 | # remove stuff and make sure the graph is empty again 329 | self.removeStuff() 330 | asserte(len(list(c1triples((Any, Any, Any)))), 0) 331 | asserte(len(list(triples((Any, Any, Any)))), 0) 332 | 333 | 334 | if __name__ == "__main__": 335 | unittest.main() 336 | -------------------------------------------------------------------------------- /test/graph_case.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import unittest 3 | from rdflib import Graph 4 | from rdflib import RDF 5 | from rdflib import URIRef 6 | 7 | michel = URIRef("urn:michel") 8 | tarek = URIRef("urn:tarek") 9 | bob = URIRef("urn:bob") 10 | likes = URIRef("urn:likes") 11 | hates = URIRef("urn:hates") 12 | pizza = URIRef("urn:pizza") 13 | cheese = URIRef("urn:cheese") 14 | 15 | 16 | class GraphTestCase(unittest.TestCase): 17 | storetest = True 18 | store_name = "LevelDB" 19 | create = True 20 | identifier = URIRef("http://rdflib.net") 21 | 22 | def setUp(self): 23 | self.graph = Graph(store=self.store_name) 24 | self.graph.open(self.path, create=self.create) 25 | 26 | def tearDown(self): 27 | self.graph.close() 28 | self.graph.destroy(self.path) 29 | 30 | def addStuff(self): 31 | self.graph.add((tarek, likes, pizza)) 32 | self.graph.add((tarek, likes, cheese)) 33 | self.graph.add((michel, likes, pizza)) 34 | self.graph.add((michel, likes, cheese)) 35 | self.graph.add((bob, likes, cheese)) 36 | self.graph.add((bob, hates, pizza)) 37 | self.graph.add((bob, hates, michel)) # gasp! 
38 | self.graph.commit() 39 | 40 | def removeStuff(self): 41 | self.graph.remove((tarek, likes, pizza)) 42 | self.graph.remove((tarek, likes, cheese)) 43 | self.graph.remove((michel, likes, pizza)) 44 | self.graph.remove((michel, likes, cheese)) 45 | self.graph.remove((bob, likes, cheese)) 46 | self.graph.remove((bob, hates, pizza)) 47 | self.graph.remove((bob, hates, michel)) # gasp! 48 | 49 | def testAdd(self): 50 | self.addStuff() 51 | 52 | def testRemove(self): 53 | self.addStuff() 54 | self.removeStuff() 55 | 56 | def testTriples(self): 57 | asserte = self.assertEqual 58 | triples = self.graph.triples 59 | Any = None 60 | 61 | self.addStuff() 62 | 63 | # unbound subjects 64 | asserte(len(list(triples((Any, likes, pizza)))), 2) 65 | asserte(len(list(triples((Any, hates, pizza)))), 1) 66 | asserte(len(list(triples((Any, likes, cheese)))), 3) 67 | asserte(len(list(triples((Any, hates, cheese)))), 0) 68 | 69 | # unbound objects 70 | asserte(len(list(triples((michel, likes, Any)))), 2) 71 | asserte(len(list(triples((tarek, likes, Any)))), 2) 72 | asserte(len(list(triples((bob, hates, Any)))), 2) 73 | asserte(len(list(triples((bob, likes, Any)))), 1) 74 | 75 | # unbound predicates 76 | asserte(len(list(triples((michel, Any, cheese)))), 1) 77 | asserte(len(list(triples((tarek, Any, cheese)))), 1) 78 | asserte(len(list(triples((bob, Any, pizza)))), 1) 79 | asserte(len(list(triples((bob, Any, michel)))), 1) 80 | 81 | # unbound subject, objects 82 | asserte(len(list(triples((Any, hates, Any)))), 2) 83 | asserte(len(list(triples((Any, likes, Any)))), 5) 84 | 85 | # unbound predicates, objects 86 | asserte(len(list(triples((michel, Any, Any)))), 2) 87 | asserte(len(list(triples((bob, Any, Any)))), 3) 88 | asserte(len(list(triples((tarek, Any, Any)))), 2) 89 | 90 | # unbound subjects, predicates 91 | asserte(len(list(triples((Any, Any, pizza)))), 3) 92 | asserte(len(list(triples((Any, Any, cheese)))), 3) 93 | asserte(len(list(triples((Any, Any, michel)))), 1) 94 | 95 | # 
all unbound 96 | asserte(len(list(triples((Any, Any, Any)))), 7) 97 | self.removeStuff() 98 | asserte(len(list(triples((Any, Any, Any)))), 0) 99 | 100 | @unittest.skip( 101 | "DeprecationWarning: Class Statement is deprecated, and will be removed in the future." 102 | ) 103 | def testStatementNode(self): 104 | graph = self.graph 105 | 106 | from rdflib.term import Statement 107 | 108 | c = URIRef("http://example.org/foo#c") 109 | r = URIRef("http://example.org/foo#r") 110 | s = Statement((self.michel, self.likes, self.pizza), c) 111 | graph.add((s, RDF.value, r)) 112 | self.assertEqual(r, graph.value(s, RDF.value)) 113 | self.assertEqual(s, graph.value(predicate=RDF.value, object=r)) 114 | 115 | def testGraph(self): 116 | from rdflib.graph import Graph 117 | 118 | graph = self.graph 119 | 120 | alice = URIRef("alice") 121 | 122 | g1 = Graph() 123 | g1.add((alice, RDF.value, pizza)) 124 | g1.add((bob, RDF.value, cheese)) 125 | g1.add((bob, RDF.value, pizza)) 126 | 127 | g2 = Graph() 128 | g2.add((bob, RDF.value, pizza)) 129 | g2.add((bob, RDF.value, cheese)) 130 | g2.add((alice, RDF.value, pizza)) 131 | 132 | gv1 = Graph(store=graph.store, base=g1) 133 | gv2 = Graph(store=graph.store, base=g2) 134 | graph.add((gv1, RDF.value, gv2)) 135 | v = graph.value(gv1) 136 | self.assertEqual(gv2, v) 137 | graph.remove((gv1, RDF.value, gv2)) 138 | 139 | def testConnected(self): 140 | graph = self.graph 141 | self.addStuff() 142 | self.assertEqual(True, graph.connected()) 143 | 144 | jeroen = URIRef("jeroen") 145 | unconnected = URIRef("unconnected") 146 | 147 | graph.add((jeroen, likes, unconnected)) 148 | 149 | self.assertEqual(False, graph.connected()) 150 | 151 | def testSub(self): 152 | g1 = Graph() 153 | g2 = Graph() 154 | 155 | g1.add((tarek, likes, pizza)) 156 | g1.add((bob, likes, cheese)) 157 | 158 | g2.add((bob, likes, cheese)) 159 | 160 | g3 = g1 - g2 161 | 162 | self.assertEqual(len(g3), 1) 163 | self.assertEqual((tarek, likes, pizza) in g3, True) 164 | 
self.assertEqual((tarek, likes, cheese) in g3, False) 165 | 166 | self.assertEqual((bob, likes, cheese) in g3, False) 167 | 168 | g1 -= g2 169 | 170 | self.assertEqual(len(g1), 1) 171 | self.assertEqual((tarek, likes, pizza) in g1, True) 172 | self.assertEqual((tarek, likes, cheese) in g1, False) 173 | 174 | self.assertEqual((bob, likes, cheese) in g1, False) 175 | 176 | def testGraphAdd(self): 177 | g1 = Graph() 178 | g2 = Graph() 179 | 180 | g1.add((tarek, likes, pizza)) 181 | 182 | g2.add((bob, likes, cheese)) 183 | 184 | g3 = g1 + g2 185 | 186 | self.assertEqual(len(g3), 2) 187 | self.assertEqual((tarek, likes, pizza) in g3, True) 188 | self.assertEqual((tarek, likes, cheese) in g3, False) 189 | 190 | self.assertEqual((bob, likes, cheese) in g3, True) 191 | 192 | g1 += g2 193 | 194 | self.assertEqual(len(g1), 2) 195 | self.assertEqual((tarek, likes, pizza) in g1, True) 196 | self.assertEqual((tarek, likes, cheese) in g1, False) 197 | 198 | self.assertEqual((bob, likes, cheese) in g1, True) 199 | 200 | def testGraphIntersection(self): 201 | g1 = Graph() 202 | g2 = Graph() 203 | 204 | g1.add((tarek, likes, pizza)) 205 | g1.add((michel, likes, cheese)) 206 | 207 | g2.add((bob, likes, cheese)) 208 | g2.add((michel, likes, cheese)) 209 | 210 | g3 = g1 * g2 211 | 212 | self.assertEqual(len(g3), 1) 213 | self.assertEqual((tarek, likes, pizza) in g3, False) 214 | self.assertEqual((tarek, likes, cheese) in g3, False) 215 | 216 | self.assertEqual((bob, likes, cheese) in g3, False) 217 | 218 | self.assertEqual((michel, likes, cheese) in g3, True) 219 | 220 | g1 *= g2 221 | 222 | self.assertEqual(len(g1), 1) 223 | 224 | self.assertEqual((tarek, likes, pizza) in g1, False) 225 | self.assertEqual((tarek, likes, cheese) in g1, False) 226 | 227 | self.assertEqual((bob, likes, cheese) in g1, False) 228 | 229 | self.assertEqual((michel, likes, cheese) in g1, True) 230 | 231 | 232 | xmltestdoc = """ 233 | 237 | 238 | 239 | 240 | 241 | """ 242 | 243 | n3testdoc = """@prefix : . 
244 | 245 | :a :b :c . 246 | """ 247 | 248 | nttestdoc = ( 249 | " .\n" 250 | ) 251 | 252 | if __name__ == "__main__": 253 | unittest.main() 254 | -------------------------------------------------------------------------------- /test/test_conjunctivegraph.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import pytest 3 | import tempfile 4 | import rdflib 5 | from rdflib_leveldb.leveldbstore import readable_index, NoopMethods 6 | from rdflib.graph import ConjunctiveGraph, URIRef 7 | import logging 8 | 9 | logging.basicConfig(level=logging.ERROR, format="%(message)s") 10 | logger = logging.getLogger(__name__) 11 | logger.setLevel(logging.DEBUG) 12 | 13 | storetest = True 14 | 15 | michel = URIRef("urn:michel") 16 | bob = URIRef("urn:bob") 17 | cheese = URIRef("urn:cheese") 18 | likes = URIRef("urn:likes") 19 | pizza = URIRef("urn:pizza") 20 | uri1 = URIRef("urn:graph1") 21 | uri2 = URIRef("urn:graph2") 22 | 23 | 24 | @pytest.fixture 25 | def getconjunctivegraph(): 26 | store = "LevelDB" 27 | graph = ConjunctiveGraph(store=store) 28 | path = tempfile.mktemp(prefix="testleveldb") 29 | graph.open(path, create=True) 30 | yield graph 31 | graph.close() 32 | graph.destroy(path) 33 | 34 | 35 | def test_namespaces(getconjunctivegraph): 36 | graph = getconjunctivegraph 37 | graph.bind("dc", "http://http://purl.org/dc/elements/1.1/") 38 | graph.bind("foaf", "http://xmlns.com/foaf/0.1/") 39 | assert len(list(graph.namespaces())) == 6 40 | assert ("foaf", rdflib.term.URIRef("http://xmlns.com/foaf/0.1/")) in list( 41 | graph.namespaces() 42 | ) 43 | 44 | 45 | def test_readable_index(getconjunctivegraph): 46 | assert repr(readable_index(111)) == "'s,p,o'" 47 | 48 | 49 | def test_triples_context_reset(getconjunctivegraph): 50 | # I don't think this is doing what it says on the tin 51 | graph = getconjunctivegraph 52 | graph.add((michel, likes, pizza)) 53 | graph.add((michel, likes, cheese)) 54 | graph.commit() 55 | 
ntriples = list( 56 | graph.triples((None, None, None), context=next(graph.contexts())) 57 | ) 58 | assert len(ntriples) == 2 # len(ntriples)) 59 | 60 | 61 | def test_remove_context_reset(getconjunctivegraph): 62 | graph = getconjunctivegraph 63 | graph.add((michel, likes, pizza)) 64 | graph.add((michel, likes, cheese)) 65 | graph.commit() 66 | graph.remove((michel, likes, cheese, next(graph.contexts()))) 67 | graph.commit() 68 | ntriples = list(graph.triples((None, None, None))) 69 | assert len(ntriples) == 1 # len(ntriples)) 70 | 71 | 72 | def test_remove_db_exception(getconjunctivegraph): 73 | graph = getconjunctivegraph 74 | graph.add((michel, likes, pizza)) 75 | graph.add((michel, likes, cheese)) 76 | graph.commit() 77 | ntriples = list( 78 | graph.triples((None, None, None), context=next(graph.contexts())) 79 | ) 80 | assert len(ntriples) == 2 # len(ntriples)) 81 | 82 | 83 | def test_nquads_default_graph(getconjunctivegraph): 84 | graph = getconjunctivegraph 85 | data = """ 86 | . 87 | . 88 | . 
89 | """ 90 | 91 | publicID = URIRef("http://example.org/g0") 92 | 93 | graph.parse(data=data, format="nquads", publicID=publicID) 94 | 95 | assert len(graph) == 3, len(graph) 96 | assert len(list(graph.contexts())) == 2, len(list(graph.contexts())) 97 | assert len(graph.get_context(publicID)) == 2, len( 98 | graph.get_context(publicID) 99 | ) 100 | 101 | 102 | def test_serialize(getconjunctivegraph): 103 | graph = getconjunctivegraph 104 | graph.get_context(uri1).add((bob, likes, pizza)) 105 | graph.get_context(uri2).add((bob, likes, pizza)) 106 | s = graph.serialize(format="nquads") 107 | assert len([x for x in s.split("\n") if x.strip()]) == 2 108 | 109 | g2 = ConjunctiveGraph(store="LevelDB") 110 | g2.open(tempfile.mktemp(prefix="leveldbstoretest"), create=True) 111 | g2.parse(data=s, format="nquads") 112 | 113 | assert len(graph) == len(g2) 114 | assert sorted(x.identifier for x in graph.contexts()) == sorted( 115 | x.identifier for x in g2.contexts() 116 | ) 117 | 118 | 119 | def test_NoopMethods(): 120 | obj = NoopMethods() 121 | res = obj.__getattr__("amethod") 122 | assert res() is None 123 | -------------------------------------------------------------------------------- /test/test_extended.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | import gc 4 | from time import time 5 | from rdflib.namespace import Namespace 6 | from rdflib import Graph, RDF, RDFS, FOAF, OWL 7 | from rdflib.term import URIRef 8 | import cProfile 9 | 10 | REDO_FROM_START = True 11 | 12 | ukparl = f"""{os.path.join(os.path.dirname(__file__), 'ukparl')}""" 13 | ukppdbpath = os.path.join(ukparl, "ukparl-working") 14 | doacc = f"""{os.path.join(os.path.dirname(__file__), 'doacc')}""" 15 | doaccdbpath = os.path.join(ukparl, "doacc-working") 16 | graph = Graph("LevelDB", URIRef("http://rdflib.net")) 17 | 18 | # doacc_tbox = "https://raw.githubusercontent.com/DOACC/doacc/master/doacc.owl" 19 | # doacc_abox = 
"https://raw.githubusercontent.com/DOACC/individuals/master/cryptocurrency.nt" 20 | 21 | doacc_tbox = f"""{os.path.join(doacc, 'doacc.owl')}""" 22 | doacc_abox = f"""{os.path.join(doacc, 'cryptocurrency.nt')}""" 23 | 24 | 25 | def create_database_from_memorygraph(): 26 | graph.open(ukppdbpath, create=True) 27 | memgraph = Graph("Memory", URIRef("http://rdflib.net")) 28 | 29 | gcold = gc.isenabled() 30 | gc.collect() 31 | gc.disable() 32 | 33 | t0 = time() 34 | memgraph.parse( 35 | f"""{os.path.join(ukparl, 'ukparl-tbox.xml')}""", format="xml" 36 | ) 37 | memgraph.parse( 38 | f"""{os.path.join(ukparl, 'ukparl-abox.xml')}""", format="xml" 39 | ) 40 | t1 = time() 41 | print(f"Parse time: {t1 - t0:.3f}s") # Parse time: 10.284s 42 | 43 | t0 = time() 44 | for triple in memgraph.triples((None, None, None)): 45 | graph.add(triple) 46 | t1 = time() 47 | assert len(graph) == 113545, len(graph) 48 | print(f"Number of triples loaded {len(graph)}") 49 | memgraph.close() 50 | graph.close() 51 | print(f"Add to graph: {t1 - t0:.3f}s") # Add to graph: 9.481s 52 | if gcold: 53 | gc.enable() 54 | 55 | 56 | def create_database_from_parse(): 57 | graph.open(ukppdbpath, create=True) 58 | 59 | gcold = gc.isenabled() 60 | gc.collect() 61 | gc.disable() 62 | 63 | t0 = time() 64 | graph.parse(f"""{os.path.join(ukparl, 'ukparl-tbox.xml')}""", format="xml") 65 | graph.parse(f"""{os.path.join(ukparl, 'ukparl-abox.xml')}""", format="xml") 66 | t1 = time() 67 | assert len(graph) == 113545, len(graph) 68 | graph.close() 69 | print(f"Load into to graph: {t1 - t0:.3f}s") # Load into to graph: 17.815s 70 | if gcold: 71 | gc.enable() 72 | 73 | 74 | def query_database_from_parse(): 75 | graph.open(ukppdbpath, create=False) 76 | 77 | gcold = gc.isenabled() 78 | gc.collect() 79 | gc.disable() 80 | 81 | classquery = """prefix owl: 82 | prefix rdfs: 83 | 84 | SELECT DISTINCT ?class ?label ?description 85 | WHERE { 86 | ?class a owl:Class. 
87 | OPTIONAL { ?class rdfs:label ?label} 88 | OPTIONAL { ?class rdfs:comment ?description} 89 | } 90 | """ 91 | t0 = time() 92 | r = graph.query(classquery) 93 | t1 = time() 94 | print(len(r)) 95 | print(f"Run SPARQL class query: {t1 - t0:.5f}s") # Run SPARQL query: 96 | 97 | triplesquery = """prefix rdfs: 98 | prefix owl: 99 | 100 | SELECT ?subject ?predicate ?object 101 | WHERE { 102 | ?subject ?predicate ?object 103 | } 104 | LIMIT 100000 105 | """ 106 | t0 = time() 107 | r = graph.query(triplesquery) 108 | t1 = time() 109 | print(len(r)) 110 | print(f"Run SPARQL triples query: {t1 - t0:.5f}s") # Run SPARQL query: 111 | if gcold: 112 | gc.enable() 113 | 114 | 115 | def reload_database(): 116 | gcold = gc.isenabled() 117 | gc.collect() 118 | gc.disable() 119 | t0 = time() 120 | graph.open(ukppdbpath, create=False) 121 | t1 = time() 122 | assert len(graph) == 113545, len(graph) 123 | # with open(os.path.join(ukparl, "ukparl-working.n3"), "w") as fp: 124 | # fp.write(graph.serialize(format="n3")) 125 | graph.close() 126 | print( 127 | f"Read from persistence: {t1 - t0:.3f}s" 128 | ) # Read from persistence: 0.017s 129 | if gcold: 130 | gc.enable() 131 | 132 | 133 | def query_database(): 134 | graph.open(ukppdbpath, create=False) 135 | assert len(graph) == 113545, len(graph) 136 | 137 | # ukparl = Namespace(URIRef("http://bel-epa.com/ont/2007/6/ukpp.owl#")) 138 | # foaf = Namespace( 139 | # URIRef("http://daml.umbc.edu/ontologies/cobra/0.4/foaf-basic#") 140 | # ) 141 | # graph.bind("ukparl", str(ukparl)) 142 | 143 | # things = {} 144 | # for subj, pred, obj in graph.triples((None, RDF.type, None)): 145 | # if "ukpp" in obj: 146 | # thing = obj.n3().split("#")[-1][:-1] 147 | # if obj in things: 148 | # things[thing] += 1 149 | # else: 150 | # things[thing] = 1 151 | 152 | # print(pformat(sorted(list(things.keys())), compact=True)) 153 | 154 | # ['Area', 'Constituency', 'Department', 'HouseOfCommons', 'HouseOfLords', 155 | # 'LordOfParliament', 
'LordOfParliamentRole', 'MemberOfParliament', 156 | # 'MemberOfParliamentRole', 'ParliamentaryRole', 'PartyAffiliation', 157 | # 'Region', 'UKGBNIParliament', 'UKParliament', 'UKPoliticalParty'] 158 | 159 | # things = {} 160 | # for pred in graph.predicates(): 161 | # if "ukpp" in pred: 162 | # thing = pred.n3().split("#")[-1][:-1] 163 | # if pred in things: 164 | # things[thing] += 1 165 | # else: 166 | # things[thing] = 1 167 | 168 | # print(pformat(sorted(list(things.keys())), compact=True)) 169 | 170 | # ['abolitionDate', 'abolitionFromDate', 'abolitionToDate', 'area', 'assembled', 171 | # 'context', 'country', 'countyName', 'dbpediaEntry', 'dissolved', 'duration', 172 | # 'elected', 'end', 'endingDate', 'establishedDate', 'familyName', 'foreNames', 173 | # 'foreNamesInFull', 'fromDate', 'fromWhy', 'givenName', 'hasConstituency', 174 | # 'hasMemberOfParliament', 'lordName', 'lordOfName', 'lordOfNameInFull', 175 | # 'majorityInSeat', 'name', 'note', 'number', 'parliament_number', 176 | # 'parliamentaryRole', 'party', 'partyAffiliation', 'peerageType', 177 | # 'prime_minister', 'region', 'reign', 'roleTaken', 'sessions', 'speaker', 178 | # 'start', 'startingDate', 'summoned', 'swingToLoseSeat', 'toDate', 'toWhy', 179 | # 'wikipediaEntry'] 180 | 181 | for s, o in list( 182 | set( 183 | graph.subject_objects( 184 | predicate=URIRef( 185 | "http://bel-epa.com/ont/2007/6/ukpp.owl#familyName" 186 | ) 187 | # predicate=ukparl.party 188 | ) 189 | ) 190 | )[:12]: 191 | print(f"{s.n3(), o.value}") 192 | 193 | # for s, p, o in graph.triples((None, OWL.Subclass, ukparl.party)): 194 | # print(f"{s} is a party") 195 | 196 | # for s, p, o in graph.triples( 197 | # ( 198 | # URIRef("http://bel-epa.com/ont/2007/6/ukpp.owl#ukpp-member-1"), 199 | # RDF.type, 200 | # ukparl.MemberOfParliament, 201 | # ) 202 | # ): 203 | # print(f"{s} is a person") 204 | 205 | # for s, p, o in graph.triples( 206 | # ( 207 | # URIRef("http://bel-epa.com/ont/2007/6/ukpp.owl#ukpp-member-1"), 208 | # 
DFOAF.name, # RDFS.label, 209 | # None, 210 | # ) 211 | # ): 212 | # print(f"{o}") 213 | 214 | graph.close() 215 | 216 | 217 | def read_doacc_into_memory_and_create_database(): 218 | graph.open(doaccdbpath, create=True) 219 | memgraph = Graph("Memory", URIRef("http://rdflib.net")) 220 | 221 | gcold = gc.isenabled() 222 | gc.collect() 223 | gc.disable() 224 | 225 | t0 = time() 226 | memgraph.parse(doacc_tbox, format="xml") 227 | memgraph.parse(doacc_abox, format="nt") 228 | t1 = time() 229 | print(f"Parse time: {t1 - t0:.3f}s") # Parse time: 4.796s 230 | 231 | t0 = time() 232 | for triple in memgraph.triples((None, None, None)): 233 | graph.add(triple) 234 | t1 = time() 235 | assert len(graph) == 45498, len(graph) 236 | print(f"no of doacc triples {len(graph)}") 237 | memgraph.close() 238 | graph.close() 239 | print(f"Add to graph: {t1 - t0:.3f}s") # Add to graph: 2.896s 240 | if gcold: 241 | gc.enable() 242 | 243 | 244 | def read_doacc(): 245 | graph.open(doaccdbpath, create=True) 246 | 247 | gcold = gc.isenabled() 248 | gc.collect() 249 | gc.disable() 250 | 251 | t0 = time() 252 | graph.parse(doacc_tbox, format="xml") 253 | graph.parse(doacc_abox, format="xml") 254 | t1 = time() 255 | # assert len(graph) == 113545, len(graph) 256 | print(f"no of doacc triples {len(graph)}") 257 | graph.close() 258 | print(f"Load into to graph: {t1 - t0:.3f}s") # Load into to graph: 17.815s 259 | if gcold: 260 | gc.enable() 261 | 262 | 263 | def show_doacc(): 264 | from rdflib.extras.visualizegraph import visualize_graph 265 | 266 | graph = Graph() 267 | graph.parse(doacc_tbox, format="xml") 268 | visualize_graph(graph, "DOACC", shortMode=True, format1="png") 269 | 270 | 271 | def show_ukparl(): 272 | from rdflib.extras.visualizegraph import visualize_graph 273 | 274 | graph = Graph() 275 | graph.parse(os.path.join(ukparl, "ukparl-tbox.xml"), format="xml") 276 | visualize_graph(graph, "DOACC", shortMode=True, format1="png") 277 | 278 | 279 | if __name__ == "__main__": 280 | if 
REDO_FROM_START is True: 281 | import shutil 282 | 283 | if not os.path.exists(ukppdbpath): 284 | create_database_from_memorygraph() 285 | 286 | reload_database() 287 | shutil.rmtree(ukppdbpath) 288 | 289 | # with cProfile.Profile() as pr: 290 | # create_leveldb_database_from_parse() 291 | # pr.print_stats() 292 | 293 | # reload_database() 294 | # query_database() 295 | # read_doacc_into_memory_and_create_database() 296 | # show_ukparl() 297 | # with cProfile.Profile() as pr: 298 | # # create_database_from_parse() 299 | # query_database_from_parse() 300 | # pr.print_stats() 301 | -------------------------------------------------------------------------------- /test/test_functionality.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import pytest 3 | import os 4 | import re 5 | import tempfile 6 | from rdflib import BNode, Literal, RDF, RDFS, URIRef, Variable 7 | from rdflib.graph import ConjunctiveGraph, Graph, QuotedGraph 8 | import logging 9 | 10 | logging.basicConfig(level=logging.ERROR, format="%(message)s") 11 | logger = logging.getLogger(__name__) 12 | logger.setLevel(logging.DEBUG) 13 | 14 | 15 | storename = "LevelDB" 16 | storetest = True 17 | 18 | implies = URIRef("http://www.w3.org/2000/10/swap/log#implies") 19 | 20 | testN3 = """\ 21 | @prefix rdf: . 22 | @prefix rdfs: . 23 | @prefix : . 24 | {:a :b :c;a :foo} => {:a :d :c,?y} . 25 | _:foo a rdfs:Class . 26 | :a :d :c . 
27 | """ 28 | 29 | michel = URIRef("urn:michel") 30 | tarek = URIRef("urn:tarek") 31 | bob = URIRef("urn:bob") 32 | likes = URIRef("urn:likes") 33 | hates = URIRef("urn:hates") 34 | pizza = URIRef("urn:pizza") 35 | cheese = URIRef("urn:cheese") 36 | 37 | graphuri = URIRef("urn:graph") 38 | othergraphuri = URIRef("urn:othergraph") 39 | 40 | 41 | create = True 42 | reuse = False 43 | 44 | 45 | @pytest.fixture 46 | def getgraph(): 47 | graph = ConjunctiveGraph(store="LevelDB") 48 | path = os.path.join(tempfile.gettempdir(), "test_leveldb") 49 | if os.path.exists(path): 50 | if os.path.isdir(path): 51 | import shutil 52 | 53 | shutil.rmtree(path) 54 | elif len(path.split(":")) == 1: 55 | os.unlink(path) 56 | else: 57 | os.remove(path) 58 | 59 | graph.open(path, create=create) 60 | yield graph 61 | 62 | graph.close() 63 | 64 | if not reuse: 65 | graph.store.destroy(configuration=path) 66 | 67 | 68 | def testSimpleGraph(getgraph): 69 | graph = getgraph 70 | g = graph.get_context(graphuri) 71 | g.add((tarek, likes, pizza)) 72 | g.add((bob, likes, pizza)) 73 | g.add((bob, likes, cheese)) 74 | 75 | g2 = graph.get_context(othergraphuri) 76 | g2.add((michel, likes, pizza)) 77 | 78 | assert len(g) == 3 # "graph contains 3 triples") 79 | assert len(g2) == 1 # "other graph contains 1 triple") 80 | 81 | r = g.query("SELECT * WHERE { ?s . }") 82 | assert len(list(r)) == 2 # "two people like pizza") 83 | 84 | r = g.triples((None, likes, pizza)) 85 | assert len(list(r)) == 2 # "two people like pizza") 86 | 87 | # Test initBindings 88 | r = g.query( 89 | "SELECT * WHERE { ?s . 
}", 90 | initBindings={"s": tarek}, 91 | ) 92 | assert len(list(r)) == 1 # "i was asking only about tarek") 93 | 94 | r = g.triples((tarek, likes, pizza)) 95 | assert len(list(r)) == 1 # "i was asking only about tarek") 96 | 97 | r = g.triples((tarek, likes, cheese)) 98 | assert len(list(r)) == 0 # "tarek doesn't like cheese") 99 | 100 | g2.add((tarek, likes, pizza)) 101 | g.remove((tarek, likes, pizza)) 102 | r = g.query("SELECT * WHERE { ?s . }") 103 | 104 | 105 | def testConjunctiveDefault(getgraph): 106 | graph = getgraph 107 | g = graph.get_context(graphuri) 108 | g.add((tarek, likes, pizza)) 109 | g2 = graph.get_context(othergraphuri) 110 | g2.add((bob, likes, pizza)) 111 | g.add((tarek, hates, cheese)) 112 | 113 | assert len(g) == 2 # "graph contains 2 triples") 114 | 115 | # the following are actually bad tests as they depend on your endpoint, 116 | # as pointed out in the sparqlstore.py code: 117 | # 118 | # # For ConjunctiveGraphs, reading is done from the "default graph" Exactly 119 | # # what this means depends on your endpoint, because SPARQL does not offer a 120 | # # simple way to query the union of all graphs as it would be expected for a 121 | # # ConjuntiveGraph. 122 | # # 123 | # # Fuseki/TDB has a flag for specifying that the default graph 124 | # # is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config). 125 | assert ( 126 | len(graph) == 3 127 | ) # "default union graph should contain three triples but contains:\n" "%s" % list(graph), 128 | 129 | r = graph.query("SELECT * WHERE { ?s . }") 130 | assert len(list(r)) == 2 # "two people like pizza") 131 | 132 | r = graph.query( 133 | "SELECT * WHERE { ?s . 
}", 134 | initBindings={"s": tarek}, 135 | ) 136 | assert len(list(r)) == 1 # "i was asking only about tarek") 137 | 138 | r = graph.triples((tarek, likes, pizza)) 139 | assert len(list(r)) == 1 # "i was asking only about tarek") 140 | 141 | r = graph.triples((tarek, likes, cheese)) 142 | assert len(list(r)) == 0 # "tarek doesn't like cheese") 143 | 144 | g2.remove((bob, likes, pizza)) 145 | 146 | r = graph.query("SELECT * WHERE { ?s . }") 147 | assert len(list(r)) == 1 # "only tarek likes pizza") 148 | 149 | 150 | def testUpdate(getgraph): 151 | graph = getgraph 152 | graph.update( 153 | "INSERT DATA { GRAPH { . } }" 154 | ) 155 | 156 | g = graph.get_context(graphuri) 157 | assert len(g) == 1 # "graph contains 1 triples") 158 | 159 | 160 | def testUpdateWithInitNs(getgraph): 161 | graph = getgraph 162 | graph.update( 163 | "INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }", 164 | initNs={"ns": URIRef("urn:")}, 165 | ) 166 | 167 | g = graph.get_context(graphuri) 168 | assert set(g.triples((None, None, None))) == set( 169 | [(michel, likes, pizza)] 170 | ) # "only michel likes pizza" 171 | 172 | 173 | def testUpdateWithInitBindings(getgraph): 174 | graph = getgraph 175 | graph.update( 176 | "INSERT { GRAPH { ?a ?b ?c . } } WherE { }", 177 | initBindings={ 178 | "a": URIRef("urn:michel"), 179 | "b": URIRef("urn:likes"), 180 | "c": URIRef("urn:pizza"), 181 | }, 182 | ) 183 | 184 | g = graph.get_context(graphuri) 185 | assert set(g.triples((None, None, None))) == set( 186 | [(michel, likes, pizza)] 187 | ) # only michel likes pizza" 188 | 189 | 190 | def testMultipleUpdateWithInitBindings(getgraph): 191 | graph = getgraph 192 | graph.update( 193 | "INSERT { GRAPH { ?a ?b ?c . } } WHERE { };" 194 | "INSERT { GRAPH { ?d ?b ?c . 
} } WHERE { }", 195 | initBindings={ 196 | "a": URIRef("urn:michel"), 197 | "b": URIRef("urn:likes"), 198 | "c": URIRef("urn:pizza"), 199 | "d": URIRef("urn:bob"), 200 | }, 201 | ) 202 | 203 | g = graph.get_context(graphuri) 204 | assert set(g.triples((None, None, None))) == set( 205 | [(michel, likes, pizza), (bob, likes, pizza)] 206 | ) # "michel and bob like pizza", 207 | 208 | 209 | def testNamedGraphUpdate(getgraph): 210 | graph = getgraph 211 | g = graph.get_context(graphuri) 212 | r1 = "INSERT DATA { }" 213 | g.update(r1) 214 | assert set(g.triples((None, None, None))) == set( 215 | [(michel, likes, pizza)] 216 | ) # "only michel likes pizza" 217 | 218 | r2 = ( 219 | "DELETE { } " 220 | + "INSERT { } WHERE {}" 221 | ) 222 | g.update(r2) 223 | assert set(g.triples((None, None, None))) == set( 224 | [(bob, likes, pizza)] 225 | ) # "only bob likes pizza", 226 | 227 | says = URIRef("urn:says") 228 | 229 | # Strings with unbalanced curly braces 230 | tricky_strs = [ 231 | "With an unbalanced curly brace %s " % brace for brace in ["{", "}"] 232 | ] 233 | for tricky_str in tricky_strs: 234 | r3 = ( 235 | """INSERT { ?b "%s" } 236 | WHERE { ?b } """ 237 | % tricky_str 238 | ) 239 | g.update(r3) 240 | 241 | values = set() 242 | for v in g.objects(bob, says): 243 | values.add(str(v)) 244 | assert values == set(tricky_strs) 245 | 246 | # Complicated Strings 247 | r4strings = [] 248 | r4strings.append(r'''"1: adfk { ' \\\" \" { "''') 249 | r4strings.append(r'''"2: adfk } #éï \\"''') 250 | 251 | r4strings.append(r"""'3: adfk { " \\\' \' { '""") 252 | r4strings.append(r"""'4: adfk } #éï \\'""") 253 | 254 | r4strings.append(r'''"""5: adfk { ' \\\" \" { """''') 255 | r4strings.append(r'''"""6: adfk } #éï \\"""''') 256 | r4strings.append('"""7: ad adsfj \n { \n sadfj"""') 257 | 258 | r4strings.append(r"""'''8: adfk { " \\\' \' { '''""") 259 | r4strings.append(r"""'''9: adfk } #éï \\'''""") 260 | r4strings.append("'''10: ad adsfj \n { \n sadfj'''") 261 | 262 | r4 = "\n".join( 
263 | ["INSERT DATA { %s } ;" % s for s in r4strings] 264 | ) 265 | g.update(r4) 266 | values = set() 267 | for v in g.objects(michel, says): 268 | values.add(str(v)) 269 | assert values == set( 270 | [ 271 | re.sub( 272 | r"\\(.)", 273 | r"\1", 274 | re.sub(r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s), 275 | ) 276 | for s in r4strings 277 | ] 278 | ) 279 | 280 | # IRI Containing ' or # 281 | # The fragment identifier must not be misinterpreted as a comment 282 | # (commenting out the end of the block). 283 | # The ' must not be interpreted as the start of a string, causing the } 284 | # in the literal to be identified as the end of the block. 285 | r5 = """INSERT DATA { , "'}" }""" 286 | 287 | g.update(r5) 288 | values = set() 289 | for v in g.objects(michel, hates): 290 | values.add(str(v)) 291 | assert values == set(["urn:foo'bar?baz;a=1&b=2#fragment", "'}"]) 292 | 293 | # Comments 294 | r6 = """ 295 | INSERT DATA { 296 | . # No closing brace: } 297 | . 298 | } 299 | #Final { } comment""" 300 | 301 | g.update(r6) 302 | values = set() 303 | for v in g.objects(bob, hates): 304 | values.add(v) 305 | assert values == set([bob, michel]) 306 | 307 | 308 | def testNamedGraphUpdateWithInitBindings(getgraph): 309 | graph = getgraph 310 | g = graph.get_context(graphuri) 311 | r = "INSERT { ?a ?b ?c } WHERE {}" 312 | g.update(r, initBindings={"a": michel, "b": likes, "c": pizza}) 313 | assert set(g.triples((None, None, None))) == set( 314 | [(michel, likes, pizza)] 315 | ) # "only michel likes pizza", 316 | 317 | 318 | def testEmptyLiteral(getgraph): 319 | graph = getgraph 320 | # test for https://github.com/RDFLib/rdflib/issues/457 321 | # also see test_issue457.py which is sparql store independent! 
322 | g = graph.get_context(graphuri) 323 | g.add( 324 | ( 325 | URIRef("http://example.com/s"), 326 | URIRef("http://example.com/p"), 327 | Literal(""), 328 | ) 329 | ) 330 | 331 | o = tuple(g)[0][2] 332 | assert o == Literal(""), repr(o) 333 | 334 | 335 | def testN3Store(getgraph): 336 | g = getgraph 337 | g.parse(data=testN3, format="n3") 338 | formulaA = BNode() 339 | formulaB = BNode() 340 | for s, p, o in g.triples((None, implies, None)): 341 | formulaA = s 342 | formulaB = o 343 | 344 | assert type(formulaA) == QuotedGraph and type(formulaB) == QuotedGraph 345 | a = URIRef("http://test/a") 346 | b = URIRef("http://test/b") 347 | c = URIRef("http://test/c") 348 | d = URIRef("http://test/d") 349 | v = Variable("y") 350 | 351 | universe = ConjunctiveGraph(g.store) 352 | 353 | # test formula as terms 354 | assert len(list(universe.triples((formulaA, implies, formulaB)))) == 1 355 | 356 | # test variable as term and variable roundtrip 357 | assert len(list(formulaB.triples((None, None, v)))) == 1 358 | for s, p, o in formulaB.triples((None, d, None)): 359 | if o != c: 360 | assert isinstance(o, Variable) 361 | assert o == v 362 | s = list(universe.subjects(RDF.type, RDFS.Class))[0] 363 | assert isinstance(s, BNode) 364 | assert len(list(universe.triples((None, implies, None)))) == 1 365 | assert len(list(universe.triples((None, RDF.type, None)))) == 1 366 | assert len(list(formulaA.triples((None, RDF.type, None)))) == 1 367 | assert len(list(formulaA.triples((None, None, None)))) == 2 368 | assert len(list(formulaB.triples((None, None, None)))) == 2 369 | assert len(list(universe.triples((None, None, None)))) == 3 370 | assert ( 371 | len(list(formulaB.triples((None, URIRef("http://test/d"), None)))) == 2 372 | ) 373 | assert ( 374 | len(list(universe.triples((None, URIRef("http://test/d"), None)))) == 1 375 | ) 376 | 377 | # context tests 378 | # test contexts with triple argument 379 | assert len(list(universe.contexts((a, d, c)))) == 1 380 | 381 | # Remove 
test cases 382 | universe.remove((None, implies, None)) 383 | assert len(list(universe.triples((None, implies, None)))) == 0 384 | assert len(list(formulaA.triples((None, None, None)))) == 2 385 | assert len(list(formulaB.triples((None, None, None)))) == 2 386 | 387 | formulaA.remove((None, b, None)) 388 | assert len(list(formulaA.triples((None, None, None)))) == 1 389 | formulaA.remove((None, RDF.type, None)) 390 | assert len(list(formulaA.triples((None, None, None)))) == 0 391 | 392 | universe.remove((None, RDF.type, RDFS.Class)) 393 | 394 | # remove_context tests 395 | universe.remove_context(formulaB) 396 | assert len(list(universe.triples((None, RDF.type, None)))) == 0 397 | assert len(universe) == 1 398 | assert len(formulaB) == 0 399 | 400 | universe.remove((None, None, None)) 401 | assert len(universe) == 0 402 | 403 | 404 | xmltestdoc = """ 405 | 409 | 410 | 411 | 412 | 413 | """ 414 | 415 | n3testdoc = """@prefix : . 416 | 417 | :a :b :c . 418 | """ 419 | 420 | nttestdoc = ( 421 | " .\n" 422 | ) 423 | -------------------------------------------------------------------------------- /test/test_graph.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import tempfile 4 | import shutil 5 | import rdflib 6 | from rdflib_leveldb.leveldbstore import readable_index, NoopMethods 7 | from rdflib.graph import Graph, Literal, URIRef 8 | from rdflib.namespace import XSD, RDFS 9 | from rdflib.store import VALID_STORE, NO_STORE 10 | import logging 11 | 12 | logging.basicConfig(level=logging.ERROR, format="%(message)s") 13 | logger = logging.getLogger(__name__) 14 | logger.setLevel(logging.DEBUG) 15 | 16 | 17 | store = "LevelDB" 18 | storetest = True 19 | path = os.path.join(tempfile.gettempdir(), f"test_{store.lower()}") 20 | 21 | 22 | michel = URIRef("urn:michel") 23 | bob = URIRef("urn:bob") 24 | cheese = URIRef("urn:cheese") 25 | likes = URIRef("urn:likes") 26 | pizza = URIRef("urn:pizza") 27 | uri1 
= URIRef("urn:graph1") 28 | uri2 = URIRef("urn:graph2") 29 | 30 | 31 | @pytest.fixture 32 | def getgraph(): 33 | graph = Graph(store=store) 34 | rt = graph.open(path, create=True) 35 | assert rt == VALID_STORE, "The underlying store is corrupt" 36 | assert ( 37 | len(graph) == 0 38 | ), "There must be zero triples in the graph just after store (file) creation" 39 | data = """ 40 | PREFIX : 41 | 42 | :a :b :c . 43 | :d :e :f . 44 | :d :g :h . 45 | """ 46 | graph.parse(data=data, format="ttl") 47 | assert ( 48 | len(graph) == 3 49 | ), "There must be three triples in the graph after the first data chunk parse" 50 | yield graph 51 | 52 | graph.close() 53 | graph.destroy(configuration=path) 54 | 55 | 56 | def test_create_db(getgraph): 57 | graph = getgraph 58 | graph.add((michel, likes, pizza)) 59 | graph.add((michel, likes, cheese)) 60 | graph.commit() 61 | assert ( 62 | len(graph) == 5 63 | ) # f"There must be three triples in the graph after the first data chunk parse, not {len(graph)}" 64 | 65 | 66 | # def test_dumpdb(getconjunctivegraph): 67 | # logger.debug(graph.store.dumpdb()) 68 | 69 | 70 | def test_escape_quoting(getgraph): 71 | graph = getgraph 72 | assert ( 73 | len(graph) == 3 74 | ), "There must be three triples in the graph after the first data chunk parse" 75 | test_string = "That’s a Literal!!" 
76 | graph.add( 77 | ( 78 | URIRef("http://example.org/foo"), 79 | RDFS.label, 80 | Literal(test_string, datatype=XSD.string), 81 | ) 82 | ) 83 | graph.commit() 84 | assert ("That’s a Literal!!") in graph.serialize(format="xml") 85 | 86 | 87 | def test_namespaces(getgraph): 88 | graph = getgraph 89 | graph.bind("dc", "http://http://purl.org/dc/elements/1.1/") 90 | graph.bind("foaf", "http://xmlns.com/foaf/0.1/") 91 | assert ( 92 | len(list(graph.namespaces())) == 7 93 | ) # f"expected 6, got {len(list(graph.namespaces()))}" 94 | assert ("foaf", URIRef("http://xmlns.com/foaf/0.1/")) in list( 95 | graph.namespaces() 96 | ) 97 | 98 | 99 | def test_readable_index(getgraph): 100 | assert readable_index(111) == "s,p,o" 101 | 102 | 103 | # def test_missing_db_exception(getgraph): 104 | # graph.store.close() 105 | # if getattr(self, "path", False) and path is not None: 106 | # if os.path.exists(path): 107 | # if os.path.isdir(path): 108 | # shutil.rmtree(path) 109 | # elif len(path.split(":")) == 1: 110 | # os.unlink(path) 111 | # else: 112 | # os.remove(path) 113 | # graph.store.open(path, create=True) 114 | # ntriples = graph.triples((None, None, None)) 115 | # assertTrue(len(list(ntriples)) == 0) 116 | 117 | 118 | def test_reopening_db(getgraph): 119 | graph = getgraph 120 | graph.add((michel, likes, pizza)) 121 | graph.add((michel, likes, cheese)) 122 | graph.commit() 123 | graph.store.close() 124 | graph.store.open(path, create=False) 125 | ntriples = graph.triples((None, None, None)) 126 | listntriples = list(ntriples) 127 | assert len(listntriples) == 5 # f"Expected 2 not {len(listntriples)}" 128 | 129 | 130 | def test_reopening_missing_db(getgraph): 131 | graph = getgraph 132 | graph.store.close() 133 | graph.store.destroy() 134 | assert graph.open(path, create=False) == NO_STORE 135 | 136 | 137 | def test_isopen_db(getgraph): 138 | graph = getgraph 139 | assert graph.store.is_open() is True 140 | graph.store.close() 141 | assert graph.store.is_open() is False 142 
| -------------------------------------------------------------------------------- /test/test_graph_and_context.py: -------------------------------------------------------------------------------- 1 | try: 2 | import plyvel 3 | 4 | assert plyvel 5 | except ImportError: 6 | from nose import SkipTest 7 | 8 | raise SkipTest("LevelDB not installed") 9 | 10 | # import unittest 11 | from . import context_case 12 | from . import graph_case 13 | import tempfile 14 | import os 15 | 16 | storename = "LevelDB" 17 | storetest = True 18 | configString = os.path.join(tempfile.gettempdir(), "test_leveldb") 19 | 20 | 21 | # @unittest.skip("WIP") 22 | class LevelDBGraphTestCase(graph_case.GraphTestCase): 23 | store_name = storename 24 | path = configString 25 | storetest = True 26 | 27 | 28 | # @unittest.skip("WIP") 29 | class LevelDBContextTestCase(context_case.ContextTestCase): 30 | store_name = storename 31 | path = configString 32 | storetest = True 33 | -------------------------------------------------------------------------------- /test/test_store.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import pytest 3 | import tempfile 4 | import os 5 | from rdflib import ConjunctiveGraph, URIRef 6 | from rdflib.store import VALID_STORE 7 | 8 | path = os.path.join(tempfile.gettempdir(), "test_leveldb") 9 | 10 | 11 | @pytest.fixture 12 | def getgraph(): 13 | store_name = "LevelDB" 14 | 15 | graph = ConjunctiveGraph(store=store_name) 16 | rt = graph.open(path, create=True) 17 | assert rt == VALID_STORE, "The underlying store is corrupt" 18 | assert ( 19 | len(graph) == 0 20 | ), "There must be zero triples in the graph just after store (file) creation" 21 | data = """ 22 | PREFIX : 23 | 24 | :a :b :c . 25 | :d :e :f . 26 | :d :g :h . 
27 | """ 28 | graph.parse(data=data, format="ttl") 29 | yield graph 30 | 31 | graph.close() 32 | graph.store.destroy(configuration=path) 33 | 34 | 35 | def test_write(getgraph): 36 | graph = getgraph 37 | assert ( 38 | len(graph) == 3 39 | ), "There must be three triples in the graph after the first data chunk parse" 40 | data2 = """ 41 | PREFIX : 42 | 43 | :d :i :j . 44 | """ 45 | graph.parse(data=data2, format="ttl") 46 | assert ( 47 | len(graph) == 4 48 | ), "There must be four triples in the graph after the second data chunk parse" 49 | data3 = """ 50 | PREFIX : 51 | 52 | :d :i :j . 53 | """ 54 | graph.parse(data=data3, format="ttl") 55 | assert ( 56 | len(graph) == 4 57 | ), "There must still be four triples in the graph after the thrd data chunk parse" 58 | 59 | 60 | def test_read(getgraph): 61 | graph = getgraph 62 | sx = None 63 | for s in graph.subjects( 64 | predicate=URIRef("https://example.org/e"), 65 | object=URIRef("https://example.org/f"), 66 | ): 67 | sx = s 68 | assert sx == URIRef("https://example.org/d") 69 | 70 | 71 | def test_sparql_query(getgraph): 72 | graph = getgraph 73 | q = r""" 74 | PREFIX : 75 | 76 | SELECT (COUNT(*) AS ?c) 77 | WHERE { 78 | :d ?p ?o . 79 | }""" 80 | 81 | c = 0 82 | for row in graph.query(q): 83 | c = int(row.c) 84 | assert c == 2, "SPARQL COUNT must return 2" 85 | 86 | 87 | def test_sparql_insert(getgraph): 88 | graph = getgraph 89 | q = r""" 90 | PREFIX : 91 | 92 | INSERT DATA { 93 | :x :y :z . 94 | }""" 95 | 96 | graph.update(q) 97 | assert len(graph) == 4, "After extra triple insert, length must be 4" 98 | 99 | 100 | def test_multigraph(getgraph): 101 | graph = getgraph 102 | q = r""" 103 | PREFIX : 104 | 105 | INSERT DATA { 106 | GRAPH :m { 107 | :x :y :z . 108 | } 109 | GRAPH :n { 110 | :x :y :z . 
111 | } 112 | }""" 113 | 114 | graph.update(q) 115 | 116 | q = """ 117 | SELECT (COUNT(?g) AS ?c) 118 | WHERE { 119 | SELECT DISTINCT ?g 120 | WHERE { 121 | GRAPH ?g { 122 | ?s ?p ?o 123 | } 124 | } 125 | } 126 | """ 127 | c = 0 128 | for row in graph.query(q): 129 | c = int(row.c) 130 | assert c == 3, "SPARQL COUNT must return 3 (default, :m & :n)" 131 | 132 | 133 | def test_open_shut(getgraph): 134 | graph = getgraph 135 | assert len(graph) == 3, "Initially we must have 3 triples from setUp" 136 | graph.close() 137 | graph = None 138 | 139 | # reopen the graph 140 | graph = ConjunctiveGraph("LevelDB") 141 | graph.open(path, create=False) 142 | assert ( 143 | len(graph) == 3 144 | ), "After close and reopen, we should still have the 3 originally added triples" 145 | graph.close() 146 | graph.destroy(configuration=path) 147 | -------------------------------------------------------------------------------- /test/test_store_performance1.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import unittest 3 | import gc 4 | import os 5 | import logging 6 | from time import time 7 | import tempfile 8 | from rdflib import Graph 9 | 10 | logging.basicConfig(level=logging.ERROR, format="%(message)s") 11 | log = logging.getLogger(__name__) 12 | log.setLevel(logging.DEBUG) 13 | 14 | 15 | class StoreTestCase(unittest.TestCase): 16 | """ 17 | Test case for testing store performance... probably should be 18 | something other than a unit test... but for now we'll add it as a 19 | unit test. 
20 | """ 21 | 22 | store = "Memory" 23 | path = None 24 | storetest = True 25 | performancetest = True 26 | 27 | def setUp(self): 28 | self.gcold = gc.isenabled() 29 | gc.collect() 30 | gc.disable() 31 | 32 | self.graph = Graph(store=self.store) 33 | 34 | self.path = os.path.join( 35 | tempfile.gettempdir(), f"test_{self.store.lower()}" 36 | ) 37 | self.graph.open(self.path, create=True) 38 | self.input = Graph() 39 | 40 | def tearDown(self): 41 | self.graph.close() 42 | if self.gcold: 43 | gc.enable() 44 | # TODO: delete a_tmp_dir 45 | self.graph.close() 46 | del self.graph 47 | 48 | # Remove test detritus 49 | if hasattr(self, "path") and self.path is not None: 50 | if os.path.exists(self.path): 51 | if os.path.isdir(self.path): 52 | import shutil 53 | 54 | shutil.rmtree(self.path) 55 | elif len(self.path.split(":")) == 1: 56 | os.unlink(self.path) 57 | else: 58 | os.remove(self.path) 59 | 60 | def testTime(self): 61 | fixturelist = { 62 | "500triples": 691, 63 | "1ktriples": 1285, 64 | "2ktriples": 2006, 65 | "3ktriples": 3095, 66 | "5ktriples": 5223, 67 | "10ktriples": 10303, 68 | "25ktriples": 25161, 69 | "50ktriples": 50168, 70 | } 71 | log.debug(f"{self.store}: ") 72 | for i in fixturelist.keys(): 73 | inputloc = os.getcwd() + f"/test/sp2b/{i}.n3" 74 | # Clean up graphs so that BNodes in input data 75 | # won't create random results 76 | self.input = Graph() 77 | self.graph.remove((None, None, None)) 78 | 79 | res = self._testInput(inputloc) 80 | 81 | log.debug(f"Loaded {len(self.graph):5d} triples in {res.strip()}s") 82 | 83 | self.assertEqual(len(self.graph), fixturelist[i], len(self.graph)) 84 | 85 | # Read triples back into memory from store 86 | self.graph.close() 87 | self.graph.open(self.path, create=False) 88 | 89 | t0 = time() 90 | for _i in self.graph.triples((None, None, None)): 91 | pass 92 | 93 | t1 = time() 94 | log.debug(f"Re-reading: {t1 - t0:.3f}s") 95 | 96 | self.assertEqual( 97 | len(self.graph), sorted(fixturelist.values())[-1], 
len(self.graph) 98 | ) 99 | 100 | # Delete the store by removing triples 101 | t0 = time() 102 | self.graph.remove((None, None, None)) 103 | self.assertEqual(len(self.graph), 0) 104 | t1 = time() 105 | log.debug(f"Deleting : {t1 - t0:.3f}s") 106 | 107 | def _testInput(self, inputloc): 108 | # number = 1 109 | store = self.graph 110 | self.input.parse(location=inputloc, format="n3") 111 | 112 | # def add_from_input(): 113 | # for t in self.input: 114 | # store.add(t) 115 | 116 | # it = itertools.repeat(None, number) 117 | t0 = time() 118 | # for _i in it: 119 | # add_from_input() 120 | # for s in store.subjects(RDF.type, None): 121 | # for t in store.triples((s, None, None)): 122 | # pass 123 | 124 | store.addN(tuple(t) + (store,) for t in self.input) 125 | t1 = time() 126 | return f"{t1 - t0:.3f}" 127 | 128 | 129 | class LevelDBStoreTestCase(StoreTestCase, unittest.TestCase): 130 | store = "LevelDB" 131 | 132 | def setUp(self): 133 | self.store = "LevelDB" 134 | # self.path = mktemp(prefix="testleveldb") 135 | StoreTestCase.setUp(self) 136 | 137 | 138 | class BerkeleyDBStoreTestCase(StoreTestCase, unittest.TestCase): 139 | store = "BerkeleyDB" 140 | 141 | def setUp(self): 142 | try: 143 | import berkeleydb 144 | 145 | assert berkeleydb 146 | except Exception: 147 | return unittest.skip("Skipping BerkeleyDB test, store unavailable") 148 | 149 | self.store = "BerkeleyDB" 150 | # self.path = mktemp(prefix="testbdb") 151 | StoreTestCase.setUp(self) 152 | 153 | 154 | if __name__ == "__main__": 155 | if False: 156 | import cProfile 157 | 158 | cProfile.run("unittest.main()", "profile.out") 159 | else: 160 | unittest.main() 161 | -------------------------------------------------------------------------------- /test/test_store_performance2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import unittest 3 | import gc 4 | import os 5 | import re 6 | import logging 7 | from time import time 8 | import 
tempfile 9 | from rdflib import ConjunctiveGraph, URIRef 10 | 11 | logging.basicConfig(level=logging.ERROR, format="%(message)s") 12 | log = logging.getLogger(__name__) 13 | log.setLevel(logging.DEBUG) 14 | 15 | implies = URIRef("http://www.w3.org/2000/10/swap/log#implies") 16 | 17 | testN3 = """\ 18 | @prefix rdf: . 19 | @prefix rdfs: . 20 | @prefix : . 21 | {:a :b :c;a :foo} => {:a :d :c,?y} . 22 | _:foo a rdfs:Class . 23 | :a :d :c . 24 | """ 25 | 26 | michel = URIRef("urn:michel") 27 | tarek = URIRef("urn:tarek") 28 | bob = URIRef("urn:bob") 29 | likes = URIRef("urn:likes") 30 | hates = URIRef("urn:hates") 31 | pizza = URIRef("urn:pizza") 32 | cheese = URIRef("urn:cheese") 33 | 34 | graphuri = URIRef("urn:graph") 35 | othergraphuri = URIRef("urn:othergraph") 36 | 37 | 38 | class StoreTestCase(unittest.TestCase): 39 | """ 40 | Test case for testing store performance... probably should be 41 | something other than a unit test... but for now we'll add it as a 42 | unit test. 43 | """ 44 | 45 | store = "Memory" 46 | path = None 47 | storetest = True 48 | performancetest = True 49 | 50 | def setUp(self): 51 | self.gcold = gc.isenabled() 52 | gc.collect() 53 | gc.disable() 54 | self.graph = ConjunctiveGraph(store=self.store) 55 | path = os.path.join( 56 | tempfile.gettempdir(), f"test_{self.store.lower()}" 57 | ) 58 | self.path = path 59 | self.graph.open(self.path, create=True) 60 | self.input = ConjunctiveGraph() 61 | 62 | def tearDown(self): 63 | self.graph.close() 64 | if self.gcold: 65 | gc.enable() 66 | # TODO: delete a_tmp_dir 67 | self.graph.close() 68 | del self.graph 69 | 70 | # Remove test detritus 71 | if hasattr(self, "path") and self.path is not None: 72 | if os.path.exists(self.path): 73 | if os.path.isdir(self.path): 74 | import shutil 75 | 76 | shutil.rmtree(self.path) 77 | elif len(self.path.split(":")) == 1: 78 | os.unlink(self.path) 79 | else: 80 | os.remove(self.path) 81 | 82 | # @unittest.skip("WIP") 83 | def testSimpleGraph(self): 84 | t0 = 
time() 85 | g = self.graph.get_context(graphuri) 86 | g.add((tarek, likes, pizza)) 87 | g.add((bob, likes, pizza)) 88 | g.add((bob, likes, cheese)) 89 | 90 | g2 = self.graph.get_context(othergraphuri) 91 | g2.add((michel, likes, pizza)) 92 | 93 | self.assertEqual(3, len(g), "graph contains 3 triples") 94 | self.assertEqual(1, len(g2), "other graph contains 1 triple") 95 | 96 | r = g.query("SELECT * WHERE { ?s . }") 97 | self.assertEqual(2, len(list(r)), "two people like pizza") 98 | 99 | r = g.triples((None, likes, pizza)) 100 | self.assertEqual(2, len(list(r)), "two people like pizza") 101 | 102 | # Test initBindings 103 | r = g.query( 104 | "SELECT * WHERE { ?s . }", 105 | initBindings={"s": tarek}, 106 | ) 107 | self.assertEqual(1, len(list(r)), "i was asking only about tarek") 108 | 109 | r = g.triples((tarek, likes, pizza)) 110 | self.assertEqual(1, len(list(r)), "i was asking only about tarek") 111 | 112 | r = g.triples((tarek, likes, cheese)) 113 | self.assertEqual(0, len(list(r)), "tarek doesn't like cheese") 114 | 115 | g2.add((tarek, likes, pizza)) 116 | g.remove((tarek, likes, pizza)) 117 | r = g.query("SELECT * WHERE { ?s . 
}") 118 | t1 = time() 119 | log.debug(f"testSimpleGraph {self.store}: {t1 - t0:.5f}") 120 | 121 | # @unittest.skip("WIP") 122 | def testConjunctiveDefault(self): 123 | t0 = time() 124 | g = self.graph.get_context(graphuri) 125 | g.add((tarek, likes, pizza)) 126 | g2 = self.graph.get_context(othergraphuri) 127 | g2.add((bob, likes, pizza)) 128 | g.add((tarek, hates, cheese)) 129 | 130 | self.assertEqual(2, len(g), "graph contains 2 triples") 131 | 132 | # the following are actually bad tests as they depend on your endpoint, 133 | # as pointed out in the sparqlstore.py code: 134 | # 135 | # # For ConjunctiveGraphs, reading is done from the "default graph" Exactly 136 | # # what this means depends on your endpoint, because SPARQL does not offer a 137 | # # simple way to query the union of all graphs as it would be expected for a 138 | # # ConjuntiveGraph. 139 | # # 140 | # # Fuseki/TDB has a flag for specifying that the default graph 141 | # # is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config). 142 | self.assertEqual( 143 | 3, 144 | len(self.graph), 145 | "default union graph should contain three triples but contains:\n" 146 | "%s" % list(self.graph), 147 | ) 148 | 149 | r = self.graph.query("SELECT * WHERE { ?s . }") 150 | self.assertEqual(2, len(list(r)), "two people like pizza") 151 | 152 | r = self.graph.query( 153 | "SELECT * WHERE { ?s . }", 154 | initBindings={"s": tarek}, 155 | ) 156 | self.assertEqual(1, len(list(r)), "i was asking only about tarek") 157 | 158 | r = self.graph.triples((tarek, likes, pizza)) 159 | self.assertEqual(1, len(list(r)), "i was asking only about tarek") 160 | 161 | r = self.graph.triples((tarek, likes, cheese)) 162 | self.assertEqual(0, len(list(r)), "tarek doesn't like cheese") 163 | 164 | g2.remove((bob, likes, pizza)) 165 | 166 | r = self.graph.query("SELECT * WHERE { ?s . 
}") 167 | self.assertEqual(1, len(list(r)), "only tarek likes pizza") 168 | t1 = time() 169 | log.debug(f"testConjunctiveDefault {self.store}: {t1 - t0:.5f}") 170 | 171 | # @unittest.skip("WIP") 172 | def testUpdate(self): 173 | t0 = time() 174 | self.graph.update( 175 | "INSERT DATA { GRAPH { . } }" 176 | ) 177 | 178 | g = self.graph.get_context(graphuri) 179 | self.assertEqual(1, len(g), "graph contains 1 triples") 180 | t1 = time() 181 | log.debug(f"testUpdate {self.store}: {t1 - t0:.5f} ") 182 | 183 | # @unittest.skip("WIP") 184 | def testUpdateWithInitNs(self): 185 | t0 = time() 186 | self.graph.update( 187 | "INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }", 188 | initNs={"ns": URIRef("urn:")}, 189 | ) 190 | 191 | g = self.graph.get_context(graphuri) 192 | self.assertEqual( 193 | set(g.triples((None, None, None))), 194 | set([(michel, likes, pizza)]), 195 | "only michel likes pizza", 196 | ) 197 | t1 = time() 198 | log.debug(f"testUpdateWithInitNs {self.store}: {t1 - t0:.5f}") 199 | 200 | # @unittest.skip("WIP") 201 | def testUpdateWithInitBindings(self): 202 | t0 = time() 203 | self.graph.update( 204 | "INSERT { GRAPH { ?a ?b ?c . } } WherE { }", 205 | initBindings={ 206 | "a": URIRef("urn:michel"), 207 | "b": URIRef("urn:likes"), 208 | "c": URIRef("urn:pizza"), 209 | }, 210 | ) 211 | 212 | g = self.graph.get_context(graphuri) 213 | self.assertEqual( 214 | set(g.triples((None, None, None))), 215 | set([(michel, likes, pizza)]), 216 | "only michel likes pizza", 217 | ) 218 | t1 = time() 219 | log.debug(f"testUpdateWithInitBindings {self.store}: {t1 - t0:.5f}") 220 | 221 | # @unittest.skip("WIP") 222 | def testMultipleUpdateWithInitBindings(self): 223 | t0 = time() 224 | self.graph.update( 225 | "INSERT { GRAPH { ?a ?b ?c . } } WHERE { };" 226 | "INSERT { GRAPH { ?d ?b ?c . 
} } WHERE { }", 227 | initBindings={ 228 | "a": URIRef("urn:michel"), 229 | "b": URIRef("urn:likes"), 230 | "c": URIRef("urn:pizza"), 231 | "d": URIRef("urn:bob"), 232 | }, 233 | ) 234 | 235 | g = self.graph.get_context(graphuri) 236 | self.assertEqual( 237 | set(g.triples((None, None, None))), 238 | set([(michel, likes, pizza), (bob, likes, pizza)]), 239 | "michel and bob like pizza", 240 | ) 241 | t1 = time() 242 | log.debug( 243 | f"testMultipleUpdateWithInitBindings {self.store}: {t1 - t0:.5f}" 244 | ) 245 | 246 | # @unittest.skip("WIP") 247 | def testNamedGraphUpdate(self): 248 | t0 = time() 249 | g = self.graph.get_context(graphuri) 250 | r1 = "INSERT DATA { }" 251 | g.update(r1) 252 | self.assertEqual( 253 | set(g.triples((None, None, None))), 254 | set([(michel, likes, pizza)]), 255 | "only michel likes pizza", 256 | ) 257 | 258 | r2 = ( 259 | "DELETE { } " 260 | + "INSERT { } WHERE {}" 261 | ) 262 | g.update(r2) 263 | self.assertEqual( 264 | set(g.triples((None, None, None))), 265 | set([(bob, likes, pizza)]), 266 | "only bob likes pizza", 267 | ) 268 | says = URIRef("urn:says") 269 | 270 | # Strings with unbalanced curly braces 271 | tricky_strs = [ 272 | "With an unbalanced curly brace %s " % brace 273 | for brace in ["{", "}"] 274 | ] 275 | for tricky_str in tricky_strs: 276 | r3 = ( 277 | """INSERT { ?b "%s" } 278 | WHERE { ?b } """ 279 | % tricky_str 280 | ) 281 | g.update(r3) 282 | 283 | values = set() 284 | for v in g.objects(bob, says): 285 | values.add(str(v)) 286 | self.assertEqual(values, set(tricky_strs)) 287 | 288 | # Complicated Strings 289 | r4strings = [] 290 | r4strings.append(r'''"1: adfk { ' \\\" \" { "''') 291 | r4strings.append(r'''"2: adfk } #éï \\"''') 292 | 293 | r4strings.append(r"""'3: adfk { " \\\' \' { '""") 294 | r4strings.append(r"""'4: adfk } #éï \\'""") 295 | 296 | r4strings.append(r'''"""5: adfk { ' \\\" \" { """''') 297 | r4strings.append(r'''"""6: adfk } #éï \\"""''') 298 | r4strings.append('"""7: ad adsfj \n { \n 
sadfj"""') 299 | 300 | r4strings.append(r"""'''8: adfk { " \\\' \' { '''""") 301 | r4strings.append(r"""'''9: adfk } #éï \\'''""") 302 | r4strings.append("'''10: ad adsfj \n { \n sadfj'''") 303 | 304 | r4 = "\n".join( 305 | [ 306 | "INSERT DATA { %s } ;" % s 307 | for s in r4strings 308 | ] 309 | ) 310 | g.update(r4) 311 | values = set() 312 | for v in g.objects(michel, says): 313 | values.add(str(v)) 314 | self.assertEqual( 315 | values, 316 | set( 317 | [ 318 | re.sub( 319 | r"\\(.)", 320 | r"\1", 321 | re.sub( 322 | r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s 323 | ), 324 | ) 325 | for s in r4strings 326 | ] 327 | ), 328 | ) 329 | 330 | # IRI Containing ' or # 331 | # The fragment identifier must not be misinterpreted as a comment 332 | # (commenting out the end of the block). 333 | # The ' must not be interpreted as the start of a string, causing the } 334 | # in the literal to be identified as the end of the block. 335 | r5 = """INSERT DATA { , "'}" }""" 336 | 337 | g.update(r5) 338 | values = set() 339 | for v in g.objects(michel, hates): 340 | values.add(str(v)) 341 | self.assertEqual( 342 | values, set(["urn:foo'bar?baz;a=1&b=2#fragment", "'}"]) 343 | ) 344 | 345 | # Comments 346 | r6 = """ 347 | INSERT DATA { 348 | . # No closing brace: } 349 | . 
350 | } 351 | #Final { } comment""" 352 | 353 | g.update(r6) 354 | values = set() 355 | for v in g.objects(bob, hates): 356 | values.add(v) 357 | self.assertEqual(values, set([bob, michel])) 358 | t1 = time() 359 | log.debug(f"testNamedGraphUpdate {self.store}: {t1 - t0:.5f}") 360 | 361 | # @unittest.skip("WIP") 362 | def testNamedGraphUpdateWithInitBindings(self): 363 | t0 = time() 364 | g = self.graph.get_context(graphuri) 365 | r = "INSERT { ?a ?b ?c } WHERE {}" 366 | g.update(r, initBindings={"a": michel, "b": likes, "c": pizza}) 367 | self.assertEqual( 368 | set(g.triples((None, None, None))), 369 | set([(michel, likes, pizza)]), 370 | "only michel likes pizza", 371 | ) 372 | t1 = time() 373 | log.debug( 374 | f"testNamedGraphUpdateWithInitBindings {self.store}: {t1 - t0:.5f}" 375 | ) 376 | 377 | 378 | class LevelDBStoreTestCase(StoreTestCase, unittest.TestCase): 379 | store = "LevelDB" 380 | 381 | def setUp(self): 382 | self.store = "LevelDB" 383 | # self.path = mktemp(prefix="testleveldb") 384 | StoreTestCase.setUp(self) 385 | 386 | 387 | class BerkeleyDBStoreTestCase(StoreTestCase, unittest.TestCase): 388 | store = "BerkeleyDB" 389 | 390 | def setUp(self): 391 | try: 392 | import berkeleydb 393 | 394 | assert berkeleydb 395 | except Exception: 396 | return unittest.skip("Skipping BerkeleyDB test, store unavailable") 397 | 398 | self.store = "BerkeleyDB" 399 | # self.path = mktemp(prefix="testbdb") 400 | StoreTestCase.setUp(self) 401 | 402 | 403 | if __name__ == "__main__": 404 | if False: 405 | import cProfile 406 | 407 | cProfile.run("unittest.main()", "profile.out") 408 | else: 409 | unittest.main() 410 | -------------------------------------------------------------------------------- /test/test_usecase.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*-import tempfile 2 | import os 3 | import tempfile 4 | from rdflib import URIRef 5 | from rdflib.graph import Graph 6 | 7 | 8 | path = 
path = os.path.join(tempfile.gettempdir(), "test_leveldb")


def test_create_db():
    """Create a fresh LevelDB-backed Graph, check its identity and string
    form, then close and destroy it."""
    # Remove any leftover store from a previous (possibly aborted) run.
    if os.path.exists(path):
        if os.path.isdir(path):
            import shutil

            shutil.rmtree(path)
        elif len(path.split(":")) == 1:
            # Plain filesystem path (no "scheme:" prefix).
            os.unlink(path)
        else:
            os.remove(path)

    graph = Graph("LevelDB", URIRef("http://rdflib.net"))
    graph.open(path, create=True)
    assert repr(graph.identifier) == "rdflib.term.URIRef('http://rdflib.net')"
    # Graph.__str__ renders the identifier in N3 (<...>) followed by the
    # store description; the "<http://rdflib.net>" token restored here had
    # been stripped as an HTML tag in the previous revision.
    assert (
        str(graph)
        == "<http://rdflib.net> a rdfg:Graph;rdflib:storage "
        "[a rdflib:Store;rdfs:label 'LevelDBStore']."
    )
    graph.close()
    graph.destroy(configuration=path)


def test_reuse():
    """A store created once must be re-openable with ``create=False``."""
    graph = Graph("LevelDB", URIRef("http://rdflib.net"))
    graph.open(path, create=True)
    assert repr(graph.identifier) == "rdflib.term.URIRef('http://rdflib.net')"
    graph.close()

    # Re-open the same on-disk store without creating it.
    graph = Graph("LevelDB", URIRef("http://rdflib.net"))
    graph.open(path, create=False)
    assert repr(graph.identifier) == "rdflib.term.URIRef('http://rdflib.net')"
    assert (
        str(graph)
        == "<http://rdflib.net> a rdfg:Graph;rdflib:storage "
        "[a rdflib:Store;rdfs:label 'LevelDBStore']."
    )
    graph.close()
    graph.destroy(configuration=path)


def test_example():
    """End-to-end smoke test: parse a remote RDF document into the store
    and iterate it.

    NOTE(review): this test needs network access, and the expected triple
    count (86) is pinned to the remote document's current content.
    """
    graph = Graph("LevelDB", URIRef("http://rdflib.net"))
    graph.open(path, create=True)
    assert repr(graph.identifier) == "rdflib.term.URIRef('http://rdflib.net')"
    # Parse in an RDF file hosted on the Internet
    graph.parse("http://www.w3.org/People/Berners-Lee/card")

    # Loop through each triple in the graph (subj, pred, obj)
    for subj, pred, obj in graph:
        # Every triple yielded by iteration must also test as contained.
        if (subj, pred, obj) not in graph:
            raise Exception("It better be!")

    assert len(graph) == 86, len(graph)

    # Print out the entire Graph in the RDF Turtle format
    # print(graph.serialize(format="turtle"))
    graph.close()
    graph.destroy(configuration=path)