├── .github ├── dependabot.yml └── workflows │ ├── pypi-publish.yaml │ └── test.yml ├── .gitignore ├── .readthedocs.yaml ├── LICENSE ├── README.md ├── docs ├── Makefile ├── make.bat ├── requirements.txt └── source │ ├── conf.py │ ├── falkordb.asyncio.rst │ ├── falkordb.rst │ ├── index.rst │ └── modules.rst ├── falkordb ├── __init__.py ├── asyncio │ ├── __init__.py │ ├── cluster.py │ ├── falkordb.py │ ├── graph.py │ ├── graph_schema.py │ └── query_result.py ├── cluster.py ├── edge.py ├── exceptions.py ├── execution_plan.py ├── falkordb.py ├── graph.py ├── graph_schema.py ├── helpers.py ├── node.py ├── path.py ├── query_result.py └── sentinel.py ├── pyproject.toml ├── pytest.ini └── tests ├── __init__.py ├── test_async_constraints.py ├── test_async_copy.py ├── test_async_db.py ├── test_async_explain.py ├── test_async_graph.py ├── test_async_indices.py ├── test_async_profile.py ├── test_constraints.py ├── test_copy.py ├── test_db.py ├── test_edge.py ├── test_explain.py ├── test_graph.py ├── test_indices.py ├── test_node.py ├── test_path.py └── test_profile.py /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "pip" 9 | directory: "/" 10 | schedule: 11 | interval: "daily" 12 | 13 | - package-ecosystem: "github-actions" 14 | directory: "/" 15 | schedule: 16 | interval: "weekly" 17 | -------------------------------------------------------------------------------- /.github/workflows/pypi-publish.yaml: -------------------------------------------------------------------------------- 1 | # Run this job on tagging 2 | name: Release to PYPI 3 | on: 4 | push: 5 | tags: 6 | - "v*.*.*" 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - name: Build and publish to pypi 13 | uses: JRubics/poetry-publish@v2.0 14 | with: 15 | pypi_token: ${{ secrets.PYPI_API_TOKEN }} 16 | build_format: "sdist" 17 | ignore_dev_requirements: "yes" 18 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | push: 4 | branches: [main] 5 | pull_request: 6 | 7 | jobs: 8 | test: 9 | name: Test with Python ${{ matrix.python }} 10 | runs-on: ubuntu-latest 11 | 12 | services: 13 | falkordb: 14 | # Docker Hub image 15 | image: falkordb/falkordb:edge 16 | # Map port 6379 on the Docker host to port 6379 on the FalkorDB container 17 | ports: 18 | - 6379:6379 19 | 20 | strategy: 21 | matrix: 22 | python: ['3.8', '3.10', '3.11'] 23 | fail-fast: false 24 | 25 | steps: 26 | - uses: actions/checkout@v4 27 | 28 | - uses: actions/setup-python@v5 29 | with: 30 | python-version: ${{matrix.python}} 31 | 32 | - uses: snok/install-poetry@v1 33 | with: 34 | version: 1.7.1 35 | virtualenvs-create: true 36 | virtualenvs-in-project: true 37 | 38 | - name: Install dependencies 39 | run: poetry 
if: matrix.python == '3.10'
and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![license](https://img.shields.io/github/license/falkordb/falkordb-py.svg)](https://github.com/falkordb/falkordb-py) 2 | [![Release](https://img.shields.io/github/release/falkordb/falkordb-py.svg)](https://github.com/falkordb/falkordb-py/releases/latest) 3 | [![PyPI version](https://badge.fury.io/py/falkordb.svg)](https://badge.fury.io/py/falkordb) 4 | [![Codecov](https://codecov.io/gh/falkordb/falkordb-py/branch/main/graph/badge.svg)](https://codecov.io/gh/falkordb/falkordb-py) 5 | [![Forum](https://img.shields.io/badge/Forum-falkordb-blue)](https://github.com/orgs/FalkorDB/discussions) 6 | [![Discord](https://img.shields.io/discord/1146782921294884966?style=flat-square)](https://discord.gg/ErBEqN9E) 7 | 8 | # falkordb-py 9 | 10 | [![Try 
Free](https://img.shields.io/badge/Try%20Free-FalkorDB%20Cloud-FF8101?labelColor=FDE900&style=for-the-badge&link=https://app.falkordb.cloud)](https://app.falkordb.cloud) 11 | 12 | FalkorDB Python client 13 | 14 | see [docs](http://falkordb-py.readthedocs.io/) 15 | 16 | ## Installation 17 | ```sh 18 | pip install FalkorDB 19 | ``` 20 | 21 | ## Usage 22 | 23 | ### Run FalkorDB instance 24 | Docker: 25 | ```sh 26 | docker run --rm -p 6379:6379 falkordb/falkordb 27 | ``` 28 | Or use [FalkorDB Cloud](https://app.falkordb.cloud) 29 | 30 | ### Synchronous Example 31 | 32 | ```python 33 | from falkordb import FalkorDB 34 | 35 | # Connect to FalkorDB 36 | db = FalkorDB(host='localhost', port=6379) 37 | 38 | # Select the social graph 39 | g = db.select_graph('social') 40 | 41 | # Create 100 nodes and return a handful 42 | nodes = g.query('UNWIND range(0, 100) AS i CREATE (n {v:1}) RETURN n LIMIT 10').result_set 43 | for n in nodes: 44 | print(n) 45 | 46 | # Read-only query the graph for the first 10 nodes 47 | nodes = g.ro_query('MATCH (n) RETURN n LIMIT 10').result_set 48 | 49 | # Copy the Graph 50 | copy_graph = g.copy('social_copy') 51 | 52 | # Delete the Graph 53 | g.delete() 54 | ``` 55 | 56 | ### Asynchronous Example 57 | 58 | ```python 59 | import asyncio 60 | from falkordb.asyncio import FalkorDB 61 | from redis.asyncio import BlockingConnectionPool 62 | 63 | async def main(): 64 | 65 | # Connect to FalkorDB 66 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 67 | db = FalkorDB(connection_pool=pool) 68 | 69 | # Select the social graph 70 | g = db.select_graph('social') 71 | 72 | # Execute query asynchronously 73 | result = await g.query('UNWIND range(0, 100) AS i CREATE (n {v:1}) RETURN n LIMIT 10') 74 | 75 | # Process results 76 | for n in result.result_set: 77 | print(n) 78 | 79 | # Run multiple queries concurrently 80 | tasks = [ 81 | g.query('MATCH (n) WHERE n.v = 1 RETURN count(n) AS count'), 82 | g.query('CREATE (p:Person 
{name: "Alice"}) RETURN p'), 83 | g.query('CREATE (p:Person {name: "Bob"}) RETURN p') 84 | ] 85 | 86 | results = await asyncio.gather(*tasks) 87 | 88 | # Process concurrent results 89 | print(f"Node count: {results[0].result_set[0][0]}") 90 | print(f"Created Alice: {results[1].result_set[0][0]}") 91 | print(f"Created Bob: {results[2].result_set[0][0]}") 92 | 93 | # Close the connection when done 94 | await pool.aclose() 95 | 96 | # Run the async example 97 | if __name__ == "__main__": 98 | asyncio.run(main()) 99 | ``` 100 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | redis 2 | sphinx_rtd_theme==3.0.2 3 | requests>=2.32.2 # not directly required, pinned by Snyk to avoid a vulnerability 4 | urllib3>=2.2.2 # not directly required, pinned by Snyk to avoid a vulnerability 5 | zipp>=3.19.1 # not directly required, pinned by Snyk to avoid a vulnerability 6 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Path setup -------------------------------------------------------------- 7 | 8 | import os 9 | import sys 10 | sys.path.insert(0, os.path.abspath('../..')) 11 | 12 | # -- Project information ----------------------------------------------------- 13 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 14 | 15 | project = 'FalkorDB-py' 16 | copyright = '2023, FalkorDB inc' 17 | author = 'FalkorDB inc' 18 | 19 | # -- General configuration --------------------------------------------------- 20 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 21 | 22 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] 23 | 24 | templates_path = ['_templates'] 25 | exclude_patterns = [] 26 | 27 | 28 | 29 | # -- Options for HTML output ------------------------------------------------- 30 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 31 | 32 | html_theme = 'sphinx_rtd_theme' 33 | html_static_path = ['_static'] 34 | -------------------------------------------------------------------------------- /docs/source/falkordb.asyncio.rst: -------------------------------------------------------------------------------- 1 | falkordb.asyncio package 2 | ======================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | falkordb.asyncio.falkordb module 8 | -------------------------------- 9 | 10 | .. automodule:: falkordb.asyncio.falkordb 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | falkordb.asyncio.graph module 16 | ----------------------------- 17 | 18 | .. automodule:: falkordb.asyncio.graph 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | falkordb.asyncio.graph\_schema module 24 | ------------------------------------- 25 | 26 | .. 
automodule:: falkordb.asyncio.graph_schema 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | falkordb.asyncio.query\_result module 32 | ------------------------------------- 33 | 34 | .. automodule:: falkordb.asyncio.query_result 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | Module contents 40 | --------------- 41 | 42 | .. automodule:: falkordb.asyncio 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | -------------------------------------------------------------------------------- /docs/source/falkordb.rst: -------------------------------------------------------------------------------- 1 | falkordb package 2 | ================ 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | falkordb.asyncio 11 | 12 | Submodules 13 | ---------- 14 | 15 | falkordb.edge module 16 | -------------------- 17 | 18 | .. automodule:: falkordb.edge 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | falkordb.exceptions module 24 | -------------------------- 25 | 26 | .. automodule:: falkordb.exceptions 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | falkordb.execution\_plan module 32 | ------------------------------- 33 | 34 | .. automodule:: falkordb.execution_plan 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | falkordb.falkordb module 40 | ------------------------ 41 | 42 | .. automodule:: falkordb.falkordb 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | falkordb.graph module 48 | --------------------- 49 | 50 | .. automodule:: falkordb.graph 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | falkordb.graph\_schema module 56 | ----------------------------- 57 | 58 | .. automodule:: falkordb.graph_schema 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | falkordb.helpers module 64 | ----------------------- 65 | 66 | .. 
automodule:: falkordb.helpers 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | falkordb.node module 72 | -------------------- 73 | 74 | .. automodule:: falkordb.node 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | falkordb.path module 80 | -------------------- 81 | 82 | .. automodule:: falkordb.path 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | falkordb.query\_result module 88 | ----------------------------- 89 | 90 | .. automodule:: falkordb.query_result 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | Module contents 96 | --------------- 97 | 98 | .. automodule:: falkordb 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. FalkorDB-py documentation master file, created by 2 | sphinx-quickstart on Sun Nov 19 15:57:26 2023. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to FalkorDB-py's documentation! 7 | ======================================= 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | :caption: Contents: 12 | 13 | modules 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`modindex` 20 | * :ref:`search` 21 | -------------------------------------------------------------------------------- /docs/source/modules.rst: -------------------------------------------------------------------------------- 1 | falkordb 2 | ======== 3 | 4 | .. 
# this property is not kept in the connection_kwargs
address_remap=None, 35 | ): 36 | connection_kwargs = conn.connection_pool.connection_kwargs 37 | host = connection_kwargs.pop("host") 38 | port = connection_kwargs.pop("port") 39 | username = connection_kwargs.pop("username") 40 | password = connection_kwargs.pop("password") 41 | 42 | retry = connection_kwargs.pop("retry", None) 43 | retry_on_error = connection_kwargs.pop( 44 | "retry_on_error", 45 | [ 46 | ConnectionRefusedError, 47 | ConnectionError, 48 | TimeoutError, 49 | socket.timeout, 50 | redis_exceptions.ConnectionError, 51 | ], 52 | ) 53 | return RedisCluster( 54 | host=host, 55 | port=port, 56 | username=username, 57 | password=password, 58 | decode_responses=True, 59 | ssl=ssl, 60 | retry=retry, 61 | retry_on_error=retry_on_error, 62 | require_full_coverage=require_full_coverage, 63 | reinitialize_steps=reinitialize_steps, 64 | read_from_replicas=read_from_replicas, 65 | address_remap=address_remap, 66 | startup_nodes=startup_nodes, 67 | cluster_error_retry_attempts=cluster_error_retry_attempts, 68 | ) 69 | -------------------------------------------------------------------------------- /falkordb/asyncio/falkordb.py: -------------------------------------------------------------------------------- 1 | import redis.asyncio as redis 2 | from .cluster import * 3 | from .graph import AsyncGraph 4 | from typing import List, Union 5 | 6 | # config command 7 | LIST_CMD = "GRAPH.LIST" 8 | CONFIG_CMD = "GRAPH.CONFIG" 9 | 10 | class FalkorDB(): 11 | """ 12 | Asynchronous FalkorDB Class for interacting with a FalkorDB server. 
print(person.properties['name'])
kwargs: Additional keyword arguments to pass to the ``redis.from_url`` function. 119 | 120 | Returns: 121 | FalkorDB: A new FalkorDB instance.
122 | 123 | Usage example:: 124 | db = FalkorDB.from_url("falkor://[[username]:[password]]@localhost:6379") 125 | db = FalkorDB.from_url("falkors://[[username]:[password]]@localhost:6379") 126 | db = FalkorDB.from_url("unix://[username@]/path/to/socket.sock?db=0[&password=password]") 127 | """ 128 | 129 | db = cls() 130 | 131 | # switch from redis:// to falkordb:// 132 | if url.startswith('falkor://'): 133 | url = 'redis://' + url[len('falkor://'):] 134 | elif url.startswith('falkors://'): 135 | url = 'rediss://' + url[len('falkors://'):] 136 | 137 | conn = redis.from_url(url, **kwargs) 138 | db.connection = conn 139 | db.flushdb = conn.flushdb 140 | db.execute_command = conn.execute_command 141 | 142 | return db 143 | 144 | def select_graph(self, graph_id: str) -> AsyncGraph: 145 | """ 146 | Selects a graph by creating a new Graph instance. 147 | 148 | Args: 149 | graph_id (str): The identifier of the graph. 150 | 151 | Returns: 152 | AsyncGraph: A new Graph instance associated with the selected graph. 153 | """ 154 | if not isinstance(graph_id, str) or graph_id == "": 155 | raise TypeError(f"Expected a string parameter, but received {type(graph_id)}.") 156 | 157 | return AsyncGraph(self, graph_id) 158 | 159 | async def list_graphs(self) -> List[str]: 160 | """ 161 | Lists all graph names. 162 | See: https://docs.falkordb.com/commands/graph.list.html 163 | 164 | Returns: 165 | List: List of graph names. 166 | 167 | """ 168 | 169 | return await self.connection.execute_command(LIST_CMD) 170 | 171 | async def config_get(self, name: str) -> Union[int, str]: 172 | """ 173 | Retrieve a DB level configuration. 174 | For a list of available configurations see: https://docs.falkordb.com/configuration.html#falkordb-configuration-parameters 175 | 176 | Args: 177 | name (str): The name of the configuration. 178 | 179 | Returns: 180 | int or str: The configuration value. 
181 | 182 | """ 183 | 184 | res = await self.connection.execute_command(CONFIG_CMD, "GET", name) 185 | return res[1] 186 | 187 | async def config_set(self, name: str, value=None) -> None: 188 | """ 189 | Update a DB level configuration. 190 | For a list of available configurations see: https://docs.falkordb.com/configuration.html#falkordb-configuration-parameters 191 | 192 | Args: 193 | name (str): The name of the configuration. 194 | value: The value to set. 195 | 196 | Returns: 197 | None 198 | 199 | """ 200 | 201 | return await self.connection.execute_command(CONFIG_CMD, "SET", name, value) 202 | -------------------------------------------------------------------------------- /falkordb/asyncio/graph.py: -------------------------------------------------------------------------------- 1 | from typing import List, Dict, Optional 2 | from .graph_schema import GraphSchema 3 | from .query_result import QueryResult 4 | 5 | from falkordb.graph import Graph 6 | from falkordb.helpers import quote_string, stringify_param_value 7 | from falkordb.exceptions import SchemaVersionMismatchException 8 | from falkordb.execution_plan import ExecutionPlan 9 | 10 | # procedures 11 | GRAPH_INDEXES = "DB.INDEXES" 12 | GRAPH_LIST_CONSTRAINTS = "DB.CONSTRAINTS" 13 | 14 | # commands 15 | COPY_CMD = "GRAPH.COPY" 16 | QUERY_CMD = "GRAPH.QUERY" 17 | DELETE_CMD = "GRAPH.DELETE" 18 | EXPLAIN_CMD = "GRAPH.EXPLAIN" 19 | SLOWLOG_CMD = "GRAPH.SLOWLOG" 20 | PROFILE_CMD = "GRAPH.PROFILE" 21 | RO_QUERY_CMD = "GRAPH.RO_QUERY" 22 | 23 | 24 | class AsyncGraph(Graph): 25 | """ 26 | Graph, collection of nodes and edges. 27 | """ 28 | 29 | def __init__(self, client, name: str): 30 | """ 31 | Create a new graph. 32 | 33 | Args: 34 | client: The client object. 
# include timeout if specified
98 | timeout (int): Maximum query runtime in milliseconds. 99 | 100 | Returns: 101 | QueryResult: query result set. 102 | 103 | """ 104 | 105 | return await self._query(q, params=params, timeout=timeout, read_only=False) 106 | 107 | async def ro_query(self, q: str, params: Optional[Dict[str, object]] = None, 108 | timeout: Optional[int] = None) -> QueryResult: 109 | """ 110 | Executes a read-only query against the graph. 111 | See: https://docs.falkordb.com/commands/graph.ro_query.html 112 | 113 | Args: 114 | q (str): The query. 115 | params (dict): Query parameters. 116 | timeout (int): Maximum query runtime in milliseconds. 117 | 118 | Returns: 119 | QueryResult: query result set. 120 | 121 | """ 122 | 123 | return await self._query(q, params=params, timeout=timeout, read_only=True) 124 | 125 | async def copy(self, clone: str): 126 | """ 127 | Creates a copy of graph 128 | 129 | Args: 130 | clone (str): Name of cloned graph 131 | 132 | Returns: 133 | AsyncGraph: the cloned graph 134 | """ 135 | 136 | await self.execute_command(COPY_CMD, self._name, clone) 137 | return AsyncGraph(self.client, clone) 138 | 139 | async def delete(self) -> None: 140 | """ 141 | Deletes the graph. 142 | See: https://docs.falkordb.com/commands/graph.delete.html 143 | 144 | Returns: 145 | None 146 | 147 | """ 148 | 149 | self.schema.clear() 150 | return await self.execute_command(DELETE_CMD, self._name) 151 | 152 | async def slowlog(self): 153 | """ 154 | Get a list containing up to 10 of the slowest queries issued 155 | against the graph. 156 | 157 | Each item in the list has the following structure: 158 | 1. a unix timestamp at which the log entry was processed 159 | 2. the issued command 160 | 3. the issued query 161 | 4. the amount of time needed for its execution, in milliseconds. 162 | 163 | See: https://docs.falkordb.com/commands/graph.slowlog.html 164 | 165 | Returns: 166 | List: List of slow log entries. 
167 | 168 | """ 169 | 170 | return await self.execute_command(SLOWLOG_CMD, self._name) 171 | 172 | async def slowlog_reset(self): 173 | """ 174 | Reset the slowlog. 175 | See: https://docs.falkordb.com/commands/graph.slowlog.html 176 | 177 | Returns: 178 | None 179 | 180 | """ 181 | await self.execute_command(SLOWLOG_CMD, self._name, "RESET") 182 | 183 | async def profile(self, query: str, params=None) -> ExecutionPlan: 184 | """ 185 | Execute a query and produce an execution plan augmented with metrics 186 | for each operation's execution. Return an execution plan, 187 | with details on results produced by and time spent in each operation. 188 | See: https://docs.falkordb.com/commands/graph.profile.html 189 | 190 | Args: 191 | query (str): The query to profile. 192 | params (dict): Query parameters. 193 | 194 | Returns: 195 | ExecutionPlan: The profile information. 196 | 197 | """ 198 | 199 | query = self._build_params_header(params) + query 200 | plan = await self.execute_command(PROFILE_CMD, self._name, query) 201 | return ExecutionPlan(plan) 202 | 203 | async def explain(self, query: str, params=None) -> ExecutionPlan: 204 | """ 205 | Get the execution plan for a given query. 206 | GRAPH.EXPLAIN returns an ExecutionPlan object. 207 | See: https://docs.falkordb.com/commands/graph.explain.html 208 | 209 | Args: 210 | query (str): The query for which to get the execution plan. 211 | params (dict): Query parameters. 212 | 213 | Returns: 214 | ExecutionPlan: The execution plan. 215 | 216 | """ 217 | 218 | query = self._build_params_header(params) + query 219 | 220 | plan = await self.execute_command(EXPLAIN_CMD, self._name, query) 221 | return ExecutionPlan(plan) 222 | 223 | # procedures 224 | async def call_procedure(self, procedure: str, read_only: bool = True, 225 | args: Optional[List] = None, 226 | emit: Optional[List[str]] = None) -> QueryResult: 227 | """ 228 | Call a procedure. 229 | 230 | Args: 231 | procedure (str): The procedure to call. 
232 | read_only (bool): Whether the procedure is read-only. 233 | args: Procedure arguments. 234 | emit: Procedure yield. 235 | 236 | Returns: 237 | QueryResult: The result of the procedure call. 238 | 239 | """ 240 | 241 | # make sure strings arguments are quoted 242 | args = args or [] 243 | # args = [quote_string(arg) for arg in args] 244 | 245 | params = None 246 | if(len(args) > 0): 247 | params = {} 248 | # convert arguments to query parameters 249 | # CALL (1) -> CYPHER param_0=1 CALL ($param_0) 250 | for i, arg in enumerate(args): 251 | param_name = f'param{i}' 252 | params[param_name] = arg 253 | args[i] = '$' + param_name 254 | 255 | q = f"CALL {procedure}({','.join(args)})" 256 | 257 | if emit is not None and len(emit) > 0: 258 | q += f"YIELD {','.join(emit)}" 259 | 260 | return await self._query(q, params=params, read_only=read_only) 261 | 262 | # index operations 263 | 264 | async def _drop_index(self, idx_type: str, entity_type: str, label: str, 265 | attribute: str) -> QueryResult: 266 | """Drop a graph index. 267 | 268 | Args: 269 | idx_type (str): The type of index ("RANGE", "FULLTEXT", "VECTOR"). 270 | entity_type (str): The type of entity ("NODE" or "EDGE"). 271 | label (str): The label of the node or edge. 272 | attribute (str): The attribute to drop the index on. 273 | 274 | Returns: 275 | Any: The result of the index dropping query. 
276 | """ 277 | # set pattern 278 | if entity_type == "NODE": 279 | pattern = f"(e:{label})" 280 | elif entity_type == "EDGE": 281 | pattern = f"()-[e:{label}]->()" 282 | else: 283 | raise ValueError("Invalid entity type") 284 | 285 | # build drop index command 286 | if idx_type == "RANGE": 287 | q = f"DROP INDEX FOR {pattern} ON (e.{attribute})" 288 | elif idx_type == "VECTOR": 289 | q = f"DROP VECTOR INDEX FOR {pattern} ON (e.{attribute})" 290 | elif idx_type == "FULLTEXT": 291 | q = f"DROP FULLTEXT INDEX FOR {pattern} ON (e.{attribute})" 292 | else: 293 | raise ValueError("Invalid index type") 294 | 295 | return await self.query(q) 296 | 297 | async def drop_node_range_index(self, label: str, attribute: str) -> QueryResult: 298 | """Drop a range index for a node. 299 | See: https://docs.falkordb.com/commands/graph.query.html#deleting-an-index-for-a-node-label 300 | 301 | Args: 302 | label (str): The label of the node. 303 | attribute (str): The attribute to drop the index on. 304 | 305 | Returns: 306 | Any: The result of the index dropping query. 307 | """ 308 | return await self._drop_index("RANGE", "NODE", label, attribute) 309 | 310 | async def drop_node_fulltext_index(self, label: str, attribute: str) -> QueryResult: 311 | """Drop a full-text index for a node. 312 | See: https://docs.falkordb.com/commands/graph.query.html#deleting-an-index-for-a-node-label 313 | 314 | Args: 315 | label (str): The label of the node. 316 | attribute (str): The attribute to drop the index on. 317 | 318 | Returns: 319 | Any: The result of the index dropping query. 320 | """ 321 | return await self._drop_index("FULLTEXT", "NODE", label, attribute) 322 | 323 | async def drop_node_vector_index(self, label: str, attribute: str) -> QueryResult: 324 | """Drop a vector index for a node. 325 | See: https://docs.falkordb.com/commands/graph.query.html#deleting-an-index-for-a-node-label 326 | 327 | Args: 328 | label (str): The label of the node. 
329 | attribute (str): The attribute to drop the index on. 330 | 331 | Returns: 332 | Any: The result of the index dropping query. 333 | """ 334 | return await self._drop_index("VECTOR", "NODE", label, attribute) 335 | 336 | async def drop_edge_range_index(self, label: str, attribute: str) -> QueryResult: 337 | """Drop a range index for an edge. 338 | See: https://docs.falkordb.com/commands/graph.query.html#deleting-an-index-for-a-relationship-type 339 | 340 | Args: 341 | label (str): The label of the edge. 342 | attribute (str): The attribute to drop the index on. 343 | 344 | Returns: 345 | Any: The result of the index dropping query. 346 | """ 347 | return await self._drop_index("RANGE", "EDGE", label, attribute) 348 | 349 | async def drop_edge_fulltext_index(self, label: str, attribute: str) -> QueryResult: 350 | """Drop a full-text index for an edge. 351 | See: https://docs.falkordb.com/commands/graph.query.html#deleting-an-index-for-a-relationship-type 352 | 353 | Args: 354 | label (str): The label of the edge. 355 | attribute (str): The attribute to drop the index on. 356 | 357 | Returns: 358 | Any: The result of the index dropping query. 359 | """ 360 | return await self._drop_index("FULLTEXT", "EDGE", label, attribute) 361 | 362 | async def drop_edge_vector_index(self, label: str, attribute: str) -> QueryResult: 363 | """Drop a vector index for an edge. 364 | See: https://docs.falkordb.com/commands/graph.query.html#deleting-an-index-for-a-relationship-type 365 | 366 | Args: 367 | label (str): The label of the edge. 368 | attribute (str): The attribute to drop the index on. 369 | 370 | Returns: 371 | Any: The result of the index dropping query. 372 | """ 373 | return await self._drop_index("VECTOR", "EDGE", label, attribute) 374 | 375 | async def list_indices(self) -> QueryResult: 376 | """Retrieve a list of graph indices. 377 | See: https://docs.falkordb.com/commands/graph.query.html#procedures 378 | 379 | Returns: 380 | list: List of graph indices. 
381 | """ 382 | return await self.call_procedure(GRAPH_INDEXES) 383 | 384 | async def _create_typed_index(self, idx_type: str, entity_type: str, label: str, 385 | *properties: List[str], options=None) -> QueryResult: 386 | """Create a typed index for nodes or edges. 387 | 388 | Args: 389 | idx_type (str): The type of index ("RANGE", "FULLTEXT", "VECTOR"). 390 | entity_type (str): The type of entity ("NODE" or "EDGE"). 391 | label (str): The label of the node or edge. 392 | properties: Variable number of property names to be indexed. 393 | options (dict, optional): Additional options for the index. 394 | 395 | Returns: 396 | Any: The result of the index creation query. 397 | """ 398 | if entity_type == "NODE": 399 | pattern = f"(e:{label})" 400 | elif entity_type == "EDGE": 401 | pattern = f"()-[e:{label}]->()" 402 | else: 403 | raise ValueError("Invalid entity type") 404 | 405 | if idx_type == "RANGE": 406 | idx_type = "" 407 | 408 | q = f"CREATE {idx_type} INDEX FOR {pattern} ON (" 409 | q += ",".join(map("e.{0}".format, properties)) 410 | q += ")" 411 | 412 | if options is not None: 413 | # convert options to a Cypher map 414 | options_map = "{" 415 | for key, value in options.items(): 416 | if isinstance(value, str): 417 | options_map += key + ":'" + value + "'," 418 | else: 419 | options_map += key + ':' + str(value) + ',' 420 | options_map = options_map[:-1] + "}" 421 | q += f" OPTIONS {options_map}" 422 | 423 | return await self.query(q) 424 | 425 | async def create_node_range_index(self, label: str, *properties) -> QueryResult: 426 | """Create a range index for a node. 427 | See: https://docs.falkordb.com/commands/graph.query.html#creating-an-index-for-a-node-label 428 | 429 | Args: 430 | label (str): The label of the node. 431 | properties: Variable number of property names to be indexed. 432 | 433 | Returns: 434 | Any: The result of the index creation query. 
435 | """ 436 | res = await self._create_typed_index("RANGE", "NODE", label, *properties) 437 | return res 438 | 439 | async def create_node_fulltext_index(self, label: str, *properties) -> QueryResult: 440 | """Create a full-text index for a node. 441 | See: https://docs.falkordb.com/commands/graph.query.html#creating-a-full-text-index-for-a-node-label 442 | 443 | Args: 444 | label (str): The label of the node. 445 | properties: Variable number of property names to be indexed. 446 | 447 | Returns: 448 | Any: The result of the index creation query. 449 | """ 450 | res = await self._create_typed_index("FULLTEXT", "NODE", label, *properties) 451 | return res 452 | 453 | async def create_node_vector_index(self, label: str, *properties, dim: int = 0, 454 | similarity_function: str = "euclidean") -> QueryResult: 455 | """Create a vector index for a node. 456 | See: https://docs.falkordb.com/commands/graph.query.html#vector-indexing 457 | 458 | Args: 459 | label (str): The label of the node. 460 | properties: Variable number of property names to be indexed. 461 | dim (int, optional): The dimension of the vector. 462 | similarity_function (str, optional): The similarity function for the vector. 463 | 464 | Returns: 465 | Any: The result of the index creation query. 466 | """ 467 | options = {'dimension': dim, 'similarityFunction': similarity_function} 468 | res = await self._create_typed_index("VECTOR", "NODE", label, *properties, options=options) 469 | return res 470 | 471 | async def create_edge_range_index(self, relation: str, *properties) -> QueryResult: 472 | """Create a range index for an edge. 473 | See: https://docs.falkordb.com/commands/graph.query.html#creating-an-index-for-a-relationship-type 474 | 475 | Args: 476 | relation (str): The relation of the edge. 477 | properties: Variable number of property names to be indexed. 478 | 479 | Returns: 480 | Any: The result of the index creation query. 
481 | """ 482 | res = await self._create_typed_index("RANGE", "EDGE", relation, *properties) 483 | return res 484 | 485 | async def create_edge_fulltext_index(self, relation: str, *properties) -> QueryResult: 486 | """Create a full-text index for an edge. 487 | See: https://docs.falkordb.com/commands/graph.query.html#full-text-indexing 488 | 489 | Args: 490 | relation (str): The relation of the edge. 491 | properties: Variable number of property names to be indexed. 492 | 493 | Returns: 494 | Any: The result of the index creation query. 495 | """ 496 | res = await self._create_typed_index("FULLTEXT", "EDGE", relation, *properties) 497 | return res 498 | 499 | async def create_edge_vector_index(self, relation: str, *properties, dim: int = 0, 500 | similarity_function: str = "euclidean") -> QueryResult: 501 | """Create a vector index for an edge. 502 | See: https://docs.falkordb.com/commands/graph.query.html#vector-indexing 503 | 504 | Args: 505 | relation (str): The relation of the edge. 506 | properties: Variable number of property names to be indexed. 507 | dim (int, optional): The dimension of the vector. 508 | similarity_function (str, optional): The similarity function for the vector. 509 | 510 | Returns: 511 | Any: The result of the index creation query. 512 | """ 513 | options = {'dimension': dim, 'similarityFunction': similarity_function} 514 | res = await self._create_typed_index("VECTOR", "EDGE", relation, *properties, options=options) 515 | return res 516 | 517 | async def _create_constraint(self, constraint_type: str, entity_type: str, label: str, *properties): 518 | """ 519 | Create a constraint 520 | """ 521 | 522 | # GRAPH.CONSTRAINT CREATE key constraintType {NODE label | RELATIONSHIP reltype} PROPERTIES propCount prop [prop...] 
523 | return await self.execute_command("GRAPH.CONSTRAINT", "CREATE", self.name, 524 | constraint_type, entity_type, label, 525 | "PROPERTIES", len(properties), *properties) 526 | 527 | async def create_node_unique_constraint(self, label: str, *properties): 528 | """ 529 | Create node unique constraint 530 | See: https://docs.falkordb.com/commands/graph.constraint-create.html 531 | 532 | The constraint is created asynchronously, use list constraints to pull on 533 | constraint creation status 534 | 535 | Note: unique constraints require a the existance of a range index 536 | over the constraint properties, this function will create any missing range indices 537 | 538 | Args: 539 | label (str): Node label to apply constraint to 540 | properties: Variable number of property names to constrain 541 | """ 542 | 543 | # create required range indices 544 | try: 545 | await self.create_node_range_index(label, *properties) 546 | except Exception: 547 | pass 548 | 549 | # create constraint 550 | return await self._create_constraint("UNIQUE", "NODE", label, *properties) 551 | 552 | async def create_edge_unique_constraint(self, relation: str, *properties): 553 | """ 554 | Create edge unique constraint 555 | See: https://docs.falkordb.com/commands/graph.constraint-create.html 556 | 557 | The constraint is created asynchronously, use list constraints to pull on 558 | constraint creation status 559 | 560 | Note: unique constraints require a the existance of a range index 561 | over the constraint properties, this function will create any missing range indices 562 | 563 | Args: 564 | relation (str): Edge relationship-type to apply constraint to 565 | properties: Variable number of property names to constrain 566 | """ 567 | 568 | # create required range indices 569 | try: 570 | await self.create_edge_range_index(relation, *properties) 571 | except Exception: 572 | pass 573 | 574 | return await self._create_constraint("UNIQUE", "RELATIONSHIP", relation, *properties) 575 | 576 | 
async def create_node_mandatory_constraint(self, label: str, *properties): 577 | """ 578 | Create node mandatory constraint 579 | See: https://docs.falkordb.com/commands/graph.constraint-create.html 580 | 581 | The constraint is created asynchronously, use list constraints to pull on 582 | constraint creation status 583 | 584 | Args: 585 | label (str): Node label to apply constraint to 586 | properties: Variable number of property names to constrain 587 | """ 588 | 589 | return await self._create_constraint("MANDATORY", "NODE", label, *properties) 590 | 591 | async def create_edge_mandatory_constraint(self, relation: str, *properties): 592 | """ 593 | Create edge mandatory constraint 594 | See: https://docs.falkordb.com/commands/graph.constraint-create.html 595 | 596 | The constraint is created asynchronously, use list constraints to pull on 597 | constraint creation status 598 | 599 | Args: 600 | relation (str): Edge relationship-type to apply constraint to 601 | properties: Variable number of property names to constrain 602 | """ 603 | return await self._create_constraint("MANDATORY", "RELATIONSHIP", relation, *properties) 604 | 605 | async def _drop_constraint(self, constraint_type: str, entity_type: str, label: str, *properties): 606 | """ 607 | Drops a constraint 608 | 609 | Args: 610 | constraint_type (str): Type of constraint to drop 611 | entity_type (str): Type of entity to drop constraint from 612 | label (str): entity's label / relationship-type 613 | properties: entity's properties to remove constraint from 614 | """ 615 | 616 | return await self.execute_command("GRAPH.CONSTRAINT", "DROP", self.name, 617 | constraint_type, entity_type, label, 618 | "PROPERTIES", len(properties), *properties) 619 | 620 | async def drop_node_unique_constraint(self, label: str, *properties): 621 | """ 622 | Drop node unique constraint 623 | See: https://docs.falkordb.com/commands/graph.constraint-create.html 624 | 625 | Note: the constraint supporting range index is not 
removed 626 | 627 | Args: 628 | label (str): Node label to remove the constraint from 629 | properties: properties to remove constraint from 630 | """ 631 | 632 | # drop constraint 633 | return await self._drop_constraint("UNIQUE", "NODE", label, *properties) 634 | 635 | async def drop_edge_unique_constraint(self, relation: str, *properties): 636 | """ 637 | Drop edge unique constraint 638 | See: https://docs.falkordb.com/commands/graph.constraint-create.html 639 | 640 | Note: the constraint supporting range index is not removed 641 | 642 | Args: 643 | label (str): Edge relationship-type to remove the constraint from 644 | properties: properties to remove constraint from 645 | """ 646 | 647 | return await self._drop_constraint("UNIQUE", "RELATIONSHIP", relation, *properties) 648 | 649 | async def drop_node_mandatory_constraint(self, label: str, *properties): 650 | """ 651 | Drop node mandatory constraint 652 | See: https://docs.falkordb.com/commands/graph.constraint-create.html 653 | 654 | Args: 655 | label (str): Node label to remove the constraint from 656 | properties: properties to remove constraint from 657 | """ 658 | 659 | return await self._drop_constraint("MANDATORY", "NODE", label, *properties) 660 | 661 | async def drop_edge_mandatory_constraint(self, relation: str, *properties): 662 | """ 663 | Drop edge mandatory constraint 664 | See: https://docs.falkordb.com/commands/graph.constraint-create.html 665 | 666 | Args: 667 | label (str): Edge relationship-type to remove the constraint from 668 | properties: properties to remove constraint from 669 | """ 670 | return await self._drop_constraint("MANDATORY", "RELATIONSHIP", relation, *properties) 671 | 672 | async def list_constraints(self) -> [Dict[str, object]]: 673 | """ 674 | Lists graph's constraints 675 | 676 | See: https://docs.falkordb.com/commands/graph.constraint-create.html#listing-constraints 677 | 678 | Returns: 679 | [Dict[str, object]]: list of constraints 680 | """ 681 | 682 | result = (await 
self.call_procedure(GRAPH_LIST_CONSTRAINTS)).result_set 683 | 684 | constraints = [] 685 | for row in result: 686 | constraints.append({"type": row[0], 687 | "label": row[1], 688 | "properties": row[2], 689 | "entitytype": row[3], 690 | "status": row[4]}) 691 | return constraints 692 | 693 | -------------------------------------------------------------------------------- /falkordb/asyncio/graph_schema.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from falkordb.exceptions import SchemaVersionMismatchException 3 | 4 | # procedures 5 | DB_LABELS = "DB.LABELS" 6 | DB_PROPERTYKEYS = "DB.PROPERTYKEYS" 7 | DB_RELATIONSHIPTYPES = "DB.RELATIONSHIPTYPES" 8 | 9 | 10 | class GraphSchema(): 11 | """ 12 | The graph schema. 13 | Maintains the labels, properties and relationships of the graph. 14 | """ 15 | 16 | def __init__(self, graph: 'Graph'): 17 | """ 18 | Initialize the graph schema. 19 | 20 | Args: 21 | graph (Graph): The graph. 22 | 23 | Returns: 24 | GraphSchema: The graph schema. 25 | """ 26 | 27 | self.graph = graph 28 | self.clear() 29 | 30 | def clear(self): 31 | """ 32 | Clear the graph schema. 33 | 34 | Returns: 35 | None 36 | 37 | """ 38 | 39 | self.version = 0 40 | self.labels = [] 41 | self.properties = [] 42 | self.relationships = [] 43 | 44 | async def refresh_labels(self) -> None: 45 | """ 46 | Refresh labels. 47 | 48 | Returns: 49 | None 50 | 51 | """ 52 | 53 | result_set = (await self.graph.call_procedure(DB_LABELS)).result_set 54 | self.labels = [l[0] for l in result_set] 55 | 56 | async def refresh_relations(self) -> None: 57 | """ 58 | Refresh relationship types. 59 | 60 | Returns: 61 | None 62 | 63 | """ 64 | 65 | result_set = (await self.graph.call_procedure(DB_RELATIONSHIPTYPES)).result_set 66 | self.relationships = [r[0] for r in result_set] 67 | 68 | async def refresh_properties(self) -> None: 69 | """ 70 | Refresh property keys. 
71 | 72 | Returns: 73 | None 74 | 75 | """ 76 | 77 | result_set = (await self.graph.call_procedure(DB_PROPERTYKEYS)).result_set 78 | self.properties = [p[0] for p in result_set] 79 | 80 | async def refresh(self, version: int) -> None: 81 | """ 82 | Refresh the graph schema. 83 | 84 | Args: 85 | version (int): The version of the graph schema. 86 | 87 | Returns: 88 | None 89 | 90 | """ 91 | 92 | self.clear() 93 | self.version = version 94 | await self.refresh_labels() 95 | await self.refresh_relations() 96 | await self.refresh_properties() 97 | 98 | async def get_label(self, idx: int) -> str: 99 | """ 100 | Returns a label by its index. 101 | 102 | Args: 103 | idx (int): The index of the label. 104 | 105 | Returns: 106 | str: The label. 107 | 108 | """ 109 | 110 | try: 111 | l = self.labels[idx] 112 | except IndexError: 113 | # refresh labels 114 | await self.refresh_labels() 115 | l = self.labels[idx] 116 | return l 117 | 118 | async def get_relation(self, idx: int) -> str: 119 | """ 120 | Returns a relationship type by its index. 121 | 122 | Args: 123 | idx (int): The index of the relation. 124 | 125 | Returns: 126 | str: The relationship type. 127 | 128 | """ 129 | 130 | try: 131 | r = self.relationships[idx] 132 | except IndexError: 133 | # refresh relationship types 134 | await self.refresh_relations() 135 | r = self.relationships[idx] 136 | return r 137 | 138 | async def get_property(self, idx: int) -> str: 139 | """ 140 | Returns a property by its index. 141 | 142 | Args: 143 | idx (int): The index of the property. 144 | 145 | Returns: 146 | str: The property. 
147 | 148 | """ 149 | 150 | try: 151 | p = self.properties[idx] 152 | except IndexError: 153 | # refresh properties 154 | await self.refresh_properties() 155 | p = self.properties[idx] 156 | return p 157 | -------------------------------------------------------------------------------- /falkordb/asyncio/query_result.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from enum import Enum 3 | from typing import List 4 | from collections import OrderedDict 5 | 6 | from redis import ResponseError 7 | 8 | from falkordb.edge import Edge 9 | from falkordb.node import Node 10 | from falkordb.path import Path 11 | from falkordb.exceptions import SchemaVersionMismatchException 12 | 13 | # statistics 14 | LABELS_ADDED = "Labels added" 15 | LABELS_REMOVED = "Labels removed" 16 | NODES_CREATED = "Nodes created" 17 | NODES_DELETED = "Nodes deleted" 18 | PROPERTIES_SET = "Properties set" 19 | INDICES_CREATED = "Indices created" 20 | INDICES_DELETED = "Indices deleted" 21 | CACHED_EXECUTION = "Cached execution" 22 | PROPERTIES_REMOVED = "Properties removed" 23 | RELATIONSHIPS_DELETED = "Relationships deleted" 24 | RELATIONSHIPS_CREATED = "Relationships created" 25 | INTERNAL_EXECUTION_TIME = "internal execution time" 26 | 27 | STATS = [ 28 | LABELS_ADDED, 29 | NODES_CREATED, 30 | NODES_DELETED, 31 | LABELS_REMOVED, 32 | PROPERTIES_SET, 33 | INDICES_CREATED, 34 | INDICES_DELETED, 35 | CACHED_EXECUTION, 36 | PROPERTIES_REMOVED, 37 | RELATIONSHIPS_CREATED, 38 | RELATIONSHIPS_DELETED, 39 | INTERNAL_EXECUTION_TIME, 40 | ] 41 | 42 | class ResultSetScalarTypes(Enum): 43 | """ 44 | Enumeration representing different scalar types in the query result set. 45 | 46 | Attributes: 47 | VALUE_UNKNOWN (int): Unknown scalar type (0). 48 | VALUE_NULL (int): Null scalar type (1). 49 | VALUE_STRING (int): String scalar type (2). 50 | VALUE_INTEGER (int): Integer scalar type (3). 51 | VALUE_BOOLEAN (int): Boolean scalar type (4). 
52 | VALUE_DOUBLE (int): Double scalar type (5). 53 | VALUE_ARRAY (int): Array scalar type (6). 54 | VALUE_EDGE (int): Edge scalar type (7). 55 | VALUE_NODE (int): Node scalar type (8). 56 | VALUE_PATH (int): Path scalar type (9). 57 | VALUE_MAP (int): Map scalar type (10). 58 | VALUE_POINT (int): Point scalar type (11). 59 | VALUE_VECTORF32 (int): Vector scalar type (12). 60 | """ 61 | 62 | VALUE_UNKNOWN = 0 63 | VALUE_NULL = 1 64 | VALUE_STRING = 2 65 | VALUE_INTEGER = 3 66 | VALUE_BOOLEAN = 4 67 | VALUE_DOUBLE = 5 68 | VALUE_ARRAY = 6 69 | VALUE_EDGE = 7 70 | VALUE_NODE = 8 71 | VALUE_PATH = 9 72 | VALUE_MAP = 10 73 | VALUE_POINT = 11 74 | VALUE_VECTORF32 = 12 75 | 76 | async def __parse_unknown(value, graph): 77 | """ 78 | Parse a value of unknown type. 79 | 80 | Args: 81 | value: The value to parse. 82 | graph: The graph instance. 83 | 84 | Returns: 85 | None 86 | """ 87 | sys.stderr.write("Unknown type\n") 88 | 89 | async def __parse_null(value, graph) -> None: 90 | """ 91 | Parse a null value. 92 | 93 | Args: 94 | value: The null value. 95 | graph: The graph instance. 96 | 97 | Returns: 98 | None: Always returns None. 99 | """ 100 | return None 101 | 102 | async def __parse_string(value, graph) -> str: 103 | """ 104 | Parse the value as a string. 105 | 106 | Args: 107 | value: The value to parse. 108 | graph: The graph instance. 109 | 110 | Returns: 111 | str: The parsed string value. 112 | """ 113 | if isinstance(value, bytes): 114 | return value.decode() 115 | 116 | if not isinstance(value, str): 117 | return str(value) 118 | 119 | return value 120 | 121 | async def __parse_integer(value, graph) -> int: 122 | """ 123 | Parse the integer value from the value. 124 | 125 | Args: 126 | value: The value to parse. 127 | graph: The graph instance. 128 | 129 | Returns: 130 | int: The parsed integer value. 131 | """ 132 | return int(value) 133 | 134 | async def __parse_boolean(value, graph) -> bool: 135 | """ 136 | Parse the value as a boolean. 
137 | 138 | Args: 139 | value: The value to parse. 140 | graph: The graph instance. 141 | 142 | Returns: 143 | bool: The parsed boolean value. 144 | """ 145 | value = value.decode() if isinstance(value, bytes) else value 146 | return value == "true" 147 | 148 | async def __parse_double(value, graph) -> float: 149 | """ 150 | Parse the value as a double. 151 | 152 | Args: 153 | value: The value to parse. 154 | graph: The graph instance. 155 | 156 | Returns: 157 | float: The parsed double value. 158 | """ 159 | return float(value) 160 | 161 | async def __parse_array(value, graph) -> List: 162 | """ 163 | Parse an array of values. 164 | 165 | Args: 166 | value: The array value to parse. 167 | graph: The graph instance. 168 | 169 | Returns: 170 | list: The parsed list of values. 171 | """ 172 | scalar = [await parse_scalar(value[i], graph) for i in range(len(value))] 173 | return scalar 174 | 175 | async def __parse_vectorf32(value, graph) -> List: 176 | """ 177 | Parse a vector32f. 178 | 179 | Args: 180 | value: The vector to parse. 181 | graph: The graph instance. 182 | 183 | Returns: 184 | list: The parsed vector. 185 | """ 186 | 187 | return [float(v) for v in value] 188 | 189 | async def __parse_entity_properties(props, graph): 190 | """ 191 | Parse node/edge properties. 192 | 193 | Args: 194 | props (List): List of properties. 195 | graph: The graph instance. 196 | 197 | Returns: 198 | dict: Dictionary containing parsed properties. 199 | """ 200 | properties = {} 201 | for prop in props: 202 | prop_name = await graph.schema.get_property(prop[0]) 203 | prop_value = await parse_scalar(prop[1:], graph) 204 | properties[prop_name] = prop_value 205 | 206 | return properties 207 | 208 | async def __parse_node(value, graph) -> Node: 209 | """ 210 | Parse the value to a node. 211 | 212 | Args: 213 | value: The value to parse. 214 | graph: The graph instance. 215 | 216 | Returns: 217 | Node: The parsed Node instance. 
218 | """ 219 | node_id = int(value[0]) 220 | labels = None 221 | if len(value[1]) > 0: 222 | labels = [await graph.schema.get_label(inner_label) for inner_label in value[1]] 223 | properties = await __parse_entity_properties(value[2], graph) 224 | return Node(node_id=node_id, alias="", labels=labels, properties=properties) 225 | 226 | async def __parse_edge(value, graph) -> Edge: 227 | """ 228 | Parse the value to an edge. 229 | 230 | Args: 231 | value: The value to parse. 232 | graph: The graph instance. 233 | 234 | Returns: 235 | Edge: The parsed Edge instance. 236 | """ 237 | edge_id = int(value[0]) 238 | relation = await graph.schema.get_relation(value[1]) 239 | src_node_id = int(value[2]) 240 | dest_node_id = int(value[3]) 241 | properties = await __parse_entity_properties(value[4], graph) 242 | return Edge(src_node_id, relation, dest_node_id, edge_id=edge_id, properties=properties) 243 | 244 | async def __parse_path(value, graph) -> Path: 245 | """ 246 | Parse the value to a path. 247 | 248 | Args: 249 | value: The value to parse. 250 | graph: The graph instance. 251 | 252 | Returns: 253 | Path: The parsed Path instance. 254 | """ 255 | nodes = await parse_scalar(value[0], graph) 256 | edges = await parse_scalar(value[1], graph) 257 | return Path(nodes, edges) 258 | 259 | async def __parse_map(value, graph) -> OrderedDict: 260 | """ 261 | Parse the value as a map. 262 | 263 | Args: 264 | value: The value to parse. 265 | graph: The graph instance. 266 | 267 | Returns: 268 | OrderedDict: The parsed OrderedDict. 269 | """ 270 | m = OrderedDict() 271 | n_entries = len(value) 272 | 273 | for i in range(0, n_entries, 2): 274 | key = await __parse_string(value[i], graph) 275 | m[key] = await parse_scalar(value[i + 1], graph) 276 | 277 | return m 278 | 279 | async def __parse_point(value, graph): 280 | """ 281 | Parse the value to point. 282 | 283 | Args: 284 | value: The value to parse. 285 | graph: The graph instance. 
286 | 287 | Returns: 288 | dict: The parsed dictionary representing a point. 289 | """ 290 | p = {"latitude": float(value[0]), "longitude": float(value[1])} 291 | return p 292 | 293 | async def parse_scalar(value, graph): 294 | """ 295 | Parse a scalar value from a value in the result set. 296 | 297 | Args: 298 | value: The value to parse. 299 | graph: The graph instance. 300 | 301 | Returns: 302 | Any: The parsed scalar value. 303 | """ 304 | scalar_type = int(value[0]) 305 | value = value[1] 306 | scalar = await PARSE_SCALAR_TYPES[scalar_type](value, graph) 307 | 308 | return scalar 309 | 310 | 311 | PARSE_SCALAR_TYPES = [ 312 | __parse_unknown, # VALUE_UNKNOWN 313 | __parse_null, # VALUE_NULL 314 | __parse_string, # VALUE_STRING 315 | __parse_integer, # VALUE_INTEGER 316 | __parse_boolean, # VALUE_BOOLEAN 317 | __parse_double, # VALUE_DOUBLE 318 | __parse_array, # VALUE_ARRAY 319 | __parse_edge, # VALUE_EDGE 320 | __parse_node, # VALUE_NODE 321 | __parse_path, # VALUE_PATH 322 | __parse_map, # VALUE_MAP 323 | __parse_point, # VALUE_POINT 324 | __parse_vectorf32 # VALUE_VECTORF32 325 | ] 326 | 327 | class QueryResult: 328 | """ 329 | Represents the result of a query operation on a graph. 330 | """ 331 | def __init__(self, graph): 332 | """ 333 | Initializes a QueryResult instance. 334 | 335 | Args: 336 | graph: The graph on which the query was executed. 337 | """ 338 | 339 | self.graph = graph 340 | self.header = [] 341 | self.result_set = [] 342 | self._raw_stats = [] 343 | 344 | async def parse(self, response): 345 | """ 346 | Parse the response from the server. 347 | 348 | Args: 349 | response: The response from the server. 
350 | """ 351 | 352 | # in case of an error, an exception will be raised 353 | self.__check_for_errors(response) 354 | 355 | if len(response) == 1: 356 | self._raw_stats = response[0] 357 | else: 358 | # start by parsing statistics, matches the one we have 359 | self._raw_stats = response[-1] 360 | await self.__parse_results(response) 361 | 362 | def __check_for_errors(self, response): 363 | """ 364 | Checks if the response contains an error. 365 | 366 | Args: 367 | response: The response from the server. 368 | 369 | Raises: 370 | ResponseError: If an error is encountered. 371 | """ 372 | if isinstance(response[0], ResponseError): 373 | error = response[0] 374 | if str(error) == "version mismatch": 375 | version = response[1] 376 | error = VersionMismatchException(version) 377 | raise error 378 | 379 | # if we encountered a run-time error, the last response 380 | # element will be an exception 381 | if isinstance(response[-1], ResponseError): 382 | raise response[-1] 383 | 384 | async def __parse_results(self, raw_result_set): 385 | """ 386 | Parse the query execution result returned from the server. 387 | 388 | Args: 389 | raw_result_set: The raw result set from the server. 390 | """ 391 | self.header = self.__parse_header(raw_result_set) 392 | 393 | # empty header 394 | if len(self.header) == 0: 395 | return 396 | 397 | self.result_set = await self.__parse_records(raw_result_set) 398 | 399 | def __get_statistics(self, s): 400 | """ 401 | Get the value of a specific statistical metric. 402 | 403 | Args: 404 | s (str): The statistical metric to retrieve. 405 | 406 | Returns: 407 | float: The value of the specified statistical metric. Returns 0 if the metric is not found. 408 | """ 409 | for stat in self._raw_stats: 410 | if s in stat: 411 | return float(stat.split(": ")[1].split(" ")[0]) 412 | 413 | return 0 414 | 415 | def __parse_header(self, raw_result_set): 416 | """ 417 | Parse the header of the result. 
418 | 419 | Args: 420 | raw_result_set: The raw result set from the server. 421 | 422 | Returns: 423 | list: An array of column name/column type pairs. 424 | """ 425 | # an array of column name/column type pairs 426 | header = raw_result_set[0] 427 | return header 428 | 429 | async def __parse_records(self, raw_result_set): 430 | """ 431 | Parses the result set and returns a list of records. 432 | 433 | Args: 434 | raw_result_set: The raw result set from the server. 435 | 436 | Returns: 437 | list: A list of records. 438 | """ 439 | records = [] 440 | for row in raw_result_set[1]: 441 | record = [] 442 | for cell in row: 443 | record.append(await parse_scalar(cell, self.graph)) 444 | records.append(record) 445 | 446 | return records 447 | 448 | @property 449 | def labels_added(self) -> int: 450 | """ 451 | Get the number of labels added in the query. 452 | 453 | Returns: 454 | int: The number of labels added. 455 | """ 456 | 457 | return self.__get_statistics(LABELS_ADDED) 458 | 459 | @property 460 | def labels_removed(self) -> int: 461 | """ 462 | Get the number of labels removed in the query. 463 | 464 | Returns: 465 | int: The number of labels removed. 466 | """ 467 | return self.__get_statistics(LABELS_REMOVED) 468 | 469 | @property 470 | def nodes_created(self) -> int: 471 | """ 472 | Get the number of nodes created in the query. 473 | 474 | Returns: 475 | int: The number of nodes created. 476 | """ 477 | return self.__get_statistics(NODES_CREATED) 478 | 479 | @property 480 | def nodes_deleted(self) -> int: 481 | """ 482 | Get the number of nodes deleted in the query. 483 | 484 | Returns: 485 | int: The number of nodes deleted. 486 | """ 487 | return self.__get_statistics(NODES_DELETED) 488 | 489 | @property 490 | def properties_set(self) -> int: 491 | """ 492 | Get the number of properties set in the query. 493 | 494 | Returns: 495 | int: The number of properties set. 
496 | """ 497 | return self.__get_statistics(PROPERTIES_SET) 498 | 499 | @property 500 | def properties_removed(self) -> int: 501 | """ 502 | Get the number of properties removed in the query. 503 | 504 | Returns: 505 | int: The number of properties removed. 506 | """ 507 | return self.__get_statistics(PROPERTIES_REMOVED) 508 | 509 | @property 510 | def relationships_created(self) -> int: 511 | """ 512 | Get the number of relationships created in the query. 513 | 514 | Returns: 515 | int: The number of relationships created. 516 | """ 517 | return self.__get_statistics(RELATIONSHIPS_CREATED) 518 | 519 | @property 520 | def relationships_deleted(self) -> int: 521 | """ 522 | Get the number of relationships deleted in the query. 523 | 524 | Returns: 525 | int: The number of relationships deleted. 526 | """ 527 | return self.__get_statistics(RELATIONSHIPS_DELETED) 528 | 529 | @property 530 | def indices_created(self) -> int: 531 | """ 532 | Get the number of indices created in the query. 533 | 534 | Returns: 535 | int: The number of indices created. 536 | """ 537 | return self.__get_statistics(INDICES_CREATED) 538 | 539 | @property 540 | def indices_deleted(self) -> int: 541 | """ 542 | Get the number of indices deleted in the query. 543 | 544 | Returns: 545 | int: The number of indices deleted. 546 | """ 547 | return self.__get_statistics(INDICES_DELETED) 548 | 549 | @property 550 | def cached_execution(self) -> bool: 551 | """ 552 | Check if the query execution plan was cached. 553 | 554 | Returns: 555 | bool: True if the query execution plan was cached, False otherwise. 556 | """ 557 | return self.__get_statistics(CACHED_EXECUTION) == 1 558 | 559 | @property 560 | def run_time_ms(self) -> float: 561 | """ 562 | Get the server execution time of the query. 563 | 564 | Returns: 565 | float: The server execution time of the query in milliseconds. 
566 | """ 567 | return self.__get_statistics(INTERNAL_EXECUTION_TIME) 568 | -------------------------------------------------------------------------------- /falkordb/cluster.py: -------------------------------------------------------------------------------- 1 | from redis.cluster import RedisCluster 2 | import redis.exceptions as redis_exceptions 3 | import socket 4 | 5 | # detect if a connection is a Cluster 6 | def Is_Cluster(conn): 7 | info = conn.info(section="server") 8 | return "redis_mode" in info and info["redis_mode"] == "cluster" 9 | 10 | 11 | # create a cluster connection from a Redis connection 12 | def Cluster_Conn( 13 | conn, 14 | ssl, 15 | cluster_error_retry_attempts=3, 16 | startup_nodes=None, 17 | require_full_coverage=False, 18 | reinitialize_steps=5, 19 | read_from_replicas=False, 20 | dynamic_startup_nodes=True, 21 | url=None, 22 | address_remap=None, 23 | ): 24 | connection_kwargs = conn.connection_pool.connection_kwargs 25 | host = connection_kwargs.pop("host") 26 | port = connection_kwargs.pop("port") 27 | username = connection_kwargs.pop("username") 28 | password = connection_kwargs.pop("password") 29 | 30 | retry = connection_kwargs.pop("retry", None) 31 | retry_on_timeout = connection_kwargs.pop("retry_on_timeout", None) 32 | retry_on_error = connection_kwargs.pop( 33 | "retry_on_error", 34 | [ 35 | ConnectionRefusedError, 36 | ConnectionError, 37 | TimeoutError, 38 | socket.timeout, 39 | redis_exceptions.ConnectionError, 40 | ], 41 | ) 42 | return RedisCluster( 43 | host=host, 44 | port=port, 45 | username=username, 46 | password=password, 47 | decode_responses=True, 48 | ssl=ssl, 49 | retry=retry, 50 | retry_on_timeout=retry_on_timeout, 51 | retry_on_error=retry_on_error, 52 | require_full_coverage=require_full_coverage, 53 | reinitialize_steps=reinitialize_steps, 54 | read_from_replicas=read_from_replicas, 55 | dynamic_startup_nodes=dynamic_startup_nodes, 56 | url=url, 57 | address_remap=address_remap, 58 | 
# ---- falkordb/edge.py ----
from typing import Optional
from .node import Node
from .helpers import quote_string


class Edge:
    """
    An edge connecting two nodes.
    """

    def __init__(self, src_node: Node, relation: str, dest_node: Node,
                 edge_id: Optional[int] = None, alias: Optional[str] = '',
                 properties=None):
        """
        Create a new edge.

        Args:
            src_node: The source node of the edge.
            relation: The relationship type of the edge.
            dest_node: The destination node of the edge.
            edge_id: The ID of the edge.
            alias: An alias for the edge (default is empty string).
            properties: The properties of the edge.

        Raises:
            AssertionError: If either src_node or dest_node is missing.
        """
        if src_node is None or dest_node is None:
            raise AssertionError("Both src_node & dest_node must be provided")

        self.id = edge_id
        self.alias = alias
        self.src_node = src_node
        self.dest_node = dest_node
        self.relation = relation
        self.properties = properties or {}

    def to_string(self) -> str:
        """
        Return the edge's properties rendered as a Cypher-style map,
        or an empty string when there are none.
        """
        if not self.properties:
            return ""
        rendered = ",".join(
            f"{key}:{quote_string(val)}"
            for key, val in sorted(self.properties.items())
        )
        return "{" + rendered + "}"

    def __str__(self) -> str:
        """
        Return the edge in Cypher pattern form:
        (src)-[alias:relation {props}]->(dest).
        """
        src = f"({self.src_node.alias})" if isinstance(self.src_node, Node) else "()"
        dst = f"({self.dest_node.alias})" if isinstance(self.dest_node, Node) else "()"

        label = ":" + self.relation if self.relation else ""

        props = ""
        if self.properties:
            rendered = ",".join(
                f"{key}:{quote_string(val)}"
                for key, val in sorted(self.properties.items())
            )
            props = "{" + rendered + "}"

        return f"{src}-[{self.alias}{label}{props}]->{dst}"

    def __eq__(self, rhs) -> bool:
        """
        Check if two edges are equal.

        Two edges with both IDs set and equal are considered equal
        immediately; otherwise endpoints, relation and properties
        must all match.
        """
        if not isinstance(rhs, Edge):
            return False

        # quick positive check when both IDs are set
        if self.id is not None and rhs.id is not None and self.id == rhs.id:
            return True

        return (
            self.src_node == rhs.src_node
            and self.dest_node == rhs.dest_node
            and self.relation == rhs.relation
            and self.properties == rhs.properties
        )
# ---- falkordb/exceptions.py ----
class SchemaVersionMismatchException(Exception):
    """
    Exception raised when the schema version of the database does not match
    the version of the schema that the application expects.
    """

    def __init__(self, version: int):
        """
        Create a new SchemaVersionMismatchException.

        Args:
            version: The schema version reported by the server.
        """
        # kept as a bare attribute; str(exc) intentionally stays empty
        # to preserve the original behavior
        self.version = version


# ---- falkordb/execution_plan.py ----
import re
from typing import Optional

# Pre-compiled patterns for extracting profile statistics from a plan line.
# Hoisted to module level so they are compiled once, not once per operation.
_RECORDS_PRODUCED_RE = re.compile(r"Records produced: (\d+)")
_EXECUTION_TIME_RE = re.compile(r"Execution time: (\d+\.\d+) ms")


class ProfileStats:
    """
    Runtime execution statistics of a single plan operation.

    Attributes:
        records_produced (int): The number of records produced.
        execution_time (float): The execution time in milliseconds.
    """

    def __init__(self, records_produced: int, execution_time: float):
        """
        Initializes a new ProfileStats instance.

        Args:
            records_produced (int): The number of records produced.
            execution_time (float): The execution time in milliseconds.
        """
        self.execution_time = execution_time
        self.records_produced = records_produced


class Operation:
    """
    A single operation within an execution plan.

    Attributes:
        name (str): The name of the operation.
        args (str): Operation arguments.
        children (list): List of child operations.
        profile_stats (ProfileStats): Profile statistics; present only
            when the plan was produced by a profiling run.
    """

    def __init__(self, name: str, args=None,
                 profile_stats: Optional[ProfileStats] = None):
        """
        Creates a new Operation instance.

        Args:
            name (str): The name of the operation.
            args (str, optional): Operation arguments.
            profile_stats (ProfileStats, optional): Profile statistics for
                the operation. (Fixed: previously mis-annotated as ``bool``.)
        """
        self.name = name
        self.args = args
        self.children = []
        self.profile_stats = profile_stats

    @property
    def execution_time(self) -> float:
        """
        The operation's execution time in milliseconds.

        Raises AttributeError when the plan was not profiled
        (profile_stats is None).
        """
        return self.profile_stats.execution_time

    @property
    def records_produced(self) -> int:
        """
        Number of records produced by the operation.
        """
        return self.profile_stats.records_produced

    def append_child(self, child):
        """
        Appends a child operation to the current operation.

        Args:
            child (Operation): The child operation to append.

        Returns:
            Operation: self, allowing chained appends.

        Raises:
            TypeError: If child is not an Operation.
        """
        if not isinstance(child, Operation):
            # TypeError is more precise than the bare Exception previously
            # raised here; handlers catching Exception still match.
            raise TypeError("child must be Operation")

        self.children.append(child)
        return self

    def child_count(self) -> int:
        """
        Returns the number of child operations.
        """
        return len(self.children)

    def __eq__(self, o: object) -> bool:
        """
        Operations are equal when their name and arguments match;
        children and profile stats are deliberately not compared.
        """
        if not isinstance(o, Operation):
            return False

        return self.name == o.name and self.args == o.args

    def __str__(self) -> str:
        """
        Returns "name" or "name | args" when arguments are present.
        """
        args_str = "" if self.args is None else " | " + self.args
        return f"{self.name}{args_str}"


class ExecutionPlan:
    """
    A parsed query execution plan: the raw plan lines, a tree of
    Operation objects, and an index of operations by name.

    Attributes:
        plan (list): List of strings representing the plan lines.
        structured_plan (Operation): Root of the structured operation tree.
        operations (dict): Maps operation name -> list of Operations.
    """

    def __init__(self, plan):
        """
        Creates a new ExecutionPlan instance.

        Args:
            plan (list): List of strings (or bytes) representing the plan.

        Raises:
            TypeError: If plan is not a list.
        """
        if not isinstance(plan, list):
            raise TypeError("plan must be an array")

        if isinstance(plan[0], bytes):
            plan = [b.decode() for b in plan]

        self.plan = plan
        self.operations = {}
        self.structured_plan = self._operation_tree()
        # operations were appended while scanning the plan top-down;
        # the public per-name lists are exposed in reverse (bottom-up) order
        for key in self.operations:
            self.operations[key].reverse()

    def collect_operations(self, op_name):
        """
        Collects all operations with the specified name from the plan.

        Args:
            op_name (str): Name of operation to collect.

        Returns:
            List[Operation]: All operations with the specified name,
            or an empty list when none match.
        """
        # NOTE(review): the original implementation contained an
        # unreachable fallback loop after its early returns; removed.
        return self.operations.get(op_name, [])

    def __compare_operations(self, root_a, root_b) -> bool:
        """
        Recursively compares two operation trees.

        Returns:
            bool: True if the operation trees are equal, False otherwise.
        """
        # compare current roots (name + args via Operation.__eq__)
        if root_a != root_b:
            return False

        # roots must have the same number of children
        if root_a.child_count() != root_b.child_count():
            return False

        # recursively compare children
        for i in range(root_a.child_count()):
            if not self.__compare_operations(root_a.children[i], root_b.children[i]):
                return False

        return True

    def __str__(self) -> str:
        """
        Renders the plan tree, children indented four spaces per level.
        """
        def aggregate_str(str_children):
            return "\n".join(
                [
                    "    " + line
                    for str_child in str_children
                    for line in str_child.splitlines()
                ]
            )

        def combine_str(x, y):
            return f"{x}\n{y}"

        return self._operation_traverse(
            self.structured_plan, str, aggregate_str, combine_str
        )

    def __eq__(self, o: object) -> bool:
        """
        Two execution plans are equal when their operation trees match.
        """
        if not isinstance(o, ExecutionPlan):
            return False

        return self.__compare_operations(self.structured_plan, o.structured_plan)

    def __iter__(self):
        # iterates the names of the operations present in the plan
        return iter(self.operations)

    def _operation_traverse(self, op, op_f, aggregate_f, combine_f):
        """
        Traverses the operation tree recursively applying functions.

        Args:
            op: Operation to traverse.
            op_f: Function applied to each operation.
            aggregate_f: Aggregation applied over all children results.
            combine_f: Combines an operation result with its children's.
        """
        op_res = op_f(op)
        if len(op.children) == 0:
            return op_res  # leaf: nothing to combine

        children = [
            self._operation_traverse(child, op_f, aggregate_f, combine_f)
            for child in op.children
        ]
        return combine_f(op_res, aggregate_f(children))

    def _operation_tree(self):
        """
        Builds the operation tree from the plan's string representation.
        Nesting depth is encoded as one four-space group per level.

        Returns:
            Operation: Root of the structured operation tree.

        Raises:
            ValueError: If the plan's indentation is inconsistent.
        """
        i = 0
        level = 0
        stack = []
        current = None

        def create_operation(args):
            """Build an Operation from a '|'-split plan line."""
            profile_stats = None
            name = args.pop(0).strip()
            if args and "Records produced" in args[-1]:
                stats_str = args.pop(-1)
                records_produced = int(
                    _RECORDS_PRODUCED_RE.search(stats_str).group(1)
                )
                execution_time = float(
                    _EXECUTION_TIME_RE.search(stats_str).group(1)
                )
                profile_stats = ProfileStats(records_produced, execution_time)
            return Operation(
                name, args[0].strip() if args else None, profile_stats
            )

        # iterate plan operations
        while i < len(self.plan):
            current_op = self.plan[i]
            op_level = current_op.count("    ")
            if op_level == level:
                # sibling of the current operation: attach to the parent
                child = create_operation(current_op.split("|"))
                self.operations.setdefault(child.name, []).append(child)

                if current:
                    current = stack.pop()
                    current.append_child(child)
                current = child
                i += 1
                stack.append(child)
            elif op_level == level + 1:
                # one level deeper: attach as child of the current operation
                child = create_operation(current_op.split("|"))
                self.operations.setdefault(child.name, []).append(child)

                current.append_child(child)
                stack.append(current)
                current = child
                level += 1
                i += 1
            elif op_level < level:
                # dedent: walk back up to the matching ancestor
                levels_back = level - op_level + 1
                for _ in range(levels_back):
                    current = stack.pop()
                level -= levels_back
            else:
                # indentation jumped by more than one level
                raise ValueError("corrupted plan")
        return stack[0]
# ---- falkordb/falkordb.py ----
import redis
from .cluster import *
from .sentinel import *
from .graph import Graph
from typing import List, Union

# graph commands
LIST_CMD = "GRAPH.LIST"
CONFIG_CMD = "GRAPH.CONFIG"


class FalkorDB:
    """
    FalkorDB Class for interacting with a FalkorDB server.

    Usage example::
        from falkordb import FalkorDB
        # connect to the database and select the 'social' graph
        db = FalkorDB()
        graph = db.select_graph("social")

        # get a single 'Person' node from the graph and print its name
        result = graph.query("MATCH (n:Person) RETURN n LIMIT 1").result_set
        person = result[0][0]
        print(person.properties['name'])
    """

    def __init__(
        self,
        host="localhost",
        port=6379,
        password=None,
        socket_timeout=None,
        socket_connect_timeout=None,
        socket_keepalive=None,
        socket_keepalive_options=None,
        connection_pool=None,
        unix_socket_path=None,
        encoding="utf-8",
        encoding_errors="strict",
        charset=None,
        errors=None,
        retry_on_timeout=False,
        retry_on_error=None,
        ssl=False,
        ssl_keyfile=None,
        ssl_certfile=None,
        ssl_cert_reqs="required",
        ssl_ca_certs=None,
        ssl_ca_path=None,
        ssl_ca_data=None,
        ssl_check_hostname=False,
        ssl_password=None,
        ssl_validate_ocsp=False,
        ssl_validate_ocsp_stapled=False,
        ssl_ocsp_context=None,
        ssl_ocsp_expected_cert=None,
        max_connections=None,
        single_connection_client=False,
        health_check_interval=0,
        client_name=None,
        lib_name="FalkorDB",
        lib_version="1.0.0",
        username=None,
        retry=None,
        connect_func=None,
        credential_provider=None,
        protocol=2,
        # FalkorDB Cluster Params
        cluster_error_retry_attempts=3,
        startup_nodes=None,
        require_full_coverage=False,
        reinitialize_steps=5,
        read_from_replicas=False,
        dynamic_startup_nodes=True,
        url=None,
        address_remap=None,
    ):
        """
        Create a FalkorDB connection. Most parameters are forwarded to
        redis.Redis; when the target is a Sentinel or Cluster deployment
        the connection is transparently upgraded.
        """
        conn = redis.Redis(
            host=host,
            port=port,
            db=0,
            password=password,
            socket_timeout=socket_timeout,
            socket_connect_timeout=socket_connect_timeout,
            socket_keepalive=socket_keepalive,
            socket_keepalive_options=socket_keepalive_options,
            connection_pool=connection_pool,
            unix_socket_path=unix_socket_path,
            encoding=encoding,
            encoding_errors=encoding_errors,
            charset=charset,
            errors=errors,
            decode_responses=True,
            retry_on_timeout=retry_on_timeout,
            retry_on_error=retry_on_error,
            ssl=ssl,
            ssl_keyfile=ssl_keyfile,
            ssl_certfile=ssl_certfile,
            ssl_cert_reqs=ssl_cert_reqs,
            ssl_ca_certs=ssl_ca_certs,
            ssl_ca_path=ssl_ca_path,
            ssl_ca_data=ssl_ca_data,
            ssl_check_hostname=ssl_check_hostname,
            ssl_password=ssl_password,
            ssl_validate_ocsp=ssl_validate_ocsp,
            ssl_validate_ocsp_stapled=ssl_validate_ocsp_stapled,
            ssl_ocsp_context=ssl_ocsp_context,
            ssl_ocsp_expected_cert=ssl_ocsp_expected_cert,
            max_connections=max_connections,
            single_connection_client=single_connection_client,
            health_check_interval=health_check_interval,
            client_name=client_name,
            lib_name=lib_name,
            lib_version=lib_version,
            username=username,
            retry=retry,
            redis_connect_func=connect_func,
            credential_provider=credential_provider,
            protocol=protocol,
        )

        # Sentinel deployment: route through the current master
        if Is_Sentinel(conn):
            self.sentinel, self.service_name = Sentinel_Conn(conn, ssl)
            conn = self.sentinel.master_for(self.service_name, ssl=ssl)

        # Cluster deployment: upgrade to a cluster-aware connection
        if Is_Cluster(conn):
            conn = Cluster_Conn(
                conn,
                ssl,
                cluster_error_retry_attempts,
                startup_nodes,
                require_full_coverage,
                reinitialize_steps,
                read_from_replicas,
                dynamic_startup_nodes,
                url,
                address_remap,
            )

        self.connection = conn
        self.flushdb = conn.flushdb
        self.execute_command = conn.execute_command

    @classmethod
    def from_url(cls, url: str, **kwargs) -> "FalkorDB":
        """
        Creates a new FalkorDB instance from a URL.

        Args:
            cls: The class itself.
            url (str): The URL.
            kwargs: Additional keyword arguments forwarded to the constructor.

        Returns:
            FalkorDB: A new FalkorDB instance.

        Usage example::
            db = FalkorDB.from_url("falkor://[[username]:[password]]@localhost:6379")
            db = FalkorDB.from_url("falkors://[[username]:[password]]@localhost:6379")
            db = FalkorDB.from_url("unix://[username@]/path/to/socket.sock?db=0[&password=password]")
        """
        # switch from falkor:// scheme to the redis:// scheme redis-py knows
        if url.startswith("falkor://"):
            url = "redis://" + url[len("falkor://") :]
        elif url.startswith("falkors://"):
            url = "rediss://" + url[len("falkors://") :]

        conn = redis.from_url(url, **kwargs)

        # lift the parsed connection settings back into constructor kwargs
        connection_kwargs = conn.connection_pool.connection_kwargs
        connection_class = conn.connection_pool.connection_class
        kwargs["host"] = connection_kwargs.get("host", "localhost")
        kwargs["port"] = connection_kwargs.get("port", 6379)
        kwargs["username"] = connection_kwargs.get("username")
        kwargs["password"] = connection_kwargs.get("password")
        if connection_class is redis.SSLConnection:
            kwargs["ssl"] = True

        # Initialize a FalkorDB instance using the updated kwargs
        return cls(**kwargs)

    def select_graph(self, graph_id: str) -> Graph:
        """
        Selects a graph by creating a new Graph instance.

        Args:
            graph_id (str): The identifier of the graph.

        Returns:
            Graph: A new Graph instance associated with the selected graph.

        Raises:
            TypeError: If graph_id is not a non-empty string.
        """
        if not isinstance(graph_id, str) or graph_id == "":
            raise TypeError(
                f"Expected a string parameter, but received {type(graph_id)}."
            )

        return Graph(self, graph_id)

    def list_graphs(self) -> List[str]:
        """
        Lists all graph names.
        See: https://docs.falkordb.com/commands/graph.list.html

        Returns:
            List: List of graph names.
        """
        return self.connection.execute_command(LIST_CMD)

    def config_get(self, name: str) -> Union[int, str]:
        """
        Retrieve a DB level configuration.
        For a list of available configurations see: https://docs.falkordb.com/configuration.html#falkordb-configuration-parameters

        Args:
            name (str): The name of the configuration.

        Returns:
            int or str: The configuration value.
        """
        return self.connection.execute_command(CONFIG_CMD, "GET", name)[1]

    def config_set(self, name: str, value=None):
        """
        Update a DB level configuration.
        For a list of available configurations see: https://docs.falkordb.com/configuration.html#falkordb-configuration-parameters

        Args:
            name (str): The name of the configuration.
            value: The value to set.

        Returns:
            The server's reply to the CONFIG SET command.
            (Fixed: was annotated/documented as returning None while
            actually returning the command's reply.)
        """
        return self.connection.execute_command(CONFIG_CMD, "SET", name, value)
# ---- falkordb/graph_schema.py ----
# NOTE(review): the original module imported `typing.List` and
# `SchemaVersionMismatchException` but used neither; both unused
# imports have been removed (the whole module is visible here).

# read-only schema procedures exposed by the server
DB_LABELS = "DB.LABELS"
DB_PROPERTYKEYS = "DB.PROPERTYKEYS"
DB_RELATIONSHIPTYPES = "DB.RELATIONSHIPTYPES"


class GraphSchema():
    """
    The graph schema.
    Maintains the labels, properties and relationships of the graph.
    """

    def __init__(self, graph: 'Graph'):
        """
        Initialize the graph schema.

        Args:
            graph (Graph): The graph this schema mirrors.
        """
        self.graph = graph
        self.clear()

    def clear(self):
        """
        Reset the cached schema (version, labels, properties,
        relationship types).

        Returns:
            None
        """
        self.version = 0
        self.labels = []
        self.properties = []
        self.relationships = []

    def refresh_labels(self) -> None:
        """
        Refresh the cached labels from the server.

        Returns:
            None
        """
        result_set = self.graph.call_procedure(DB_LABELS).result_set
        self.labels = [l[0] for l in result_set]

    def refresh_relations(self) -> None:
        """
        Refresh the cached relationship types from the server.

        Returns:
            None
        """
        result_set = self.graph.call_procedure(DB_RELATIONSHIPTYPES).result_set
        self.relationships = [r[0] for r in result_set]

    def refresh_properties(self) -> None:
        """
        Refresh the cached property keys from the server.

        Returns:
            None
        """
        result_set = self.graph.call_procedure(DB_PROPERTYKEYS).result_set
        self.properties = [p[0] for p in result_set]

    def refresh(self, version: int) -> None:
        """
        Re-fetch the entire schema and record its version.

        Args:
            version (int): The version of the graph schema.

        Returns:
            None
        """
        self.clear()
        self.version = version
        self.refresh_labels()
        self.refresh_relations()
        self.refresh_properties()

    def get_label(self, idx: int) -> str:
        """
        Returns a label by its index.

        Args:
            idx (int): The index of the label.

        Returns:
            str: The label.
        """
        try:
            l = self.labels[idx]
        except IndexError:
            # cache miss: the server may have new labels; refresh and retry
            self.refresh_labels()
            l = self.labels[idx]
        return l

    def get_relation(self, idx: int) -> str:
        """
        Returns a relationship type by its index.

        Args:
            idx (int): The index of the relation.

        Returns:
            str: The relationship type.
        """
        try:
            r = self.relationships[idx]
        except IndexError:
            # cache miss: refresh relationship types and retry
            self.refresh_relations()
            r = self.relationships[idx]
        return r

    def get_property(self, idx: int) -> str:
        """
        Returns a property key by its index.

        Args:
            idx (int): The index of the property.

        Returns:
            str: The property.
        """
        try:
            p = self.properties[idx]
        except IndexError:
            # cache miss: refresh property keys and retry
            self.refresh_properties()
            p = self.properties[idx]
        return p


# ---- falkordb/helpers.py ----
def quote_string(v):
    """
    FalkorDB strings must be quoted; wrap v in double quotes when it is
    a string (bytes are decoded first), escaping backslashes and quotes.
    Non-string values are returned unchanged.
    """
    if isinstance(v, bytes):
        v = v.decode()
    elif not isinstance(v, str):
        return v
    if len(v) == 0:
        return '""'

    # escape backslashes first so escaped quotes are not double-escaped
    v = v.replace("\\", "\\\\")
    v = v.replace('"', '\\"')

    return f'"{v}"'

def stringify_param_value(value):
    """
    turn a parameter value into a string suitable for the params header of
    a Cypher command
    you may pass any value that would be accepted by `json.dumps()`

    ways in which output differs from that of `str()`:
        * strings are quoted
        * None --> "null"
        * in dictionaries, keys are _not_ quoted

    :param value: the parameter value to be turned into a string
    :return: string
    """
    if isinstance(value, str):
        return quote_string(value)

    if value is None:
        return "null"

    if isinstance(value, (list, tuple)):
        return f'[{",".join(map(stringify_param_value, value))}]'

    if isinstance(value, dict):
        return f'{{{",".join(f"{k}:{stringify_param_value(v)}" for k, v in value.items())}}}'  # noqa

    return str(value)
7 | """ 8 | 9 | def __init__(self, node_id: Optional[int] = None, 10 | alias: Optional[str] = '', 11 | labels: Optional[Union[str, List[str]]] = None, 12 | properties=None): 13 | """ 14 | Create a new node. 15 | 16 | Args: 17 | node_id: The ID of the node. 18 | alias: An alias for the node (default is empty string). 19 | labels: The label or list of labels for the node. 20 | properties: The properties of the node. 21 | 22 | Returns: 23 | None 24 | """ 25 | self.id = node_id 26 | self.alias = alias 27 | self.labels = None 28 | 29 | if isinstance(labels, list): 30 | self.labels = [l for l in labels if isinstance(l, str) and l != ""] 31 | elif isinstance(labels, str) and labels != "": 32 | self.labels = [labels] 33 | 34 | self.properties = properties or {} 35 | 36 | def to_string(self) -> str: 37 | """ 38 | Get a string representation of the node's properties. 39 | 40 | Returns: 41 | str: A string representation of the node's properties. 42 | """ 43 | res = "" 44 | if self.properties: 45 | props = ",".join( 46 | key + ":" + str(quote_string(val)) 47 | for key, val in sorted(self.properties.items()) 48 | ) 49 | res += "{" + props + "}" 50 | 51 | return res 52 | 53 | def __str__(self) -> str: 54 | """ 55 | Get a string representation of the node. 56 | 57 | Returns: 58 | str: A string representation of the node. 59 | """ 60 | res = "(" 61 | if self.alias: 62 | res += self.alias 63 | if self.labels: 64 | res += ":" + ":".join(self.labels) 65 | if self.properties: 66 | props = ",".join( 67 | key + ":" + str(quote_string(val)) 68 | for key, val in sorted(self.properties.items()) 69 | ) 70 | res += "{" + props + "}" 71 | res += ")" 72 | 73 | return res 74 | 75 | def __eq__(self, rhs) -> bool: 76 | """ 77 | Check if two nodes are equal. 78 | 79 | Args: 80 | rhs: The node to compare. 81 | 82 | Returns: 83 | bool: True if the nodes are equal, False otherwise. 
84 | """ 85 | # Type checking 86 | if not isinstance(rhs, Node): 87 | return False 88 | 89 | # Quick positive check, if both IDs are set 90 | if self.id is not None and rhs.id is not None and self.id != rhs.id: 91 | return False 92 | 93 | # Labels should match. 94 | if self.labels != rhs.labels: 95 | return False 96 | 97 | # Quick check for the number of properties. 98 | if len(self.properties) != len(rhs.properties): 99 | return False 100 | 101 | # Compare properties. 102 | if self.properties != rhs.properties: 103 | return False 104 | 105 | return True 106 | -------------------------------------------------------------------------------- /falkordb/path.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from .edge import Edge 3 | from .node import Node 4 | 5 | 6 | class Path: 7 | """ 8 | Path Class for representing a path in a graph. 9 | 10 | This class defines a path consisting of nodes and edges. It provides methods for managing and manipulating the path. 11 | 12 | Example: 13 | node1 = Node() 14 | node2 = Node() 15 | edge1 = Edge(node1, "R", node2) 16 | 17 | path = Path.new_empty_path() 18 | path.add_node(node1).add_edge(edge1).add_node(node2) 19 | print(path) 20 | # Output: <(node1)-(edge1)->(node2)> 21 | """ 22 | def __init__(self, nodes: List[Node], edges: List[Edge]): 23 | if not (isinstance(nodes, list) and isinstance(edges, list)): 24 | raise TypeError("nodes and edges must be list") 25 | 26 | self._nodes = nodes 27 | self._edges = edges 28 | self.append_type = Node 29 | 30 | def nodes(self) -> List[Node]: 31 | """ 32 | Returns the list of nodes in the path. 33 | 34 | Returns: 35 | list: List of nodes in the path. 36 | """ 37 | return self._nodes 38 | 39 | def edges(self) -> List[Edge]: 40 | """ 41 | Returns the list of edges in the path. 42 | 43 | Returns: 44 | list: List of edges in the path. 
45 | """ 46 | return self._edges 47 | 48 | def get_node(self, index) -> Node: 49 | """ 50 | Returns the node at the specified index in the path. 51 | 52 | Args: 53 | index (int): Index of the node. 54 | 55 | Returns: 56 | Node: The node at the specified index. 57 | """ 58 | if 0 <= index < self.node_count(): 59 | return self._nodes[index] 60 | 61 | return None 62 | 63 | def get_edge(self, index) -> Edge: 64 | """ 65 | Returns the edge at the specified index in the path. 66 | 67 | Args: 68 | index (int): Index of the edge. 69 | 70 | Returns: 71 | Edge: The edge at the specified index. 72 | """ 73 | if 0 <= index < self.edge_count(): 74 | return self._edges[index] 75 | 76 | return None 77 | 78 | def first_node(self) -> Node: 79 | """ 80 | Returns the first node in the path. 81 | 82 | Returns: 83 | Node: The first node in the path. 84 | """ 85 | return self._nodes[0] if self.node_count() > 0 else None 86 | 87 | def last_node(self) -> Node: 88 | """ 89 | Returns the last node in the path. 90 | 91 | Returns: 92 | Node: The last node in the path. 93 | """ 94 | return self._nodes[-1] if self.node_count() > 0 else None 95 | 96 | def edge_count(self) -> int: 97 | """ 98 | Returns the number of edges in the path. 99 | 100 | Returns: 101 | int: Number of edges in the path. 102 | """ 103 | return len(self._edges) 104 | 105 | def node_count(self) -> int: 106 | """ 107 | Returns the number of nodes in the path. 108 | 109 | Returns: 110 | int: Number of nodes in the path. 111 | """ 112 | return len(self._nodes) 113 | 114 | def __eq__(self, other) -> bool: 115 | """ 116 | Compares two Path instances for equality based on their nodes and edges. 117 | 118 | Args: 119 | other (Path): Another Path instance for comparison. 120 | 121 | Returns: 122 | bool: True if the paths are equal, False otherwise. 
123 | """ 124 | # Type checking 125 | if not isinstance(other, Path): 126 | return False 127 | 128 | return self.nodes() == other.nodes() and self.edges() == other.edges() 129 | 130 | def __str__(self) -> str: 131 | """ 132 | Returns a string representation of the path, including nodes and edges. 133 | 134 | Returns: 135 | str: String representation of the path. 136 | """ 137 | res = "<" 138 | edge_count = self.edge_count() 139 | for i in range(0, edge_count): 140 | node_id = self.get_node(i).id 141 | res += "(" + str(node_id) + ")" 142 | edge = self.get_edge(i) 143 | res += ( 144 | "-[" + str(int(edge.id)) + "]->" 145 | if edge.src_node == node_id 146 | else "<-[" + str(int(edge.id)) + "]-" 147 | ) 148 | node_id = self.get_node(edge_count).id 149 | res += "(" + str(node_id) + ")" 150 | res += ">" 151 | return res 152 | -------------------------------------------------------------------------------- /falkordb/query_result.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from enum import Enum 3 | from typing import List 4 | from collections import OrderedDict 5 | 6 | from redis import ResponseError 7 | 8 | from .edge import Edge 9 | from .node import Node 10 | from .path import Path 11 | from .exceptions import SchemaVersionMismatchException 12 | 13 | # statistics 14 | LABELS_ADDED = "Labels added" 15 | LABELS_REMOVED = "Labels removed" 16 | NODES_CREATED = "Nodes created" 17 | NODES_DELETED = "Nodes deleted" 18 | PROPERTIES_SET = "Properties set" 19 | INDICES_CREATED = "Indices created" 20 | INDICES_DELETED = "Indices deleted" 21 | CACHED_EXECUTION = "Cached execution" 22 | PROPERTIES_REMOVED = "Properties removed" 23 | RELATIONSHIPS_DELETED = "Relationships deleted" 24 | RELATIONSHIPS_CREATED = "Relationships created" 25 | INTERNAL_EXECUTION_TIME = "internal execution time" 26 | 27 | STATS = [ 28 | LABELS_ADDED, 29 | NODES_CREATED, 30 | NODES_DELETED, 31 | LABELS_REMOVED, 32 | PROPERTIES_SET, 33 | 
INDICES_CREATED, 34 | INDICES_DELETED, 35 | CACHED_EXECUTION, 36 | PROPERTIES_REMOVED, 37 | RELATIONSHIPS_CREATED, 38 | RELATIONSHIPS_DELETED, 39 | INTERNAL_EXECUTION_TIME, 40 | ] 41 | 42 | class ResultSetScalarTypes(Enum): 43 | """ 44 | Enumeration representing different scalar types in the query result set. 45 | 46 | Attributes: 47 | VALUE_UNKNOWN (int): Unknown scalar type (0) 48 | VALUE_NULL (int): Null scalar type (1) 49 | VALUE_STRING (int): String scalar type (2) 50 | VALUE_INTEGER (int): Integer scalar type (3) 51 | VALUE_BOOLEAN (int): Boolean scalar type (4) 52 | VALUE_DOUBLE (int): Double scalar type (5) 53 | VALUE_ARRAY (int): Array scalar type (6) 54 | VALUE_EDGE (int): Edge scalar type (7) 55 | VALUE_NODE (int): Node scalar type (8) 56 | VALUE_PATH (int): Path scalar type (9) 57 | VALUE_MAP (int): Map scalar type (10) 58 | VALUE_POINT (int): Point scalar type (11) 59 | VALUE_VECTORF32 (int): Vector scalar type (12) 60 | """ 61 | 62 | VALUE_UNKNOWN = 0 63 | VALUE_NULL = 1 64 | VALUE_STRING = 2 65 | VALUE_INTEGER = 3 66 | VALUE_BOOLEAN = 4 67 | VALUE_DOUBLE = 5 68 | VALUE_ARRAY = 6 69 | VALUE_EDGE = 7 70 | VALUE_NODE = 8 71 | VALUE_PATH = 9 72 | VALUE_MAP = 10 73 | VALUE_POINT = 11 74 | VALUE_VECTORF32 = 12 75 | 76 | def __parse_unknown(value, graph): 77 | """ 78 | Parse a value of unknown type. 79 | 80 | Args: 81 | value: The value to parse. 82 | graph: The graph instance. 83 | 84 | Returns: 85 | None 86 | """ 87 | sys.stderr.write("Unknown type\n") 88 | 89 | def __parse_null(value, graph) -> None: 90 | """ 91 | Parse a null value. 92 | 93 | Args: 94 | value: The null value. 95 | graph: The graph instance. 96 | 97 | Returns: 98 | None: Always returns None. 99 | """ 100 | return None 101 | 102 | def __parse_string(value, graph) -> str: 103 | """ 104 | Parse the value as a string. 105 | 106 | Args: 107 | value: The value to parse. 108 | graph: The graph instance. 109 | 110 | Returns: 111 | str: The parsed string value. 
112 | """ 113 | if isinstance(value, bytes): 114 | return value.decode() 115 | 116 | if not isinstance(value, str): 117 | return str(value) 118 | 119 | return value 120 | 121 | def __parse_integer(value, graph) -> int: 122 | """ 123 | Parse the integer value from the value. 124 | 125 | Args: 126 | value: The value to parse. 127 | graph: The graph instance. 128 | 129 | Returns: 130 | int: The parsed integer value. 131 | """ 132 | return int(value) 133 | 134 | def __parse_boolean(value, graph) -> bool: 135 | """ 136 | Parse the value as a boolean. 137 | 138 | Args: 139 | value: The value to parse. 140 | graph: The graph instance. 141 | 142 | Returns: 143 | bool: The parsed boolean value. 144 | """ 145 | value = value.decode() if isinstance(value, bytes) else value 146 | return value == "true" 147 | 148 | def __parse_double(value, graph) -> float: 149 | """ 150 | Parse the value as a double. 151 | 152 | Args: 153 | value: The value to parse. 154 | graph: The graph instance. 155 | 156 | Returns: 157 | float: The parsed double value. 158 | """ 159 | return float(value) 160 | 161 | def __parse_array(value, graph) -> List: 162 | """ 163 | Parse an array of values. 164 | 165 | Args: 166 | value: The array value to parse. 167 | graph: The graph instance. 168 | 169 | Returns: 170 | list: The parsed list of values. 171 | """ 172 | scalar = [parse_scalar(value[i], graph) for i in range(len(value))] 173 | return scalar 174 | 175 | def __parse_vectorf32(value, graph) -> List: 176 | """ 177 | Parse a vector32f. 178 | 179 | Args: 180 | value: The vector to parse. 181 | graph: The graph instance. 182 | 183 | Returns: 184 | list: The parsed vector. 185 | """ 186 | 187 | return [float(v) for v in value] 188 | 189 | def __parse_entity_properties(props, graph): 190 | """ 191 | Parse node/edge properties. 192 | 193 | Args: 194 | props (List): List of properties. 195 | graph: The graph instance. 196 | 197 | Returns: 198 | dict: Dictionary containing parsed properties. 
199 | """ 200 | properties = {} 201 | for prop in props: 202 | prop_name = graph.schema.get_property(prop[0]) 203 | prop_value = parse_scalar(prop[1:], graph) 204 | properties[prop_name] = prop_value 205 | 206 | return properties 207 | 208 | def __parse_node(value, graph) -> Node: 209 | """ 210 | Parse the value to a node. 211 | 212 | Args: 213 | value: The value to parse. 214 | graph: The graph instance. 215 | 216 | Returns: 217 | Node: The parsed Node instance. 218 | """ 219 | node_id = int(value[0]) 220 | labels = None 221 | if len(value[1]) > 0: 222 | labels = [graph.schema.get_label(inner_label) for inner_label in value[1]] 223 | properties = __parse_entity_properties(value[2], graph) 224 | return Node(node_id=node_id, alias="", labels=labels, properties=properties) 225 | 226 | def __parse_edge(value, graph) -> Edge: 227 | """ 228 | Parse the value to an edge. 229 | 230 | Args: 231 | value: The value to parse. 232 | graph: The graph instance. 233 | 234 | Returns: 235 | Edge: The parsed Edge instance. 236 | """ 237 | edge_id = int(value[0]) 238 | relation = graph.schema.get_relation(value[1]) 239 | src_node_id = int(value[2]) 240 | dest_node_id = int(value[3]) 241 | properties = __parse_entity_properties(value[4], graph) 242 | return Edge(src_node_id, relation, dest_node_id, edge_id=edge_id, properties=properties) 243 | 244 | def __parse_path(value, graph) -> Path: 245 | """ 246 | Parse the value to a path. 247 | 248 | Args: 249 | value: The value to parse. 250 | graph: The graph instance. 251 | 252 | Returns: 253 | Path: The parsed Path instance. 254 | """ 255 | nodes = parse_scalar(value[0], graph) 256 | edges = parse_scalar(value[1], graph) 257 | return Path(nodes, edges) 258 | 259 | def __parse_map(value, graph) -> OrderedDict: 260 | """ 261 | Parse the value as a map. 262 | 263 | Args: 264 | value: The value to parse. 265 | graph: The graph instance. 266 | 267 | Returns: 268 | OrderedDict: The parsed OrderedDict. 
269 | """ 270 | m = OrderedDict() 271 | n_entries = len(value) 272 | 273 | for i in range(0, n_entries, 2): 274 | key = __parse_string(value[i], graph) 275 | m[key] = parse_scalar(value[i + 1], graph) 276 | 277 | return m 278 | 279 | def __parse_point(value, graph): 280 | """ 281 | Parse the value to point. 282 | 283 | Args: 284 | value: The value to parse. 285 | graph: The graph instance. 286 | 287 | Returns: 288 | dict: The parsed dictionary representing a point. 289 | """ 290 | p = {"latitude": float(value[0]), "longitude": float(value[1])} 291 | return p 292 | 293 | def parse_scalar(value, graph): 294 | """ 295 | Parse a scalar value from a value in the result set. 296 | 297 | Args: 298 | value: The value to parse. 299 | graph: The graph instance. 300 | 301 | Returns: 302 | Any: The parsed scalar value. 303 | """ 304 | scalar_type = int(value[0]) 305 | value = value[1] 306 | scalar = PARSE_SCALAR_TYPES[scalar_type](value, graph) 307 | 308 | return scalar 309 | 310 | 311 | PARSE_SCALAR_TYPES = [ 312 | __parse_unknown, # VALUE_UNKNOWN 313 | __parse_null, # VALUE_NULL 314 | __parse_string, # VALUE_STRING 315 | __parse_integer, # VALUE_INTEGER 316 | __parse_boolean, # VALUE_BOOLEAN 317 | __parse_double, # VALUE_DOUBLE 318 | __parse_array, # VALUE_ARRAY 319 | __parse_edge, # VALUE_EDGE 320 | __parse_node, # VALUE_NODE 321 | __parse_path, # VALUE_PATH 322 | __parse_map, # VALUE_MAP 323 | __parse_point, # VALUE_POINT 324 | __parse_vectorf32 # VALUE_VECTORF32 325 | ] 326 | 327 | class QueryResult: 328 | """ 329 | Represents the result of a query operation on a graph. 330 | """ 331 | def __init__(self, graph, response): 332 | """ 333 | Initializes a QueryResult instance. 334 | 335 | Args: 336 | graph: The graph on which the query was executed. 337 | response: The response from the server. 
338 | """ 339 | self.graph = graph 340 | self.header = [] 341 | self.result_set = [] 342 | self._raw_stats = [] 343 | 344 | # in case of an error, an exception will be raised 345 | self.__check_for_errors(response) 346 | 347 | if len(response) == 1: 348 | self._raw_stats = response[0] 349 | else: 350 | # start by parsing statistics, matches the one we have 351 | self._raw_stats = response[-1] 352 | self.__parse_results(response) 353 | 354 | def __check_for_errors(self, response): 355 | """ 356 | Checks if the response contains an error. 357 | 358 | Args: 359 | response: The response from the server. 360 | 361 | Raises: 362 | ResponseError: If an error is encountered. 363 | """ 364 | if isinstance(response[0], ResponseError): 365 | error = response[0] 366 | if str(error) == "version mismatch": 367 | version = response[1] 368 | error = VersionMismatchException(version) 369 | raise error 370 | 371 | # if we encountered a run-time error, the last response 372 | # element will be an exception 373 | if isinstance(response[-1], ResponseError): 374 | raise response[-1] 375 | 376 | def __parse_results(self, raw_result_set): 377 | """ 378 | Parse the query execution result returned from the server. 379 | 380 | Args: 381 | raw_result_set: The raw result set from the server. 382 | """ 383 | self.header = self.__parse_header(raw_result_set) 384 | 385 | # empty header 386 | if len(self.header) == 0: 387 | return 388 | 389 | self.result_set = self.__parse_records(raw_result_set) 390 | 391 | def __get_statistics(self, s): 392 | """ 393 | Get the value of a specific statistical metric. 394 | 395 | Args: 396 | s (str): The statistical metric to retrieve. 397 | 398 | Returns: 399 | float: The value of the specified statistical metric. Returns 0 if the metric is not found. 
400 | """ 401 | for stat in self._raw_stats: 402 | if s in stat: 403 | return float(stat.split(": ")[1].split(" ")[0]) 404 | 405 | return 0 406 | 407 | def __parse_header(self, raw_result_set): 408 | """ 409 | Parse the header of the result. 410 | 411 | Args: 412 | raw_result_set: The raw result set from the server. 413 | 414 | Returns: 415 | list: An array of column name/column type pairs. 416 | """ 417 | # an array of column name/column type pairs 418 | header = raw_result_set[0] 419 | return header 420 | 421 | def __parse_records(self, raw_result_set): 422 | """ 423 | Parses the result set and returns a list of records. 424 | 425 | Args: 426 | raw_result_set: The raw result set from the server. 427 | 428 | Returns: 429 | list: A list of records. 430 | """ 431 | records = [ 432 | [parse_scalar(cell, self.graph) for cell in row] 433 | for row in raw_result_set[1] 434 | ] 435 | 436 | return records 437 | 438 | @property 439 | def labels_added(self) -> int: 440 | """ 441 | Get the number of labels added in the query. 442 | 443 | Returns: 444 | int: The number of labels added. 445 | """ 446 | 447 | return self.__get_statistics(LABELS_ADDED) 448 | 449 | @property 450 | def labels_removed(self) -> int: 451 | """ 452 | Get the number of labels removed in the query. 453 | 454 | Returns: 455 | int: The number of labels removed. 456 | """ 457 | return self.__get_statistics(LABELS_REMOVED) 458 | 459 | @property 460 | def nodes_created(self) -> int: 461 | """ 462 | Get the number of nodes created in the query. 463 | 464 | Returns: 465 | int: The number of nodes created. 466 | """ 467 | return self.__get_statistics(NODES_CREATED) 468 | 469 | @property 470 | def nodes_deleted(self) -> int: 471 | """ 472 | Get the number of nodes deleted in the query. 473 | 474 | Returns: 475 | int: The number of nodes deleted. 
476 | """ 477 | return self.__get_statistics(NODES_DELETED) 478 | 479 | @property 480 | def properties_set(self) -> int: 481 | """ 482 | Get the number of properties set in the query. 483 | 484 | Returns: 485 | int: The number of properties set. 486 | """ 487 | return self.__get_statistics(PROPERTIES_SET) 488 | 489 | @property 490 | def properties_removed(self) -> int: 491 | """ 492 | Get the number of properties removed in the query. 493 | 494 | Returns: 495 | int: The number of properties removed. 496 | """ 497 | return self.__get_statistics(PROPERTIES_REMOVED) 498 | 499 | @property 500 | def relationships_created(self) -> int: 501 | """ 502 | Get the number of relationships created in the query. 503 | 504 | Returns: 505 | int: The number of relationships created. 506 | """ 507 | return self.__get_statistics(RELATIONSHIPS_CREATED) 508 | 509 | @property 510 | def relationships_deleted(self) -> int: 511 | """ 512 | Get the number of relationships deleted in the query. 513 | 514 | Returns: 515 | int: The number of relationships deleted. 516 | """ 517 | return self.__get_statistics(RELATIONSHIPS_DELETED) 518 | 519 | @property 520 | def indices_created(self) -> int: 521 | """ 522 | Get the number of indices created in the query. 523 | 524 | Returns: 525 | int: The number of indices created. 526 | """ 527 | return self.__get_statistics(INDICES_CREATED) 528 | 529 | @property 530 | def indices_deleted(self) -> int: 531 | """ 532 | Get the number of indices deleted in the query. 533 | 534 | Returns: 535 | int: The number of indices deleted. 536 | """ 537 | return self.__get_statistics(INDICES_DELETED) 538 | 539 | @property 540 | def cached_execution(self) -> bool: 541 | """ 542 | Check if the query execution plan was cached. 543 | 544 | Returns: 545 | bool: True if the query execution plan was cached, False otherwise. 
546 | """ 547 | return self.__get_statistics(CACHED_EXECUTION) == 1 548 | 549 | @property 550 | def run_time_ms(self) -> float: 551 | """ 552 | Get the server execution time of the query. 553 | 554 | Returns: 555 | float: The server execution time of the query in milliseconds. 556 | """ 557 | return self.__get_statistics(INTERNAL_EXECUTION_TIME) 558 | -------------------------------------------------------------------------------- /falkordb/sentinel.py: -------------------------------------------------------------------------------- 1 | from redis.sentinel import Sentinel 2 | 3 | # detect if a connection is a sentinel 4 | def Is_Sentinel(conn): 5 | info = conn.info(section="server") 6 | return "redis_mode" in info and info["redis_mode"] == "sentinel" 7 | 8 | # create a sentinel connection from a Redis connection 9 | def Sentinel_Conn(conn, ssl): 10 | # collect masters 11 | masters = conn.sentinel_masters() 12 | 13 | # abort if multiple masters are detected 14 | if len(masters) != 1: 15 | raise Exception("Multiple masters, require service name") 16 | 17 | # monitored service name 18 | service_name = list(masters.keys())[0] 19 | 20 | # list of sentinels connection information 21 | sentinels_conns = [] 22 | 23 | # current sentinel 24 | host = conn.connection_pool.connection_kwargs['host'] 25 | port = conn.connection_pool.connection_kwargs['port'] 26 | sentinels_conns.append((host, port)) 27 | 28 | # additional sentinels 29 | #sentinels = conn.sentinel_sentinels(service_name) 30 | #for sentinel in sentinels: 31 | # ip = sentinel['ip'] 32 | # port = sentinel['port'] 33 | # sentinels_conns.append((host, port)) 34 | 35 | # use the same connection arguments e.g. 
username and password 36 | connection_kwargs = conn.connection_pool.connection_kwargs 37 | 38 | # construct sentinel arguments 39 | sentinel_kwargs = { } 40 | if 'username' in connection_kwargs: 41 | sentinel_kwargs['username'] = connection_kwargs['username'] 42 | if 'password' in connection_kwargs: 43 | sentinel_kwargs['password'] = connection_kwargs['password'] 44 | if ssl: 45 | sentinel_kwargs['ssl'] = True 46 | 47 | return (Sentinel(sentinels_conns, sentinel_kwargs=sentinel_kwargs, **connection_kwargs), service_name) 48 | 49 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "FalkorDB" 3 | version = "1.1.2" 4 | description = "Python client for interacting with FalkorDB database" 5 | authors = ["FalkorDB inc "] 6 | 7 | readme = "README.md" 8 | repository = "http://github.com/falkorDB/falkordb-py" 9 | homepage = "http://falkordb-py.readthedocs.io" 10 | keywords = ['FalkorDB', 'GraphDB', 'Cypher'] 11 | 12 | packages = [{include = "falkordb"}] 13 | 14 | [tool.poetry.dependencies] 15 | python = "^3.8" 16 | redis = "^5.0.1" 17 | 18 | [tool.poetry.group.test.dependencies] 19 | pytest-cov = ">=4.1,<6.0" 20 | pytest = "8.3.5" 21 | pytest-asyncio = ">=0.23.4,<0.25.0" 22 | 23 | [build-system] 24 | requires = ["poetry-core"] 25 | build-backend = "poetry.core.masonry.api" 26 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --asyncio-mode=auto 3 | 4 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FalkorDB/falkordb-py/5a23f700a69a760319367bc4b1ab4cff1aa05ce9/tests/__init__.py 
-------------------------------------------------------------------------------- /tests/test_async_constraints.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import asyncio 3 | from falkordb.asyncio import FalkorDB 4 | from redis.asyncio import BlockingConnectionPool 5 | 6 | @pytest.mark.asyncio 7 | async def test_constraints(): 8 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 9 | db = FalkorDB(connection_pool=pool) 10 | g = db.select_graph("async_constraints") 11 | 12 | # create node constraints 13 | await g.create_node_unique_constraint("Person", "name") 14 | await g.create_node_mandatory_constraint("Person", "name") 15 | await g.create_node_unique_constraint("Person", "v1", "v2") 16 | 17 | # create edge constraints 18 | await g.create_edge_unique_constraint("KNOWS", "since") 19 | await g.create_edge_mandatory_constraint("KNOWS", "since") 20 | await g.create_edge_unique_constraint("KNOWS", "v1", "v2") 21 | 22 | constraints = await g.list_constraints() 23 | assert(len(constraints) == 6) 24 | 25 | # drop constraints 26 | await g.drop_node_unique_constraint("Person", "name") 27 | await g.drop_node_mandatory_constraint("Person", "name") 28 | await g.drop_node_unique_constraint("Person", "v1", "v2") 29 | 30 | await g.drop_edge_unique_constraint("KNOWS", "since") 31 | await g.drop_edge_mandatory_constraint("KNOWS", "since") 32 | await g.drop_edge_unique_constraint("KNOWS", "v1", "v2") 33 | 34 | constraints = await g.list_constraints() 35 | assert(len(constraints) == 0) 36 | 37 | # close the connection pool 38 | await pool.aclose() 39 | 40 | @pytest.mark.asyncio 41 | async def test_create_existing_constraint(): 42 | # trying to create an existing constraint 43 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 44 | db = FalkorDB(connection_pool=pool) 45 | g = db.select_graph("async_constraints") 46 | 47 | # create node constraints 
48 | await g.create_node_unique_constraint("Person", "name") 49 | try: 50 | await g.create_node_unique_constraint("Person", "name") 51 | assert(False) 52 | except Exception as e: 53 | assert("Constraint already exists" == str(e)) 54 | 55 | # close the connection pool 56 | await pool.aclose() 57 | -------------------------------------------------------------------------------- /tests/test_async_copy.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from falkordb.asyncio import FalkorDB 3 | from redis.asyncio import BlockingConnectionPool 4 | 5 | @pytest.mark.asyncio 6 | async def test_graph_copy(): 7 | # create a simple graph and clone it 8 | # make sure graphs are the same 9 | 10 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 11 | db = FalkorDB(connection_pool=pool) 12 | src = db.select_graph("async_src") 13 | 14 | # create entities 15 | await src.query("CREATE (:A {v:1})-[:R {v:2}]->(:B {v:3})") 16 | 17 | # create index 18 | await src.create_edge_range_index("A", "v") 19 | await src.create_edge_range_index("R", "v") 20 | await src.create_node_fulltext_index("B", "v") 21 | 22 | # create constrain 23 | await src.create_node_unique_constraint("A", "v") 24 | await src.create_edge_unique_constraint("R", "v") 25 | 26 | # clone graph 27 | dest = await src.copy("async_dest") 28 | 29 | # validate src and dest are the same 30 | # validate entities 31 | q = "MATCH (a) RETURN a ORDER BY ID(a)" 32 | src_res = (await src.query(q)).result_set 33 | dest_res = (await dest.query(q)).result_set 34 | assert(src_res == dest_res) 35 | 36 | q = "MATCH ()-[e]->() RETURN e ORDER BY ID(e)" 37 | src_res = (await src.query(q)).result_set 38 | dest_res = (await dest.query(q)).result_set 39 | assert(src_res == dest_res) 40 | 41 | # validate schema 42 | src_res = (await src.call_procedure("DB.LABELS")).result_set 43 | dest_res = (await dest.call_procedure("DB.LABELS")).result_set 44 | 
assert(src_res == dest_res) 45 | 46 | src_res = (await src.call_procedure("DB.PROPERTYKEYS")).result_set 47 | dest_res = (await dest.call_procedure("DB.PROPERTYKEYS")).result_set 48 | assert(src_res == dest_res) 49 | 50 | src_res = (await src.call_procedure("DB.RELATIONSHIPTYPES")).result_set 51 | dest_res = (await dest.call_procedure("DB.RELATIONSHIPTYPES")).result_set 52 | assert(src_res == dest_res) 53 | 54 | # validate indices 55 | q = """CALL DB.INDEXES() 56 | YIELD label, properties, types, language, stopwords, entitytype, status 57 | RETURN * 58 | ORDER BY label, properties, types, language, stopwords, entitytype, status""" 59 | src_res = (await src.query(q)).result_set 60 | dest_res = (await dest.query(q)).result_set 61 | 62 | assert(src_res == dest_res) 63 | 64 | # validate constraints 65 | src_res = await src.list_constraints() 66 | dest_res = await dest.list_constraints() 67 | assert(src_res == dest_res) 68 | 69 | # close the connection pool 70 | await pool.aclose() 71 | -------------------------------------------------------------------------------- /tests/test_async_db.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import asyncio 3 | from falkordb.asyncio import FalkorDB 4 | from redis.asyncio import BlockingConnectionPool 5 | 6 | @pytest.mark.asyncio 7 | async def test_config(): 8 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 9 | db = FalkorDB(connection_pool=pool) 10 | config_name = "RESULTSET_SIZE" 11 | 12 | # save olf configuration value 13 | prev_value = int(await db.config_get(config_name)) 14 | 15 | # set configuration 16 | response = await db.config_set(config_name, 3) 17 | assert response == "OK" 18 | 19 | # make sure config been updated 20 | new_value = int(await db.config_get(config_name)) 21 | assert new_value == 3 22 | 23 | # restore original value 24 | response = await db.config_set(config_name, prev_value) 25 | assert response == "OK" 
26 | 27 | # trying to get / set invalid configuration 28 | with pytest.raises(Exception): 29 | await db.config_get("none_existing_conf") 30 | 31 | with pytest.raises(Exception): 32 | await db.config_set("none_existing_conf", 1) 33 | 34 | with pytest.raises(Exception): 35 | await db.config_set(config_name, "invalid value") 36 | 37 | # close the connection pool 38 | await pool.aclose() 39 | 40 | @pytest.mark.asyncio 41 | async def test_connect_via_url(): 42 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 43 | db = FalkorDB(connection_pool=pool) 44 | 45 | # make sure we're able to connect via url 46 | g = db.select_graph("async_db") 47 | one = (await g.query("RETURN 1")).result_set[0][0] 48 | assert one == 1 49 | 50 | # close the connection pool 51 | await pool.aclose() 52 | -------------------------------------------------------------------------------- /tests/test_async_explain.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from falkordb.asyncio import FalkorDB 3 | from redis.asyncio import BlockingConnectionPool 4 | 5 | 6 | @pytest.mark.asyncio 7 | async def test_explain(): 8 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 9 | db = FalkorDB(connection_pool=pool) 10 | g = db.select_graph("async_explain") 11 | 12 | # run a single query to create the graph 13 | await g.query("RETURN 1") 14 | 15 | plan = await g.explain("UNWIND range(0, 3) AS x RETURN x") 16 | 17 | results_op = plan.structured_plan 18 | assert(results_op.name == 'Results') 19 | assert(len(results_op.children) == 1) 20 | 21 | project_op = results_op.children[0] 22 | assert(project_op.name == 'Project') 23 | assert(len(project_op.children) == 1) 24 | 25 | unwind_op = project_op.children[0] 26 | assert(unwind_op.name == 'Unwind') 27 | assert(len(unwind_op.children) == 0) 28 | 29 | # close the connection pool 30 | await pool.aclose() 31 | 32 | @pytest.mark.asyncio 33 
| async def test_cartesian_product_explain(): 34 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 35 | db = FalkorDB(connection_pool=pool) 36 | g = db.select_graph("async_explain") 37 | plan = await g.explain("MATCH (a), (b) RETURN *") 38 | 39 | results_op = plan.structured_plan 40 | assert(results_op.name == 'Results') 41 | assert(len(results_op.children) == 1) 42 | 43 | project_op = results_op.children[0] 44 | assert(project_op.name == 'Project') 45 | assert(len(project_op.children) == 1) 46 | 47 | cp_op = project_op.children[0] 48 | assert(cp_op.name == 'Cartesian Product') 49 | assert(len(cp_op.children) == 2) 50 | 51 | scan_a_op = cp_op.children[0] 52 | scan_b_op = cp_op.children[1] 53 | 54 | assert(scan_a_op.name == 'All Node Scan') 55 | assert(len(scan_a_op.children) == 0) 56 | 57 | assert(scan_b_op.name == 'All Node Scan') 58 | assert(len(scan_b_op.children) == 0) 59 | 60 | # close the connection pool 61 | await pool.aclose() 62 | 63 | @pytest.mark.asyncio 64 | async def test_merge(): 65 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 66 | db = FalkorDB(connection_pool=pool) 67 | g = db.select_graph("async_explain") 68 | 69 | try: 70 | await g.create_node_range_index("person", "age") 71 | except: 72 | pass 73 | plan = await g.explain("MERGE (p1:person {age: 40}) MERGE (p2:person {age: 41})") 74 | 75 | root = plan.structured_plan 76 | assert(root.name == 'Merge') 77 | assert(len(root.children) == 3) 78 | 79 | merge_op = root.children[0] 80 | assert(merge_op.name == 'Merge') 81 | assert(len(merge_op.children) == 2) 82 | 83 | index_scan_op = merge_op.children[0] 84 | assert(index_scan_op.name == 'Node By Index Scan') 85 | assert(len(index_scan_op.children) == 0) 86 | 87 | merge_create_op = merge_op.children[1] 88 | assert(merge_create_op.name == 'MergeCreate') 89 | assert(len(merge_create_op.children) == 0) 90 | 91 | index_scan_op = root.children[1] 92 | assert(index_scan_op.name 
== 'Node By Index Scan') 93 | assert(len(index_scan_op.children) == 1) 94 | 95 | arg_op = index_scan_op.children[0] 96 | assert(arg_op.name == 'Argument') 97 | assert(len(arg_op.children) == 0) 98 | 99 | merge_create_op = root.children[2] 100 | assert(merge_create_op.name == 'MergeCreate') 101 | assert(len(merge_create_op.children) == 1) 102 | 103 | arg_op = merge_create_op.children[0] 104 | assert(arg_op.name == 'Argument') 105 | assert(len(arg_op.children) == 0) 106 | 107 | # close the connection pool 108 | await pool.aclose() 109 | -------------------------------------------------------------------------------- /tests/test_async_graph.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from pytest import approx 3 | from redis import ResponseError 4 | from falkordb.asyncio import FalkorDB 5 | from falkordb import Edge, Node, Path, Operation 6 | from redis.asyncio import BlockingConnectionPool 7 | 8 | 9 | @pytest.mark.asyncio 10 | async def test_graph_creation(): 11 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 12 | db = FalkorDB(connection_pool=pool) 13 | graph = db.select_graph("async_graph") 14 | 15 | john = Node( 16 | alias="p", 17 | labels="person", 18 | properties={ 19 | "name": "John Doe", 20 | "age": 33, 21 | "gender": "male", 22 | "status": "single", 23 | }, 24 | ) 25 | 26 | japan = Node(alias="c", labels="country", properties={"name": "Japan"}) 27 | 28 | edge = Edge(john, "visited", japan, alias="v", properties={"purpose": "pleasure"}) 29 | 30 | query = f"CREATE {john}, {japan}, {edge} RETURN p,v,c" 31 | result = await graph.query(query) 32 | 33 | person = result.result_set[0][0] 34 | visit = result.result_set[0][1] 35 | country = result.result_set[0][2] 36 | 37 | assert person == john 38 | assert visit.properties == edge.properties 39 | assert country == japan 40 | 41 | # Test vector float32 query result 42 | query = "RETURN vecf32([1, -2, 3.14])" 43 | 
result = await graph.query(query) 44 | assert result.result_set[0][0] == approx([1, -2, 3.14]) 45 | 46 | query = "RETURN [1, 2.3, '4', true, false, null]" 47 | result = await graph.query(query) 48 | assert [1, 2.3, "4", True, False, None] == result.result_set[0][0] 49 | 50 | # close the connection pool 51 | await pool.aclose() 52 | 53 | @pytest.mark.asyncio 54 | async def test_array_functions(): 55 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 56 | db = FalkorDB(connection_pool=pool) 57 | graph = db.select_graph("async_graph") 58 | 59 | await graph.delete() 60 | 61 | query = """RETURN [0,1,2]""" 62 | result = await graph.query(query) 63 | assert [0, 1, 2] == result.result_set[0][0] 64 | 65 | a = Node( 66 | node_id=0, 67 | labels="person", 68 | properties={"name": "a", "age": 32, "array": [0, 1, 2]} 69 | ) 70 | 71 | await graph.query(f"CREATE {a}") 72 | 73 | query = "MATCH(n) return collect(n)" 74 | result = await graph.query(query) 75 | 76 | assert [a] == result.result_set[0][0] 77 | 78 | # close the connection pool 79 | await pool.aclose() 80 | 81 | @pytest.mark.asyncio 82 | async def test_path(): 83 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 84 | db = FalkorDB(connection_pool=pool) 85 | graph = db.select_graph("async_graph") 86 | 87 | await graph.delete() 88 | 89 | node0 = Node(alias="node0", node_id=0, labels="L1") 90 | node1 = Node(alias="node1", node_id=1, labels="L1") 91 | edge01 = Edge(node0, "R1", node1, edge_id=0, properties={"value": 1}) 92 | 93 | await graph.query(f"CREATE {node0}, {node1}, {edge01}") 94 | 95 | path01 = Path([node0, node1], [edge01]) 96 | expected_results = [[path01]] 97 | 98 | query = "MATCH p=(:L1)-[:R1]->(:L1) RETURN p" 99 | result = await graph.query(query) 100 | assert expected_results == result.result_set 101 | 102 | # close the connection pool 103 | await pool.aclose() 104 | 105 | @pytest.mark.asyncio 106 | async def test_param(): 107 | pool = 
BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 108 | db = FalkorDB(connection_pool=pool) 109 | graph = db.select_graph("async_graph") 110 | 111 | params = [1, 2.3, "str", True, False, None, [0, 1, 2], r"\" RETURN 1337 //"] 112 | query = "RETURN $param" 113 | for param in params: 114 | result = await graph.query(query, {"param": param}) 115 | expected_results = [[param]] 116 | assert expected_results == result.result_set 117 | 118 | # close the connection pool 119 | await pool.aclose() 120 | 121 | @pytest.mark.asyncio 122 | async def test_map(): 123 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 124 | db = FalkorDB(connection_pool=pool) 125 | g = db.select_graph("async_graph") 126 | 127 | await g.delete() 128 | 129 | query = "RETURN {a:1, b:'str', c:NULL, d:[1,2,3], e:True, f:{x:1, y:2}}" 130 | actual = (await g.query(query)).result_set[0][0] 131 | expected = { 132 | "a": 1, 133 | "b": "str", 134 | "c": None, 135 | "d": [1, 2, 3], 136 | "e": True, 137 | "f": {"x": 1, "y": 2}, 138 | } 139 | 140 | assert actual == expected 141 | 142 | src = Node(alias="src", node_id=0, labels="L1", properties={"v": 0}) 143 | dest = Node(alias="dest", node_id=1, labels="L2", properties={"v":2}) 144 | e = Edge(src, "R1", dest, edge_id=0, properties={"value": 1}) 145 | await g.query(f"CREATE {src}, {dest}, {e}") 146 | 147 | query = "MATCH (src)-[e]->(dest) RETURN {src:src, e:e, dest:dest}" 148 | actual = (await g.query(query)).result_set[0][0] 149 | expected = { "src": src, "e": e, "dest": dest } 150 | assert actual == expected 151 | 152 | # close the connection pool 153 | await pool.aclose() 154 | 155 | @pytest.mark.asyncio 156 | async def test_point(): 157 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 158 | db = FalkorDB(connection_pool=pool) 159 | g = db.select_graph("async_graph") 160 | 161 | query = "RETURN point({latitude: 32.070794860, longitude: 34.820751118})" 
162 | expected_lat = 32.070794860 163 | expected_lon = 34.820751118 164 | actual = (await g.query(query)).result_set[0][0] 165 | assert abs(actual["latitude"] - expected_lat) < 0.001 166 | assert abs(actual["longitude"] - expected_lon) < 0.001 167 | 168 | query = "RETURN point({latitude: 32, longitude: 34.0})" 169 | expected_lat = 32 170 | expected_lon = 34 171 | actual = (await g.query(query)).result_set[0][0] 172 | assert abs(actual["latitude"] - expected_lat) < 0.001 173 | assert abs(actual["longitude"] - expected_lon) < 0.001 174 | 175 | # close the connection pool 176 | await pool.aclose() 177 | 178 | @pytest.mark.asyncio 179 | async def test_index_response(): 180 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 181 | db = FalkorDB(connection_pool=pool) 182 | g = db.select_graph("async_graph") 183 | 184 | result_set = await g.query("CREATE INDEX ON :person(age)") 185 | assert 1 == result_set.indices_created 186 | 187 | with pytest.raises(ResponseError): 188 | await g.query("CREATE INDEX ON :person(age)") 189 | 190 | result_set = await g.query("DROP INDEX ON :person(age)") 191 | assert 1 == result_set.indices_deleted 192 | 193 | with pytest.raises(ResponseError): 194 | await g.query("DROP INDEX ON :person(age)") 195 | 196 | # close the connection pool 197 | await pool.aclose() 198 | 199 | @pytest.mark.asyncio 200 | async def test_stringify_query_result(): 201 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 202 | db = FalkorDB(connection_pool=pool) 203 | g = db.select_graph("async_graph") 204 | 205 | john = Node(alias="a", labels="person", 206 | properties={ "name": "John Doe", "age": 33, "gender": "male", 207 | "status": "single", }) 208 | japan = Node(alias="b", labels="country", properties={"name": "Japan"}) 209 | 210 | e = Edge(john, "visited", japan, properties={"purpose": "pleasure"}) 211 | 212 | assert ( 213 | str(john) 214 | == """(a:person{age:33,gender:"male",name:"John 
Doe",status:"single"})""" 215 | ) 216 | assert str(e) == """(a)-[:visited{purpose:"pleasure"}]->(b)""" 217 | assert str(japan) == """(b:country{name:"Japan"})""" 218 | 219 | await g.query(f"CREATE {john}, {japan}, {e}") 220 | 221 | query = """MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country) 222 | RETURN p, v, c""" 223 | 224 | result = await g.query(query) 225 | person = result.result_set[0][0] 226 | visit = result.result_set[0][1] 227 | country = result.result_set[0][2] 228 | 229 | assert ( 230 | str(person) 231 | == """(:person{age:33,gender:"male",name:"John Doe",status:"single"})""" 232 | ) 233 | assert str(visit) == """()-[:visited{purpose:"pleasure"}]->()""" 234 | assert str(country) == """(:country{name:"Japan"})""" 235 | 236 | # close the connection pool 237 | await pool.aclose() 238 | 239 | @pytest.mark.asyncio 240 | async def test_optional_match(): 241 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 242 | db = FalkorDB(connection_pool=pool) 243 | g = db.select_graph("async_graph") 244 | 245 | await g.delete() 246 | 247 | # build a graph of form (a)-[R]->(b) 248 | src = Node(alias="src", node_id=0, labels="L1", properties={"value": "a"}) 249 | dest = Node(alias="dest", node_id=1, labels="L1", properties={"value": "b"}) 250 | 251 | e = Edge(src, "R", dest, edge_id=0) 252 | 253 | await g.query(f"CREATE {src}, {dest}, {e}") 254 | 255 | # issue a query that collects all outgoing edges from both nodes 256 | # (the second has none) 257 | query = """MATCH (a) 258 | OPTIONAL MATCH (a)-[e]->(b) 259 | RETURN a, e, b 260 | ORDER BY a.value""" 261 | expected_results = [[src, e, dest], [dest, None, None]] 262 | 263 | result = await g.query(query) 264 | assert expected_results == result.result_set 265 | 266 | # close the connection pool 267 | await pool.aclose() 268 | 269 | @pytest.mark.asyncio 270 | async def test_cached_execution(): 271 | pool = BlockingConnectionPool(max_connections=16, timeout=None, 
decode_responses=True) 272 | db = FalkorDB(connection_pool=pool) 273 | g = db.select_graph("async_graph") 274 | 275 | result = await g.query("RETURN $param", {"param": 0}) 276 | assert result.cached_execution is False 277 | 278 | result = await g.query("RETURN $param", {"param": 0}) 279 | assert result.cached_execution is True 280 | 281 | # close the connection pool 282 | await pool.aclose() 283 | 284 | @pytest.mark.asyncio 285 | async def test_slowlog(): 286 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 287 | db = FalkorDB(connection_pool=pool) 288 | g = db.select_graph("async_graph") 289 | 290 | await g.delete() 291 | 292 | create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), 293 | (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), 294 | (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})""" 295 | await g.query(create_query) 296 | 297 | results = await g.slowlog() 298 | assert len(results[0]) == 4 299 | assert results[0][1] == "GRAPH.QUERY" 300 | assert results[0][2] == create_query 301 | 302 | # close the connection pool 303 | await pool.aclose() 304 | 305 | @pytest.mark.xfail(strict=False) 306 | @pytest.mark.asyncio 307 | async def test_query_timeout(): 308 | pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True) 309 | db = FalkorDB(connection_pool=pool) 310 | g = db.select_graph("async_graph") 311 | 312 | # build a graph with 1000 nodes 313 | await g.query("UNWIND range(0, 1000) as val CREATE ({v: val})") 314 | # issue a long-running query with a 1-millisecond timeout 315 | with pytest.raises(ResponseError): 316 | await g.query("MATCH (a), (b), (c), (d) RETURN *", timeout=1) 317 | assert False is False 318 | 319 | with pytest.raises(Exception): 320 | await g.query("RETURN 1", timeout="str") 321 | assert False is False 322 | 323 | # close the connection pool 324 | await pool.aclose() 325 | 326 | @pytest.mark.asyncio 
async def test_read_only_query():
    """A write query executed with read_only=True must be rejected."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    g = db.select_graph("async_graph")

    # issue a write query, specifying read-only true; the call should fail
    # (removed vacuous `assert False is False` no-op)
    with pytest.raises(Exception):
        await g.query("CREATE ()", read_only=True)

    # close the connection pool
    await pool.aclose()


@pytest.mark.asyncio
async def test_multi_label():
    """Nodes may carry multiple labels; invalid label types are rejected."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    g = db.select_graph("async_graph")

    await g.delete()

    node = Node(labels=["l", "ll"])
    await g.query(f"CREATE {node}")

    query = "MATCH (n) RETURN n"
    result = await g.query(query)
    result_node = result.result_set[0][0]
    assert result_node == node

    # BUG FIX: the previous try/assert-False/except-AssertionError pattern
    # passed even when Node() did NOT raise, because its own `assert False`
    # produced the AssertionError the handler caught. pytest.raises only
    # passes when the constructor itself raises.
    with pytest.raises(AssertionError):
        Node(labels=1)

    with pytest.raises(AssertionError):
        Node(labels=["l", 1])

    # close the connection pool
    await pool.aclose()


@pytest.mark.asyncio
async def test_execution_plan():
    """str() of an EXPLAIN result renders the indented plan tree."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    g = db.select_graph("async_graph")

    create_query = """CREATE
    (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
    (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
    (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
    await g.query(create_query)

    # NOTE(review): the query references $params but only "name" is supplied;
    # EXPLAIN does not execute, so this is tolerated — confirm intent
    result = await g.explain(
        """MATCH (r:Rider)-[:rides]->(t:Team)
           WHERE t.name = $name
           RETURN r.name, t.name, $params""", {"name": "Yehuda"}
    )

    # exact comparison: indentation must match ExecutionPlan.__str__ output
    expected = "Results\n    Project\n        Conditional Traverse | (t)->(r:Rider)\n            Filter\n                Node By Label Scan | (t:Team)"
    assert str(result) == expected

    # close the connection pool
    await pool.aclose()


@pytest.mark.asyncio
async def test_explain():
    """structured_plan mirrors the textual plan for UNION and cartesian queries."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    g = db.select_graph("async_graph")

    # graph creation / population
    create_query = """CREATE
    (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
    (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
    (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
    await g.query(create_query)

    result = await g.explain(
        """MATCH (r:Rider)-[:rides]->(t:Team)
           WHERE t.name = $name
           RETURN r.name, t.name
           UNION
           MATCH (r:Rider)-[:rides]->(t:Team)
           WHERE t.name = $name
           RETURN r.name, t.name""",
        {"name": "Yamaha"},
    )
    expected = """\
Results
Distinct
    Join
        Project
            Conditional Traverse | (t)->(r:Rider)
                Filter
                    Node By Label Scan | (t:Team)
        Project
            Conditional Traverse | (t)->(r:Rider)
                Filter
                    Node By Label Scan | (t:Team)"""
    # whitespace-insensitive comparison of the rendered plan
    assert str(result).replace(" ", "").replace("\n", "") == expected.replace(
        " ", ""
    ).replace("\n", "")

    expected = Operation("Results").append_child(
        Operation("Distinct").append_child(
            Operation("Join")
            .append_child(
                Operation("Project").append_child(
                    Operation("Conditional Traverse", "(t)->(r:Rider)").append_child(
                        Operation("Filter").append_child(
                            Operation("Node By Label Scan", "(t:Team)")
                        )
                    )
                )
            )
            .append_child(
                Operation("Project").append_child(
                    Operation("Conditional Traverse", "(t)->(r:Rider)").append_child(
                        Operation("Filter").append_child(
                            Operation("Node By Label Scan", "(t:Team)")
                        )
                    )
                )
            )
        )
    )

    assert result.structured_plan == expected

    result = await g.explain("MATCH (r:Rider), (t:Team) RETURN r.name, t.name")
    expected = """\
Results
Project
    Cartesian Product
        Node By Label Scan | (r:Rider)
        Node By Label Scan | (t:Team)"""
    assert str(result).replace(" ", "").replace("\n", "") == expected.replace(
        " ", ""
    ).replace("\n", "")

    expected = Operation("Results").append_child(
        Operation("Project").append_child(
            Operation("Cartesian Product")
            .append_child(Operation("Node By Label Scan"))
            .append_child(Operation("Node By Label Scan"))
        )
    )

    assert result.structured_plan == expected

    # close the connection pool
    await pool.aclose()

# -----------------------------------------------------------------------------
# tests/test_async_indices.py
# -----------------------------------------------------------------------------
import pytest
from redis import ResponseError
from falkordb import Edge, Node
from falkordb.asyncio import FalkorDB
from collections import OrderedDict
from redis.asyncio import BlockingConnectionPool


class Index():
    """Thin view over a raw `list_indices` result row."""

    def __init__(self, raw_response):
        self.label = raw_response[0]        # indexed label / relationship type
        self.properties = raw_response[1]   # list of indexed properties
        self.types = raw_response[2]        # property -> list of index types
        self.entity_type = raw_response[6]  # 'NODE' or 'RELATIONSHIP'


@pytest.mark.asyncio
async def test_node_index_creation():
    """Range, fulltext, vector and multi-type node indices are created correctly."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    graph = db.select_graph("async_indices")

    lbl = "N"

    # create node indices

    # create node range index
    res = await graph.create_node_range_index(lbl, 'x')
    assert res.indices_created == 1

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == lbl
    assert index.properties == ['x']
    assert index.types['x'] == ['RANGE']
    assert index.entity_type == 'NODE'

    # create node range index over multiple properties
    res = await graph.create_node_range_index(lbl, 'y', 'z')
    assert res.indices_created == 2

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == lbl
    assert index.properties == ['x', 'y', 'z']
    assert index.types['x'] == ['RANGE']
    assert index.types['y'] == ['RANGE']
    assert index.types['z'] == ['RANGE']
    assert index.entity_type == 'NODE'

    # try to create an existing index
    with pytest.raises(ResponseError):
        res = await graph.create_node_range_index(lbl, 'z', 'x')

    # create node full-text index
    res = await graph.create_node_fulltext_index(lbl, 'name')
    assert res.indices_created == 1

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == lbl
    assert index.properties == ['x', 'y', 'z', 'name']
    assert index.types['x'] == ['RANGE']
    assert index.types['y'] == ['RANGE']
    assert index.types['z'] == ['RANGE']
    assert index.types['name'] == ['FULLTEXT']
    assert index.entity_type == 'NODE'

    # create node vector index
    res = await graph.create_node_vector_index(lbl, 'desc', dim=32, similarity_function="euclidean")
    assert res.indices_created == 1

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == lbl
    assert index.properties == ['x', 'y', 'z', 'name', 'desc']
    assert index.types['x'] == ['RANGE']
    assert index.types['y'] == ['RANGE']
    assert index.types['z'] == ['RANGE']
    assert index.types['name'] == ['FULLTEXT']
    assert index.types['desc'] == ['VECTOR']
    assert index.entity_type == 'NODE'

    # create a multi-type property
    res = await graph.create_node_fulltext_index(lbl, 'x')
    assert res.indices_created == 1

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == lbl
    assert index.properties == ['x', 'y', 'z', 'name', 'desc']
    assert index.types['x'] == ['RANGE', 'FULLTEXT']
    assert index.types['y'] == ['RANGE']
    assert index.types['z'] == ['RANGE']
    assert index.types['name'] == ['FULLTEXT']
    assert index.types['desc'] == ['VECTOR']
    assert index.entity_type == 'NODE'

    # close the connection pool
    await pool.aclose()


@pytest.mark.asyncio
async def test_edge_index_creation():
    """Range, fulltext, vector and multi-type edge indices are created correctly."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    graph = db.select_graph("async_indices")
    await graph.delete()

    rel = "R"

    # create edge indices

    # create edge range index
    res = await graph.create_edge_range_index(rel, 'x')
    assert res.indices_created == 1

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == rel
    assert index.properties == ['x']
    assert index.types['x'] == ['RANGE']
    assert index.entity_type == 'RELATIONSHIP'

    # create edge range index over multiple properties
    res = await graph.create_edge_range_index(rel, 'y', 'z')
    assert res.indices_created == 2

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == rel
    assert index.properties == ['x', 'y', 'z']
    assert index.types['x'] == ['RANGE']
    assert index.types['y'] == ['RANGE']
    assert index.types['z'] == ['RANGE']
    assert index.entity_type == 'RELATIONSHIP'

    # try to create an existing index
    with pytest.raises(ResponseError):
        res = await graph.create_edge_range_index(rel, 'z', 'x')

    # create edge full-text index
    res = await graph.create_edge_fulltext_index(rel, 'name')
    assert res.indices_created == 1

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == rel
    assert index.properties == ['x', 'y', 'z', 'name']
    assert index.types['x'] == ['RANGE']
    assert index.types['y'] == ['RANGE']
    assert index.types['z'] == ['RANGE']
    assert index.types['name'] == ['FULLTEXT']
    assert index.entity_type == 'RELATIONSHIP'

    # create edge vector index
    res = await graph.create_edge_vector_index(rel, 'desc', dim=32, similarity_function="euclidean")
    assert res.indices_created == 1

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == rel
    assert index.properties == ['x', 'y', 'z', 'name', 'desc']
    assert index.types['x'] == ['RANGE']
    assert index.types['y'] == ['RANGE']
    assert index.types['z'] == ['RANGE']
    assert index.types['name'] == ['FULLTEXT']
    assert index.types['desc'] == ['VECTOR']
    assert index.entity_type == 'RELATIONSHIP'

    # create a multi-type property
    res = await graph.create_edge_fulltext_index(rel, 'x')
    assert res.indices_created == 1

    index = Index((await graph.list_indices()).result_set[0])
    assert index.label == rel
    assert index.properties == ['x', 'y', 'z', 'name', 'desc']
    assert index.types['x'] == ['RANGE', 'FULLTEXT']
    assert index.types['y'] == ['RANGE']
    assert index.types['z'] == ['RANGE']
    assert index.types['name'] == ['FULLTEXT']
    assert index.types['desc'] == ['VECTOR']
    assert index.entity_type == 'RELATIONSHIP'

    # close the connection pool
    await pool.aclose()


@pytest.mark.asyncio
async def test_node_index_drop():
    """Each node index kind can be created and then dropped."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    graph = db.select_graph("async_indices")
    await graph.delete()

    # create an index and delete it
    lbl = 'N'
    attr = 'x'

    # create node range index
    res = await graph.create_node_range_index(lbl, attr)
    assert res.indices_created == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 1

    # drop range index
    res = await graph.drop_node_range_index(lbl, attr)
    assert res.indices_deleted == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 0

    #---------------------------------------------------------------------------

    # create node fulltext index
    res = await graph.create_node_fulltext_index(lbl, attr)
    assert res.indices_created == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 1

    # drop fulltext index
    res = await graph.drop_node_fulltext_index(lbl, attr)
    assert res.indices_deleted == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 0

    #---------------------------------------------------------------------------

    # create node vector index
    res = await graph.create_node_vector_index(lbl, attr)
    assert res.indices_created == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 1

    # drop vector index
    res = await graph.drop_node_vector_index(lbl, attr)
    assert res.indices_deleted == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 0

    # close the connection pool
    await pool.aclose()


@pytest.mark.asyncio
async def test_edge_index_drop():
    """Each edge index kind can be created and then dropped."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    graph = db.select_graph("async_indices")
    await graph.delete()

    # create an index and delete it
    rel = 'R'
    attr = 'x'

    # create edge range index
    res = await graph.create_edge_range_index(rel, attr)
    assert res.indices_created == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 1

    # drop range index
    res = await graph.drop_edge_range_index(rel, attr)
    assert res.indices_deleted == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 0

    #---------------------------------------------------------------------------

    # create edge fulltext index
    res = await graph.create_edge_fulltext_index(rel, attr)
    assert res.indices_created == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 1

    # drop fulltext index
    res = await graph.drop_edge_fulltext_index(rel, attr)
    assert res.indices_deleted == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 0

    #---------------------------------------------------------------------------

    # create edge vector index
    res = await graph.create_edge_vector_index(rel, attr)
    assert res.indices_created == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 1

    # drop vector index
    res = await graph.drop_edge_vector_index(rel, attr)
    assert res.indices_deleted == 1

    # list indices
    res = await graph.list_indices()
    assert len(res.result_set) == 0

    # close the connection pool
    await pool.aclose()
# -----------------------------------------------------------------------------
# tests/test_async_profile.py
# -----------------------------------------------------------------------------
import pytest
from falkordb.asyncio import FalkorDB
from redis.asyncio import BlockingConnectionPool


@pytest.mark.asyncio
async def test_profile():
    """PROFILE of an UNWIND query reports 4 records at every operation."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    g = db.select_graph("async_profile")

    plan = await g.profile("UNWIND range(0, 3) AS x RETURN x")

    results_op = plan.structured_plan
    assert results_op.name == 'Results'
    assert len(results_op.children) == 1
    assert results_op.profile_stats.records_produced == 4

    project_op = results_op.children[0]
    assert project_op.name == 'Project'
    assert len(project_op.children) == 1
    assert project_op.profile_stats.records_produced == 4

    unwind_op = project_op.children[0]
    assert unwind_op.name == 'Unwind'
    assert len(unwind_op.children) == 0
    assert unwind_op.profile_stats.records_produced == 4

    # close the connection pool
    await pool.aclose()


@pytest.mark.asyncio
async def test_cartesian_product_profile():
    """PROFILE on an empty graph reports 0 records at every operation."""
    pool = BlockingConnectionPool(max_connections=16, timeout=None, decode_responses=True)
    db = FalkorDB(connection_pool=pool)
    g = db.select_graph("async_profile")

    plan = await g.profile("MATCH (a), (b) RETURN *")

    results_op = plan.structured_plan
    assert results_op.name == 'Results'
    assert len(results_op.children) == 1
    assert results_op.profile_stats.records_produced == 0

    project_op = results_op.children[0]
    assert project_op.name == 'Project'
    assert len(project_op.children) == 1
    assert project_op.profile_stats.records_produced == 0

    cp_op = project_op.children[0]
    assert cp_op.name == 'Cartesian Product'
    assert len(cp_op.children) == 2
    assert cp_op.profile_stats.records_produced == 0

    scan_a_op = cp_op.children[0]
    scan_b_op = cp_op.children[1]

    assert scan_a_op.name == 'All Node Scan'
    assert len(scan_a_op.children) == 0
    assert scan_a_op.profile_stats.records_produced == 0

    assert scan_b_op.name == 'All Node Scan'
    assert len(scan_b_op.children) == 0
    assert scan_b_op.profile_stats.records_produced == 0

    # close the connection pool
    await pool.aclose()

# -----------------------------------------------------------------------------
# tests/test_constraints.py
# -----------------------------------------------------------------------------
import pytest
from redis import ResponseError
from falkordb import FalkorDB


def test_constraints():
    """All six constraint kinds can be created, listed and dropped."""
    db = FalkorDB(host='localhost', port=6379)
    g = db.select_graph("constraints")

    # create node constraints
    g.create_node_unique_constraint("Person", "name")
    g.create_node_mandatory_constraint("Person", "name")
    g.create_node_unique_constraint("Person", "v1", "v2")

    # create edge constraints
    g.create_edge_unique_constraint("KNOWS", "since")
    g.create_edge_mandatory_constraint("KNOWS", "since")
    g.create_edge_unique_constraint("KNOWS", "v1", "v2")

    constraints = g.list_constraints()
    assert len(constraints) == 6

    # drop constraints
    g.drop_node_unique_constraint("Person", "name")
    g.drop_node_mandatory_constraint("Person", "name")
    g.drop_node_unique_constraint("Person", "v1", "v2")

    g.drop_edge_unique_constraint("KNOWS", "since")
    g.drop_edge_mandatory_constraint("KNOWS", "since")
    g.drop_edge_unique_constraint("KNOWS", "v1", "v2")

    constraints = g.list_constraints()
    assert len(constraints) == 0


def test_create_existing_constraint():
    """Re-creating an existing constraint raises with a descriptive message."""
    # trying to create an existing constraint
    db = FalkorDB(host='localhost', port=6379)
    g = db.select_graph("constraints")

    # create node constraints
    g.create_node_unique_constraint("Person", "name")
    with pytest.raises(Exception, match="Constraint already exists"):
        g.create_node_unique_constraint("Person", "name")

# -----------------------------------------------------------------------------
# tests/test_copy.py
# -----------------------------------------------------------------------------
import pytest
from falkordb import FalkorDB


def test_graph_copy():
    """Clone a graph and verify entities, schema, indices and constraints match."""
    # create a simple graph and clone it
    # make sure graphs are the same

    db = FalkorDB(host='localhost', port=6379)
    src = db.select_graph("copy_src")

    # create entities
    src.query("CREATE (:A {v:1})-[:R {v:2}]->(:B {v:3})")

    # create indices
    # NOTE(review): "A" is a node label — create_edge_range_index here looks
    # like it should be create_node_range_index; confirm against the schema
    src.create_edge_range_index("A", "v")
    src.create_edge_range_index("R", "v")
    src.create_node_fulltext_index("B", "v")

    # create constraints
    src.create_node_unique_constraint("A", "v")
    src.create_edge_unique_constraint("R", "v")

    # clone graph
    dest = src.copy("copy_dest")

    # validate src and dest are the same
    # validate entities
    q = "MATCH (a) RETURN a ORDER BY ID(a)"
    src_res = src.query(q).result_set
    dest_res = dest.query(q).result_set
    assert src_res == dest_res

    q = "MATCH ()-[e]->() RETURN e ORDER BY ID(e)"
    src_res = src.query(q).result_set
    dest_res = dest.query(q).result_set
    assert src_res == dest_res

    # validate schema
    src_res = src.call_procedure("DB.LABELS").result_set
    dest_res = dest.call_procedure("DB.LABELS").result_set
    assert src_res == dest_res

    src_res = src.call_procedure("DB.PROPERTYKEYS").result_set
    dest_res = dest.call_procedure("DB.PROPERTYKEYS").result_set
    assert src_res == dest_res

    src_res = src.call_procedure("DB.RELATIONSHIPTYPES").result_set
    dest_res = dest.call_procedure("DB.RELATIONSHIPTYPES").result_set
    assert src_res == dest_res

    # validate indices
    q = """CALL DB.INDEXES()
           YIELD label, properties, types, language, stopwords, entitytype, status
           RETURN *
           ORDER BY label, properties, types, language, stopwords, entitytype, status"""
    src_res = src.query(q).result_set
    dest_res = dest.query(q).result_set

    assert src_res == dest_res

    # validate constraints
    src_res = src.list_constraints()
    dest_res = dest.list_constraints()
    assert src_res == dest_res

# -----------------------------------------------------------------------------
# tests/test_db.py
# -----------------------------------------------------------------------------
import pytest
from falkordb import FalkorDB


@pytest.fixture
def client(request):
    """A FalkorDB client connected to the local test server."""
    return FalkorDB(host='localhost', port=6379)


def test_config(client):
    """Configuration values can be read, written and restored; bad ones raise."""
    db = client
    config_name = "RESULTSET_SIZE"

    # save old configuration value (fixed typo: "olf")
    prev_value = int(db.config_get(config_name))

    # set configuration
    response = db.config_set(config_name, 3)
    assert response == "OK"

    # make sure config been updated
    new_value = int(db.config_get(config_name))
    assert new_value == 3

    # restore original value
    response = db.config_set(config_name, prev_value)
    assert response == "OK"

    # trying to get / set invalid configuration
    with pytest.raises(Exception):
        db.config_get("none_existing_conf")

    with pytest.raises(Exception):
        db.config_set("none_existing_conf", 1)

    with pytest.raises(Exception):
        db.config_set(config_name, "invalid value")


def test_connect_via_url():
    # make sure we're able to connect via url

    # just host
    db = FalkorDB.from_url("falkor://localhost")
    g = db.select_graph("db")
    one = g.query("RETURN 1").result_set[0][0]
assert one == 1 47 | 48 | # host & Port 49 | db = FalkorDB.from_url("falkor://localhost:6379") 50 | g = db.select_graph("db") 51 | one = g.query("RETURN 1").result_set[0][0] 52 | assert one == 1 53 | -------------------------------------------------------------------------------- /tests/test_edge.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from falkordb import Node, Edge 3 | 4 | def test_init(): 5 | with pytest.raises(AssertionError): 6 | Edge(None, None, None) 7 | Edge(Node(), None, None) 8 | Edge(None, None, Node()) 9 | 10 | assert isinstance( 11 | Edge(Node(node_id=1), None, Node(node_id=2)), Edge 12 | ) 13 | 14 | 15 | def test_to_string(): 16 | props_result = Edge( 17 | Node(), None, Node(), properties={"a": "a", "b": 10} 18 | ).to_string() 19 | assert props_result == '{a:"a",b:10}' 20 | 21 | no_props_result = Edge( 22 | Node(), None, Node(), properties={} 23 | ).to_string() 24 | assert no_props_result == "" 25 | 26 | 27 | def test_stringify(): 28 | john = Node( 29 | alias="a", 30 | labels="person", 31 | properties={"name": 'John Doe', "age": 33, "someArray": [1, 2, 3]}, 32 | ) 33 | 34 | japan = Node(alias="b", 35 | labels="country", 36 | properties={"name": 'Japan'} 37 | ) 38 | 39 | edge_with_relation = Edge( 40 | john, 41 | "visited", 42 | japan, 43 | properties={"purpose": "pleasure"} 44 | ) 45 | assert("(a)-[:visited{purpose:\"pleasure\"}]->(b)" == str(edge_with_relation)) 46 | 47 | edge_no_relation_no_props = Edge(japan, "", john) 48 | assert("(b)-[]->(a)" == str(edge_no_relation_no_props)) 49 | 50 | edge_only_props = Edge(john, "", japan, properties={"a": "b", "c": 3}) 51 | assert("(a)-[{a:\"b\",c:3}]->(b)" == str(edge_only_props)) 52 | 53 | 54 | def test_comparision(): 55 | node1 = Node(node_id=1) 56 | node2 = Node(node_id=2) 57 | node3 = Node(node_id=3) 58 | 59 | edge1 = Edge(node1, None, node2) 60 | assert edge1 == Edge(node1, None, node2) 61 | assert edge1 != Edge(node1, "bla", node2) 62 | 
assert edge1 != Edge(node1, None, node3) 63 | assert edge1 != Edge(node3, None, node2) 64 | assert edge1 != Edge(node2, None, node1) 65 | assert edge1 != Edge(node1, None, node2, properties={"a": 10}) 66 | -------------------------------------------------------------------------------- /tests/test_explain.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from falkordb import FalkorDB 3 | 4 | 5 | @pytest.fixture 6 | def client(request): 7 | db = FalkorDB(host='localhost', port=6379) 8 | return db 9 | 10 | 11 | def test_explain(client): 12 | db = client 13 | g = db.select_graph("explain") 14 | 15 | # run a single query to create the graph 16 | g.query("RETURN 1") 17 | 18 | plan = g.explain("UNWIND range(0, 3) AS x RETURN x") 19 | 20 | results_op = plan.structured_plan 21 | assert(results_op.name == 'Results') 22 | assert(len(results_op.children) == 1) 23 | 24 | project_op = results_op.children[0] 25 | assert(project_op.name == 'Project') 26 | assert(len(project_op.children) == 1) 27 | 28 | unwind_op = project_op.children[0] 29 | assert(unwind_op.name == 'Unwind') 30 | assert(len(unwind_op.children) == 0) 31 | 32 | def test_cartesian_product_explain(client): 33 | db = client 34 | g = db.select_graph("explain") 35 | plan = g.explain("MATCH (a), (b) RETURN *") 36 | 37 | results_op = plan.structured_plan 38 | assert(results_op.name == 'Results') 39 | assert(len(results_op.children) == 1) 40 | 41 | project_op = results_op.children[0] 42 | assert(project_op.name == 'Project') 43 | assert(len(project_op.children) == 1) 44 | 45 | cp_op = project_op.children[0] 46 | assert(cp_op.name == 'Cartesian Product') 47 | assert(len(cp_op.children) == 2) 48 | 49 | scan_a_op = cp_op.children[0] 50 | scan_b_op = cp_op.children[1] 51 | 52 | assert(scan_a_op.name == 'All Node Scan') 53 | assert(len(scan_a_op.children) == 0) 54 | 55 | assert(scan_b_op.name == 'All Node Scan') 56 | assert(len(scan_b_op.children) == 0) 57 | 58 | def 
test_merge(client): 59 | db = client 60 | g = db.select_graph("explain") 61 | 62 | try: 63 | g.create_node_range_index("person", "age") 64 | except: 65 | pass 66 | plan = g.explain("MERGE (p1:person {age: 40}) MERGE (p2:person {age: 41})") 67 | 68 | root = plan.structured_plan 69 | assert(root.name == 'Merge') 70 | assert(len(root.children) == 3) 71 | 72 | merge_op = root.children[0] 73 | assert(merge_op.name == 'Merge') 74 | assert(len(merge_op.children) == 2) 75 | 76 | index_scan_op = merge_op.children[0] 77 | assert(index_scan_op.name == 'Node By Index Scan') 78 | assert(len(index_scan_op.children) == 0) 79 | 80 | merge_create_op = merge_op.children[1] 81 | assert(merge_create_op.name == 'MergeCreate') 82 | assert(len(merge_create_op.children) == 0) 83 | 84 | index_scan_op = root.children[1] 85 | assert(index_scan_op.name == 'Node By Index Scan') 86 | assert(len(index_scan_op.children) == 1) 87 | 88 | arg_op = index_scan_op.children[0] 89 | assert(arg_op.name == 'Argument') 90 | assert(len(arg_op.children) == 0) 91 | 92 | merge_create_op = root.children[2] 93 | assert(merge_create_op.name == 'MergeCreate') 94 | assert(len(merge_create_op.children) == 1) 95 | 96 | arg_op = merge_create_op.children[0] 97 | assert(arg_op.name == 'Argument') 98 | assert(len(arg_op.children) == 0) 99 | -------------------------------------------------------------------------------- /tests/test_graph.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from redis import ResponseError 3 | from falkordb import FalkorDB, Edge, Node, Path, Operation 4 | 5 | 6 | @pytest.fixture 7 | def client(request): 8 | db = FalkorDB(host='localhost', port=6379) 9 | db.flushdb() 10 | return db.select_graph("graph") 11 | 12 | def test_graph_creation(client): 13 | graph = client 14 | 15 | john = Node( 16 | alias="p", 17 | labels="person", 18 | properties={ 19 | "name": "John Doe", 20 | "age": 33, 21 | "gender": "male", 22 | "status": "single", 23 | }, 
24 | ) 25 | 26 | japan = Node(alias="c", labels="country", properties={"name": "Japan"}) 27 | 28 | edge = Edge(john, "visited", japan, alias="v", properties={"purpose": "pleasure"}) 29 | 30 | query = f"CREATE {john}, {japan}, {edge} RETURN p,v,c" 31 | result = graph.query(query) 32 | 33 | person = result.result_set[0][0] 34 | visit = result.result_set[0][1] 35 | country = result.result_set[0][2] 36 | 37 | assert person == john 38 | assert visit.properties == edge.properties 39 | assert country == japan 40 | 41 | query = """RETURN [1, 2.3, "4", true, false, null]""" 42 | result = graph.query(query) 43 | assert [1, 2.3, "4", True, False, None] == result.result_set[0][0] 44 | 45 | # all done, remove graph 46 | graph.delete() 47 | 48 | 49 | def test_array_functions(client): 50 | graph = client 51 | query = """RETURN [0,1,2]""" 52 | result = graph.query(query) 53 | assert [0, 1, 2] == result.result_set[0][0] 54 | 55 | a = Node( 56 | node_id=0, 57 | labels="person", 58 | properties={"name": "a", "age": 32, "array": [0, 1, 2]} 59 | ) 60 | 61 | graph.query(f"CREATE {a}") 62 | 63 | query = "MATCH(n) return collect(n)" 64 | result = graph.query(query) 65 | 66 | assert [a] == result.result_set[0][0] 67 | 68 | 69 | def test_path(client): 70 | graph = client 71 | node0 = Node(alias="node0", node_id=0, labels="L1") 72 | node1 = Node(alias="node1", node_id=1, labels="L1") 73 | edge01 = Edge(node0, "R1", node1, edge_id=0, properties={"value": 1}) 74 | 75 | graph.query(f"CREATE {node0}, {node1}, {edge01}") 76 | 77 | path01 = Path([node0, node1], [edge01]) 78 | expected_results = [[path01]] 79 | 80 | query = "MATCH p=(:L1)-[:R1]->(:L1) RETURN p" 81 | result = graph.query(query) 82 | assert expected_results == result.result_set 83 | 84 | 85 | def test_vector(client): 86 | graph = client 87 | res = graph.query("RETURN vecf32([1.2, 2.3, -1.2, 0.1])").result_set 88 | 89 | actual = [round(x, 3) for x in res[0][0]] 90 | expected = [1.2, 2.3, -1.2, 0.1] 91 | 92 | assert expected == actual 
93 | 94 | 95 | def test_param(client): 96 | graph = client 97 | params = [1, 2.3, "str", True, False, None, [0, 1, 2], r"\" RETURN 1337 //"] 98 | query = "RETURN $param" 99 | for param in params: 100 | result = graph.query(query, {"param": param}) 101 | expected_results = [[param]] 102 | assert expected_results == result.result_set 103 | 104 | 105 | def test_map(client): 106 | g = client 107 | 108 | query = "RETURN {a:1, b:'str', c:NULL, d:[1,2,3], e:True, f:{x:1, y:2}}" 109 | actual = g.query(query).result_set[0][0] 110 | expected = { 111 | "a": 1, 112 | "b": "str", 113 | "c": None, 114 | "d": [1, 2, 3], 115 | "e": True, 116 | "f": {"x": 1, "y": 2}, 117 | } 118 | 119 | assert actual == expected 120 | 121 | src = Node(alias="src", node_id=0, labels="L1", properties={"v": 0}) 122 | dest = Node(alias="dest", node_id=1, labels="L2", properties={"v":2}) 123 | e = Edge(src, "R1", dest, edge_id=0, properties={"value": 1}) 124 | g.query(f"CREATE {src}, {dest}, {e}") 125 | 126 | query = "MATCH (src)-[e]->(dest) RETURN {src:src, e:e, dest:dest}" 127 | actual = g.query(query).result_set[0][0] 128 | expected = { "src": src, "e": e, "dest": dest } 129 | assert actual == expected 130 | 131 | def test_point(client): 132 | g = client 133 | query = "RETURN point({latitude: 32.070794860, longitude: 34.820751118})" 134 | expected_lat = 32.070794860 135 | expected_lon = 34.820751118 136 | actual = g.query(query).result_set[0][0] 137 | assert abs(actual["latitude"] - expected_lat) < 0.001 138 | assert abs(actual["longitude"] - expected_lon) < 0.001 139 | 140 | query = "RETURN point({latitude: 32, longitude: 34.0})" 141 | expected_lat = 32 142 | expected_lon = 34 143 | actual = g.query(query).result_set[0][0] 144 | assert abs(actual["latitude"] - expected_lat) < 0.001 145 | assert abs(actual["longitude"] - expected_lon) < 0.001 146 | 147 | 148 | def test_index_response(client): 149 | g = client 150 | result_set = g.query("CREATE INDEX ON :person(age)") 151 | assert 1 == 
result_set.indices_created 152 | 153 | with pytest.raises(ResponseError): 154 | g.query("CREATE INDEX ON :person(age)") 155 | 156 | result_set = g.query("DROP INDEX ON :person(age)") 157 | assert 1 == result_set.indices_deleted 158 | 159 | with pytest.raises(ResponseError): 160 | g.query("DROP INDEX ON :person(age)") 161 | 162 | 163 | def test_stringify_query_result(client): 164 | g = client 165 | 166 | john = Node(alias="a", labels="person", 167 | properties={ "name": "John Doe", "age": 33, "gender": "male", 168 | "status": "single", }) 169 | japan = Node(alias="b", labels="country", properties={"name": "Japan"}) 170 | 171 | e = Edge(john, "visited", japan, properties={"purpose": "pleasure"}) 172 | 173 | assert ( 174 | str(john) 175 | == """(a:person{age:33,gender:"male",name:"John Doe",status:"single"})""" 176 | ) 177 | assert str(e) == """(a)-[:visited{purpose:"pleasure"}]->(b)""" 178 | assert str(japan) == """(b:country{name:"Japan"})""" 179 | 180 | g.query(f"CREATE {john}, {japan}, {e}") 181 | 182 | query = """MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country) 183 | RETURN p, v, c""" 184 | 185 | result = g.query(query) 186 | person = result.result_set[0][0] 187 | visit = result.result_set[0][1] 188 | country = result.result_set[0][2] 189 | 190 | assert ( 191 | str(person) 192 | == """(:person{age:33,gender:"male",name:"John Doe",status:"single"})""" 193 | ) 194 | assert str(visit) == """()-[:visited{purpose:"pleasure"}]->()""" 195 | assert str(country) == """(:country{name:"Japan"})""" 196 | 197 | g.delete() 198 | 199 | 200 | def test_optional_match(client): 201 | # build a graph of form (a)-[R]->(b) 202 | src = Node(alias="src", node_id=0, labels="L1", properties={"value": "a"}) 203 | dest = Node(alias="dest", node_id=1, labels="L1", properties={"value": "b"}) 204 | 205 | e = Edge(src, "R", dest, edge_id=0) 206 | 207 | g = client 208 | g.query(f"CREATE {src}, {dest}, {e}") 209 | 210 | # issue a query that collects all outgoing edges from both 
nodes 211 | # (the second has none) 212 | query = """MATCH (a) 213 | OPTIONAL MATCH (a)-[e]->(b) 214 | RETURN a, e, b 215 | ORDER BY a.value""" 216 | expected_results = [[src, e, dest], [dest, None, None]] 217 | 218 | result = g.query(query) 219 | assert expected_results == result.result_set 220 | 221 | g.delete() 222 | 223 | 224 | def test_cached_execution(client): 225 | g = client 226 | 227 | result = g.query("RETURN $param", {"param": 0}) 228 | assert result.cached_execution is False 229 | 230 | result = g.query("RETURN $param", {"param": 0}) 231 | assert result.cached_execution is True 232 | 233 | 234 | def test_slowlog(client): 235 | g = client 236 | create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), 237 | (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), 238 | (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})""" 239 | g.query(create_query) 240 | 241 | results = g.slowlog() 242 | assert len(results[0]) == 4 243 | assert results[0][1] == "GRAPH.QUERY" 244 | assert results[0][2] == create_query 245 | 246 | 247 | @pytest.mark.xfail(strict=False) 248 | def test_query_timeout(client): 249 | g = client 250 | # build a graph with 1000 nodes 251 | g.query("UNWIND range(0, 1000) as val CREATE ({v: val})") 252 | # issue a long-running query with a 1-millisecond timeout 253 | with pytest.raises(ResponseError): 254 | g.query("MATCH (a), (b), (c), (d) RETURN *", timeout=1) 255 | assert False is False 256 | 257 | with pytest.raises(Exception): 258 | g.query("RETURN 1", timeout="str") 259 | assert False is False 260 | 261 | 262 | def test_read_only_query(client): 263 | g = client 264 | with pytest.raises(Exception): 265 | # issue a write query, specifying read-only true 266 | # call should fail 267 | g.query("CREATE ()", read_only=True) 268 | assert False is False 269 | 270 | 271 | def _test_list_keys(client): 272 | g = client 273 | result = g.list_keys() 274 | assert result == [] 275 | 276 | 
client.graph("G").query("RETURN 1") 277 | result = client.graph().list_keys() 278 | assert result == ["G"] 279 | 280 | client.graph("X").query("RETURN 1") 281 | result = client.graph().list_keys() 282 | assert result == ["G", "X"] 283 | 284 | client.delete("G") 285 | client.rename("X", "Z") 286 | result = client.graph().list_keys() 287 | assert result == ["Z"] 288 | 289 | client.delete("Z") 290 | result = client.graph().list_keys() 291 | assert result == [] 292 | 293 | 294 | def test_multi_label(client): 295 | g = client 296 | 297 | node = Node(labels=["l", "ll"]) 298 | g.query(f"CREATE {node}") 299 | 300 | query = "MATCH (n) RETURN n" 301 | result = g.query(query) 302 | result_node = result.result_set[0][0] 303 | assert result_node == node 304 | 305 | try: 306 | Node(labels=1) 307 | assert False 308 | except AssertionError: 309 | assert True 310 | 311 | try: 312 | Node(labels=["l", 1]) 313 | assert False 314 | except AssertionError: 315 | assert True 316 | 317 | 318 | def test_cache_sync(client): 319 | pass 320 | return 321 | # This test verifies that client internal graph schema cache stays 322 | # in sync with the graph schema 323 | # 324 | # Client B will try to get Client A out of sync by: 325 | # 1. deleting the graph 326 | # 2. reconstructing the graph in a different order, this will casuse 327 | # a differance in the current mapping between string IDs and the 328 | # mapping Client A is aware of 329 | # 330 | # Client A should pick up on the changes by comparing graph versions 331 | # and resyncing its cache. 332 | 333 | A = client.graph("cache-sync") 334 | B = client.graph("cache-sync") 335 | 336 | # Build order: 337 | # 1. introduce label 'L' and 'K' 338 | # 2. introduce attribute 'x' and 'q' 339 | # 3. 
introduce relationship-type 'R' and 'S' 340 | 341 | A.query("CREATE (:L)") 342 | B.query("CREATE (:K)") 343 | A.query("MATCH (n) SET n.x = 1") 344 | B.query("MATCH (n) SET n.q = 1") 345 | A.query("MATCH (n) CREATE (n)-[:R]->()") 346 | B.query("MATCH (n) CREATE (n)-[:S]->()") 347 | 348 | # Cause client A to populate its cache 349 | A.query("MATCH (n)-[e]->() RETURN n, e") 350 | 351 | assert len(A._labels) == 2 352 | assert len(A._properties) == 2 353 | assert len(A._relationship_types) == 2 354 | assert A._labels[0] == "L" 355 | assert A._labels[1] == "K" 356 | assert A._properties[0] == "x" 357 | assert A._properties[1] == "q" 358 | assert A._relationship_types[0] == "R" 359 | assert A._relationship_types[1] == "S" 360 | 361 | # Have client B reconstruct the graph in a different order. 362 | B.delete() 363 | 364 | # Build order: 365 | # 1. introduce relationship-type 'R' 366 | # 2. introduce label 'L' 367 | # 3. introduce attribute 'x' 368 | B.query("CREATE ()-[:S]->()") 369 | B.query("CREATE ()-[:R]->()") 370 | B.query("CREATE (:K)") 371 | B.query("CREATE (:L)") 372 | B.query("MATCH (n) SET n.q = 1") 373 | B.query("MATCH (n) SET n.x = 1") 374 | 375 | # A's internal cached mapping is now out of sync 376 | # issue a query and make sure A's cache is synced. 
377 | A.query("MATCH (n)-[e]->() RETURN n, e") 378 | 379 | assert len(A._labels) == 2 380 | assert len(A._properties) == 2 381 | assert len(A._relationship_types) == 2 382 | assert A._labels[0] == "K" 383 | assert A._labels[1] == "L" 384 | assert A._properties[0] == "q" 385 | assert A._properties[1] == "x" 386 | assert A._relationship_types[0] == "S" 387 | assert A._relationship_types[1] == "R" 388 | 389 | 390 | def test_execution_plan(client): 391 | g = client 392 | create_query = """CREATE 393 | (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), 394 | (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), 395 | (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})""" 396 | g.query(create_query) 397 | 398 | result = g.explain( 399 | """MATCH (r:Rider)-[:rides]->(t:Team) 400 | WHERE t.name = $name 401 | RETURN r.name, t.name, $params""", {"name": "Yehuda"} 402 | ) 403 | 404 | expected = "Results\n Project\n Conditional Traverse | (t)->(r:Rider)\n Filter\n Node By Label Scan | (t:Team)" 405 | assert str(result) == expected 406 | 407 | g.delete() 408 | 409 | 410 | def test_explain(client): 411 | g = client 412 | # graph creation / population 413 | create_query = """CREATE 414 | (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}), 415 | (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}), 416 | (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})""" 417 | g.query(create_query) 418 | 419 | result = g.explain( 420 | """MATCH (r:Rider)-[:rides]->(t:Team) 421 | WHERE t.name = $name 422 | RETURN r.name, t.name 423 | UNION 424 | MATCH (r:Rider)-[:rides]->(t:Team) 425 | WHERE t.name = $name 426 | RETURN r.name, t.name""", 427 | {"name": "Yamaha"}, 428 | ) 429 | expected = """\ 430 | Results 431 | Distinct 432 | Join 433 | Project 434 | Conditional Traverse | (t)->(r:Rider) 435 | Filter 436 | Node By Label Scan | (t:Team) 437 | Project 438 | Conditional Traverse | (t)->(r:Rider) 439 | 
Filter 440 | Node By Label Scan | (t:Team)""" 441 | assert str(result).replace(" ", "").replace("\n", "") == expected.replace( 442 | " ", "" 443 | ).replace("\n", "") 444 | 445 | expected = Operation("Results").append_child( 446 | Operation("Distinct").append_child( 447 | Operation("Join") 448 | .append_child( 449 | Operation("Project").append_child( 450 | Operation("Conditional Traverse", "(t)->(r:Rider)").append_child( 451 | Operation("Filter").append_child( 452 | Operation("Node By Label Scan", "(t:Team)") 453 | ) 454 | ) 455 | ) 456 | ) 457 | .append_child( 458 | Operation("Project").append_child( 459 | Operation("Conditional Traverse", "(t)->(r:Rider)").append_child( 460 | Operation("Filter").append_child( 461 | Operation("Node By Label Scan", "(t:Team)") 462 | ) 463 | ) 464 | ) 465 | ) 466 | ) 467 | ) 468 | 469 | assert result.structured_plan == expected 470 | 471 | result = g.explain("MATCH (r:Rider), (t:Team) RETURN r.name, t.name") 472 | expected = """\ 473 | Results 474 | Project 475 | Cartesian Product 476 | Node By Label Scan | (r:Rider) 477 | Node By Label Scan | (t:Team)""" 478 | assert str(result).replace(" ", "").replace("\n", "") == expected.replace( 479 | " ", "" 480 | ).replace("\n", "") 481 | 482 | expected = Operation("Results").append_child( 483 | Operation("Project").append_child( 484 | Operation("Cartesian Product") 485 | .append_child(Operation("Node By Label Scan")) 486 | .append_child(Operation("Node By Label Scan")) 487 | ) 488 | ) 489 | 490 | assert result.structured_plan == expected 491 | 492 | g.delete() 493 | -------------------------------------------------------------------------------- /tests/test_indices.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from redis import ResponseError 3 | from falkordb import FalkorDB 4 | from collections import OrderedDict 5 | 6 | class Index(): 7 | def __init__(self, raw_response): 8 | self.label = raw_response[0] 9 | self.properties = 
raw_response[1] 10 | self.types = raw_response[2] 11 | self.entity_type = raw_response[6] 12 | 13 | @pytest.fixture 14 | def client(request): 15 | db = FalkorDB(host='localhost', port=6379) 16 | db.flushdb() 17 | return db.select_graph("indices") 18 | 19 | def test_node_index_creation(client): 20 | graph = client 21 | lbl = "N" 22 | 23 | # create node indices 24 | 25 | # create node range index 26 | res = graph.create_node_range_index(lbl, 'x') 27 | assert(res.indices_created == 1) 28 | 29 | index = Index(graph.list_indices().result_set[0]) 30 | assert(index.label == lbl) 31 | assert(index.properties == ['x']) 32 | assert(index.types['x'] == ['RANGE']) 33 | assert(index.entity_type == 'NODE') 34 | 35 | # create node range index over multiple properties 36 | res = graph.create_node_range_index(lbl, 'y', 'z') 37 | assert(res.indices_created == 2) 38 | 39 | index = Index(graph.list_indices().result_set[0]) 40 | assert(index.label == lbl) 41 | assert(index.properties == ['x', 'y', 'z']) 42 | assert(index.types['x'] == ['RANGE']) 43 | assert(index.types['y'] == ['RANGE']) 44 | assert(index.types['z'] == ['RANGE']) 45 | assert(index.entity_type == 'NODE') 46 | 47 | # try to create an existing index 48 | with pytest.raises(ResponseError): 49 | res = graph.create_node_range_index(lbl, 'z', 'x') 50 | 51 | # create node full-text index 52 | res = graph.create_node_fulltext_index(lbl, 'name') 53 | assert(res.indices_created == 1) 54 | 55 | index = Index(graph.list_indices().result_set[0]) 56 | assert(index.label == lbl) 57 | assert(index.properties == ['x', 'y', 'z', 'name']) 58 | assert(index.types['x'] == ['RANGE']) 59 | assert(index.types['y'] == ['RANGE']) 60 | assert(index.types['z'] == ['RANGE']) 61 | assert(index.types['name'] == ['FULLTEXT']) 62 | assert(index.entity_type == 'NODE') 63 | 64 | # create node vector index 65 | res = graph.create_node_vector_index(lbl, 'desc', dim=32, similarity_function="euclidean") 66 | assert(res.indices_created == 1) 67 | 68 | index = 
Index(graph.list_indices().result_set[0]) 69 | assert(index.label == lbl) 70 | assert(index.properties == ['x', 'y', 'z', 'name', 'desc']) 71 | assert(index.types['x'] == ['RANGE']) 72 | assert(index.types['y'] == ['RANGE']) 73 | assert(index.types['z'] == ['RANGE']) 74 | assert(index.types['name'] == ['FULLTEXT']) 75 | assert(index.types['desc'] == ['VECTOR']) 76 | assert(index.entity_type == 'NODE') 77 | 78 | # create a multi-type property 79 | res = graph.create_node_fulltext_index(lbl, 'x') 80 | assert(res.indices_created == 1) 81 | 82 | index = Index(graph.list_indices().result_set[0]) 83 | assert(index.label == lbl) 84 | assert(index.properties == ['x', 'y', 'z', 'name', 'desc']) 85 | assert(index.types['x'] == ['RANGE', 'FULLTEXT']) 86 | assert(index.types['y'] == ['RANGE']) 87 | assert(index.types['z'] == ['RANGE']) 88 | assert(index.types['name'] == ['FULLTEXT']) 89 | assert(index.types['desc'] == ['VECTOR']) 90 | assert(index.entity_type == 'NODE') 91 | 92 | def test_edge_index_creation(client): 93 | graph = client 94 | rel = "R" 95 | 96 | # create edge indices 97 | 98 | # create edge range index 99 | res = graph.create_edge_range_index(rel, 'x') 100 | assert(res.indices_created == 1) 101 | 102 | index = Index(graph.list_indices().result_set[0]) 103 | assert(index.label ==rel) 104 | assert(index.properties == ['x']) 105 | assert(index.types['x'] == ['RANGE']) 106 | assert(index.entity_type == 'RELATIONSHIP') 107 | 108 | # create edge range index over multiple properties 109 | res = graph.create_edge_range_index(rel, 'y', 'z') 110 | assert(res.indices_created == 2) 111 | 112 | index = Index(graph.list_indices().result_set[0]) 113 | assert(index.label ==rel) 114 | assert(index.properties == ['x', 'y', 'z']) 115 | assert(index.types['x'] == ['RANGE']) 116 | assert(index.types['y'] == ['RANGE']) 117 | assert(index.types['z'] == ['RANGE']) 118 | assert(index.entity_type == 'RELATIONSHIP') 119 | 120 | # try to create an existing index 121 | with 
pytest.raises(ResponseError): 122 | res = graph.create_edge_range_index(rel, 'z', 'x') 123 | 124 | # create edge full-text index 125 | res = graph.create_edge_fulltext_index(rel, 'name') 126 | assert(res.indices_created == 1) 127 | 128 | index = Index(graph.list_indices().result_set[0]) 129 | assert(index.label ==rel) 130 | assert(index.properties == ['x', 'y', 'z', 'name']) 131 | assert(index.types['x'] == ['RANGE']) 132 | assert(index.types['y'] == ['RANGE']) 133 | assert(index.types['z'] == ['RANGE']) 134 | assert(index.types['name'] == ['FULLTEXT']) 135 | assert(index.entity_type == 'RELATIONSHIP') 136 | 137 | # create edge vector index 138 | res = graph.create_edge_vector_index(rel, 'desc', dim=32, similarity_function="euclidean") 139 | assert(res.indices_created == 1) 140 | 141 | index = Index(graph.list_indices().result_set[0]) 142 | assert(index.label ==rel) 143 | assert(index.properties == ['x', 'y', 'z', 'name', 'desc']) 144 | assert(index.types['x'] == ['RANGE']) 145 | assert(index.types['y'] == ['RANGE']) 146 | assert(index.types['z'] == ['RANGE']) 147 | assert(index.types['name'] == ['FULLTEXT']) 148 | assert(index.types['desc'] == ['VECTOR']) 149 | assert(index.entity_type == 'RELATIONSHIP') 150 | 151 | # create a multi-type property 152 | res = graph.create_edge_fulltext_index(rel, 'x') 153 | assert(res.indices_created == 1) 154 | 155 | index = Index(graph.list_indices().result_set[0]) 156 | assert(index.label ==rel) 157 | assert(index.properties == ['x', 'y', 'z', 'name', 'desc']) 158 | assert(index.types['x'] == ['RANGE', 'FULLTEXT']) 159 | assert(index.types['y'] == ['RANGE']) 160 | assert(index.types['z'] == ['RANGE']) 161 | assert(index.types['name'] == ['FULLTEXT']) 162 | assert(index.types['desc'] == ['VECTOR']) 163 | assert(index.entity_type == 'RELATIONSHIP') 164 | 165 | def test_node_index_drop(client): 166 | graph = client 167 | 168 | # create an index and delete it 169 | lbl = 'N' 170 | attr = 'x' 171 | 172 | # create node range index 173 
| res = graph.create_node_range_index(lbl, attr) 174 | assert(res.indices_created == 1) 175 | 176 | # list indices 177 | res = graph.list_indices() 178 | assert(len(res.result_set) == 1) 179 | 180 | # drop range index 181 | res = graph.drop_node_range_index(lbl, attr) 182 | assert(res.indices_deleted == 1) 183 | 184 | # list indices 185 | res = graph.list_indices() 186 | assert(len(res.result_set) == 0) 187 | 188 | #--------------------------------------------------------------------------- 189 | 190 | # create node fulltext index 191 | res = graph.create_node_fulltext_index(lbl, attr) 192 | assert(res.indices_created == 1) 193 | 194 | # list indices 195 | res = graph.list_indices() 196 | assert(len(res.result_set) == 1) 197 | 198 | # drop fulltext index 199 | res = graph.drop_node_fulltext_index(lbl, attr) 200 | assert(res.indices_deleted == 1) 201 | 202 | # list indices 203 | res = graph.list_indices() 204 | assert(len(res.result_set) == 0) 205 | 206 | #--------------------------------------------------------------------------- 207 | 208 | # create node vector index 209 | res = graph.create_node_vector_index(lbl, attr) 210 | assert(res.indices_created == 1) 211 | 212 | # list indices 213 | res = graph.list_indices() 214 | assert(len(res.result_set) == 1) 215 | 216 | # drop vector index 217 | res = graph.drop_node_vector_index(lbl, attr) 218 | assert(res.indices_deleted == 1) 219 | 220 | # list indices 221 | res = graph.list_indices() 222 | assert(len(res.result_set) == 0) 223 | 224 | def test_edge_index_drop(client): 225 | graph = client 226 | 227 | # create an index and delete it 228 | rel = 'R' 229 | attr = 'x' 230 | 231 | # create edge range index 232 | res = graph.create_edge_range_index(rel, attr) 233 | assert(res.indices_created == 1) 234 | 235 | # list indices 236 | res = graph.list_indices() 237 | assert(len(res.result_set) == 1) 238 | 239 | # drop range index 240 | res = graph.drop_edge_range_index(rel, attr) 241 | assert(res.indices_deleted == 1) 242 | 
243 | # list indices 244 | res = graph.list_indices() 245 | assert(len(res.result_set) == 0) 246 | 247 | #--------------------------------------------------------------------------- 248 | 249 | # create edge fulltext index 250 | res = graph.create_edge_fulltext_index(rel, attr) 251 | assert(res.indices_created == 1) 252 | 253 | # list indices 254 | res = graph.list_indices() 255 | assert(len(res.result_set) == 1) 256 | 257 | # drop fulltext index 258 | res = graph.drop_edge_fulltext_index(rel, attr) 259 | assert(res.indices_deleted == 1) 260 | 261 | # list indices 262 | res = graph.list_indices() 263 | assert(len(res.result_set) == 0) 264 | 265 | #--------------------------------------------------------------------------- 266 | 267 | # create edge vector index 268 | res = graph.create_edge_vector_index(rel, attr) 269 | assert(res.indices_created == 1) 270 | 271 | # list indices 272 | res = graph.list_indices() 273 | assert(len(res.result_set) == 1) 274 | 275 | # drop vector index 276 | res = graph.drop_edge_vector_index(rel, attr) 277 | assert(res.indices_deleted == 1) 278 | 279 | # list indices 280 | res = graph.list_indices() 281 | assert(len(res.result_set) == 0) 282 | 283 | -------------------------------------------------------------------------------- /tests/test_node.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from falkordb import Node 3 | 4 | 5 | @pytest.fixture 6 | def fixture(): 7 | no_args = Node(alias="n") 8 | no_props = Node(node_id=1, alias="n", labels="l") 9 | no_label = Node(node_id=1, alias="n", properties={"a": "a"}) 10 | props_only = Node(alias="n", properties={"a": "a", "b": 10}) 11 | multi_label = Node(node_id=1, alias="n", labels=["l", "ll"]) 12 | 13 | return no_args, no_props, props_only, no_label, multi_label 14 | 15 | 16 | def test_to_string(fixture): 17 | no_args, no_props, props_only, no_label, multi_label = fixture 18 | 19 | assert no_args.to_string() == "" 20 | assert 
no_props.to_string() == "" 21 | assert no_label.to_string() == '{a:"a"}' 22 | assert props_only.to_string() == '{a:"a",b:10}' 23 | assert multi_label.to_string() == "" 24 | 25 | 26 | def test_stringify(fixture): 27 | no_args, no_props, props_only, no_label, multi_label = fixture 28 | 29 | assert str(no_args) == "(n)" 30 | assert str(no_props) == "(n:l)" 31 | assert str(no_label) == '(n{a:"a"})' 32 | assert str(props_only) == '(n{a:"a",b:10})' 33 | assert str(multi_label) == "(n:l:ll)" 34 | 35 | 36 | def test_comparision(): 37 | assert Node() != Node(properties={"a": 10}) 38 | assert Node() == Node() 39 | assert Node(node_id=1) == Node(node_id=1) 40 | assert Node(node_id=1) != Node(node_id=2) 41 | assert Node(node_id=1, alias="a") == Node(node_id=1, alias="b") 42 | assert Node(node_id=1, alias="a") == Node(node_id=1, alias="a") 43 | assert Node(node_id=1, labels="a") == Node(node_id=1, labels="a") 44 | assert Node(node_id=1, labels="a") != Node(node_id=1, labels="b") 45 | assert Node(alias="a", labels="l") != Node(alias="a", labels="l1") 46 | assert Node(properties={"a": 10}) == Node(properties={"a": 10}) 47 | assert Node(node_id=1, alias="a", labels="l") == Node(node_id=1, alias="a", labels="l") 48 | -------------------------------------------------------------------------------- /tests/test_path.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from falkordb import Node, Edge, Path 3 | 4 | 5 | def test_init(): 6 | with pytest.raises(TypeError): 7 | Path(None, None) 8 | Path([], None) 9 | Path(None, []) 10 | 11 | assert isinstance(Path([], []), Path) 12 | 13 | 14 | def test_new_empty_path(): 15 | nodes = [] 16 | edges = [] 17 | path = Path(nodes, edges) 18 | assert isinstance(path, Path) 19 | assert path._nodes == [] 20 | assert path._edges == [] 21 | 22 | 23 | def test_wrong_flows(): 24 | node_1 = Node(node_id=1) 25 | node_2 = Node(node_id=2) 26 | node_3 = Node(node_id=3) 27 | 28 | edge_1 = Edge(node_1, 
None, node_2) 29 | edge_2 = Edge(node_1, None, node_3) 30 | 31 | nodes = [node_1, node_2, node_3] 32 | edges = [edge_1, edge_2] 33 | 34 | def test_nodes_and_edges(): 35 | node_1 = Node(node_id=1) 36 | node_2 = Node(node_id=2) 37 | edge_1 = Edge(node_1, None, node_2) 38 | 39 | nodes = [node_1, node_2] 40 | edges = [edge_1] 41 | 42 | p = Path(nodes, edges) 43 | assert nodes == p.nodes() 44 | assert node_1 == p.get_node(0) 45 | assert node_2 == p.get_node(1) 46 | assert node_1 == p.first_node() 47 | assert node_2 == p.last_node() 48 | assert 2 == p.node_count() 49 | 50 | assert edges == p.edges() 51 | assert 1 == p.edge_count() 52 | assert edge_1 == p.get_edge(0) 53 | 54 | def test_compare(): 55 | node_1 = Node(node_id=1) 56 | node_2 = Node(node_id=2) 57 | edge_1 = Edge(node_1, None, node_2) 58 | nodes = [node_1, node_2] 59 | edges = [edge_1] 60 | 61 | assert Path([], []) == Path([], []) 62 | assert Path(nodes, edges) == Path(nodes, edges) 63 | assert Path(nodes, []) != Path([], []) 64 | assert Path([node_1], []) != Path([], []) 65 | assert Path([node_1], edges=[]) != Path([node_2], []) 66 | assert Path([node_1], [edge_1]) != Path( [node_1], []) 67 | assert Path([node_1], [edge_1]) != Path([node_2], [edge_1]) 68 | -------------------------------------------------------------------------------- /tests/test_profile.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from falkordb import FalkorDB 3 | 4 | 5 | @pytest.fixture 6 | def client(request): 7 | db = FalkorDB(host='localhost', port=6379) 8 | return db.select_graph("profile") 9 | 10 | 11 | def test_profile(client): 12 | g = client 13 | plan = g.profile("UNWIND range(0, 3) AS x RETURN x") 14 | 15 | results_op = plan.structured_plan 16 | assert(results_op.name == 'Results') 17 | assert(len(results_op.children) == 1) 18 | assert(results_op.profile_stats.records_produced == 4) 19 | 20 | project_op = results_op.children[0] 21 | assert(project_op.name == 'Project') 
22 | assert(len(project_op.children) == 1) 23 | assert(project_op.profile_stats.records_produced == 4) 24 | 25 | unwind_op = project_op.children[0] 26 | assert(unwind_op.name == 'Unwind') 27 | assert(len(unwind_op.children) == 0) 28 | assert(unwind_op.profile_stats.records_produced == 4) 29 | 30 | def test_cartesian_product_profile(client): 31 | g = client 32 | plan = g.profile("MATCH (a), (b) RETURN *") 33 | 34 | results_op = plan.structured_plan 35 | assert(results_op.name == 'Results') 36 | assert(len(results_op.children) == 1) 37 | assert(results_op.profile_stats.records_produced == 0) 38 | 39 | project_op = results_op.children[0] 40 | assert(project_op.name == 'Project') 41 | assert(len(project_op.children) == 1) 42 | assert(project_op.profile_stats.records_produced == 0) 43 | 44 | cp_op = project_op.children[0] 45 | assert(cp_op.name == 'Cartesian Product') 46 | assert(len(cp_op.children) == 2) 47 | assert(cp_op.profile_stats.records_produced == 0) 48 | 49 | scan_a_op = cp_op.children[0] 50 | scan_b_op = cp_op.children[1] 51 | 52 | assert(scan_a_op.name == 'All Node Scan') 53 | assert(len(scan_a_op.children) == 0) 54 | assert(scan_a_op.profile_stats.records_produced == 0) 55 | 56 | assert(scan_b_op.name == 'All Node Scan') 57 | assert(len(scan_b_op.children) == 0) 58 | assert(scan_b_op.profile_stats.records_produced == 0) 59 | --------------------------------------------------------------------------------