├── .coveragerc ├── .github └── workflows │ ├── deploy.yml │ ├── docs.yml │ ├── lint.yml │ ├── manage_issues.yml │ └── tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CODEOWNERS ├── CONTRIBUTING.md ├── LICENSE.md ├── README.md ├── bin ├── autolinter └── convert_documentation ├── docs ├── Makefile ├── api.rst ├── conf.py ├── examples.rst ├── filters.rst ├── index.rst ├── inheritance.rst ├── relay.rst ├── requirements.txt ├── starter.rst ├── tips.rst └── tutorial.rst ├── examples ├── filters │ ├── README.md │ ├── __init__.py │ ├── app.py │ ├── database.py │ ├── models.py │ ├── requirements.txt │ ├── run.sh │ └── schema.py ├── flask_sqlalchemy │ ├── README.md │ ├── __init__.py │ ├── app.py │ ├── database.py │ ├── models.py │ ├── requirements.txt │ └── schema.py └── nameko_sqlalchemy │ ├── README.md │ ├── __init__.py │ ├── app.py │ ├── config.yml │ ├── database.py │ ├── models.py │ ├── requirements.txt │ ├── run.sh │ ├── schema.py │ └── service.py ├── graphene_sqlalchemy ├── __init__.py ├── batching.py ├── converter.py ├── enums.py ├── fields.py ├── filters.py ├── registry.py ├── resolvers.py ├── tests │ ├── __init__.py │ ├── conftest.py │ ├── models.py │ ├── models_batching.py │ ├── test_batching.py │ ├── test_benchmark.py │ ├── test_converter.py │ ├── test_enums.py │ ├── test_fields.py │ ├── test_filters.py │ ├── test_query.py │ ├── test_query_enums.py │ ├── test_reflected.py │ ├── test_registry.py │ ├── test_sort_enums.py │ ├── test_types.py │ ├── test_utils.py │ └── utils.py ├── types.py └── utils.py ├── setup.cfg ├── setup.py └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = */tests/* 3 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: 🚀 Deploy to PyPI 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Set up Python 3.10 15 | uses: actions/setup-python@v4 16 | with: 17 | python-version: '3.10' 18 | - name: Build wheel and source tarball 19 | run: | 20 | pip install wheel 21 | python setup.py sdist bdist_wheel 22 | - name: Publish a Python distribution to PyPI 23 | uses: pypa/gh-action-pypi-publish@v1.1.0 24 | with: 25 | user: __token__ 26 | password: ${{ secrets.PYPI_API_TOKEN }} 27 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Docs 2 | 3 | # Runs on pushes targeting the default branch 4 | on: 5 | push: 6 | branches: [master] 7 | 8 | jobs: 9 | pages: 10 | runs-on: ubuntu-22.04 11 | environment: 12 | name: github-pages 13 | url: ${{ steps.deployment.outputs.page_url }} 14 | permissions: 15 | pages: write 16 | id-token: write 17 | steps: 18 | - id: deployment 19 | uses: sphinx-notes/pages@v3 20 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'master' 7 | pull_request: 8 | branches: 9 | - '*' 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | - name: Set up Python 3.10 18 | uses: actions/setup-python@v4 19 | with: 20 | 
python-version: '3.10' 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install tox 25 | - name: Run lint 💅 26 | run: tox 27 | env: 28 | TOXENV: flake8 29 | -------------------------------------------------------------------------------- /.github/workflows/manage_issues.yml: -------------------------------------------------------------------------------- 1 | name: Issue Manager 2 | 3 | on: 4 | schedule: 5 | - cron: "0 0 * * *" 6 | issue_comment: 7 | types: 8 | - created 9 | issues: 10 | types: 11 | - labeled 12 | pull_request_target: 13 | types: 14 | - labeled 15 | workflow_dispatch: 16 | 17 | permissions: 18 | issues: write 19 | pull-requests: write 20 | 21 | concurrency: 22 | group: lock 23 | 24 | jobs: 25 | lock-old-closed-issues: 26 | runs-on: ubuntu-latest 27 | steps: 28 | - uses: dessant/lock-threads@v4 29 | with: 30 | issue-inactive-days: '180' 31 | process-only: 'issues' 32 | issue-comment: > 33 | This issue has been automatically locked since there 34 | has not been any recent activity after it was closed. 35 | Please open a new issue for related topics referencing 36 | this issue. 37 | close-labelled-issues: 38 | runs-on: ubuntu-latest 39 | steps: 40 | - uses: tiangolo/issue-manager@0.4.0 41 | with: 42 | token: ${{ secrets.GITHUB_TOKEN }} 43 | config: > 44 | { 45 | "needs-reply": { 46 | "delay": 2200000, 47 | "message": "This issue was closed due to inactivity. If your request is still relevant, please open a new issue referencing this one and provide all of the requested information." 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'master' 7 | pull_request: 8 | branches: 9 | - '*' 10 | 11 | jobs: 12 | test: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | max-parallel: 10 16 | matrix: 17 | sql-alchemy: [ "1.2", "1.3", "1.4","2.0" ] 18 | python-version: [ "3.9", "3.10", "3.11", "3.12", "3.13"] 19 | 20 | steps: 21 | - uses: actions/checkout@v4 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v3 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install tox tox-gh-actions 30 | - name: Test with tox 31 | run: tox 32 | env: 33 | SQLALCHEMY: ${{ matrix.sql-alchemy }} 34 | TOXENV: ${{ matrix.toxenv }} 35 | - name: Upload coverage.xml 36 | if: ${{ matrix.sql-alchemy == '1.4' && matrix.python-version == '3.10' }} 37 | uses: actions/upload-artifact@v4 38 | with: 39 | name: graphene-sqlalchemy-coverage 40 | path: coverage.xml 41 | if-no-files-found: error 42 | - name: Upload coverage.xml to codecov 43 | if: ${{ matrix.sql-alchemy == '1.4' && matrix.python-version == '3.10' }} 44 | uses: codecov/codecov-action@v3 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io 2 | 3 | ### Python ### 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | env/ 14 | .venv/ 15 | venv/ 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | 
*.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | .python-version 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *,cover 51 | .pytest_cache/ 52 | .benchmarks/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | 61 | # Sphinx documentation 62 | docs/_build/ 63 | 64 | # PyBuilder 65 | target/ 66 | 67 | # PyCharm 68 | .idea 69 | 70 | # Databases 71 | *.sqlite3 72 | .vscode 73 | 74 | # Schema 75 | *.gql 76 | 77 | # mypy cache 78 | .mypy_cache/ 79 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | default_language_version: 2 | python: python3.8 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v4.2.0 6 | hooks: 7 | - id: check-merge-conflict 8 | - id: check-yaml 9 | - id: debug-statements 10 | - id: end-of-file-fixer 11 | exclude: ^docs/.*$ 12 | - id: trailing-whitespace 13 | exclude: README.md 14 | - repo: https://github.com/pycqa/isort 15 | rev: 5.12.0 16 | hooks: 17 | - id: isort 18 | name: isort (python) 19 | - repo: https://github.com/asottile/pyupgrade 20 | rev: v2.37.3 21 | hooks: 22 | - id: pyupgrade 23 | - repo: https://github.com/psf/black 24 | rev: 22.6.0 25 | hooks: 26 | - id: black 27 | - repo: https://github.com/PyCQA/flake8 28 | rev: 4.0.0 29 | hooks: 30 | - id: flake8 31 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | / @cito @jnak @Nabellaleen 2 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Local Development 2 | 3 | Set up our development dependencies: 4 | 5 | ```sh 6 | pip install -e ".[dev]" 7 | pre-commit install 8 | ``` 9 | 10 | We use `tox` to test this library against different versions of `python` and `SQLAlchemy`. 11 | While developping locally, it is usually fine to run the tests against the most recent versions: 12 | 13 | ```sh 14 | tox -e py37 # Python 3.7, SQLAlchemy < 2.0 15 | tox -e py37 -- -v -s # Verbose output 16 | tox -e py37 -- -k test_query # Only test_query.py 17 | ``` 18 | 19 | Our linters will run automatically when committing via git hooks but you can also run them manually: 20 | 21 | ```sh 22 | tox -e pre-commit 23 | ``` 24 | 25 | ## Release Process 26 | 27 | 1. Update the version number in graphene_sqlalchemy/__init__.py via a PR. 28 | 29 | 2. Once the PR is merged, tag the commit on master with the new version (only maintainers of the repo can do this). For example, "v2.1.2". Travis will then automatically build this tag and release it to Pypi. 30 | 31 | 3. Make sure to create a new release on github (via the release tab) that lists all the changes that went into the new version. 
32 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Syrus Akbary 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Version 3.0 is in beta stage. Please read https://github.com/graphql-python/graphene-sqlalchemy/issues/348 to learn about progress and changes in upcoming 2 | beta releases. 3 | 4 | --- 5 | 6 | # ![Graphene Logo](http://graphene-python.org/favicon.png) Graphene-SQLAlchemy 7 | [![Build Status](https://github.com/graphql-python/graphene-sqlalchemy/workflows/Tests/badge.svg)](https://github.com/graphql-python/graphene-sqlalchemy/actions) 8 | [![PyPI version](https://badge.fury.io/py/graphene-sqlalchemy.svg)](https://badge.fury.io/py/graphene-sqlalchemy) 9 | ![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/graphql-python/graphene-sqlalchemy?color=green&include_prereleases&label=latest) 10 | [![codecov](https://codecov.io/gh/graphql-python/graphene-sqlalchemy/branch/master/graph/badge.svg?token=Zi5S1TikeN)](https://codecov.io/gh/graphql-python/graphene-sqlalchemy) 11 | 12 | 13 | 14 | A [SQLAlchemy](http://www.sqlalchemy.org/) integration for [Graphene](http://graphene-python.org/). 15 | 16 | ## Installation 17 | 18 | For installing Graphene, just run this command in your shell. 
19 | 20 | ```bash 21 | pip install --pre "graphene-sqlalchemy" 22 | ``` 23 | 24 | ## Examples 25 | 26 | Here is a simple SQLAlchemy model: 27 | 28 | ```python 29 | from sqlalchemy import Column, Integer, String 30 | 31 | from sqlalchemy.ext.declarative import declarative_base 32 | 33 | Base = declarative_base() 34 | 35 | class UserModel(Base): 36 | __tablename__ = 'user' 37 | id = Column(Integer, primary_key=True) 38 | name = Column(String) 39 | last_name = Column(String) 40 | ``` 41 | 42 | To create a GraphQL schema for it, you simply have to write the following: 43 | 44 | ```python 45 | import graphene 46 | from graphene_sqlalchemy import SQLAlchemyObjectType 47 | 48 | class User(SQLAlchemyObjectType): 49 | class Meta: 50 | model = UserModel 51 | # use `only_fields` to only expose specific fields ie "name" 52 | # only_fields = ("name",) 53 | # use `exclude_fields` to exclude specific fields ie "last_name" 54 | # exclude_fields = ("last_name",) 55 | 56 | class Query(graphene.ObjectType): 57 | users = graphene.List(User) 58 | 59 | def resolve_users(self, info): 60 | query = User.get_query(info) # SQLAlchemy query 61 | return query.all() 62 | 63 | schema = graphene.Schema(query=Query) 64 | ``` 65 | 66 | We need a database session first: 67 | 68 | ```python 69 | from sqlalchemy import (create_engine) 70 | from sqlalchemy.orm import (scoped_session, sessionmaker) 71 | 72 | engine = create_engine('sqlite:///database.sqlite3', convert_unicode=True) 73 | db_session = scoped_session(sessionmaker(autocommit=False, 74 | autoflush=False, 75 | bind=engine)) 76 | # We will need this for querying, Graphene extracts the session from the base. 77 | # Alternatively it can be provided in the GraphQLResolveInfo.context dictionary under context["session"] 78 | Base.query = db_session.query_property() 79 | ``` 80 | 81 | Then you can simply query the schema: 82 | 83 | ```python 84 | query = ''' 85 | query { 86 | users { 87 | name, 88 | lastName 89 | } 90 | } 91 | ''' 92 | result = schema.execute(query, context_value={'session': db_session}) 93 | ``` 94 | 95 | You may also subclass SQLAlchemyObjectType by providing `abstract = True` in 96 | your subclasses Meta: 97 | ```python 98 | from graphene_sqlalchemy import SQLAlchemyObjectType 99 | 100 | class ActiveSQLAlchemyObjectType(SQLAlchemyObjectType): 101 | class Meta: 102 | abstract = True 103 | 104 | @classmethod 105 | def get_node(cls, info, id): 106 | return cls.get_query(info).filter( 107 | and_(cls._meta.model.deleted_at==None, 108 | cls._meta.model.id==id) 109 | ).first() 110 | 111 | class User(ActiveSQLAlchemyObjectType): 112 | class Meta: 113 | model = UserModel 114 | 115 | class Query(graphene.ObjectType): 116 | users = graphene.List(User) 117 | 118 | def resolve_users(self, info): 119 | query = User.get_query(info) # SQLAlchemy query 120 | return query.all() 121 | 122 | schema = graphene.Schema(query=Query) 123 | ``` 124 | 125 | ### Full Examples 126 | 127 | To learn more check out the following [examples](https://github.com/graphql-python/graphene-sqlalchemy/tree/master/examples/): 128 | 129 | - [Flask SQLAlchemy example](https://github.com/graphql-python/graphene-sqlalchemy/tree/master/examples/flask_sqlalchemy) 130 | - [Nameko SQLAlchemy example](https://github.com/graphql-python/graphene-sqlalchemy/tree/master/examples/nameko_sqlalchemy) 131 | 132 | ## Contributing 133 | 134 | See [CONTRIBUTING.md](https://github.com/graphql-python/graphene-sqlalchemy/blob/master/CONTRIBUTING.md) 135 | 
-------------------------------------------------------------------------------- /bin/autolinter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install the required scripts with 4 | # pip install autoflake autopep8 isort 5 | autoflake ./examples/ ./graphene_sqlalchemy/ -r --remove-unused-variables --remove-all-unused-imports --in-place 6 | autopep8 ./examples/ ./graphene_sqlalchemy/ -r --in-place --experimental --aggressive --max-line-length 120 7 | isort -rc ./examples/ ./graphene_sqlalchemy/ 8 | -------------------------------------------------------------------------------- /bin/convert_documentation: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pandoc README.md --from markdown --to rst -s -o README.rst 4 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 16 | 17 | .PHONY: help 18 | help: 19 | @echo "Please use \`make ' where is one of" 20 | @echo " html to make standalone HTML files" 21 | @echo " dirhtml to make HTML files named index.html in directories" 22 | @echo " singlehtml to make a single large HTML file" 23 | @echo " pickle to make pickle files" 24 | @echo " json to make JSON files" 25 | @echo " htmlhelp to make HTML files and a HTML help project" 26 | @echo " qthelp to make HTML files and a qthelp project" 27 | @echo " applehelp to make an Apple Help Book" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " epub3 to make an epub3" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 34 | @echo " text to make text files" 35 | @echo " man to make manual pages" 36 | @echo " texinfo to make Texinfo files" 37 | @echo " info to make Texinfo files and run them through makeinfo" 38 | @echo " gettext to make PO message catalogs" 39 | @echo " changes to make an overview of all changed/added/deprecated items" 40 | @echo " xml to make Docutils-native XML files" 41 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 42 | @echo " linkcheck to check all external links for integrity" 43 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 44 | @echo " coverage to run coverage check of the documentation (if enabled)" 45 | @echo " dummy to check syntax errors of document sources" 46 | 47 | .PHONY: clean 48 | clean: 49 | rm -rf $(BUILDDIR)/* 50 | 51 | .PHONY: html 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
56 | 57 | .PHONY: dirhtml 58 | dirhtml: 59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 62 | 63 | .PHONY: singlehtml 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | .PHONY: pickle 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | .PHONY: json 76 | json: 77 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 78 | @echo 79 | @echo "Build finished; now you can process the JSON files." 80 | 81 | .PHONY: htmlhelp 82 | htmlhelp: 83 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 84 | @echo 85 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 86 | ".hhp project file in $(BUILDDIR)/htmlhelp." 87 | 88 | .PHONY: qthelp 89 | qthelp: 90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 91 | @echo 92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Graphene.qhcp" 95 | @echo "To view the help file:" 96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Graphene.qhc" 97 | 98 | .PHONY: applehelp 99 | applehelp: 100 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 101 | @echo 102 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 103 | @echo "N.B. You won't be able to view it unless you put it in" \ 104 | "~/Library/Documentation/Help or install it in your application" \ 105 | "bundle." 106 | 107 | .PHONY: devhelp 108 | devhelp: 109 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 110 | @echo 111 | @echo "Build finished." 112 | @echo "To view the help file:" 113 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Graphene" 114 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Graphene" 115 | @echo "# devhelp" 116 | 117 | .PHONY: epub 118 | epub: 119 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 120 | @echo 121 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 122 | 123 | .PHONY: epub3 124 | epub3: 125 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 126 | @echo 127 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 128 | 129 | .PHONY: latex 130 | latex: 131 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 132 | @echo 133 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 134 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 135 | "(use \`make latexpdf' here to do that automatically)." 136 | 137 | .PHONY: latexpdf 138 | latexpdf: 139 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 140 | @echo "Running LaTeX files through pdflatex..." 141 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 142 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 143 | 144 | .PHONY: latexpdfja 145 | latexpdfja: 146 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 147 | @echo "Running LaTeX files through platex and dvipdfmx..." 148 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 149 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 
150 | 151 | .PHONY: text 152 | text: 153 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 154 | @echo 155 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 156 | 157 | .PHONY: man 158 | man: 159 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 160 | @echo 161 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 162 | 163 | .PHONY: texinfo 164 | texinfo: 165 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 166 | @echo 167 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 168 | @echo "Run \`make' in that directory to run these through makeinfo" \ 169 | "(use \`make info' here to do that automatically)." 170 | 171 | .PHONY: info 172 | info: 173 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 174 | @echo "Running Texinfo files through makeinfo..." 175 | make -C $(BUILDDIR)/texinfo info 176 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 177 | 178 | .PHONY: gettext 179 | gettext: 180 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 181 | @echo 182 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 183 | 184 | .PHONY: changes 185 | changes: 186 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 187 | @echo 188 | @echo "The overview file is in $(BUILDDIR)/changes." 189 | 190 | .PHONY: linkcheck 191 | linkcheck: 192 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 193 | @echo 194 | @echo "Link check complete; look for any errors in the above output " \ 195 | "or in $(BUILDDIR)/linkcheck/output.txt." 196 | 197 | .PHONY: doctest 198 | doctest: 199 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 200 | @echo "Testing of doctests in the sources finished, look at the " \ 201 | "results in $(BUILDDIR)/doctest/output.txt." 202 | 203 | .PHONY: coverage 204 | coverage: 205 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 206 | @echo "Testing of coverage in the sources finished, look at the " \ 207 | "results in $(BUILDDIR)/coverage/python.txt." 208 | 209 | .PHONY: xml 210 | xml: 211 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 212 | @echo 213 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 214 | 215 | .PHONY: pseudoxml 216 | pseudoxml: 217 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 218 | @echo 219 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 220 | 221 | .PHONY: dummy 222 | dummy: 223 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 224 | @echo 225 | @echo "Build finished. Dummy builder generates no files." 226 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | ============== 3 | 4 | SQLAlchemyObjectType 5 | -------------------- 6 | .. autoclass:: graphene_sqlalchemy.SQLAlchemyObjectType 7 | 8 | SQLAlchemyInterface 9 | ------------------- 10 | .. autoclass:: graphene_sqlalchemy.SQLAlchemyInterface 11 | 12 | ORMField 13 | -------------------- 14 | .. autoclass:: graphene_sqlalchemy.types.ORMField 15 | 16 | SQLAlchemyConnectionField 17 | ------------------------- 18 | .. 
autoclass:: graphene_sqlalchemy.SQLAlchemyConnectionField 19 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | on_rtd = os.environ.get("READTHEDOCS", None) == "True" 4 | 5 | # -*- coding: utf-8 -*- 6 | # 7 | # Graphene documentation build configuration file, created by 8 | # sphinx-quickstart on Sun Sep 11 18:30:51 2016. 9 | # 10 | # This file is execfile()d with the current directory set to its 11 | # containing dir. 12 | # 13 | # Note that not all possible configuration values are present in this 14 | # autogenerated file. 15 | # 16 | # All configuration values have a default; values that are commented out 17 | # serve to show the default. 18 | 19 | # If extensions (or modules to document with autodoc) are in another directory, 20 | # add these directories to sys.path here. If the directory is relative to the 21 | # documentation root, use os.path.abspath to make it absolute, like shown here. 22 | # 23 | # import os 24 | # import sys 25 | # sys.path.insert(0, os.path.abspath('.')) 26 | import os 27 | import sys 28 | 29 | sys.path.insert(0, os.path.abspath("..")) 30 | # -- General configuration ------------------------------------------------ 31 | 32 | # If your documentation needs a minimal Sphinx version, state it here. 33 | # 34 | # needs_sphinx = '1.0' 35 | 36 | # Add any Sphinx extension module names here, as strings. They can be 37 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 38 | # ones. 39 | extensions = [ 40 | "sphinx.ext.autodoc", 41 | "sphinx.ext.intersphinx", 42 | "sphinx.ext.todo", 43 | "sphinx.ext.coverage", 44 | "sphinx.ext.viewcode", 45 | ] 46 | if not on_rtd: 47 | extensions += [ 48 | "sphinx.ext.githubpages", 49 | ] 50 | 51 | # Add any paths that contain templates here, relative to this directory. 52 | templates_path = ["_templates"] 53 | 54 | # The suffix(es) of source filenames. 55 | # You can specify multiple suffix as a list of string: 56 | # 57 | # source_suffix = ['.rst', '.md'] 58 | source_suffix = ".rst" 59 | 60 | # The encoding of source files. 61 | # 62 | # source_encoding = 'utf-8-sig' 63 | 64 | # The master toctree document. 65 | master_doc = "index" 66 | 67 | # General information about the project. 68 | project = "Graphene Django" 69 | copyright = "Graphene 2016" 70 | author = "Syrus Akbary" 71 | 72 | # The version info for the project you're documenting, acts as replacement for 73 | # |version| and |release|, also used in various other places throughout the 74 | # built documents. 75 | # 76 | # The short X.Y version. 77 | version = "1.0" 78 | # The full version, including alpha/beta/rc tags. 79 | release = "1.0.dev" 80 | 81 | # The language for content autogenerated by Sphinx. Refer to documentation 82 | # for a list of supported languages. 83 | # 84 | # This is also used if you do content translation via gettext catalogs. 85 | # Usually you set "language" from the command line for these cases. 86 | language = "en" 87 | 88 | # There are two options for replacing |today|: either, you set today to some 89 | # non-false value, then it is used: 90 | # 91 | # today = '' 92 | # 93 | # Else, today_fmt is used as the format for a strftime call. 94 | # 95 | # today_fmt = '%B %d, %Y' 96 | 97 | # List of patterns, relative to source directory, that match files and 98 | # directories to ignore when looking for source files. 
99 | # This patterns also effect to html_static_path and html_extra_path 100 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 101 | 102 | # The reST default role (used for this markup: `text`) to use for all 103 | # documents. 104 | # 105 | # default_role = None 106 | 107 | # If true, '()' will be appended to :func: etc. cross-reference text. 108 | # 109 | # add_function_parentheses = True 110 | 111 | # If true, the current module name will be prepended to all description 112 | # unit titles (such as .. function::). 113 | # 114 | # add_module_names = True 115 | 116 | # If true, sectionauthor and moduleauthor directives will be shown in the 117 | # output. They are ignored by default. 118 | # 119 | # show_authors = False 120 | 121 | # The name of the Pygments (syntax highlighting) style to use. 122 | pygments_style = "sphinx" 123 | 124 | # A list of ignored prefixes for module index sorting. 125 | # modindex_common_prefix = [] 126 | 127 | # If true, keep warnings as "system message" paragraphs in the built documents. 128 | # keep_warnings = False 129 | 130 | # If true, `todo` and `todoList` produce output, else they produce nothing. 131 | todo_include_todos = True 132 | 133 | 134 | # -- Options for HTML output ---------------------------------------------- 135 | 136 | # The theme to use for HTML and HTML Help pages. See the documentation for 137 | # a list of builtin themes. 138 | # 139 | # html_theme = 'alabaster' 140 | # if on_rtd: 141 | # html_theme = 'sphinx_rtd_theme' 142 | import sphinx_graphene_theme # isort:skip 143 | 144 | html_theme = "sphinx_graphene_theme" 145 | 146 | html_theme_path = [sphinx_graphene_theme.get_html_theme_path()] 147 | 148 | 149 | # Theme options are theme-specific and customize the look and feel of a theme 150 | # further. For a list of options available for each theme, see the 151 | # documentation. 152 | # 153 | # html_theme_options = {} 154 | 155 | # Add any paths that contain custom themes here, relative to this directory. 156 | # html_theme_path = [] 157 | 158 | # The name for this set of Sphinx documents. 159 | # " v documentation" by default. 160 | # 161 | # html_title = u'Graphene v1.0.dev' 162 | 163 | # A shorter title for the navigation bar. Default is the same as html_title. 164 | # 165 | # html_short_title = None 166 | 167 | # The name of an image file (relative to this directory) to place at the top 168 | # of the sidebar. 169 | # 170 | # html_logo = None 171 | 172 | # The name of an image file (relative to this directory) to use as a favicon of 173 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 174 | # pixels large. 175 | # 176 | # html_favicon = None 177 | 178 | # Add any paths that contain custom static files (such as style sheets) here, 179 | # relative to this directory. They are copied after the builtin static files, 180 | # so a file named "default.css" will overwrite the builtin "default.css". 181 | # html_static_path = ["_static"] 182 | 183 | # Add any extra paths that contain custom files (such as robots.txt or 184 | # .htaccess) here, relative to this directory. These files are copied 185 | # directly to the root of the documentation. 186 | # 187 | # html_extra_path = [] 188 | 189 | # If not None, a 'Last updated on:' timestamp is inserted at every page 190 | # bottom, using the given strftime format. 191 | # The empty string is equivalent to '%b %d, %Y'. 
192 | # 193 | # html_last_updated_fmt = None 194 | 195 | # If true, SmartyPants will be used to convert quotes and dashes to 196 | # typographically correct entities. 197 | # 198 | # html_use_smartypants = True 199 | 200 | # Custom sidebar templates, maps document names to template names. 201 | # 202 | # html_sidebars = {} 203 | 204 | # Additional templates that should be rendered to pages, maps page names to 205 | # template names. 206 | # 207 | # html_additional_pages = {} 208 | 209 | # If false, no module index is generated. 210 | # 211 | # html_domain_indices = True 212 | 213 | # If false, no index is generated. 214 | # 215 | # html_use_index = True 216 | 217 | # If true, the index is split into individual pages for each letter. 218 | # 219 | # html_split_index = False 220 | 221 | # If true, links to the reST sources are added to the pages. 222 | # 223 | # html_show_sourcelink = True 224 | 225 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 226 | # 227 | # html_show_sphinx = True 228 | 229 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 230 | # 231 | # html_show_copyright = True 232 | 233 | # If true, an OpenSearch description file will be output, and all pages will 234 | # contain a tag referring to it. The value of this option must be the 235 | # base URL from which the finished HTML is served. 236 | # 237 | # html_use_opensearch = '' 238 | 239 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 240 | # html_file_suffix = None 241 | 242 | # Language to be used for generating the HTML full-text search index. 243 | # Sphinx supports the following languages: 244 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 245 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' 246 | # 247 | # html_search_language = 'en' 248 | 249 | # A dictionary with options for the search language support, empty by default. 250 | # 'ja' uses this config value. 251 | # 'zh' user can custom change `jieba` dictionary path. 252 | # 253 | # html_search_options = {'type': 'default'} 254 | 255 | # The name of a javascript file (relative to the configuration directory) that 256 | # implements a search results scorer. If empty, the default will be used. 257 | # 258 | # html_search_scorer = 'scorer.js' 259 | 260 | # Output file base name for HTML help builder. 261 | htmlhelp_basename = "Graphenedoc" 262 | 263 | # -- Options for LaTeX output --------------------------------------------- 264 | 265 | latex_elements = { 266 | # The paper size ('letterpaper' or 'a4paper'). 267 | # 268 | # 'papersize': 'letterpaper', 269 | # The font size ('10pt', '11pt' or '12pt'). 270 | # 271 | # 'pointsize': '10pt', 272 | # Additional stuff for the LaTeX preamble. 273 | # 274 | # 'preamble': '', 275 | # Latex figure (float) alignment 276 | # 277 | # 'figure_align': 'htbp', 278 | } 279 | 280 | # Grouping the document tree into LaTeX files. List of tuples 281 | # (source start file, target name, title, 282 | # author, documentclass [howto, manual, or own class]). 283 | latex_documents = [ 284 | (master_doc, "Graphene.tex", "Graphene Documentation", "Syrus Akbary", "manual"), 285 | ] 286 | 287 | # The name of an image file (relative to this directory) to place at the top of 288 | # the title page. 289 | # 290 | # latex_logo = None 291 | 292 | # For "manual" documents, if this is true, then toplevel headings are parts, 293 | # not chapters. 294 | # 295 | # latex_use_parts = False 296 | 297 | # If true, show page references after internal links. 
298 | # 299 | # latex_show_pagerefs = False 300 | 301 | # If true, show URL addresses after external links. 302 | # 303 | # latex_show_urls = False 304 | 305 | # Documents to append as an appendix to all manuals. 306 | # 307 | # latex_appendices = [] 308 | 309 | # It false, will not define \strong, \code, itleref, \crossref ... but only 310 | # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added 311 | # packages. 312 | # 313 | # latex_keep_old_macro_names = True 314 | 315 | # If false, no module index is generated. 316 | # 317 | # latex_domain_indices = True 318 | 319 | 320 | # -- Options for manual page output --------------------------------------- 321 | 322 | # One entry per manual page. List of tuples 323 | # (source start file, name, description, authors, manual section). 324 | man_pages = [ 325 | (master_doc, "graphene_django", "Graphene Django Documentation", [author], 1) 326 | ] 327 | 328 | # If true, show URL addresses after external links. 329 | # 330 | # man_show_urls = False 331 | 332 | 333 | # -- Options for Texinfo output ------------------------------------------- 334 | 335 | # Grouping the document tree into Texinfo files. List of tuples 336 | # (source start file, target name, title, author, 337 | # dir menu entry, description, category) 338 | texinfo_documents = [ 339 | ( 340 | master_doc, 341 | "Graphene-Django", 342 | "Graphene Django Documentation", 343 | author, 344 | "Graphene Django", 345 | "One line description of project.", 346 | "Miscellaneous", 347 | ), 348 | ] 349 | 350 | # Documents to append as an appendix to all manuals. 351 | # 352 | # texinfo_appendices = [] 353 | 354 | # If false, no module index is generated. 355 | # 356 | # texinfo_domain_indices = True 357 | 358 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 359 | # 360 | # texinfo_show_urls = 'footnote' 361 | 362 | # If true, do not generate a @detailmenu in the "Top" node's menu. 363 | # 364 | # texinfo_no_detailmenu = False 365 | 366 | 367 | # -- Options for Epub output ---------------------------------------------- 368 | 369 | # Bibliographic Dublin Core info. 370 | epub_title = project 371 | epub_author = author 372 | epub_publisher = author 373 | epub_copyright = copyright 374 | 375 | # The basename for the epub file. It defaults to the project name. 376 | # epub_basename = project 377 | 378 | # The HTML theme for the epub output. Since the default themes are not 379 | # optimized for small screen space, using the same theme for HTML and epub 380 | # output is usually not wise. This defaults to 'epub', a theme designed to save 381 | # visual space. 382 | # 383 | # epub_theme = 'epub' 384 | 385 | # The language of the text. It defaults to the language option 386 | # or 'en' if the language is not set. 387 | # 388 | # epub_language = '' 389 | 390 | # The scheme of the identifier. Typical schemes are ISBN or URL. 391 | # epub_scheme = '' 392 | 393 | # The unique identifier of the text. This can be a ISBN number 394 | # or the project homepage. 395 | # 396 | # epub_identifier = '' 397 | 398 | # A unique identification for the text. 399 | # 400 | # epub_uid = '' 401 | 402 | # A tuple containing the cover image and cover page html template filenames. 403 | # 404 | # epub_cover = () 405 | 406 | # A sequence of (type, uri, title) tuples for the guide element of content.opf. 407 | # 408 | # epub_guide = () 409 | 410 | # HTML files that should be inserted before the pages created by sphinx. 411 | # The format is a list of tuples containing the path and title. 
412 | # 413 | # epub_pre_files = [] 414 | 415 | # HTML files that should be inserted after the pages created by sphinx. 416 | # The format is a list of tuples containing the path and title. 417 | # 418 | # epub_post_files = [] 419 | 420 | # A list of files that should not be packed into the epub file. 421 | epub_exclude_files = ["search.html"] 422 | 423 | # The depth of the table of contents in toc.ncx. 424 | # 425 | # epub_tocdepth = 3 426 | 427 | # Allow duplicate toc entries. 428 | # 429 | # epub_tocdup = True 430 | 431 | # Choose between 'default' and 'includehidden'. 432 | # 433 | # epub_tocscope = 'default' 434 | 435 | # Fix unsupported image types using the Pillow. 436 | # 437 | # epub_fix_images = False 438 | 439 | # Scale large images. 440 | # 441 | # epub_max_image_width = 0 442 | 443 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 444 | # 445 | # epub_show_urls = 'inline' 446 | 447 | # If false, no index is generated. 448 | # 449 | # epub_use_index = True 450 | 451 | 452 | # Example configuration for intersphinx: refer to the Python standard library. 453 | intersphinx_mapping = {"https://docs.python.org/": None} 454 | -------------------------------------------------------------------------------- /docs/examples.rst: -------------------------------------------------------------------------------- 1 | Schema Examples 2 | =========================== 3 | 4 | 5 | Search all Models with Union 6 | ---------------------------- 7 | 8 | .. code:: python 9 | 10 | class Book(SQLAlchemyObjectType): 11 | class Meta: 12 | model = BookModel 13 | interfaces = (relay.Node,) 14 | 15 | 16 | class Author(SQLAlchemyObjectType): 17 | class Meta: 18 | model = AuthorModel 19 | interfaces = (relay.Node,) 20 | 21 | 22 | class SearchResult(graphene.Union): 23 | class Meta: 24 | types = (Book, Author) 25 | 26 | 27 | class Query(graphene.ObjectType): 28 | node = relay.Node.Field() 29 | search = graphene.List(SearchResult, q=graphene.String()) # List field for search results 30 | 31 | # Normal Fields 32 | all_books = SQLAlchemyConnectionField(Book.connection) 33 | all_authors = SQLAlchemyConnectionField(Author.connection) 34 | 35 | def resolve_search(self, info, **args): 36 | q = args.get("q") # Search query 37 | 38 | # Get queries 39 | bookdata_query = BookData.get_query(info) 40 | author_query = Author.get_query(info) 41 | 42 | # Query Books 43 | books = bookdata_query.filter((BookModel.title.contains(q)) | 44 | (BookModel.isbn.contains(q)) | 45 | (BookModel.authors.any(AuthorModel.name.contains(q)))).all() 46 | 47 | # Query Authors 48 | authors = author_query.filter(AuthorModel.name.contains(q)).all() 49 | 50 | return authors + books # Combine lists 51 | 52 | schema = graphene.Schema(query=Query, types=[Book, Author, SearchResult]) 53 | 54 | Example GraphQL query 55 | 56 | .. code:: 57 | 58 | book(id: "Qm9vazow") { 59 | id 60 | title 61 | } 62 | search(q: "Making Games") { 63 | __typename 64 | ... on Author { 65 | fname 66 | lname 67 | } 68 | ... on Book { 69 | title 70 | isbn 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /docs/filters.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Filters 3 | ======= 4 | 5 | Starting in graphene-sqlalchemy version 3, the SQLAlchemyConnectionField class implements filtering by default. The query utilizes a ``filter`` keyword to specify a filter class that inherits from ``graphene.InputObjectType``. 
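For example, using the ``allPets`` connection field defined in the example model below, a filtered query takes the following shape (a minimal sketch; the available operators and relationship filters are described in the rest of this chapter):

.. code::

    allPets(filter: {name: {eq: "Fido"}}) {
        edges {
            node {
                name
            }
        }
    }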
6 | 7 | Migrating from graphene-sqlalchemy-filter 8 | --------------------------------------------- 9 | 10 | If like many of us, you have been using |graphene-sqlalchemy-filter|_ to implement filters and would like to use the in-built mechanism here, there are a couple key differences to note. Mainly, in an effort to simplify the generated schema, filter keywords are nested under their respective fields instead of concatenated. For example, the filter partial ``{usernameIn: ["moderator", "cool guy"]}`` would be represented as ``{username: {in: ["moderator", "cool guy"]}}``. 11 | 12 | .. |graphene-sqlalchemy-filter| replace:: ``graphene-sqlalchemy-filter`` 13 | .. _graphene-sqlalchemy-filter: https://github.com/art1415926535/graphene-sqlalchemy-filter 14 | 15 | Further, some of the constructs found in libraries like `DGraph's DQL `_ have been implemented, so if you have created custom implementations for these features, you may want to take a look at the examples below. 16 | 17 | 18 | Example model 19 | ------------- 20 | 21 | Take as example a Pet model similar to that in the sorting example. We will use variations on this arrangement for the following examples. 22 | 23 | .. code:: 24 | 25 | class Pet(Base): 26 | __tablename__ = 'pets' 27 | id = Column(Integer(), primary_key=True) 28 | name = Column(String(30)) 29 | age = Column(Integer()) 30 | 31 | 32 | class PetNode(SQLAlchemyObjectType): 33 | class Meta: 34 | model = Pet 35 | 36 | 37 | class Query(graphene.ObjectType): 38 | allPets = SQLAlchemyConnectionField(PetNode.connection) 39 | 40 | 41 | Simple filter example 42 | --------------------- 43 | 44 | Filters are defined at the object level through the ``BaseTypeFilter`` class. The ``BaseType`` encompasses both Graphene ``ObjectType``\ s and ``Interface``\ s. Each ``BaseTypeFilter`` instance may define fields via ``FieldFilter`` and relationships via ``RelationshipFilter``. Here's a basic example querying a single field on the Pet model: 45 | 46 | .. code:: 47 | 48 | allPets(filter: {name: {eq: "Fido"}}){ 49 | edges { 50 | node { 51 | name 52 | } 53 | } 54 | } 55 | 56 | This will return all pets with the name "Fido". 57 | 58 | 59 | Custom filter types 60 | ------------------- 61 | 62 | If you'd like to implement custom behavior for filtering a field, you can do so by extending one of the base filter classes in ``graphene_sqlalchemy.filters``. For example, if you'd like to add a ``divisible_by`` keyword to filter the age attribute on the ``Pet`` model, you can do so as follows: 63 | 64 | .. code:: python 65 | 66 | class MathFilter(FloatFilter): 67 | class Meta: 68 | graphene_type = graphene.Float 69 | 70 | @classmethod 71 | def divisible_by_filter(cls, query, field, val: int) -> bool: 72 | return is_(field % val, 0) 73 | 74 | class PetType(SQLAlchemyObjectType): 75 | ... 76 | 77 | age = ORMField(filter_type=MathFilter) 78 | 79 | class Query(graphene.ObjectType): 80 | pets = SQLAlchemyConnectionField(PetType.connection) 81 | 82 | 83 | Filtering over relationships with RelationshipFilter 84 | ---------------------------------------------------- 85 | 86 | When a filter class field refers to another object in a relationship, you may nest filters on relationship object attributes. This happens directly for 1:1 and m:1 relationships and through the ``contains`` and ``containsExactly`` keywords for 1:n and m:n relationships. 
87 | 88 | 89 | :1 relationships 90 | ^^^^^^^^^^^^^^^^ 91 | 92 | When an object or interface defines a singular relationship, relationship object attributes may be filtered directly like so: 93 | 94 | Take the following SQLAlchemy model definition as an example: 95 | 96 | .. code:: python 97 | 98 | class Pet 99 | ... 100 | person_id = Column(Integer(), ForeignKey("people.id")) 101 | 102 | class Person 103 | ... 104 | pets = relationship("Pet", backref="person") 105 | 106 | 107 | Then, this query will return all pets whose person is named "Ada": 108 | 109 | .. code:: 110 | 111 | allPets(filter: { 112 | person: {name: {eq: "Ada"}} 113 | }) { 114 | ... 115 | } 116 | 117 | 118 | :n relationships 119 | ^^^^^^^^^^^^^^^^ 120 | 121 | However, for plural relationships, relationship object attributes must be filtered through either ``contains`` or ``containsExactly``: 122 | 123 | Now, using a many-to-many model definition: 124 | 125 | .. code:: python 126 | 127 | people_pets_table = sqlalchemy.Table( 128 | "people_pets", 129 | Base.metadata, 130 | Column("person_id", ForeignKey("people.id")), 131 | Column("pet_id", ForeignKey("pets.id")), 132 | ) 133 | 134 | class Pet 135 | ... 136 | 137 | class Person 138 | ... 139 | pets = relationship("Pet", backref="people") 140 | 141 | 142 | This query will return all pets which have a person named "Ben" in their ``people`` list: 143 | 144 | .. code:: 145 | 146 | allPets(filter: { 147 | people: { 148 | contains: [{name: {eq: "Ben"}}], 149 | } 150 | }) { 151 | ... 152 | } 153 | 154 | 155 | And this one will return all pets whose person list contains exactly the people "Ada" and "Ben", and no one else: 156 | 157 | .. code:: 158 | 159 | allPets(filter: { 160 | people: { 161 | containsExactly: [ 162 | {name: {eq: "Ada"}}, 163 | {name: {eq: "Ben"}}, 164 | ], 165 | } 166 | }) { 167 | ... 168 | } 169 | 170 | And/Or Logic 171 | ------------ 172 | 173 | Filters can also be chained together logically using `and` and `or` keywords nested under `filter`. Clauses are passed directly to `sqlalchemy.and_` and `sqlalchemy.or_`, respectively. To return all pets named "Fido" or "Spot", use: 174 | 175 | 176 | .. code:: 177 | 178 | allPets(filter: { 179 | or: [ 180 | {name: {eq: "Fido"}}, 181 | {name: {eq: "Spot"}}, 182 | ] 183 | }) { 184 | ... 185 | } 186 | 187 | And to return all pets that are named "Fido" or are 5 years old and named "Spot", use: 188 | 189 | .. code:: 190 | 191 | allPets(filter: { 192 | or: [ 193 | {name: {eq: "Fido"}}, 194 | { and: [ 195 | {name: {eq: "Spot"}}, 196 | {age: {eq: 5}} 197 | ]} 198 | ] 199 | }) { 200 | ... 201 | } 202 | 203 | 204 | Hybrid Property support 205 | ----------------------- 206 | 207 | Filtering over SQLAlchemy `hybrid properties `_ is fully supported. 208 | 209 | 210 | Reporting feedback and bugs 211 | --------------------------- 212 | 213 | Filtering is a new feature in graphene-sqlalchemy, so please `post an issue on GitHub `_ if you run into any problems or have ideas on how to improve the implementation. 214 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Graphene-SQLAlchemy 2 | =================== 3 | 4 | Contents: 5 | 6 | ..
toctree:: 7 | :maxdepth: 0 8 | 9 | starter 10 | inheritance 11 | relay 12 | tips 13 | filters 14 | examples 15 | tutorial 16 | api 17 | -------------------------------------------------------------------------------- /docs/inheritance.rst: -------------------------------------------------------------------------------- 1 | Inheritance Examples 2 | ==================== 3 | 4 | 5 | Create interfaces from inheritance relationships 6 | ------------------------------------------------ 7 | 8 | .. note:: 9 | If you're using `AsyncSession`, please check the chapter `Eager Loading & Using with AsyncSession`_. 10 | 11 | SQLAlchemy has excellent support for class inheritance hierarchies. 12 | These hierarchies can be represented in your GraphQL schema by means 13 | of interfaces_. Much like ObjectTypes, Interfaces in 14 | Graphene-SQLAlchemy are able to infer their fields and relationships 15 | from the attributes of their underlying SQLAlchemy model: 16 | 17 | .. _interfaces: https://docs.graphene-python.org/en/latest/types/interfaces/ 18 | 19 | .. code:: python 20 | 21 | from sqlalchemy import Column, Date, Integer, String 22 | from sqlalchemy.ext.declarative import declarative_base 23 | 24 | import graphene 25 | from graphene import relay 26 | from graphene_sqlalchemy import SQLAlchemyInterface, SQLAlchemyObjectType 27 | 28 | Base = declarative_base() 29 | 30 | class Person(Base): 31 | id = Column(Integer(), primary_key=True) 32 | type = Column(String()) 33 | name = Column(String()) 34 | birth_date = Column(Date()) 35 | 36 | __tablename__ = "person" 37 | __mapper_args__ = { 38 | "polymorphic_on": type, 39 | } 40 | 41 | class Employee(Person): 42 | hire_date = Column(Date()) 43 | 44 | __mapper_args__ = { 45 | "polymorphic_identity": "employee", 46 | } 47 | 48 | class Customer(Person): 49 | first_purchase_date = Column(Date()) 50 | 51 | __mapper_args__ = { 52 | "polymorphic_identity": "customer", 53 | } 54 | 55 | class PersonType(SQLAlchemyInterface): 56 | class Meta: 57 | model = Person 58 | 59 | class EmployeeType(SQLAlchemyObjectType): 60 | class Meta: 61 | model = Employee 62 | interfaces = (relay.Node, PersonType) 63 | 64 | class CustomerType(SQLAlchemyObjectType): 65 | class Meta: 66 | model = Customer 67 | interfaces = (relay.Node, PersonType) 68 | 69 | Keep in mind that `PersonType` is a `SQLAlchemyInterface`. Interfaces must 70 | be linked to an abstract Model that does not specify a `polymorphic_identity`, 71 | because we cannot return instances of interfaces from a GraphQL query. 72 | If Person specified a `polymorphic_identity`, instances of Person could 73 | be inserted into and returned by the database, potentially causing 74 | Persons to be returned to the resolvers. 75 | 76 | When querying on the base type, you can refer directly to common fields, 77 | and fields on concrete implementations using the `... on` syntax: 78 | 79 | 80 | .. code:: 81 | 82 | people { 83 | name 84 | birthDate 85 | ... on EmployeeType { 86 | hireDate 87 | } 88 | ... on CustomerType { 89 | firstPurchaseDate 90 | } 91 | } 92 | 93 | 94 | .. danger:: 95 | When using joined table inheritance, this style of querying may lead to unbatched implicit IO with negative performance implications. 96 | See the chapter `Eager Loading & Using with AsyncSession`_ for more information on eager loading all possible types of a `SQLAlchemyInterface`. 
97 | 98 | Please note that by default, the "polymorphic_on" column is *not* 99 | generated as a field on types that use polymorphic inheritance, as 100 | this is considered an implementation detail. The idiomatic way to 101 | retrieve the concrete GraphQL type of an object is to query for the 102 | `__typename` field. 103 | To override this behavior, an `ORMField` needs to be created 104 | for the custom type field on the corresponding `SQLAlchemyInterface`. This is *not recommended* 105 | as it promotes ambiguous schema design. 106 | 107 | If your SQLAlchemy model only specifies a relationship to the 108 | base type, you will need to explicitly pass your concrete implementation 109 | class to the Schema constructor via the `types=` argument: 110 | 111 | .. code:: python 112 | 113 | schema = graphene.Schema(..., types=[PersonType, EmployeeType, CustomerType]) 114 | 115 | 116 | See also: `Graphene Interfaces `_ 117 | 118 | 119 | Eager Loading & Using with AsyncSession 120 | ---------------------------------------- 121 | 122 | When querying the base type in multi-table inheritance or joined table inheritance, you can only directly refer to polymorphic fields when they are loaded eagerly. 123 | This restriction is in place because AsyncSessions don't allow implicit async operations such as the lazy loading of the joined tables. 124 | To load the polymorphic fields eagerly, you can use the `with_polymorphic` attribute of the mapper args in the base model: 125 | 126 | .. code:: python 127 | 128 | class Person(Base): 129 | id = Column(Integer(), primary_key=True) 130 | type = Column(String()) 131 | name = Column(String()) 132 | birth_date = Column(Date()) 133 | 134 | __tablename__ = "person" 135 | __mapper_args__ = { 136 | "polymorphic_on": type, 137 | "with_polymorphic": "*", # needed for eager loading in async session 138 | } 139 | 140 | Alternatively, the specific polymorphic fields can be loaded explicitly in resolvers: 141 | 142 | .. code:: python 143 | 144 | class Query(graphene.ObjectType): 145 | people = graphene.Field(graphene.List(PersonType)) 146 | 147 | async def resolve_people(self, _info): 148 | return (await session.scalars(select(with_polymorphic(Person, [Employee, Customer])))).all() 149 | 150 | Dynamic batching of the types based on the query to avoid eager loading is currently not supported, but could be implemented in a future PR. 151 | 152 | For more information on loading techniques for polymorphic models, please check out the `SQLAlchemy docs `_. 153 | -------------------------------------------------------------------------------- /docs/relay.rst: -------------------------------------------------------------------------------- 1 | Relay 2 | ========== 3 | 4 | :code:`graphene-sqlalchemy` comes with pre-defined 5 | connection fields to quickly create a functioning relay API. 6 | Using the :code:`SQLAlchemyConnectionField`, you have access to relay pagination, 7 | sorting and filtering (see :doc:`filters`). 8 | 9 | To be used in a relay connection, your :code:`SQLAlchemyObjectType` must implement 10 | the :code:`Node` interface from :code:`graphene.relay`. This handles the creation of 11 | the :code:`Connection` and :code:`Edge` types automatically. 12 | 13 | The following example creates a relay-paginated connection: 14 | 15 | 16 | 17 | ..
code:: python 18 | 19 | class Pet(Base): 20 | __tablename__ = 'pets' 21 | id = Column(Integer(), primary_key=True) 22 | name = Column(String(30)) 23 | pet_kind = Column(Enum('cat', 'dog', name='pet_kind'), nullable=False) 24 | 25 | 26 | class PetNode(SQLAlchemyObjectType): 27 | class Meta: 28 | model = Pet 29 | interfaces=(Node,) 30 | 31 | 32 | class Query(ObjectType): 33 | all_pets = SQLAlchemyConnectionField(PetNode.connection) 34 | 35 | To disable sorting on the connection, you can set :code:`sort` to :code:`None` the 36 | :code:`SQLAlchemyConnectionField`: 37 | 38 | 39 | .. code:: python 40 | 41 | class Query(ObjectType): 42 | all_pets = SQLAlchemyConnectionField(PetNode.connection, sort=None) 43 | 44 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | # Docs template 3 | http://graphene-python.org/sphinx_graphene_theme.zip 4 | -------------------------------------------------------------------------------- /docs/starter.rst: -------------------------------------------------------------------------------- 1 | Getting Started 2 | ================= 3 | 4 | Welcome to the graphene-sqlalchemy documentation! 5 | Graphene is a powerful Python library for building GraphQL APIs, 6 | and SQLAlchemy is a popular ORM (Object-Relational Mapping) 7 | tool for working with databases. When combined, graphene-sqlalchemy 8 | allows developers to quickly and easily create a GraphQL API that 9 | seamlessly interacts with a SQLAlchemy-managed database. 10 | It is fully compatible with SQLAlchemy 1.4 and 2.0. 11 | This documentation provides detailed instructions on how to get 12 | started with graphene-sqlalchemy, including installation, setup, 13 | and usage examples. 14 | 15 | Installation 16 | ------------ 17 | 18 | To install :code:`graphene-sqlalchemy`, just run this command in your shell: 19 | 20 | .. code:: bash 21 | 22 | pip install --pre "graphene-sqlalchemy" 23 | 24 | Examples 25 | -------- 26 | 27 | Here is a simple SQLAlchemy model: 28 | 29 | .. code:: python 30 | 31 | from sqlalchemy import Column, Integer, String 32 | from sqlalchemy.ext.declarative import declarative_base 33 | 34 | Base = declarative_base() 35 | 36 | class UserModel(Base): 37 | __tablename__ = 'user' 38 | id = Column(Integer, primary_key=True) 39 | name = Column(String) 40 | last_name = Column(String) 41 | 42 | To create a GraphQL schema for it, you simply have to write the 43 | following: 44 | 45 | .. code:: python 46 | 47 | import graphene 48 | from graphene_sqlalchemy import SQLAlchemyObjectType 49 | 50 | class User(SQLAlchemyObjectType): 51 | class Meta: 52 | model = UserModel 53 | # use `only_fields` to only expose specific fields ie "name" 54 | # only_fields = ("name",) 55 | # use `exclude_fields` to exclude specific fields ie "last_name" 56 | # exclude_fields = ("last_name",) 57 | 58 | class Query(graphene.ObjectType): 59 | users = graphene.List(User) 60 | 61 | def resolve_users(self, info): 62 | query = User.get_query(info) # SQLAlchemy query 63 | return query.all() 64 | 65 | schema = graphene.Schema(query=Query) 66 | 67 | Then you can simply query the schema: 68 | 69 | .. code:: python 70 | 71 | query = ''' 72 | query { 73 | users { 74 | name, 75 | lastName 76 | } 77 | } 78 | ''' 79 | result = schema.execute(query, context_value={'session': db_session}) 80 | 81 | 82 | It is important to provide a session for graphene-sqlalchemy to resolve the models. 
83 | In this example, it is provided using the GraphQL context. See :doc:`tips` for 84 | other ways to implement this. 85 | 86 | You may also subclass SQLAlchemyObjectType by providing 87 | ``abstract = True`` in your subclass's Meta: 88 | 89 | .. code:: python 90 | 91 | from graphene_sqlalchemy import SQLAlchemyObjectType 92 | 93 | class ActiveSQLAlchemyObjectType(SQLAlchemyObjectType): 94 | class Meta: 95 | abstract = True 96 | 97 | @classmethod 98 | def get_node(cls, info, id): 99 | return cls.get_query(info).filter( 100 | and_(cls._meta.model.deleted_at==None, 101 | cls._meta.model.id==id) 102 | ).first() 103 | 104 | class User(ActiveSQLAlchemyObjectType): 105 | class Meta: 106 | model = UserModel 107 | 108 | class Query(graphene.ObjectType): 109 | users = graphene.List(User) 110 | 111 | def resolve_users(self, info): 112 | query = User.get_query(info) # SQLAlchemy query 113 | return query.all() 114 | 115 | schema = graphene.Schema(query=Query) 116 | 117 | More complex inheritance using SQLAlchemy's polymorphic models is also supported. 118 | You can check out :doc:`inheritance` for a guide. 119 | -------------------------------------------------------------------------------- /docs/tips.rst: -------------------------------------------------------------------------------- 1 | ==== 2 | Tips 3 | ==== 4 | 5 | Querying 6 | -------- 7 | .. _querying: 8 | 9 | In order to make querying against the database work, there are two alternatives: 10 | 11 | - Set the db session when you do the execution: 12 | 13 | .. code:: python 14 | 15 | schema = graphene.Schema() 16 | schema.execute(context_value={'session': session}) 17 | 18 | - Create a query for the models. 19 | 20 | .. code:: python 21 | 22 | Base = declarative_base() 23 | Base.query = db_session.query_property() 24 | 25 | class MyModel(Base): 26 | # ... 27 | 28 | If you don't specify either, the following error will be displayed: 29 | 30 | ``A query in the model Base or a session in the schema is required for querying.`` 31 | 32 | Sorting 33 | ------- 34 | 35 | By default the SQLAlchemyConnectionField sorts the result elements over the primary key(s). 36 | The query has a `sort` argument which allows sorting over one or more different columns. 37 | 38 | Given the model 39 | 40 | .. code:: python 41 | 42 | class Pet(Base): 43 | __tablename__ = 'pets' 44 | id = Column(Integer(), primary_key=True) 45 | name = Column(String(30)) 46 | pet_kind = Column(Enum('cat', 'dog', name='pet_kind'), nullable=False) 47 | 48 | 49 | class PetNode(SQLAlchemyObjectType): 50 | class Meta: 51 | model = Pet 52 | 53 | 54 | class Query(ObjectType): 55 | allPets = SQLAlchemyConnectionField(PetNode.connection) 56 | 57 | Some of the allowed queries are: 58 | 59 | - Sort in ascending order over the `name` column 60 | 61 | .. code:: 62 | 63 | allPets(sort: name_asc){ 64 | edges { 65 | node { 66 | name 67 | } 68 | } 69 | } 70 | 71 | - Sort in descending order over the `pet_kind` column and in ascending order over the `name` column 72 | 73 | .. code:: 74 | 75 | allPets(sort: [pet_kind_desc, name_asc]) { 76 | edges { 77 | node { 78 | name 79 | petKind 80 | } 81 | } 82 | } 83 | 84 | -------------------------------------------------------------------------------- /docs/tutorial.rst: -------------------------------------------------------------------------------- 1 | SQLAlchemy + Flask Tutorial 2 | =========================== 3 | 4 | Graphene comes with built-in support for SQLAlchemy, which makes it quite 5 | easy to work with your existing models.
6 | 7 | Note: The code in this tutorial is pulled from the `Flask SQLAlchemy 8 | example 9 | app `__. 10 | 11 | Setup the Project 12 | ----------------- 13 | 14 | We will setup the project, execute the following: 15 | 16 | .. code:: bash 17 | 18 | # Create the project directory 19 | mkdir flask_sqlalchemy 20 | cd flask_sqlalchemy 21 | 22 | # Create a virtualenv to isolate our package dependencies locally 23 | virtualenv env 24 | source env/bin/activate # On Windows use `env\Scripts\activate` 25 | 26 | # SQLAlchemy and Graphene with SQLAlchemy support 27 | pip install SQLAlchemy 28 | pip install graphene_sqlalchemy 29 | 30 | # Install Flask and GraphQL Flask for exposing the schema through HTTP 31 | pip install Flask 32 | pip install Flask-GraphQL 33 | 34 | Defining our models 35 | ------------------- 36 | 37 | Let's get started with these models: 38 | 39 | .. code:: python 40 | 41 | # flask_sqlalchemy/models.py 42 | from sqlalchemy import * 43 | from sqlalchemy.orm import (scoped_session, sessionmaker, relationship, 44 | backref) 45 | from sqlalchemy.ext.declarative import declarative_base 46 | 47 | engine = create_engine('sqlite:///database.sqlite3', convert_unicode=True) 48 | db_session = scoped_session(sessionmaker(autocommit=False, 49 | autoflush=False, 50 | bind=engine)) 51 | 52 | Base = declarative_base() 53 | # We will need this for querying 54 | Base.query = db_session.query_property() 55 | 56 | 57 | class Department(Base): 58 | __tablename__ = 'department' 59 | id = Column(Integer, primary_key=True) 60 | name = Column(String) 61 | 62 | 63 | class Employee(Base): 64 | __tablename__ = 'employee' 65 | id = Column(Integer, primary_key=True) 66 | name = Column(String) 67 | hired_on = Column(DateTime, default=func.now()) 68 | department_id = Column(Integer, ForeignKey('department.id')) 69 | department = relationship( 70 | Department, 71 | backref=backref('employees', 72 | uselist=True, 73 | cascade='delete,all')) 74 | 75 | Schema 76 | ------ 77 | 78 | GraphQL presents your objects to the world as a graph structure rather 79 | than a more hierarchical structure to which you may be accustomed. In 80 | order to create this representation, Graphene needs to know about each 81 | *type* of object which will appear in the graph. 82 | 83 | This graph also has a *root type* through which all access begins. This 84 | is the ``Query`` class below. In this example, we provide the ability to 85 | list all employees via ``all_employees``, and the ability to obtain a 86 | specific node via ``node``. 87 | 88 | Create ``flask_sqlalchemy/schema.py`` and type the following: 89 | 90 | .. 
code:: python 91 | 92 | # flask_sqlalchemy/schema.py 93 | import graphene 94 | from graphene import relay 95 | from graphene_sqlalchemy import SQLAlchemyObjectType, SQLAlchemyConnectionField 96 | from .models import db_session, Department as DepartmentModel, Employee as EmployeeModel 97 | 98 | 99 | class Department(SQLAlchemyObjectType): 100 | class Meta: 101 | model = DepartmentModel 102 | interfaces = (relay.Node, ) 103 | 104 | 105 | class Employee(SQLAlchemyObjectType): 106 | class Meta: 107 | model = EmployeeModel 108 | interfaces = (relay.Node, ) 109 | 110 | 111 | class Query(graphene.ObjectType): 112 | node = relay.Node.Field() 113 | # Allows sorting over multiple columns, by default over the primary key 114 | all_employees = SQLAlchemyConnectionField(Employee.connection) 115 | # Disable sorting over this field 116 | all_departments = SQLAlchemyConnectionField(Department.connection, sort=None) 117 | 118 | schema = graphene.Schema(query=Query) 119 | 120 | Creating GraphQL and GraphiQL views in Flask 121 | -------------------------------------------- 122 | 123 | Unlike a RESTful API, there is only a single URL from which GraphQL is 124 | accessed. 125 | 126 | We are going to use Flask to create a server that expose the GraphQL 127 | schema under ``/graphql`` and a interface for querying it easily: 128 | GraphiQL (also under ``/graphql`` when accessed by a browser). 129 | 130 | Fortunately for us, the library ``Flask-GraphQL`` that we previously 131 | installed makes this task quite easy. 132 | 133 | .. code:: python 134 | 135 | # flask_sqlalchemy/app.py 136 | from flask import Flask 137 | from flask_graphql import GraphQLView 138 | 139 | from .models import db_session 140 | from .schema import schema, Department 141 | 142 | app = Flask(__name__) 143 | app.debug = True 144 | 145 | app.add_url_rule( 146 | '/graphql', 147 | view_func=GraphQLView.as_view( 148 | 'graphql', 149 | schema=schema, 150 | graphiql=True # for having the GraphiQL interface 151 | ) 152 | ) 153 | 154 | @app.teardown_appcontext 155 | def shutdown_session(exception=None): 156 | db_session.remove() 157 | 158 | if __name__ == '__main__': 159 | app.run() 160 | 161 | Creating some data 162 | ------------------ 163 | 164 | .. code:: bash 165 | 166 | $ python 167 | 168 | >>> from .models import engine, db_session, Base, Department, Employee 169 | >>> Base.metadata.create_all(bind=engine) 170 | 171 | >>> # Fill the tables with some data 172 | >>> engineering = Department(name='Engineering') 173 | >>> db_session.add(engineering) 174 | >>> hr = Department(name='Human Resources') 175 | >>> db_session.add(hr) 176 | 177 | >>> peter = Employee(name='Peter', department=engineering) 178 | >>> db_session.add(peter) 179 | >>> roy = Employee(name='Roy', department=engineering) 180 | >>> db_session.add(roy) 181 | >>> tracy = Employee(name='Tracy', department=hr) 182 | >>> db_session.add(tracy) 183 | >>> db_session.commit() 184 | 185 | Testing our GraphQL schema 186 | -------------------------- 187 | 188 | We're now ready to test the API we've built. Let's fire up the server 189 | from the command line. 190 | 191 | .. code:: bash 192 | 193 | $ python ./app.py 194 | 195 | * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) 196 | 197 | Go to `localhost:5000/graphql `__ and 198 | type your first query! 199 | 200 | .. 
code:: 201 | 202 | { 203 | allEmployees { 204 | edges { 205 | node { 206 | id 207 | name 208 | department { 209 | name 210 | } 211 | } 212 | } 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /examples/filters/README.md: -------------------------------------------------------------------------------- 1 | Example Filters Project 2 | ================================ 3 | 4 | This example highlights the ability to filter queries in graphene-sqlalchemy. 5 | 6 | The project contains two models, one named `Department` and another 7 | named `Employee`. 8 | 9 | Getting started 10 | --------------- 11 | 12 | First you'll need to get the source of the project. Do this by cloning the 13 | whole Graphene-SQLAlchemy repository: 14 | 15 | ```bash 16 | # Get the example project code 17 | git clone https://github.com/graphql-python/graphene-sqlalchemy.git 18 | cd graphene-sqlalchemy/examples/filters 19 | ``` 20 | 21 | It is recommended to create a virtual environment 22 | for this project. We'll do this using 23 | [virtualenv](http://docs.python-guide.org/en/latest/dev/virtualenvs/) 24 | to keep things simple, 25 | but you may also find something like 26 | [virtualenvwrapper](https://virtualenvwrapper.readthedocs.org/en/latest/) 27 | to be useful: 28 | 29 | ```bash 30 | # Create a virtualenv in which we can install the dependencies 31 | virtualenv env 32 | source env/bin/activate 33 | ``` 34 | 35 | Install our dependencies: 36 | 37 | ```bash 38 | pip install -r requirements.txt 39 | ``` 40 | 41 | The following command will setup the database, and start the server: 42 | 43 | ```bash 44 | python app.py 45 | ``` 46 | 47 | Now head over to your favorite GraphQL client, POST to [http://127.0.0.1:5000/graphql](http://127.0.0.1:5000/graphql) and run some queries! 
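For instance, a query along the following lines should exercise the filter argument that graphene-sqlalchemy adds to the connection fields in this example's `schema.py`. The exact filter operator names available depend on the installed graphene-sqlalchemy version, so treat this as a sketch rather than a guaranteed query:

```graphql
{
  pets(filter: { name: { eq: "Spot" } }) {
    edges {
      node {
        name
        person {
          name
        }
      }
    }
  }
}
```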
48 | -------------------------------------------------------------------------------- /examples/filters/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/graphql-python/graphene-sqlalchemy/4ea6ee819600d65ad784c783a68321105a643d76/examples/filters/__init__.py -------------------------------------------------------------------------------- /examples/filters/app.py: -------------------------------------------------------------------------------- 1 | from database import init_db 2 | from fastapi import FastAPI 3 | from schema import schema 4 | from starlette_graphene3 import GraphQLApp, make_playground_handler 5 | 6 | 7 | def create_app() -> FastAPI: 8 | init_db() 9 | app = FastAPI() 10 | 11 | app.mount("/graphql", GraphQLApp(schema, on_get=make_playground_handler())) 12 | 13 | return app 14 | 15 | 16 | app = create_app() 17 | -------------------------------------------------------------------------------- /examples/filters/database.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import create_engine 2 | from sqlalchemy.ext.declarative import declarative_base 3 | from sqlalchemy.orm import sessionmaker 4 | 5 | Base = declarative_base() 6 | engine = create_engine( 7 | "sqlite://", connect_args={"check_same_thread": False}, echo=True 8 | ) 9 | session_factory = sessionmaker(autocommit=False, autoflush=False, bind=engine) 10 | 11 | from sqlalchemy.orm import scoped_session as scoped_session_factory 12 | 13 | scoped_session = scoped_session_factory(session_factory) 14 | 15 | Base.query = scoped_session.query_property() 16 | Base.metadata.bind = engine 17 | 18 | 19 | def init_db(): 20 | from models import Person, Pet, Toy 21 | 22 | Base.metadata.create_all() 23 | scoped_session.execute("PRAGMA foreign_keys=on") 24 | db = scoped_session() 25 | 26 | person1 = Person(name="A") 27 | person2 = Person(name="B") 28 | 29 | pet1 = Pet(name="Spot") 30 | pet2 = Pet(name="Milo") 31 | 32 | toy1 = Toy(name="disc") 33 | toy2 = Toy(name="ball") 34 | 35 | person1.pet = pet1 36 | person2.pet = pet2 37 | 38 | pet1.toys.append(toy1) 39 | pet2.toys.append(toy1) 40 | pet2.toys.append(toy2) 41 | 42 | db.add(person1) 43 | db.add(person2) 44 | db.add(pet1) 45 | db.add(pet2) 46 | db.add(toy1) 47 | db.add(toy2) 48 | 49 | db.commit() 50 | -------------------------------------------------------------------------------- /examples/filters/models.py: -------------------------------------------------------------------------------- 1 | import sqlalchemy 2 | from database import Base 3 | from sqlalchemy import Column, ForeignKey, Integer, String 4 | from sqlalchemy.orm import relationship 5 | 6 | 7 | class Pet(Base): 8 | __tablename__ = "pets" 9 | id = Column(Integer(), primary_key=True) 10 | name = Column(String(30)) 11 | age = Column(Integer()) 12 | person_id = Column(Integer(), ForeignKey("people.id")) 13 | 14 | 15 | class Person(Base): 16 | __tablename__ = "people" 17 | id = Column(Integer(), primary_key=True) 18 | name = Column(String(100)) 19 | pets = relationship("Pet", backref="person") 20 | 21 | 22 | pets_toys_table = sqlalchemy.Table( 23 | "pets_toys", 24 | Base.metadata, 25 | Column("pet_id", ForeignKey("pets.id")), 26 | Column("toy_id", ForeignKey("toys.id")), 27 | ) 28 | 29 | 30 | class Toy(Base): 31 | __tablename__ = "toys" 32 | id = Column(Integer(), primary_key=True) 33 | name = Column(String(30)) 34 | pets = relationship("Pet", secondary=pets_toys_table, backref="toys") 35 | 
-------------------------------------------------------------------------------- /examples/filters/requirements.txt: -------------------------------------------------------------------------------- 1 | -e ../../ 2 | fastapi 3 | uvicorn 4 | -------------------------------------------------------------------------------- /examples/filters/run.sh: -------------------------------------------------------------------------------- 1 | uvicorn app:app --port 5000 2 | -------------------------------------------------------------------------------- /examples/filters/schema.py: -------------------------------------------------------------------------------- 1 | from models import Person as PersonModel 2 | from models import Pet as PetModel 3 | from models import Toy as ToyModel 4 | 5 | import graphene 6 | from graphene import relay 7 | from graphene_sqlalchemy import SQLAlchemyObjectType 8 | from graphene_sqlalchemy.fields import SQLAlchemyConnectionField 9 | 10 | 11 | class Pet(SQLAlchemyObjectType): 12 | class Meta: 13 | model = PetModel 14 | name = "Pet" 15 | interfaces = (relay.Node,) 16 | batching = True 17 | 18 | 19 | class Person(SQLAlchemyObjectType): 20 | class Meta: 21 | model = PersonModel 22 | name = "Person" 23 | interfaces = (relay.Node,) 24 | batching = True 25 | 26 | 27 | class Toy(SQLAlchemyObjectType): 28 | class Meta: 29 | model = ToyModel 30 | name = "Toy" 31 | interfaces = (relay.Node,) 32 | batching = True 33 | 34 | 35 | class Query(graphene.ObjectType): 36 | node = relay.Node.Field() 37 | pets = SQLAlchemyConnectionField(Pet.connection) 38 | people = SQLAlchemyConnectionField(Person.connection) 39 | toys = SQLAlchemyConnectionField(Toy.connection) 40 | 41 | 42 | schema = graphene.Schema(query=Query) 43 | -------------------------------------------------------------------------------- /examples/flask_sqlalchemy/README.md: -------------------------------------------------------------------------------- 1 | Example Flask+SQLAlchemy Project 2 | ================================ 3 | 4 | This example project demos integration between Graphene, Flask and SQLAlchemy. 5 | The project contains two models, one named `Department` and another 6 | named `Employee`. 7 | 8 | Getting started 9 | --------------- 10 | 11 | First you'll need to get the source of the project. Do this by cloning the 12 | whole Graphene-SQLAlchemy repository: 13 | 14 | ```bash 15 | # Get the example project code 16 | git clone https://github.com/graphql-python/graphene-sqlalchemy.git 17 | cd graphene-sqlalchemy/examples/flask_sqlalchemy 18 | ``` 19 | 20 | It is good idea (but not required) to create a virtual environment 21 | for this project. We'll do this using 22 | [virtualenv](http://docs.python-guide.org/en/latest/dev/virtualenvs/) 23 | to keep things simple, 24 | but you may also find something like 25 | [virtualenvwrapper](https://virtualenvwrapper.readthedocs.org/en/latest/) 26 | to be useful: 27 | 28 | ```bash 29 | # Create a virtualenv in which we can install the dependencies 30 | virtualenv env 31 | source env/bin/activate 32 | ``` 33 | 34 | Now we can install our dependencies: 35 | 36 | ```bash 37 | pip install -r requirements.txt 38 | ``` 39 | 40 | Now the following command will setup the database, and start the server: 41 | 42 | ```bash 43 | ./app.py 44 | 45 | ``` 46 | 47 | 48 | Now head on over to 49 | [http://127.0.0.1:5000/graphql](http://127.0.0.1:5000/graphql) 50 | and run some queries! 
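For example, the following query (also shipped as `example_query` in this example's `app.py`) lists all employees sorted by name and id, together with their department and role:

```graphql
{
  allEmployees(sort: [NAME_ASC, ID_ASC]) {
    edges {
      node {
        id
        name
        department {
          id
          name
        }
        role {
          id
          name
        }
      }
    }
  }
}
```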
51 | -------------------------------------------------------------------------------- /examples/flask_sqlalchemy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/graphql-python/graphene-sqlalchemy/4ea6ee819600d65ad784c783a68321105a643d76/examples/flask_sqlalchemy/__init__.py -------------------------------------------------------------------------------- /examples/flask_sqlalchemy/app.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from database import db_session, init_db 4 | from flask import Flask 5 | from schema import schema 6 | 7 | from flask_graphql import GraphQLView 8 | 9 | app = Flask(__name__) 10 | app.debug = True 11 | 12 | example_query = """ 13 | { 14 | allEmployees(sort: [NAME_ASC, ID_ASC]) { 15 | edges { 16 | node { 17 | id 18 | name 19 | department { 20 | id 21 | name 22 | } 23 | role { 24 | id 25 | name 26 | } 27 | } 28 | } 29 | } 30 | } 31 | """ 32 | 33 | 34 | app.add_url_rule( 35 | "/graphql", view_func=GraphQLView.as_view("graphql", schema=schema, graphiql=True) 36 | ) 37 | 38 | 39 | @app.teardown_appcontext 40 | def shutdown_session(exception=None): 41 | db_session.remove() 42 | 43 | 44 | if __name__ == "__main__": 45 | init_db() 46 | app.run() 47 | -------------------------------------------------------------------------------- /examples/flask_sqlalchemy/database.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import create_engine 2 | from sqlalchemy.ext.declarative import declarative_base 3 | from sqlalchemy.orm import scoped_session, sessionmaker 4 | 5 | engine = create_engine("sqlite:///database.sqlite3", convert_unicode=True) 6 | db_session = scoped_session( 7 | sessionmaker(autocommit=False, autoflush=False, bind=engine) 8 | ) 9 | Base = declarative_base() 10 | Base.query = db_session.query_property() 11 | 12 | 13 | def init_db(): 14 | # import all modules here that might define models so that 15 | # they will be registered properly on the metadata. 
Otherwise 16 | # you will have to import them first before calling init_db() 17 | from models import Department, Employee, Role 18 | 19 | Base.metadata.drop_all(bind=engine) 20 | Base.metadata.create_all(bind=engine) 21 | 22 | # Create the fixtures 23 | engineering = Department(name="Engineering") 24 | db_session.add(engineering) 25 | hr = Department(name="Human Resources") 26 | db_session.add(hr) 27 | 28 | manager = Role(name="manager") 29 | db_session.add(manager) 30 | engineer = Role(name="engineer") 31 | db_session.add(engineer) 32 | 33 | peter = Employee(name="Peter", department=engineering, role=engineer) 34 | db_session.add(peter) 35 | roy = Employee(name="Roy", department=engineering, role=engineer) 36 | db_session.add(roy) 37 | tracy = Employee(name="Tracy", department=hr, role=manager) 38 | db_session.add(tracy) 39 | db_session.commit() 40 | -------------------------------------------------------------------------------- /examples/flask_sqlalchemy/models.py: -------------------------------------------------------------------------------- 1 | from database import Base 2 | from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func 3 | from sqlalchemy.orm import backref, relationship 4 | 5 | 6 | class Department(Base): 7 | __tablename__ = "department" 8 | id = Column(Integer, primary_key=True) 9 | name = Column(String) 10 | 11 | 12 | class Role(Base): 13 | __tablename__ = "roles" 14 | role_id = Column(Integer, primary_key=True) 15 | name = Column(String) 16 | 17 | 18 | class Employee(Base): 19 | __tablename__ = "employee" 20 | id = Column(Integer, primary_key=True) 21 | name = Column(String) 22 | # Use default=func.now() to set the default hiring time 23 | # of an Employee to be the current time when an 24 | # Employee record was created 25 | hired_on = Column(DateTime, default=func.now()) 26 | department_id = Column(Integer, ForeignKey("department.id")) 27 | role_id = Column(Integer, ForeignKey("roles.role_id")) 28 | # Use cascade='delete,all' to propagate the deletion of a Department onto its Employees 29 | department = relationship( 30 | Department, backref=backref("employees", uselist=True, cascade="delete,all") 31 | ) 32 | role = relationship( 33 | Role, backref=backref("roles", uselist=True, cascade="delete,all") 34 | ) 35 | -------------------------------------------------------------------------------- /examples/flask_sqlalchemy/requirements.txt: -------------------------------------------------------------------------------- 1 | -e ../../ 2 | Flask-GraphQL 3 | -------------------------------------------------------------------------------- /examples/flask_sqlalchemy/schema.py: -------------------------------------------------------------------------------- 1 | from models import Department as DepartmentModel 2 | from models import Employee as EmployeeModel 3 | from models import Role as RoleModel 4 | 5 | import graphene 6 | from graphene import relay 7 | from graphene_sqlalchemy import SQLAlchemyConnectionField, SQLAlchemyObjectType 8 | 9 | 10 | class Department(SQLAlchemyObjectType): 11 | class Meta: 12 | model = DepartmentModel 13 | interfaces = (relay.Node,) 14 | 15 | 16 | class Employee(SQLAlchemyObjectType): 17 | class Meta: 18 | model = EmployeeModel 19 | interfaces = (relay.Node,) 20 | 21 | 22 | class Role(SQLAlchemyObjectType): 23 | class Meta: 24 | model = RoleModel 25 | interfaces = (relay.Node,) 26 | 27 | 28 | class Query(graphene.ObjectType): 29 | node = relay.Node.Field() 30 | # Allow only single column sorting 31 | all_employees = 
SQLAlchemyConnectionField( 32 | Employee.connection, sort=Employee.sort_argument() 33 | ) 34 | # Allows sorting over multiple columns, by default over the primary key 35 | all_roles = SQLAlchemyConnectionField(Role.connection) 36 | # Disable sorting over this field 37 | all_departments = SQLAlchemyConnectionField(Department.connection, sort=None) 38 | 39 | 40 | schema = graphene.Schema(query=Query) 41 | -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/README.md: -------------------------------------------------------------------------------- 1 | Example Nameko+Graphene-SQLAlchemy Project 2 | ================================ 3 | 4 | This example is for those who are not using frameworks like Flask | Django which already have a View wrapper implemented to handle graphql request and response accordingly 5 | 6 | If you need a [graphiql](https://github.com/graphql/graphiql) interface on your application, kindly look at [flask_sqlalchemy](../flask_sqlalchemy). 7 | 8 | Using [nameko](https://github.com/nameko/nameko) as an example, but you can get rid of `service.py` 9 | 10 | The project contains two models, one named `Department` and another 11 | named `Employee`. 12 | 13 | Getting started 14 | --------------- 15 | 16 | First you'll need to get the source of the project. Do this by cloning the 17 | whole Graphene-SQLAlchemy repository: 18 | 19 | ```bash 20 | # Get the example project code 21 | git clone https://github.com/graphql-python/graphene-sqlalchemy.git 22 | cd graphene-sqlalchemy/examples/nameko_sqlalchemy 23 | ``` 24 | 25 | It is good idea (but not required) to create a virtual environment 26 | for this project. We'll do this using 27 | [virtualenv](http://docs.python-guide.org/en/latest/dev/virtualenvs/) 28 | to keep things simple, 29 | but you may also find something like 30 | [virtualenvwrapper](https://virtualenvwrapper.readthedocs.org/en/latest/) 31 | to be useful: 32 | 33 | ```bash 34 | # Create a virtualenv in which we can install the dependencies 35 | virtualenv env 36 | source env/bin/activate 37 | ``` 38 | 39 | Now we can install our dependencies: 40 | 41 | ```bash 42 | pip install -r requirements.txt 43 | ``` 44 | 45 | Now the following command will setup the database, and start the server: 46 | 47 | ```bash 48 | ./run.sh 49 | ``` 50 | 51 | Now head on over to postman and send POST request to: 52 | [http://127.0.0.1:5000/graphql](http://127.0.0.1:5000/graphql) 53 | and run some queries! 
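As a quick smoke test, you can POST a document like the one below. `app.py` accepts it either as a raw body with the `application/graphql` content type or wrapped in a JSON body of the form `{"query": "..."}` with `application/json`:

```graphql
{
  allEmployees {
    edges {
      node {
        id
        name
        department {
          name
        }
        role {
          name
        }
      }
    }
  }
}
```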
54 | -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/graphql-python/graphene-sqlalchemy/4ea6ee819600d65ad784c783a68321105a643d76/examples/nameko_sqlalchemy/__init__.py -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/app.py: -------------------------------------------------------------------------------- 1 | from database import db_session, init_db 2 | from schema import schema 3 | 4 | from graphql_server import ( 5 | HttpQueryError, 6 | default_format_error, 7 | encode_execution_results, 8 | json_encode, 9 | load_json_body, 10 | run_http_query, 11 | ) 12 | 13 | 14 | class App: 15 | def __init__(self): 16 | init_db() 17 | 18 | def query(self, request): 19 | data = self.parse_body(request) 20 | execution_results, params = run_http_query(schema, "post", data) 21 | result, status_code = encode_execution_results( 22 | execution_results, 23 | format_error=default_format_error, 24 | is_batch=False, 25 | encode=json_encode, 26 | ) 27 | return result 28 | 29 | def parse_body(self, request): 30 | # We use mimetype here since we don't need the other 31 | # information provided by content_type 32 | content_type = request.mimetype 33 | if content_type == "application/graphql": 34 | return {"query": request.data.decode("utf8")} 35 | 36 | elif content_type == "application/json": 37 | return load_json_body(request.data.decode("utf8")) 38 | 39 | elif content_type in ( 40 | "application/x-www-form-urlencoded", 41 | "multipart/form-data", 42 | ): 43 | return request.form 44 | 45 | return {} 46 | -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/config.yml: -------------------------------------------------------------------------------- 1 | WEB_SERVER_ADDRESS: '0.0.0.0:5000' 2 | -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/database.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import create_engine 2 | from sqlalchemy.ext.declarative import declarative_base 3 | from sqlalchemy.orm import scoped_session, sessionmaker 4 | 5 | engine = create_engine("sqlite:///database.sqlite3", convert_unicode=True) 6 | db_session = scoped_session( 7 | sessionmaker(autocommit=False, autoflush=False, bind=engine) 8 | ) 9 | Base = declarative_base() 10 | Base.query = db_session.query_property() 11 | 12 | 13 | def init_db(): 14 | # import all modules here that might define models so that 15 | # they will be registered properly on the metadata. 
Otherwise 16 | # you will have to import them first before calling init_db() 17 | from models import Department, Employee, Role 18 | 19 | Base.metadata.drop_all(bind=engine) 20 | Base.metadata.create_all(bind=engine) 21 | 22 | # Create the fixtures 23 | engineering = Department(name="Engineering") 24 | db_session.add(engineering) 25 | hr = Department(name="Human Resources") 26 | db_session.add(hr) 27 | 28 | manager = Role(name="manager") 29 | db_session.add(manager) 30 | engineer = Role(name="engineer") 31 | db_session.add(engineer) 32 | 33 | peter = Employee(name="Peter", department=engineering, role=engineer) 34 | db_session.add(peter) 35 | roy = Employee(name="Roy", department=engineering, role=engineer) 36 | db_session.add(roy) 37 | tracy = Employee(name="Tracy", department=hr, role=manager) 38 | db_session.add(tracy) 39 | db_session.commit() 40 | -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/models.py: -------------------------------------------------------------------------------- 1 | from database import Base 2 | from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func 3 | from sqlalchemy.orm import backref, relationship 4 | 5 | 6 | class Department(Base): 7 | __tablename__ = "department" 8 | id = Column(Integer, primary_key=True) 9 | name = Column(String) 10 | 11 | 12 | class Role(Base): 13 | __tablename__ = "roles" 14 | role_id = Column(Integer, primary_key=True) 15 | name = Column(String) 16 | 17 | 18 | class Employee(Base): 19 | __tablename__ = "employee" 20 | id = Column(Integer, primary_key=True) 21 | name = Column(String) 22 | # Use default=func.now() to set the default hiring time 23 | # of an Employee to be the current time when an 24 | # Employee record was created 25 | hired_on = Column(DateTime, default=func.now()) 26 | department_id = Column(Integer, ForeignKey("department.id")) 27 | role_id = Column(Integer, ForeignKey("roles.role_id")) 28 | # Use cascade='delete,all' to propagate the deletion of a Department onto its Employees 29 | department = relationship( 30 | Department, backref=backref("employees", uselist=True, cascade="delete,all") 31 | ) 32 | role = relationship( 33 | Role, backref=backref("roles", uselist=True, cascade="delete,all") 34 | ) 35 | -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/requirements.txt: -------------------------------------------------------------------------------- 1 | -e ../../ 2 | graphql-server-core 3 | nameko 4 | -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo "Starting application service server" 3 | # Run Service 4 | nameko run --config config.yml service 5 | -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/schema.py: -------------------------------------------------------------------------------- 1 | from models import Department as DepartmentModel 2 | from models import Employee as EmployeeModel 3 | from models import Role as RoleModel 4 | 5 | import graphene 6 | from graphene import relay 7 | from graphene_sqlalchemy import SQLAlchemyConnectionField, SQLAlchemyObjectType 8 | 9 | 10 | class Department(SQLAlchemyObjectType): 11 | class Meta: 12 | model = DepartmentModel 13 | interfaces = (relay.Node,) 14 | 15 | 16 | class Employee(SQLAlchemyObjectType): 17 | 
class Meta: 18 | model = EmployeeModel 19 | interfaces = (relay.Node,) 20 | 21 | 22 | class Role(SQLAlchemyObjectType): 23 | class Meta: 24 | model = RoleModel 25 | interfaces = (relay.Node,) 26 | 27 | 28 | class Query(graphene.ObjectType): 29 | node = relay.Node.Field() 30 | all_employees = SQLAlchemyConnectionField(Employee.connection) 31 | all_roles = SQLAlchemyConnectionField(Role.connection) 32 | role = graphene.Field(Role) 33 | 34 | 35 | schema = graphene.Schema(query=Query) 36 | -------------------------------------------------------------------------------- /examples/nameko_sqlalchemy/service.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from app import App 3 | from nameko.web.handlers import http 4 | 5 | 6 | class DepartmentService: 7 | name = "department" 8 | 9 | @http("POST", "/graphql") 10 | def query(self, request): 11 | return App().query(request) 12 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/__init__.py: -------------------------------------------------------------------------------- 1 | from .fields import SQLAlchemyConnectionField 2 | from .types import SQLAlchemyInterface, SQLAlchemyObjectType 3 | from .utils import get_query, get_session 4 | 5 | __version__ = "3.0.0rc2" 6 | 7 | __all__ = [ 8 | "__version__", 9 | "SQLAlchemyInterface", 10 | "SQLAlchemyObjectType", 11 | "SQLAlchemyConnectionField", 12 | "get_query", 13 | "get_session", 14 | ] 15 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/batching.py: -------------------------------------------------------------------------------- 1 | """The dataloader uses "select in loading" strategy to load related entities.""" 2 | from asyncio import get_event_loop 3 | from typing import Any, Dict 4 | 5 | import sqlalchemy 6 | from sqlalchemy.orm import Session, strategies 7 | from sqlalchemy.orm.query import QueryContext 8 | from sqlalchemy.util import immutabledict 9 | 10 | from .utils import ( 11 | SQL_VERSION_HIGHER_EQUAL_THAN_1_4, 12 | SQL_VERSION_HIGHER_EQUAL_THAN_2, 13 | is_graphene_version_less_than, 14 | ) 15 | 16 | 17 | def get_data_loader_impl() -> Any: # pragma: no cover 18 | """Graphene >= 3.1.1 ships a copy of aiodataloader with minor fixes. To preserve backward-compatibility, 19 | aiodataloader is used in conjunction with older versions of graphene""" 20 | if is_graphene_version_less_than("3.1.1"): 21 | from aiodataloader import DataLoader 22 | else: 23 | from graphene.utils.dataloader import DataLoader 24 | 25 | return DataLoader 26 | 27 | 28 | DataLoader = get_data_loader_impl() 29 | 30 | 31 | class RelationshipLoader(DataLoader): 32 | cache = False 33 | 34 | def __init__(self, relationship_prop, selectin_loader): 35 | super().__init__() 36 | self.relationship_prop = relationship_prop 37 | self.selectin_loader = selectin_loader 38 | 39 | async def batch_load_fn(self, parents): 40 | """ 41 | Batch loads the relationships of all the parents as one SQL statement. 42 | 43 | There is no way to do this out-of-the-box with SQLAlchemy but 44 | we can piggyback on some internal APIs of the `selectin` 45 | eager loading strategy. It's a bit hacky but it's preferable 46 | than re-implementing and maintainnig a big chunk of the `selectin` 47 | loader logic ourselves. 48 | 49 | The approach here is to build a regular query that 50 | selects the parent and `selectin` load the relationship. 
51 | But instead of having the query emits 2 `SELECT` statements 52 | when callling `all()`, we skip the first `SELECT` statement 53 | and jump right before the `selectin` loader is called. 54 | To accomplish this, we have to construct objects that are 55 | normally built in the first part of the query in order 56 | to call directly `SelectInLoader._load_for_path`. 57 | 58 | TODO Move this logic to a util in the SQLAlchemy repo as per 59 | SQLAlchemy's main maitainer suggestion. 60 | See https://git.io/JewQ7 61 | """ 62 | child_mapper = self.relationship_prop.mapper 63 | parent_mapper = self.relationship_prop.parent 64 | session = Session.object_session(parents[0]) 65 | 66 | # These issues are very unlikely to happen in practice... 67 | for parent in parents: 68 | # assert parent.__mapper__ is parent_mapper 69 | # All instances must share the same session 70 | assert session is Session.object_session(parent) 71 | # The behavior of `selectin` is undefined if the parent is dirty 72 | assert parent not in session.dirty 73 | 74 | # Should the boolean be set to False? Does it matter for our purposes? 75 | states = [(sqlalchemy.inspect(parent), True) for parent in parents] 76 | 77 | # For our purposes, the query_context will only used to get the session 78 | query_context = None 79 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4: 80 | parent_mapper_query = session.query(parent_mapper.entity) 81 | query_context = parent_mapper_query._compile_context() 82 | else: 83 | query_context = QueryContext(session.query(parent_mapper.entity)) 84 | if SQL_VERSION_HIGHER_EQUAL_THAN_2: # pragma: no cover 85 | self.selectin_loader._load_for_path( 86 | query_context, 87 | parent_mapper._path_registry, 88 | states, 89 | None, 90 | child_mapper, 91 | None, 92 | None, # recursion depth can be none 93 | immutabledict(), # default value for selectinload->lazyload 94 | ) 95 | elif SQL_VERSION_HIGHER_EQUAL_THAN_1_4: 96 | self.selectin_loader._load_for_path( 97 | query_context, 98 | parent_mapper._path_registry, 99 | states, 100 | None, 101 | child_mapper, 102 | None, 103 | ) 104 | else: 105 | self.selectin_loader._load_for_path( 106 | query_context, 107 | parent_mapper._path_registry, 108 | states, 109 | None, 110 | child_mapper, 111 | ) 112 | return [getattr(parent, self.relationship_prop.key) for parent in parents] 113 | 114 | 115 | # Cache this across `batch_load_fn` calls 116 | # This is so SQL string generation is cached under-the-hood via `bakery` 117 | # Caching the relationship loader for each relationship prop. 
118 | RELATIONSHIP_LOADERS_CACHE: Dict[ 119 | sqlalchemy.orm.relationships.RelationshipProperty, RelationshipLoader 120 | ] = {} 121 | 122 | 123 | def get_batch_resolver(relationship_prop): 124 | """Get the resolve function for the given relationship.""" 125 | 126 | def _get_loader(relationship_prop): 127 | """Retrieve the cached loader of the given relationship.""" 128 | loader = RELATIONSHIP_LOADERS_CACHE.get(relationship_prop, None) 129 | if loader is None or loader.loop != get_event_loop(): 130 | selectin_loader = strategies.SelectInLoader( 131 | relationship_prop, (("lazy", "selectin"),) 132 | ) 133 | loader = RelationshipLoader( 134 | relationship_prop=relationship_prop, 135 | selectin_loader=selectin_loader, 136 | ) 137 | RELATIONSHIP_LOADERS_CACHE[relationship_prop] = loader 138 | return loader 139 | 140 | async def resolve(root, info, **args): 141 | return await _get_loader(relationship_prop).load(root) 142 | 143 | return resolve 144 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/enums.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.orm import ColumnProperty 2 | from sqlalchemy.types import Enum as SQLAlchemyEnumType 3 | 4 | from graphene import Argument, Enum, List 5 | 6 | from .utils import EnumValue, to_enum_value_name, to_type_name 7 | 8 | 9 | def _convert_sa_to_graphene_enum(sa_enum, fallback_name=None): 10 | """Convert the given SQLAlchemy Enum type to a Graphene Enum type. 11 | 12 | The name of the Graphene Enum will be determined as follows: 13 | If the SQLAlchemy Enum is based on a Python Enum, use the name 14 | of the Python Enum. Otherwise, if the SQLAlchemy Enum is named, 15 | use the SQL name after conversion to a type name. Otherwise, use 16 | the given fallback_name or raise an error if it is empty. 17 | 18 | The Enum value names are converted to upper case if necessary. 
19 | """ 20 | if not isinstance(sa_enum, SQLAlchemyEnumType): 21 | raise TypeError("Expected sqlalchemy.types.Enum, but got: {!r}".format(sa_enum)) 22 | enum_class = sa_enum.enum_class 23 | if enum_class: 24 | if all(to_enum_value_name(key) == key for key in enum_class.__members__): 25 | return Enum.from_enum(enum_class) 26 | name = enum_class.__name__ 27 | members = [ 28 | (to_enum_value_name(key), value.value) 29 | for key, value in enum_class.__members__.items() 30 | ] 31 | else: 32 | sql_enum_name = sa_enum.name 33 | if sql_enum_name: 34 | name = to_type_name(sql_enum_name) 35 | elif fallback_name: 36 | name = fallback_name 37 | else: 38 | raise TypeError("No type name specified for {!r}".format(sa_enum)) 39 | members = [(to_enum_value_name(key), key) for key in sa_enum.enums] 40 | return Enum(name, members) 41 | 42 | 43 | def enum_for_sa_enum(sa_enum, registry): 44 | """Return the Graphene Enum type for the specified SQLAlchemy Enum type.""" 45 | if not isinstance(sa_enum, SQLAlchemyEnumType): 46 | raise TypeError("Expected sqlalchemy.types.Enum, but got: {!r}".format(sa_enum)) 47 | enum = registry.get_graphene_enum_for_sa_enum(sa_enum) 48 | if not enum: 49 | enum = _convert_sa_to_graphene_enum(sa_enum) 50 | registry.register_enum(sa_enum, enum) 51 | return enum 52 | 53 | 54 | def enum_for_field(obj_type, field_name): 55 | """Return the Graphene Enum type for the specified Graphene field.""" 56 | from .types import SQLAlchemyObjectType 57 | 58 | if not isinstance(obj_type, type) or not issubclass(obj_type, SQLAlchemyObjectType): 59 | raise TypeError("Expected SQLAlchemyObjectType, but got: {!r}".format(obj_type)) 60 | if not field_name or not isinstance(field_name, str): 61 | raise TypeError("Expected a field name, but got: {!r}".format(field_name)) 62 | registry = obj_type._meta.registry 63 | orm_field = registry.get_orm_field_for_graphene_field(obj_type, field_name) 64 | if orm_field is None: 65 | raise TypeError("Cannot get {}.{}".format(obj_type._meta.name, field_name)) 66 | if not isinstance(orm_field, ColumnProperty): 67 | raise TypeError( 68 | "{}.{} does not map to model column".format(obj_type._meta.name, field_name) 69 | ) 70 | column = orm_field.columns[0] 71 | sa_enum = column.type 72 | if not isinstance(sa_enum, SQLAlchemyEnumType): 73 | raise TypeError( 74 | "{}.{} does not map to enum column".format(obj_type._meta.name, field_name) 75 | ) 76 | enum = registry.get_graphene_enum_for_sa_enum(sa_enum) 77 | if not enum: 78 | fallback_name = obj_type._meta.name + to_type_name(field_name) 79 | enum = _convert_sa_to_graphene_enum(sa_enum, fallback_name) 80 | registry.register_enum(sa_enum, enum) 81 | return enum 82 | 83 | 84 | def _default_sort_enum_symbol_name(column_name, sort_asc=True): 85 | return to_enum_value_name(column_name) + ("_ASC" if sort_asc else "_DESC") 86 | 87 | 88 | def sort_enum_for_object_type( 89 | obj_type, name=None, only_fields=None, only_indexed=None, get_symbol_name=None 90 | ): 91 | """Return Graphene Enum for sorting the given SQLAlchemyObjectType. 92 | 93 | Parameters 94 | - obj_type : SQLAlchemyObjectType 95 | The object type for which the sort Enum shall be generated. 96 | - name : str, optional, default None 97 | Name to use for the sort Enum. 98 | If not provided, it will be set to the object type name + 'SortEnum' 99 | - only_fields : sequence, optional, default None 100 | If this is set, only fields from this sequence will be considered. 
101 | - only_indexed : bool, optional, default False 102 | If this is set, only indexed columns will be considered. 103 | - get_symbol_name : function, optional, default None 104 | Function which takes the column name and a boolean indicating 105 | if the sort direction is ascending, and returns the symbol name 106 | for the current column and sort direction. If no such function 107 | is passed, a default function will be used that creates the symbols 108 | 'foo_asc' and 'foo_desc' for a column with the name 'foo'. 109 | 110 | Returns 111 | - Enum 112 | The Graphene Enum type 113 | """ 114 | name = name or obj_type._meta.name + "SortEnum" 115 | registry = obj_type._meta.registry 116 | enum = registry.get_sort_enum_for_object_type(obj_type) 117 | custom_options = dict( 118 | only_fields=only_fields, 119 | only_indexed=only_indexed, 120 | get_symbol_name=get_symbol_name, 121 | ) 122 | if enum: 123 | if name != enum.__name__ or custom_options != enum.custom_options: 124 | raise ValueError( 125 | "Sort enum for {} has already been customized".format(obj_type) 126 | ) 127 | else: 128 | members = [] 129 | default = [] 130 | fields = obj_type._meta.fields 131 | get_name = get_symbol_name or _default_sort_enum_symbol_name 132 | for field_name in fields: 133 | if only_fields and field_name not in only_fields: 134 | continue 135 | orm_field = registry.get_orm_field_for_graphene_field(obj_type, field_name) 136 | if not isinstance(orm_field, ColumnProperty): 137 | continue 138 | column = orm_field.columns[0] 139 | if only_indexed and not (column.primary_key or column.index): 140 | continue 141 | asc_name = get_name(field_name, True) 142 | asc_value = EnumValue(asc_name, column.asc()) 143 | desc_name = get_name(field_name, False) 144 | desc_value = EnumValue(desc_name, column.desc()) 145 | if column.primary_key: 146 | default.append(asc_value) 147 | members.extend(((asc_name, asc_value), (desc_name, desc_value))) 148 | enum = Enum(name, members) 149 | enum.default = default # store default as attribute 150 | enum.custom_options = custom_options 151 | registry.register_sort_enum(obj_type, enum) 152 | return enum 153 | 154 | 155 | def sort_argument_for_object_type( 156 | obj_type, 157 | enum_name=None, 158 | only_fields=None, 159 | only_indexed=None, 160 | get_symbol_name=None, 161 | has_default=True, 162 | ): 163 | """ "Returns Graphene Argument for sorting the given SQLAlchemyObjectType. 164 | 165 | Parameters 166 | - obj_type : SQLAlchemyObjectType 167 | The object type for which the sort Argument shall be generated. 168 | - enum_name : str, optional, default None 169 | Name to use for the sort Enum. 170 | If not provided, it will be set to the object type name + 'SortEnum' 171 | - only_fields : sequence, optional, default None 172 | If this is set, only fields from this sequence will be considered. 173 | - only_indexed : bool, optional, default False 174 | If this is set, only indexed columns will be considered. 175 | - get_symbol_name : function, optional, default None 176 | Function which takes the column name and a boolean indicating 177 | if the sort direction is ascending, and returns the symbol name 178 | for the current column and sort direction. If no such function 179 | is passed, a default function will be used that creates the symbols 180 | 'foo_asc' and 'foo_desc' for a column with the name 'foo'. 181 | - has_default : bool, optional, default True 182 | If this is set to False, no sorting will happen when this argument is not 183 | passed. 
Otherwise results will be sortied by the primary key(s) of the model. 184 | 185 | Returns 186 | - Enum 187 | A Graphene Argument that accepts a list of sorting directions for the model. 188 | """ 189 | enum = sort_enum_for_object_type( 190 | obj_type, 191 | enum_name, 192 | only_fields=only_fields, 193 | only_indexed=only_indexed, 194 | get_symbol_name=get_symbol_name, 195 | ) 196 | if not has_default: 197 | enum.default = None 198 | 199 | return Argument(List(enum), default_value=enum.default) 200 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/fields.py: -------------------------------------------------------------------------------- 1 | import enum 2 | import warnings 3 | from functools import partial 4 | 5 | from promise import Promise, is_thenable 6 | from sqlalchemy.orm.query import Query 7 | 8 | from graphene.relay import Connection, ConnectionField 9 | from graphene.relay.connection import connection_adapter, page_info_adapter 10 | from graphql_relay import connection_from_array_slice 11 | 12 | from .batching import get_batch_resolver 13 | from .filters import BaseTypeFilter 14 | from .utils import ( 15 | SQL_VERSION_HIGHER_EQUAL_THAN_1_4, 16 | EnumValue, 17 | get_nullable_type, 18 | get_query, 19 | get_session, 20 | ) 21 | 22 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4: 23 | from sqlalchemy.ext.asyncio import AsyncSession 24 | 25 | 26 | class SQLAlchemyConnectionField(ConnectionField): 27 | @property 28 | def type(self): 29 | from .types import SQLAlchemyObjectType 30 | 31 | type_ = super(ConnectionField, self).type 32 | nullable_type = get_nullable_type(type_) 33 | if issubclass(nullable_type, Connection): 34 | return type_ 35 | assert issubclass(nullable_type, SQLAlchemyObjectType), ( 36 | "SQLALchemyConnectionField only accepts SQLAlchemyObjectType types, not {}" 37 | ).format(nullable_type.__name__) 38 | assert nullable_type.connection, "The type {} doesn't have a connection".format( 39 | nullable_type.__name__ 40 | ) 41 | assert type_ == nullable_type, ( 42 | "Passing a SQLAlchemyObjectType instance is deprecated. " 43 | "Pass the connection type instead accessible via SQLAlchemyObjectType.connection" 44 | ) 45 | return nullable_type.connection 46 | 47 | def __init__(self, type_, *args, **kwargs): 48 | nullable_type = get_nullable_type(type_) 49 | # Handle Sorting and Filtering 50 | if ( 51 | "sort" not in kwargs 52 | and nullable_type 53 | and issubclass(nullable_type, Connection) 54 | ): 55 | # Let super class raise if type is not a Connection 56 | try: 57 | kwargs.setdefault("sort", nullable_type.Edge.node._type.sort_argument()) 58 | except (AttributeError, TypeError): 59 | raise TypeError( 60 | 'Cannot create sort argument for {}. A model is required. 
Set the "sort" argument' 61 | " to None to disabling the creation of the sort query argument".format( 62 | nullable_type.__name__ 63 | ) 64 | ) 65 | elif "sort" in kwargs and kwargs["sort"] is None: 66 | del kwargs["sort"] 67 | 68 | if ( 69 | "filter" not in kwargs 70 | and nullable_type 71 | and issubclass(nullable_type, Connection) 72 | ): 73 | # Only add filtering if a filter argument exists on the object type 74 | filter_argument = nullable_type.Edge.node._type.get_filter_argument() 75 | if filter_argument: 76 | kwargs.setdefault("filter", filter_argument) 77 | elif "filter" in kwargs and kwargs["filter"] is None: 78 | del kwargs["filter"] 79 | 80 | super(SQLAlchemyConnectionField, self).__init__(type_, *args, **kwargs) 81 | 82 | @property 83 | def model(self): 84 | return get_nullable_type(self.type)._meta.node._meta.model 85 | 86 | @classmethod 87 | def get_query(cls, model, info, sort=None, filter=None, **args): 88 | query = get_query(model, info.context) 89 | if sort is not None: 90 | if not isinstance(sort, list): 91 | sort = [sort] 92 | sort_args = [] 93 | # ensure consistent handling of graphene Enums, enum values and 94 | # plain strings 95 | for item in sort: 96 | if isinstance(item, enum.Enum): 97 | sort_args.append(item.value.value) 98 | elif isinstance(item, EnumValue): 99 | sort_args.append(item.value) 100 | else: 101 | sort_args.append(item) 102 | query = query.order_by(*sort_args) 103 | 104 | if filter is not None: 105 | assert isinstance(filter, dict) 106 | filter_type: BaseTypeFilter = type(filter) 107 | query, clauses = filter_type.execute_filters(query, filter) 108 | query = query.filter(*clauses) 109 | return query 110 | 111 | @classmethod 112 | def resolve_connection(cls, connection_type, model, info, args, resolved): 113 | session = get_session(info.context) 114 | if resolved is None: 115 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 116 | 117 | async def get_result(): 118 | return await cls.resolve_connection_async( 119 | connection_type, model, info, args, resolved 120 | ) 121 | 122 | return get_result() 123 | 124 | else: 125 | resolved = cls.get_query(model, info, **args) 126 | if isinstance(resolved, Query): 127 | _len = resolved.count() 128 | else: 129 | _len = len(resolved) 130 | 131 | def adjusted_connection_adapter(edges, pageInfo): 132 | return connection_adapter(connection_type, edges, pageInfo) 133 | 134 | connection = connection_from_array_slice( 135 | array_slice=resolved, 136 | args=args, 137 | slice_start=0, 138 | array_length=_len, 139 | array_slice_length=_len, 140 | connection_type=adjusted_connection_adapter, 141 | edge_type=connection_type.Edge, 142 | page_info_type=page_info_adapter, 143 | ) 144 | connection.iterable = resolved 145 | connection.length = _len 146 | return connection 147 | 148 | @classmethod 149 | async def resolve_connection_async( 150 | cls, connection_type, model, info, args, resolved 151 | ): 152 | session = get_session(info.context) 153 | if resolved is None: 154 | query = cls.get_query(model, info, **args) 155 | resolved = (await session.scalars(query)).all() 156 | if isinstance(resolved, Query): 157 | _len = resolved.count() 158 | else: 159 | _len = len(resolved) 160 | 161 | def adjusted_connection_adapter(edges, pageInfo): 162 | return connection_adapter(connection_type, edges, pageInfo) 163 | 164 | connection = connection_from_array_slice( 165 | array_slice=resolved, 166 | args=args, 167 | slice_start=0, 168 | array_length=_len, 169 | array_slice_length=_len, 170 | 
connection_type=adjusted_connection_adapter, 171 | edge_type=connection_type.Edge, 172 | page_info_type=page_info_adapter, 173 | ) 174 | connection.iterable = resolved 175 | connection.length = _len 176 | return connection 177 | 178 | @classmethod 179 | def connection_resolver(cls, resolver, connection_type, model, root, info, **args): 180 | resolved = resolver(root, info, **args) 181 | 182 | on_resolve = partial(cls.resolve_connection, connection_type, model, info, args) 183 | if is_thenable(resolved): 184 | return Promise.resolve(resolved).then(on_resolve) 185 | 186 | return on_resolve(resolved) 187 | 188 | def wrap_resolve(self, parent_resolver): 189 | return partial( 190 | self.connection_resolver, 191 | parent_resolver, 192 | get_nullable_type(self.type), 193 | self.model, 194 | ) 195 | 196 | 197 | # TODO Remove in next major version 198 | class UnsortedSQLAlchemyConnectionField(SQLAlchemyConnectionField): 199 | def __init__(self, type_, *args, **kwargs): 200 | if "sort" in kwargs and kwargs["sort"] is not None: 201 | warnings.warn( 202 | "UnsortedSQLAlchemyConnectionField does not support sorting. " 203 | "All sorting arguments will be ignored." 204 | ) 205 | kwargs["sort"] = None 206 | warnings.warn( 207 | "UnsortedSQLAlchemyConnectionField is deprecated and will be removed in the next " 208 | "major version. Use SQLAlchemyConnectionField instead and either don't " 209 | "provide the `sort` argument or set it to None if you do not want sorting.", 210 | DeprecationWarning, 211 | ) 212 | super(UnsortedSQLAlchemyConnectionField, self).__init__(type_, *args, **kwargs) 213 | 214 | 215 | class BatchSQLAlchemyConnectionField(SQLAlchemyConnectionField): 216 | """ 217 | This is currently experimental. 218 | The API and behavior may change in future versions. 219 | Use at your own risk. 220 | """ 221 | 222 | @classmethod 223 | def connection_resolver(cls, resolver, connection_type, model, root, info, **args): 224 | if root is None: 225 | resolved = resolver(root, info, **args) 226 | on_resolve = partial( 227 | cls.resolve_connection, connection_type, model, info, args 228 | ) 229 | else: 230 | relationship_prop = None 231 | for relationship in root.__class__.__mapper__.relationships: 232 | if relationship.mapper.class_ == model: 233 | relationship_prop = relationship 234 | break 235 | resolved = get_batch_resolver(relationship_prop)(root, info, **args) 236 | on_resolve = partial( 237 | cls.resolve_connection, connection_type, root, info, args 238 | ) 239 | 240 | if is_thenable(resolved): 241 | return Promise.resolve(resolved).then(on_resolve) 242 | 243 | return on_resolve(resolved) 244 | 245 | @classmethod 246 | def from_relationship(cls, relationship, registry, **field_kwargs): 247 | model = relationship.mapper.entity 248 | model_type = registry.get_type_for_model(model) 249 | return cls( 250 | model_type.connection, 251 | resolver=get_batch_resolver(relationship), 252 | **field_kwargs, 253 | ) 254 | 255 | 256 | def default_connection_field_factory(relationship, registry, **field_kwargs): 257 | model = relationship.mapper.entity 258 | model_type = registry.get_type_for_model(model) 259 | return __connectionFactory(model_type, **field_kwargs) 260 | 261 | 262 | # TODO Remove in next major version 263 | __connectionFactory = UnsortedSQLAlchemyConnectionField 264 | 265 | 266 | def createConnectionField(type_, **field_kwargs): 267 | warnings.warn( 268 | "createConnectionField is deprecated and will be removed in the next " 269 | "major version. 
Use SQLAlchemyObjectType.Meta.connection_field_factory instead.", 270 | DeprecationWarning, 271 | ) 272 | return __connectionFactory(type_, **field_kwargs) 273 | 274 | 275 | def registerConnectionFieldFactory(factoryMethod): 276 | warnings.warn( 277 | "registerConnectionFieldFactory is deprecated and will be removed in the next " 278 | "major version. Use SQLAlchemyObjectType.Meta.connection_field_factory instead.", 279 | DeprecationWarning, 280 | ) 281 | global __connectionFactory 282 | __connectionFactory = factoryMethod 283 | 284 | 285 | def unregisterConnectionFieldFactory(): 286 | warnings.warn( 287 | "registerConnectionFieldFactory is deprecated and will be removed in the next " 288 | "major version. Use SQLAlchemyObjectType.Meta.connection_field_factory instead.", 289 | DeprecationWarning, 290 | ) 291 | global __connectionFactory 292 | __connectionFactory = UnsortedSQLAlchemyConnectionField 293 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/registry.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | from collections import defaultdict 3 | from typing import TYPE_CHECKING, List, Type 4 | 5 | from sqlalchemy.types import Enum as SQLAlchemyEnumType 6 | 7 | import graphene 8 | from graphene import Enum 9 | from graphene.types.base import BaseType 10 | 11 | if TYPE_CHECKING: # pragma: no_cover 12 | from .filters import BaseTypeFilter, FieldFilter, RelationshipFilter 13 | 14 | 15 | class Registry(object): 16 | def __init__(self): 17 | self._registry = {} 18 | self._registry_models = {} 19 | self._registry_orm_fields = defaultdict(dict) 20 | self._registry_composites = {} 21 | self._registry_enums = {} 22 | self._registry_sort_enums = {} 23 | self._registry_unions = {} 24 | self._registry_scalar_filters = {} 25 | self._registry_base_type_filters = {} 26 | self._registry_relationship_filters = {} 27 | 28 | self._init_base_filters() 29 | 30 | def _init_base_filters(self): 31 | import graphene_sqlalchemy.filters as gsqa_filters 32 | 33 | from .filters import FieldFilter 34 | 35 | field_filter_classes = [ 36 | filter_cls[1] 37 | for filter_cls in inspect.getmembers(gsqa_filters, inspect.isclass) 38 | if ( 39 | filter_cls[1] is not FieldFilter 40 | and FieldFilter in filter_cls[1].__mro__ 41 | and getattr(filter_cls[1]._meta, "graphene_type", False) 42 | ) 43 | ] 44 | for field_filter_class in field_filter_classes: 45 | self.register_filter_for_scalar_type( 46 | field_filter_class._meta.graphene_type, field_filter_class 47 | ) 48 | 49 | def register(self, obj_type): 50 | from .types import SQLAlchemyBase 51 | 52 | if not isinstance(obj_type, type) or not issubclass(obj_type, SQLAlchemyBase): 53 | raise TypeError("Expected SQLAlchemyBase, but got: {!r}".format(obj_type)) 54 | assert obj_type._meta.registry == self, "Registry for a Model have to match." 55 | # assert self.get_type_for_model(cls._meta.model) in [None, cls], ( 56 | # 'SQLAlchemy model "{}" already associated with ' 57 | # 'another type "{}".' 
58 | # ).format(cls._meta.model, self._registry[cls._meta.model]) 59 | self._registry[obj_type._meta.model] = obj_type 60 | 61 | def get_type_for_model(self, model): 62 | return self._registry.get(model) 63 | 64 | def register_orm_field(self, obj_type, field_name, orm_field): 65 | from .types import SQLAlchemyBase 66 | 67 | if not isinstance(obj_type, type) or not issubclass(obj_type, SQLAlchemyBase): 68 | raise TypeError("Expected SQLAlchemyBase, but got: {!r}".format(obj_type)) 69 | if not field_name or not isinstance(field_name, str): 70 | raise TypeError("Expected a field name, but got: {!r}".format(field_name)) 71 | self._registry_orm_fields[obj_type][field_name] = orm_field 72 | 73 | def get_orm_field_for_graphene_field(self, obj_type, field_name): 74 | return self._registry_orm_fields.get(obj_type, {}).get(field_name) 75 | 76 | def register_composite_converter(self, composite, converter): 77 | self._registry_composites[composite] = converter 78 | 79 | def get_converter_for_composite(self, composite): 80 | return self._registry_composites.get(composite) 81 | 82 | def register_enum(self, sa_enum: SQLAlchemyEnumType, graphene_enum: Enum): 83 | if not isinstance(sa_enum, SQLAlchemyEnumType): 84 | raise TypeError( 85 | "Expected SQLAlchemyEnumType, but got: {!r}".format(sa_enum) 86 | ) 87 | if not isinstance(graphene_enum, type(Enum)): 88 | raise TypeError( 89 | "Expected Graphene Enum, but got: {!r}".format(graphene_enum) 90 | ) 91 | 92 | self._registry_enums[sa_enum] = graphene_enum 93 | 94 | def get_graphene_enum_for_sa_enum(self, sa_enum: SQLAlchemyEnumType): 95 | return self._registry_enums.get(sa_enum) 96 | 97 | def register_sort_enum(self, obj_type, sort_enum: Enum): 98 | 99 | from .types import SQLAlchemyObjectType 100 | 101 | if not isinstance(obj_type, type) or not issubclass( 102 | obj_type, SQLAlchemyObjectType 103 | ): 104 | raise TypeError( 105 | "Expected SQLAlchemyObjectType, but got: {!r}".format(obj_type) 106 | ) 107 | if not isinstance(sort_enum, type(Enum)): 108 | raise TypeError("Expected Graphene Enum, but got: {!r}".format(sort_enum)) 109 | self._registry_sort_enums[obj_type] = sort_enum 110 | 111 | def get_sort_enum_for_object_type(self, obj_type: graphene.ObjectType): 112 | return self._registry_sort_enums.get(obj_type) 113 | 114 | def register_union_type( 115 | self, union: Type[graphene.Union], obj_types: List[Type[graphene.ObjectType]] 116 | ): 117 | if not issubclass(union, graphene.Union): 118 | raise TypeError("Expected graphene.Union, but got: {!r}".format(union)) 119 | 120 | for obj_type in obj_types: 121 | if not issubclass(obj_type, graphene.ObjectType): 122 | raise TypeError( 123 | "Expected Graphene ObjectType, but got: {!r}".format(obj_type) 124 | ) 125 | 126 | self._registry_unions[frozenset(obj_types)] = union 127 | 128 | def get_union_for_object_types(self, obj_types: List[Type[graphene.ObjectType]]): 129 | return self._registry_unions.get(frozenset(obj_types)) 130 | 131 | # Filter Scalar Fields of Object Types 132 | def register_filter_for_scalar_type( 133 | self, scalar_type: Type[graphene.Scalar], filter_obj: Type["FieldFilter"] 134 | ): 135 | from .filters import FieldFilter 136 | 137 | if not isinstance(scalar_type, type(graphene.Scalar)): 138 | raise TypeError("Expected Scalar, but got: {!r}".format(scalar_type)) 139 | 140 | if not issubclass(filter_obj, FieldFilter): 141 | raise TypeError("Expected ScalarFilter, but got: {!r}".format(filter_obj)) 142 | self._registry_scalar_filters[scalar_type] = filter_obj 143 | 144 | def 
get_filter_for_sql_enum_type( 145 | self, enum_type: Type[graphene.Enum] 146 | ) -> Type["FieldFilter"]: 147 | from .filters import SQLEnumFilter 148 | 149 | filter_type = self._registry_scalar_filters.get(enum_type) 150 | if not filter_type: 151 | filter_type = SQLEnumFilter.create_type( 152 | f"Default{enum_type.__name__}EnumFilter", graphene_type=enum_type 153 | ) 154 | self._registry_scalar_filters[enum_type] = filter_type 155 | return filter_type 156 | 157 | def get_filter_for_py_enum_type( 158 | self, enum_type: Type[graphene.Enum] 159 | ) -> Type["FieldFilter"]: 160 | from .filters import PyEnumFilter 161 | 162 | filter_type = self._registry_scalar_filters.get(enum_type) 163 | if not filter_type: 164 | filter_type = PyEnumFilter.create_type( 165 | f"Default{enum_type.__name__}EnumFilter", graphene_type=enum_type 166 | ) 167 | self._registry_scalar_filters[enum_type] = filter_type 168 | return filter_type 169 | 170 | def get_filter_for_scalar_type( 171 | self, scalar_type: Type[graphene.Scalar] 172 | ) -> Type["FieldFilter"]: 173 | from .filters import FieldFilter 174 | 175 | filter_type = self._registry_scalar_filters.get(scalar_type) 176 | if not filter_type: 177 | filter_type = FieldFilter.create_type( 178 | f"Default{scalar_type.__name__}ScalarFilter", graphene_type=scalar_type 179 | ) 180 | self._registry_scalar_filters[scalar_type] = filter_type 181 | 182 | return filter_type 183 | 184 | # TODO register enums automatically 185 | def register_filter_for_enum_type( 186 | self, enum_type: Type[graphene.Enum], filter_obj: Type["FieldFilter"] 187 | ): 188 | from .filters import FieldFilter 189 | 190 | if not issubclass(enum_type, graphene.Enum): 191 | raise TypeError("Expected Enum, but got: {!r}".format(enum_type)) 192 | 193 | if not issubclass(filter_obj, FieldFilter): 194 | raise TypeError("Expected FieldFilter, but got: {!r}".format(filter_obj)) 195 | self._registry_scalar_filters[enum_type] = filter_obj 196 | 197 | # Filter Base Types 198 | def register_filter_for_base_type( 199 | self, 200 | base_type: Type[BaseType], 201 | filter_obj: Type["BaseTypeFilter"], 202 | ): 203 | from .filters import BaseTypeFilter 204 | 205 | if not issubclass(base_type, BaseType): 206 | raise TypeError("Expected BaseType, but got: {!r}".format(base_type)) 207 | 208 | if not issubclass(filter_obj, BaseTypeFilter): 209 | raise TypeError("Expected BaseTypeFilter, but got: {!r}".format(filter_obj)) 210 | self._registry_base_type_filters[base_type] = filter_obj 211 | 212 | def get_filter_for_base_type(self, base_type: Type[BaseType]): 213 | return self._registry_base_type_filters.get(base_type) 214 | 215 | # Filter Relationships between base types 216 | def register_relationship_filter_for_base_type( 217 | self, base_type: BaseType, filter_obj: Type["RelationshipFilter"] 218 | ): 219 | from .filters import RelationshipFilter 220 | 221 | if not isinstance(base_type, type(BaseType)): 222 | raise TypeError("Expected BaseType, but got: {!r}".format(base_type)) 223 | 224 | if not issubclass(filter_obj, RelationshipFilter): 225 | raise TypeError( 226 | "Expected RelationshipFilter, but got: {!r}".format(filter_obj) 227 | ) 228 | self._registry_relationship_filters[base_type] = filter_obj 229 | 230 | def get_relationship_filter_for_base_type( 231 | self, base_type: Type[BaseType] 232 | ) -> "RelationshipFilter": 233 | return self._registry_relationship_filters.get(base_type) 234 | 235 | 236 | registry = None 237 | 238 | 239 | def get_global_registry(): 240 | global registry 241 | if not registry: 242 | 
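# The shared module-level registry is created lazily on first use. A minimal
# usage sketch, using only names defined in this module:
#   reg = get_global_registry()      # same Registry instance on every call
#   reset_global_registry()          # drop it (used by tests/conftest.py)
#   assert get_global_registry() is not reg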
registry = Registry() 243 | return registry 244 | 245 | 246 | def reset_global_registry(): 247 | global registry 248 | registry = None 249 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/resolvers.py: -------------------------------------------------------------------------------- 1 | from graphene.utils.get_unbound_function import get_unbound_function 2 | 3 | 4 | def get_custom_resolver(obj_type, orm_field_name): 5 | """ 6 | Since `graphene` will call `resolve_` on a field only if it 7 | does not have a `resolver`, we need to re-implement that logic here so 8 | users are able to override the default resolvers that we provide. 9 | """ 10 | resolver = getattr(obj_type, "resolve_{}".format(orm_field_name), None) 11 | if resolver: 12 | return get_unbound_function(resolver) 13 | 14 | return None 15 | 16 | 17 | def get_attr_resolver(obj_type, model_attr): 18 | """ 19 | In order to support field renaming via `ORMField.model_attr`, 20 | we need to define resolver functions for each field. 21 | 22 | :param SQLAlchemyObjectType obj_type: 23 | :param str model_attr: the name of the SQLAlchemy attribute 24 | :rtype: Callable 25 | """ 26 | return lambda root, _info: getattr(root, model_attr, None) 27 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/graphql-python/graphene-sqlalchemy/4ea6ee819600d65ad784c783a68321105a643d76/graphene_sqlalchemy/tests/__init__.py -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pytest_asyncio 3 | from sqlalchemy import create_engine 4 | from sqlalchemy.orm import sessionmaker 5 | from typing_extensions import Literal 6 | 7 | import graphene 8 | from graphene_sqlalchemy.utils import SQL_VERSION_HIGHER_EQUAL_THAN_1_4 9 | 10 | from ..converter import convert_sqlalchemy_composite 11 | from ..registry import reset_global_registry 12 | from .models import Base, CompositeFullName 13 | 14 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4: 15 | from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine 16 | 17 | 18 | @pytest.fixture(autouse=True) 19 | def reset_registry(): 20 | reset_global_registry() 21 | 22 | # Prevent tests that implicitly depend on Reporter from raising 23 | # Tests that explicitly depend on this behavior should re-register a converter 24 | @convert_sqlalchemy_composite.register(CompositeFullName) 25 | def convert_composite_class(composite, registry): 26 | return graphene.Field(graphene.Int) 27 | 28 | 29 | # make a typed literal for session one is sync and one is async 30 | SESSION_TYPE = Literal["sync", "session_factory"] 31 | 32 | 33 | @pytest.fixture(params=["sync", "async"]) 34 | def session_type(request) -> SESSION_TYPE: 35 | return request.param 36 | 37 | 38 | @pytest.fixture 39 | def async_session(session_type): 40 | return session_type == "async" 41 | 42 | 43 | @pytest.fixture 44 | def test_db_url(session_type: SESSION_TYPE): 45 | if session_type == "async": 46 | return "sqlite+aiosqlite://" 47 | else: 48 | return "sqlite://" 49 | 50 | 51 | @pytest.mark.asyncio 52 | @pytest_asyncio.fixture(scope="function") 53 | async def session_factory(session_type: SESSION_TYPE, test_db_url: str): 54 | if session_type == "async": 55 | if not 
SQL_VERSION_HIGHER_EQUAL_THAN_1_4: 56 | pytest.skip("Async Sessions only work in sql alchemy 1.4 and above") 57 | engine = create_async_engine(test_db_url) 58 | async with engine.begin() as conn: 59 | await conn.run_sync(Base.metadata.create_all) 60 | yield sessionmaker(bind=engine, class_=AsyncSession, expire_on_commit=False) 61 | await engine.dispose() 62 | else: 63 | engine = create_engine(test_db_url) 64 | Base.metadata.create_all(engine) 65 | yield sessionmaker(bind=engine, expire_on_commit=False) 66 | # SQLite in-memory db is deleted when its connection is closed. 67 | # https://www.sqlite.org/inmemorydb.html 68 | engine.dispose() 69 | 70 | 71 | @pytest_asyncio.fixture(scope="function") 72 | async def sync_session_factory(): 73 | engine = create_engine("sqlite://") 74 | Base.metadata.create_all(engine) 75 | yield sessionmaker(bind=engine, expire_on_commit=False) 76 | # SQLite in-memory db is deleted when its connection is closed. 77 | # https://www.sqlite.org/inmemorydb.html 78 | engine.dispose() 79 | 80 | 81 | @pytest_asyncio.fixture(scope="function") 82 | def session(session_factory): 83 | return session_factory() 84 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/models.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import datetime 4 | import enum 5 | import uuid 6 | from decimal import Decimal 7 | from typing import List, Optional 8 | 9 | # fmt: off 10 | from sqlalchemy import ( 11 | Column, 12 | Date, 13 | Enum, 14 | ForeignKey, 15 | Integer, 16 | Numeric, 17 | String, 18 | Table, 19 | func, 20 | ) 21 | from sqlalchemy.ext.associationproxy import association_proxy 22 | from sqlalchemy.ext.declarative import declarative_base 23 | from sqlalchemy.ext.hybrid import hybrid_property 24 | from sqlalchemy.orm import backref, column_property, composite, mapper, relationship 25 | from sqlalchemy.sql.type_api import TypeEngine 26 | 27 | from graphene_sqlalchemy.tests.utils import wrap_select_func 28 | from graphene_sqlalchemy.utils import ( 29 | SQL_VERSION_HIGHER_EQUAL_THAN_1_4, 30 | SQL_VERSION_HIGHER_EQUAL_THAN_2, 31 | ) 32 | 33 | # fmt: off 34 | if SQL_VERSION_HIGHER_EQUAL_THAN_2: 35 | from sqlalchemy.sql.sqltypes import HasExpressionLookup # noqa # isort:skip 36 | else: 37 | from sqlalchemy.sql.sqltypes import _LookupExpressionAdapter as HasExpressionLookup # noqa # isort:skip 38 | # fmt: on 39 | 40 | PetKind = Enum("cat", "dog", name="pet_kind") 41 | 42 | 43 | class HairKind(enum.Enum): 44 | LONG = "long" 45 | SHORT = "short" 46 | 47 | 48 | Base = declarative_base() 49 | 50 | association_table = Table( 51 | "association", 52 | Base.metadata, 53 | Column("pet_id", Integer, ForeignKey("pets.id")), 54 | Column("reporter_id", Integer, ForeignKey("reporters.id")), 55 | ) 56 | 57 | 58 | class Editor(Base): 59 | __tablename__ = "editors" 60 | editor_id = Column(Integer(), primary_key=True) 61 | name = Column(String(100)) 62 | 63 | 64 | class Pet(Base): 65 | __tablename__ = "pets" 66 | id = Column(Integer(), primary_key=True) 67 | name = Column(String(30)) 68 | pet_kind = Column(PetKind, nullable=False) 69 | hair_kind = Column(Enum(HairKind, name="hair_kind"), nullable=False) 70 | reporter_id = Column(Integer(), ForeignKey("reporters.id")) 71 | legs = Column(Integer(), default=4) 72 | 73 | 74 | class CompositeFullName(object): 75 | def __init__(self, first_name, last_name): 76 | self.first_name = first_name 77 | self.last_name = last_name 78 
| 79 | def __composite_values__(self): 80 | return self.first_name, self.last_name 81 | 82 | def __repr__(self): 83 | return "{} {}".format(self.first_name, self.last_name) 84 | 85 | 86 | class ProxiedReporter(Base): 87 | __tablename__ = "reporters_error" 88 | id = Column(Integer(), primary_key=True) 89 | first_name = Column(String(30), doc="First name") 90 | last_name = Column(String(30), doc="Last name") 91 | reporter_id = Column(Integer(), ForeignKey("reporters.id")) 92 | reporter = relationship("Reporter", uselist=False) 93 | 94 | # This is a hybrid property, we don't support proxies on hybrids yet 95 | composite_prop = association_proxy("reporter", "composite_prop") 96 | 97 | 98 | class Reporter(Base): 99 | __tablename__ = "reporters" 100 | 101 | id = Column(Integer(), primary_key=True) 102 | first_name = Column(String(30), doc="First name") 103 | last_name = Column(String(30), doc="Last name") 104 | email = Column(String(), doc="Email") 105 | favorite_pet_kind = Column(PetKind) 106 | pets = relationship( 107 | "Pet", 108 | secondary=association_table, 109 | backref="reporters", 110 | order_by="Pet.id", 111 | lazy="selectin", 112 | ) 113 | articles = relationship( 114 | "Article", backref=backref("reporter", lazy="selectin"), lazy="selectin" 115 | ) 116 | favorite_article = relationship("Article", uselist=False, lazy="selectin") 117 | 118 | @hybrid_property 119 | def hybrid_prop_with_doc(self) -> str: 120 | """Docstring test""" 121 | return self.first_name 122 | 123 | @hybrid_property 124 | def hybrid_prop(self) -> str: 125 | return self.first_name 126 | 127 | @hybrid_property 128 | def hybrid_prop_str(self) -> str: 129 | return self.first_name 130 | 131 | @hybrid_property 132 | def hybrid_prop_int(self) -> int: 133 | return 42 134 | 135 | @hybrid_property 136 | def hybrid_prop_float(self) -> float: 137 | return 42.3 138 | 139 | @hybrid_property 140 | def hybrid_prop_bool(self) -> bool: 141 | return True 142 | 143 | @hybrid_property 144 | def hybrid_prop_list(self) -> List[int]: 145 | return [1, 2, 3] 146 | 147 | column_prop = column_property( 148 | wrap_select_func(func.cast(func.count(id), Integer)), doc="Column property" 149 | ) 150 | 151 | composite_prop = composite( 152 | CompositeFullName, first_name, last_name, doc="Composite" 153 | ) 154 | 155 | headlines = association_proxy("articles", "headline") 156 | 157 | 158 | articles_tags_table = Table( 159 | "articles_tags", 160 | Base.metadata, 161 | Column("article_id", ForeignKey("articles.id")), 162 | Column("tag_id", ForeignKey("tags.id")), 163 | ) 164 | 165 | 166 | class Image(Base): 167 | __tablename__ = "images" 168 | id = Column(Integer(), primary_key=True) 169 | external_id = Column(Integer()) 170 | description = Column(String(30)) 171 | 172 | 173 | class Tag(Base): 174 | __tablename__ = "tags" 175 | id = Column(Integer(), primary_key=True) 176 | name = Column(String(30)) 177 | 178 | 179 | class Article(Base): 180 | __tablename__ = "articles" 181 | id = Column(Integer(), primary_key=True) 182 | headline = Column(String(100)) 183 | pub_date = Column(Date()) 184 | reporter_id = Column(Integer(), ForeignKey("reporters.id")) 185 | readers = relationship( 186 | "Reader", secondary="articles_readers", back_populates="articles" 187 | ) 188 | recommended_reads = association_proxy("reporter", "articles") 189 | 190 | # one-to-one relationship with image 191 | image_id = Column(Integer(), ForeignKey("images.id"), unique=True) 192 | image = relationship("Image", backref=backref("articles", uselist=False)) 193 | 194 | # many-to-many 
relationship with tags 195 | tags = relationship("Tag", secondary=articles_tags_table, backref="articles") 196 | 197 | 198 | class Reader(Base): 199 | __tablename__ = "readers" 200 | id = Column(Integer(), primary_key=True) 201 | name = Column(String(100)) 202 | articles = relationship( 203 | "Article", secondary="articles_readers", back_populates="readers" 204 | ) 205 | 206 | 207 | class ArticleReader(Base): 208 | __tablename__ = "articles_readers" 209 | article_id = Column(Integer(), ForeignKey("articles.id"), primary_key=True) 210 | reader_id = Column(Integer(), ForeignKey("readers.id"), primary_key=True) 211 | 212 | 213 | class ReflectedEditor(type): 214 | """Same as Editor, but using reflected table.""" 215 | 216 | @classmethod 217 | def __subclasses__(cls): 218 | return [] 219 | 220 | 221 | editor_table = Table("editors", Base.metadata, autoload=True) 222 | 223 | # TODO Remove when switching min sqlalchemy version to SQLAlchemy 1.4 224 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4: 225 | Base.registry.map_imperatively(ReflectedEditor, editor_table) 226 | else: 227 | mapper(ReflectedEditor, editor_table) 228 | 229 | 230 | ############################################ 231 | # The models below are mainly used in the 232 | # @hybrid_property type inference scenarios 233 | ############################################ 234 | 235 | 236 | class ShoppingCartItem(Base): 237 | __tablename__ = "shopping_cart_items" 238 | 239 | id = Column(Integer(), primary_key=True) 240 | 241 | @hybrid_property 242 | def hybrid_prop_shopping_cart(self) -> List["ShoppingCart"]: 243 | return [ShoppingCart(id=1)] 244 | 245 | 246 | class ShoppingCart(Base): 247 | __tablename__ = "shopping_carts" 248 | 249 | id = Column(Integer(), primary_key=True) 250 | 251 | # Standard Library types 252 | 253 | @hybrid_property 254 | def hybrid_prop_str(self) -> str: 255 | return self.first_name 256 | 257 | @hybrid_property 258 | def hybrid_prop_int(self) -> int: 259 | return 42 260 | 261 | @hybrid_property 262 | def hybrid_prop_float(self) -> float: 263 | return 42.3 264 | 265 | @hybrid_property 266 | def hybrid_prop_bool(self) -> bool: 267 | return True 268 | 269 | @hybrid_property 270 | def hybrid_prop_decimal(self) -> Decimal: 271 | return Decimal("3.14") 272 | 273 | @hybrid_property 274 | def hybrid_prop_date(self) -> datetime.date: 275 | return datetime.datetime.now().date() 276 | 277 | @hybrid_property 278 | def hybrid_prop_time(self) -> datetime.time: 279 | return datetime.datetime.now().time() 280 | 281 | @hybrid_property 282 | def hybrid_prop_datetime(self) -> datetime.datetime: 283 | return datetime.datetime.now() 284 | 285 | # Lists and Nested Lists 286 | 287 | @hybrid_property 288 | def hybrid_prop_list_int(self) -> List[int]: 289 | return [1, 2, 3] 290 | 291 | @hybrid_property 292 | def hybrid_prop_list_date(self) -> List[datetime.date]: 293 | return [self.hybrid_prop_date, self.hybrid_prop_date, self.hybrid_prop_date] 294 | 295 | @hybrid_property 296 | def hybrid_prop_nested_list_int(self) -> List[List[int]]: 297 | return [ 298 | self.hybrid_prop_list_int, 299 | ] 300 | 301 | @hybrid_property 302 | def hybrid_prop_deeply_nested_list_int(self) -> List[List[List[int]]]: 303 | return [ 304 | [ 305 | self.hybrid_prop_list_int, 306 | ], 307 | ] 308 | 309 | # Other SQLAlchemy Instance 310 | @hybrid_property 311 | def hybrid_prop_first_shopping_cart_item(self) -> ShoppingCartItem: 312 | return ShoppingCartItem(id=1) 313 | 314 | # Other SQLAlchemy Instance with expression 315 | @hybrid_property 316 | def 
hybrid_prop_first_shopping_cart_item_expression(self) -> ShoppingCartItem: 317 | return ShoppingCartItem(id=1) 318 | 319 | @hybrid_prop_first_shopping_cart_item_expression.expression 320 | def hybrid_prop_first_shopping_cart_item_expression(cls): 321 | return ShoppingCartItem 322 | 323 | # Other SQLAlchemy Instances 324 | @hybrid_property 325 | def hybrid_prop_shopping_cart_item_list(self) -> List[ShoppingCartItem]: 326 | return [ShoppingCartItem(id=1), ShoppingCartItem(id=2)] 327 | 328 | # Self-references 329 | 330 | @hybrid_property 331 | def hybrid_prop_self_referential(self) -> "ShoppingCart": 332 | return ShoppingCart(id=1) 333 | 334 | @hybrid_property 335 | def hybrid_prop_self_referential_list(self) -> List["ShoppingCart"]: 336 | return [ShoppingCart(id=1)] 337 | 338 | # Optional[T] 339 | 340 | @hybrid_property 341 | def hybrid_prop_optional_self_referential(self) -> Optional["ShoppingCart"]: 342 | return None 343 | 344 | # UUIDS 345 | @hybrid_property 346 | def hybrid_prop_uuid(self) -> uuid.UUID: 347 | return uuid.uuid4() 348 | 349 | @hybrid_property 350 | def hybrid_prop_uuid_list(self) -> List[uuid.UUID]: 351 | return [ 352 | uuid.uuid4(), 353 | ] 354 | 355 | @hybrid_property 356 | def hybrid_prop_optional_uuid(self) -> Optional[uuid.UUID]: 357 | return None 358 | 359 | 360 | class KeyedModel(Base): 361 | __tablename__ = "test330" 362 | id = Column(Integer(), primary_key=True) 363 | reporter_number = Column("% reporter_number", Numeric, key="reporter_number") 364 | 365 | 366 | ############################################ 367 | # For interfaces 368 | ############################################ 369 | 370 | 371 | class Person(Base): 372 | id = Column(Integer(), primary_key=True) 373 | type = Column(String()) 374 | name = Column(String()) 375 | birth_date = Column(Date()) 376 | 377 | __tablename__ = "person" 378 | __mapper_args__ = { 379 | "polymorphic_on": type, 380 | "with_polymorphic": "*", # needed for eager loading in async session 381 | } 382 | 383 | 384 | class NonAbstractPerson(Base): 385 | id = Column(Integer(), primary_key=True) 386 | type = Column(String()) 387 | name = Column(String()) 388 | birth_date = Column(Date()) 389 | 390 | __tablename__ = "non_abstract_person" 391 | __mapper_args__ = { 392 | "polymorphic_on": type, 393 | "polymorphic_identity": "person", 394 | } 395 | 396 | 397 | class Employee(Person): 398 | hire_date = Column(Date()) 399 | 400 | __mapper_args__ = { 401 | "polymorphic_identity": "employee", 402 | } 403 | 404 | 405 | ############################################ 406 | # Custom Test Models 407 | ############################################ 408 | 409 | 410 | class CustomIntegerColumn(HasExpressionLookup, TypeEngine): 411 | """ 412 | Custom Column Type that our converters don't recognize 413 | Adapted from sqlalchemy.Integer 414 | """ 415 | 416 | """A type for ``int`` integers.""" 417 | 418 | __visit_name__ = "integer" 419 | 420 | def get_dbapi_type(self, dbapi): 421 | return dbapi.NUMBER 422 | 423 | @property 424 | def python_type(self): 425 | return int 426 | 427 | def literal_processor(self, dialect): 428 | def process(value): 429 | return str(int(value)) 430 | 431 | return process 432 | 433 | 434 | class CustomColumnModel(Base): 435 | __tablename__ = "customcolumnmodel" 436 | 437 | id = Column(Integer(), primary_key=True) 438 | custom_col = Column(CustomIntegerColumn) 439 | 440 | 441 | class CompositePrimaryKeyTestModel(Base): 442 | __tablename__ = "compositekeytestmodel" 443 | 444 | first_name = Column(String(30), primary_key=True) 445 | 
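# Together with last_name below, this forms a composite (two-column) primary
# key, exercising code paths that must cope with more than one primary-key
# column (e.g. the default sort order).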
last_name = Column(String(30), primary_key=True) 446 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/models_batching.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import enum 4 | 5 | from sqlalchemy import Column, Date, Enum, ForeignKey, Integer, String, Table, func 6 | from sqlalchemy.ext.declarative import declarative_base 7 | from sqlalchemy.orm import column_property, relationship 8 | 9 | from graphene_sqlalchemy.tests.utils import wrap_select_func 10 | 11 | PetKind = Enum("cat", "dog", name="pet_kind") 12 | 13 | 14 | class HairKind(enum.Enum): 15 | LONG = "long" 16 | SHORT = "short" 17 | 18 | 19 | Base = declarative_base() 20 | 21 | association_table = Table( 22 | "association", 23 | Base.metadata, 24 | Column("pet_id", Integer, ForeignKey("pets.id")), 25 | Column("reporter_id", Integer, ForeignKey("reporters.id")), 26 | ) 27 | 28 | 29 | class Pet(Base): 30 | __tablename__ = "pets" 31 | id = Column(Integer(), primary_key=True) 32 | name = Column(String(30)) 33 | pet_kind = Column(PetKind, nullable=False) 34 | hair_kind = Column(Enum(HairKind, name="hair_kind"), nullable=False) 35 | reporter_id = Column(Integer(), ForeignKey("reporters.id")) 36 | 37 | 38 | class Reporter(Base): 39 | __tablename__ = "reporters" 40 | 41 | id = Column(Integer(), primary_key=True) 42 | first_name = Column(String(30), doc="First name") 43 | last_name = Column(String(30), doc="Last name") 44 | email = Column(String(), doc="Email") 45 | favorite_pet_kind = Column(PetKind) 46 | pets = relationship( 47 | "Pet", 48 | secondary=association_table, 49 | backref="reporters", 50 | order_by="Pet.id", 51 | ) 52 | articles = relationship("Article", backref="reporter") 53 | favorite_article = relationship("Article", uselist=False) 54 | 55 | column_prop = column_property( 56 | wrap_select_func(func.cast(func.count(id), Integer)), doc="Column property" 57 | ) 58 | 59 | 60 | class Article(Base): 61 | __tablename__ = "articles" 62 | id = Column(Integer(), primary_key=True) 63 | headline = Column(String(100)) 64 | pub_date = Column(Date()) 65 | reporter_id = Column(Integer(), ForeignKey("reporters.id")) 66 | readers = relationship( 67 | "Reader", secondary="articles_readers", back_populates="articles" 68 | ) 69 | 70 | 71 | class Reader(Base): 72 | __tablename__ = "readers" 73 | id = Column(Integer(), primary_key=True) 74 | name = Column(String(100)) 75 | articles = relationship( 76 | "Article", secondary="articles_readers", back_populates="readers" 77 | ) 78 | 79 | 80 | class ArticleReader(Base): 81 | __tablename__ = "articles_readers" 82 | article_id = Column(Integer(), ForeignKey("articles.id"), primary_key=True) 83 | reader_id = Column(Integer(), ForeignKey("readers.id"), primary_key=True) 84 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/test_benchmark.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import pytest 4 | from sqlalchemy import select 5 | 6 | import graphene 7 | from graphene import relay 8 | 9 | from ..types import SQLAlchemyObjectType 10 | from ..utils import ( 11 | SQL_VERSION_HIGHER_EQUAL_THAN_1_4, 12 | get_session, 13 | is_sqlalchemy_version_less_than, 14 | ) 15 | from .models import Article, HairKind, Pet, Reporter 16 | from .utils import eventually_await_session 17 | 18 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4: 19 | from 
sqlalchemy.ext.asyncio import AsyncSession 20 | if is_sqlalchemy_version_less_than("1.2"): 21 | pytest.skip("SQL batching only works for SQLAlchemy 1.2+", allow_module_level=True) 22 | 23 | 24 | def get_async_schema(): 25 | class ReporterType(SQLAlchemyObjectType): 26 | class Meta: 27 | model = Reporter 28 | interfaces = (relay.Node,) 29 | 30 | class ArticleType(SQLAlchemyObjectType): 31 | class Meta: 32 | model = Article 33 | interfaces = (relay.Node,) 34 | 35 | class PetType(SQLAlchemyObjectType): 36 | class Meta: 37 | model = Pet 38 | interfaces = (relay.Node,) 39 | 40 | class Query(graphene.ObjectType): 41 | articles = graphene.Field(graphene.List(ArticleType)) 42 | reporters = graphene.Field(graphene.List(ReporterType)) 43 | 44 | async def resolve_articles(self, info): 45 | session = get_session(info.context) 46 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 47 | return (await session.scalars(select(Article))).all() 48 | return session.query(Article).all() 49 | 50 | async def resolve_reporters(self, info): 51 | session = get_session(info.context) 52 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 53 | return (await session.scalars(select(Reporter))).all() 54 | return session.query(Reporter).all() 55 | 56 | return graphene.Schema(query=Query) 57 | 58 | 59 | def get_schema(): 60 | class ReporterType(SQLAlchemyObjectType): 61 | class Meta: 62 | model = Reporter 63 | interfaces = (relay.Node,) 64 | 65 | class ArticleType(SQLAlchemyObjectType): 66 | class Meta: 67 | model = Article 68 | interfaces = (relay.Node,) 69 | 70 | class PetType(SQLAlchemyObjectType): 71 | class Meta: 72 | model = Pet 73 | interfaces = (relay.Node,) 74 | 75 | class Query(graphene.ObjectType): 76 | articles = graphene.Field(graphene.List(ArticleType)) 77 | reporters = graphene.Field(graphene.List(ReporterType)) 78 | 79 | def resolve_articles(self, info): 80 | return info.context.get("session").query(Article).all() 81 | 82 | def resolve_reporters(self, info): 83 | return info.context.get("session").query(Reporter).all() 84 | 85 | return graphene.Schema(query=Query) 86 | 87 | 88 | async def benchmark_query(session, benchmark, schema, query): 89 | import nest_asyncio 90 | 91 | nest_asyncio.apply() 92 | loop = asyncio.get_event_loop() 93 | result = benchmark( 94 | lambda: loop.run_until_complete( 95 | schema.execute_async(query, context_value={"session": session}) 96 | ) 97 | ) 98 | assert not result.errors 99 | 100 | 101 | @pytest.fixture(params=[get_schema, get_async_schema]) 102 | def schema_provider(request, async_session): 103 | if async_session and request.param == get_schema: 104 | pytest.skip("Cannot test sync schema with async sessions") 105 | return request.param 106 | 107 | 108 | @pytest.mark.asyncio 109 | async def test_one_to_one(session_factory, benchmark, schema_provider): 110 | session = session_factory() 111 | schema = schema_provider() 112 | 113 | reporter_1 = Reporter( 114 | first_name="Reporter_1", 115 | ) 116 | session.add(reporter_1) 117 | reporter_2 = Reporter( 118 | first_name="Reporter_2", 119 | ) 120 | session.add(reporter_2) 121 | 122 | article_1 = Article(headline="Article_1") 123 | article_1.reporter = reporter_1 124 | session.add(article_1) 125 | 126 | article_2 = Article(headline="Article_2") 127 | article_2.reporter = reporter_2 128 | session.add(article_2) 129 | 130 | await eventually_await_session(session, "commit") 131 | await eventually_await_session(session, "close") 132 | 133 | await benchmark_query( 134 | session, 135 | 
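# pytest-benchmark times the whole GraphQL execution below; the Reporter
# relationships in tests/models.py use lazy="selectin", so favoriteArticle is
# eager-loaded rather than queried once per reporter.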
benchmark, 136 | schema, 137 | """ 138 | query { 139 | reporters { 140 | firstName 141 | favoriteArticle { 142 | headline 143 | } 144 | } 145 | } 146 | """, 147 | ) 148 | 149 | 150 | @pytest.mark.asyncio 151 | async def test_many_to_one(session_factory, benchmark, schema_provider): 152 | session = session_factory() 153 | schema = schema_provider() 154 | reporter_1 = Reporter( 155 | first_name="Reporter_1", 156 | ) 157 | session.add(reporter_1) 158 | reporter_2 = Reporter( 159 | first_name="Reporter_2", 160 | ) 161 | session.add(reporter_2) 162 | 163 | article_1 = Article(headline="Article_1") 164 | article_1.reporter = reporter_1 165 | session.add(article_1) 166 | 167 | article_2 = Article(headline="Article_2") 168 | article_2.reporter = reporter_2 169 | session.add(article_2) 170 | await eventually_await_session(session, "flush") 171 | await eventually_await_session(session, "commit") 172 | await eventually_await_session(session, "close") 173 | 174 | await benchmark_query( 175 | session, 176 | benchmark, 177 | schema, 178 | """ 179 | query { 180 | articles { 181 | headline 182 | reporter { 183 | firstName 184 | } 185 | } 186 | } 187 | """, 188 | ) 189 | 190 | 191 | @pytest.mark.asyncio 192 | async def test_one_to_many(session_factory, benchmark, schema_provider): 193 | session = session_factory() 194 | schema = schema_provider() 195 | 196 | reporter_1 = Reporter( 197 | first_name="Reporter_1", 198 | ) 199 | session.add(reporter_1) 200 | reporter_2 = Reporter( 201 | first_name="Reporter_2", 202 | ) 203 | session.add(reporter_2) 204 | 205 | article_1 = Article(headline="Article_1") 206 | article_1.reporter = reporter_1 207 | session.add(article_1) 208 | 209 | article_2 = Article(headline="Article_2") 210 | article_2.reporter = reporter_1 211 | session.add(article_2) 212 | 213 | article_3 = Article(headline="Article_3") 214 | article_3.reporter = reporter_2 215 | session.add(article_3) 216 | 217 | article_4 = Article(headline="Article_4") 218 | article_4.reporter = reporter_2 219 | session.add(article_4) 220 | 221 | await eventually_await_session(session, "commit") 222 | await eventually_await_session(session, "close") 223 | 224 | await benchmark_query( 225 | session, 226 | benchmark, 227 | schema, 228 | """ 229 | query { 230 | reporters { 231 | firstName 232 | articles(first: 2) { 233 | edges { 234 | node { 235 | headline 236 | } 237 | } 238 | } 239 | } 240 | } 241 | """, 242 | ) 243 | 244 | 245 | @pytest.mark.asyncio 246 | async def test_many_to_many(session_factory, benchmark, schema_provider): 247 | session = session_factory() 248 | schema = schema_provider() 249 | reporter_1 = Reporter( 250 | first_name="Reporter_1", 251 | ) 252 | session.add(reporter_1) 253 | reporter_2 = Reporter( 254 | first_name="Reporter_2", 255 | ) 256 | session.add(reporter_2) 257 | 258 | pet_1 = Pet(name="Pet_1", pet_kind="cat", hair_kind=HairKind.LONG) 259 | session.add(pet_1) 260 | 261 | pet_2 = Pet(name="Pet_2", pet_kind="cat", hair_kind=HairKind.LONG) 262 | session.add(pet_2) 263 | 264 | reporter_1.pets.append(pet_1) 265 | reporter_1.pets.append(pet_2) 266 | 267 | pet_3 = Pet(name="Pet_3", pet_kind="cat", hair_kind=HairKind.LONG) 268 | session.add(pet_3) 269 | 270 | pet_4 = Pet(name="Pet_4", pet_kind="cat", hair_kind=HairKind.LONG) 271 | session.add(pet_4) 272 | 273 | reporter_2.pets.append(pet_3) 274 | reporter_2.pets.append(pet_4) 275 | 276 | await eventually_await_session(session, "commit") 277 | await eventually_await_session(session, "close") 278 | 279 | await benchmark_query( 280 | session, 281 | 
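# This benchmark also paginates the many-to-many "pets" relationship
# (first: 2), so relay connection slicing is included in the measured time.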
benchmark, 282 | schema, 283 | """ 284 | query { 285 | reporters { 286 | firstName 287 | pets(first: 2) { 288 | edges { 289 | node { 290 | name 291 | } 292 | } 293 | } 294 | } 295 | } 296 | """, 297 | ) 298 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/test_enums.py: -------------------------------------------------------------------------------- 1 | from enum import Enum as PyEnum 2 | 3 | import pytest 4 | from sqlalchemy.types import Enum as SQLAlchemyEnumType 5 | 6 | from graphene import Enum 7 | 8 | from ..enums import _convert_sa_to_graphene_enum, enum_for_field 9 | from ..types import SQLAlchemyObjectType 10 | from .models import HairKind, Pet 11 | 12 | 13 | def test_convert_sa_to_graphene_enum_bad_type(): 14 | re_err = "Expected sqlalchemy.types.Enum, but got: 'foo'" 15 | with pytest.raises(TypeError, match=re_err): 16 | _convert_sa_to_graphene_enum("foo") 17 | 18 | 19 | def test_convert_sa_to_graphene_enum_based_on_py_enum(): 20 | class Color(PyEnum): 21 | RED = 1 22 | GREEN = 2 23 | BLUE = 3 24 | 25 | sa_enum = SQLAlchemyEnumType(Color) 26 | graphene_enum = _convert_sa_to_graphene_enum(sa_enum, "FallbackName") 27 | assert isinstance(graphene_enum, type(Enum)) 28 | assert graphene_enum._meta.name == "Color" 29 | assert graphene_enum._meta.enum is Color 30 | 31 | 32 | def test_convert_sa_to_graphene_enum_based_on_py_enum_with_bad_names(): 33 | class Color(PyEnum): 34 | red = 1 35 | green = 2 36 | blue = 3 37 | 38 | sa_enum = SQLAlchemyEnumType(Color) 39 | graphene_enum = _convert_sa_to_graphene_enum(sa_enum, "FallbackName") 40 | assert isinstance(graphene_enum, type(Enum)) 41 | assert graphene_enum._meta.name == "Color" 42 | assert graphene_enum._meta.enum is not Color 43 | assert [ 44 | (key, value.value) 45 | for key, value in graphene_enum._meta.enum.__members__.items() 46 | ] == [("RED", 1), ("GREEN", 2), ("BLUE", 3)] 47 | 48 | 49 | def test_convert_sa_enum_to_graphene_enum_based_on_list_named(): 50 | sa_enum = SQLAlchemyEnumType("red", "green", "blue", name="color_values") 51 | graphene_enum = _convert_sa_to_graphene_enum(sa_enum, "FallbackName") 52 | assert isinstance(graphene_enum, type(Enum)) 53 | assert graphene_enum._meta.name == "ColorValues" 54 | assert [ 55 | (key, value.value) 56 | for key, value in graphene_enum._meta.enum.__members__.items() 57 | ] == [("RED", "red"), ("GREEN", "green"), ("BLUE", "blue")] 58 | 59 | 60 | def test_convert_sa_enum_to_graphene_enum_based_on_list_unnamed(): 61 | sa_enum = SQLAlchemyEnumType("red", "green", "blue") 62 | graphene_enum = _convert_sa_to_graphene_enum(sa_enum, "FallbackName") 63 | assert isinstance(graphene_enum, type(Enum)) 64 | assert graphene_enum._meta.name == "FallbackName" 65 | assert [ 66 | (key, value.value) 67 | for key, value in graphene_enum._meta.enum.__members__.items() 68 | ] == [("RED", "red"), ("GREEN", "green"), ("BLUE", "blue")] 69 | 70 | 71 | def test_convert_sa_enum_to_graphene_enum_based_on_list_without_name(): 72 | sa_enum = SQLAlchemyEnumType("red", "green", "blue") 73 | re_err = r"No type name specified for Enum\('red', 'green', 'blue'\)" 74 | with pytest.raises(TypeError, match=re_err): 75 | _convert_sa_to_graphene_enum(sa_enum) 76 | 77 | 78 | def test_enum_for_field(): 79 | class PetType(SQLAlchemyObjectType): 80 | class Meta: 81 | model = Pet 82 | 83 | enum = enum_for_field(PetType, "pet_kind") 84 | assert isinstance(enum, type(Enum)) 85 | assert enum._meta.name == "PetKind" 86 | assert [ 87 | (key, value.value) for key, value in 
enum._meta.enum.__members__.items() 88 | ] == [ 89 | ("CAT", "cat"), 90 | ("DOG", "dog"), 91 | ] 92 | enum2 = enum_for_field(PetType, "pet_kind") 93 | assert enum2 is enum 94 | enum2 = PetType.enum_for_field("pet_kind") 95 | assert enum2 is enum 96 | 97 | enum = enum_for_field(PetType, "hair_kind") 98 | assert isinstance(enum, type(Enum)) 99 | assert enum._meta.name == "HairKind" 100 | assert enum._meta.enum is HairKind 101 | enum2 = PetType.enum_for_field("hair_kind") 102 | assert enum2 is enum 103 | 104 | re_err = r"Cannot get PetType\.other_kind" 105 | with pytest.raises(TypeError, match=re_err): 106 | enum_for_field(PetType, "other_kind") 107 | with pytest.raises(TypeError, match=re_err): 108 | PetType.enum_for_field("other_kind") 109 | 110 | re_err = r"PetType\.name does not map to enum column" 111 | with pytest.raises(TypeError, match=re_err): 112 | enum_for_field(PetType, "name") 113 | with pytest.raises(TypeError, match=re_err): 114 | PetType.enum_for_field("name") 115 | 116 | re_err = r"Expected a field name, but got: None" 117 | with pytest.raises(TypeError, match=re_err): 118 | enum_for_field(PetType, None) 119 | with pytest.raises(TypeError, match=re_err): 120 | PetType.enum_for_field(None) 121 | 122 | re_err = "Expected SQLAlchemyObjectType, but got: None" 123 | with pytest.raises(TypeError, match=re_err): 124 | enum_for_field(None, "other_kind") 125 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/test_fields.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from promise import Promise 3 | 4 | from graphene import NonNull, ObjectType 5 | from graphene.relay import Connection, Node 6 | 7 | from ..fields import SQLAlchemyConnectionField, UnsortedSQLAlchemyConnectionField 8 | from ..types import SQLAlchemyObjectType 9 | from .models import Editor as EditorModel 10 | from .models import Pet as PetModel 11 | 12 | 13 | class Pet(SQLAlchemyObjectType): 14 | class Meta: 15 | model = PetModel 16 | interfaces = (Node,) 17 | 18 | 19 | class Editor(SQLAlchemyObjectType): 20 | class Meta: 21 | model = EditorModel 22 | 23 | 24 | ## 25 | # SQLAlchemyConnectionField 26 | ## 27 | 28 | 29 | def test_nonnull_sqlalachemy_connection(): 30 | field = SQLAlchemyConnectionField(NonNull(Pet.connection)) 31 | assert isinstance(field.type, NonNull) 32 | assert issubclass(field.type.of_type, Connection) 33 | assert field.type.of_type._meta.node is Pet 34 | 35 | 36 | def test_required_sqlalachemy_connection(): 37 | field = SQLAlchemyConnectionField(Pet.connection, required=True) 38 | assert isinstance(field.type, NonNull) 39 | assert issubclass(field.type.of_type, Connection) 40 | assert field.type.of_type._meta.node is Pet 41 | 42 | 43 | def test_promise_connection_resolver(): 44 | def resolver(_obj, _info): 45 | return Promise.resolve([]) 46 | 47 | result = UnsortedSQLAlchemyConnectionField.connection_resolver( 48 | resolver, Pet.connection, Pet, None, None 49 | ) 50 | assert isinstance(result, Promise) 51 | 52 | 53 | def test_type_assert_sqlalchemy_object_type(): 54 | with pytest.raises(AssertionError, match="only accepts SQLAlchemyObjectType"): 55 | SQLAlchemyConnectionField(ObjectType).type 56 | 57 | 58 | def test_type_assert_object_has_connection(): 59 | with pytest.raises(AssertionError, match="doesn't have a connection"): 60 | SQLAlchemyConnectionField(Editor).type 61 | 62 | 63 | ## 64 | # UnsortedSQLAlchemyConnectionField 65 | ## 66 | 67 | 68 | def 
test_unsorted_connection_field_removes_sort_arg_if_passed(): 69 | editor = UnsortedSQLAlchemyConnectionField( 70 | Editor.connection, sort=Editor.sort_argument(has_default=True) 71 | ) 72 | assert "sort" not in editor.args 73 | 74 | 75 | def test_sort_added_by_default(): 76 | field = SQLAlchemyConnectionField(Pet.connection) 77 | assert "sort" in field.args 78 | assert field.args["sort"] == Pet.sort_argument() 79 | 80 | 81 | def test_sort_can_be_removed(): 82 | field = SQLAlchemyConnectionField(Pet.connection, sort=None) 83 | assert "sort" not in field.args 84 | 85 | 86 | def test_custom_sort(): 87 | field = SQLAlchemyConnectionField(Pet.connection, sort=Editor.sort_argument()) 88 | assert field.args["sort"] == Editor.sort_argument() 89 | 90 | 91 | def test_sort_init_raises(): 92 | with pytest.raises(TypeError, match="Cannot create sort"): 93 | SQLAlchemyConnectionField(Connection) 94 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/test_query_enums.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from sqlalchemy import select 3 | 4 | import graphene 5 | from graphene_sqlalchemy.tests.utils import eventually_await_session 6 | from graphene_sqlalchemy.utils import SQL_VERSION_HIGHER_EQUAL_THAN_1_4, get_session 7 | 8 | from ..types import SQLAlchemyObjectType 9 | from .models import HairKind, Pet, Reporter 10 | from .test_query import add_test_data, to_std_dicts 11 | 12 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4: 13 | from sqlalchemy.ext.asyncio import AsyncSession 14 | 15 | 16 | @pytest.mark.asyncio 17 | async def test_query_pet_kinds(session, session_factory): 18 | await add_test_data(session) 19 | await eventually_await_session(session, "close") 20 | 21 | class PetType(SQLAlchemyObjectType): 22 | class Meta: 23 | model = Pet 24 | 25 | class ReporterType(SQLAlchemyObjectType): 26 | class Meta: 27 | model = Reporter 28 | 29 | class Query(graphene.ObjectType): 30 | reporter = graphene.Field(ReporterType) 31 | reporters = graphene.List(ReporterType) 32 | pets = graphene.List( 33 | PetType, kind=graphene.Argument(PetType.enum_for_field("pet_kind")) 34 | ) 35 | 36 | async def resolve_reporter(self, _info): 37 | session = get_session(_info.context) 38 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 39 | return (await session.scalars(select(Reporter))).unique().first() 40 | return session.query(Reporter).first() 41 | 42 | async def resolve_reporters(self, _info): 43 | session = get_session(_info.context) 44 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 45 | return (await session.scalars(select(Reporter))).unique().all() 46 | return session.query(Reporter) 47 | 48 | async def resolve_pets(self, _info, kind): 49 | session = get_session(_info.context) 50 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 51 | query = select(Pet) 52 | if kind: 53 | query = query.filter(Pet.pet_kind == kind.value) 54 | return (await session.scalars(query)).unique().all() 55 | query = session.query(Pet) 56 | if kind: 57 | query = query.filter_by(pet_kind=kind.value) 58 | return query 59 | 60 | query = """ 61 | query ReporterQuery { 62 | reporter { 63 | firstName 64 | lastName 65 | email 66 | favoritePetKind 67 | pets { 68 | name 69 | petKind 70 | } 71 | } 72 | reporters { 73 | firstName 74 | favoritePetKind 75 | } 76 | pets(kind: DOG) { 77 | name 78 | petKind 79 | } 80 | } 81 | """ 82 | expected = { 83 | "reporter": { 
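# Enum-backed columns serialize as GraphQL enum names ("CAT", "DOG"), not as
# the stored database values ("cat", "dog"); see test_enums.py for how the
# names are derived.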
84 | "firstName": "John", 85 | "lastName": "Doe", 86 | "email": None, 87 | "favoritePetKind": "CAT", 88 | "pets": [{"name": "Garfield", "petKind": "CAT"}], 89 | }, 90 | "reporters": [ 91 | { 92 | "firstName": "John", 93 | "favoritePetKind": "CAT", 94 | }, 95 | { 96 | "firstName": "Jane", 97 | "favoritePetKind": "DOG", 98 | }, 99 | ], 100 | "pets": [{"name": "Lassie", "petKind": "DOG"}], 101 | } 102 | schema = graphene.Schema(query=Query) 103 | result = await schema.execute_async( 104 | query, context_value={"session": session_factory()} 105 | ) 106 | assert not result.errors 107 | assert result.data == expected 108 | 109 | 110 | @pytest.mark.asyncio 111 | async def test_query_more_enums(session): 112 | await add_test_data(session) 113 | 114 | class PetType(SQLAlchemyObjectType): 115 | class Meta: 116 | model = Pet 117 | 118 | class Query(graphene.ObjectType): 119 | pet = graphene.Field(PetType) 120 | 121 | async def resolve_pet(self, _info): 122 | session = get_session(_info.context) 123 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 124 | return (await session.scalars(select(Pet))).first() 125 | return session.query(Pet).first() 126 | 127 | query = """ 128 | query PetQuery { 129 | pet { 130 | name, 131 | petKind 132 | hairKind 133 | } 134 | } 135 | """ 136 | expected = {"pet": {"name": "Garfield", "petKind": "CAT", "hairKind": "SHORT"}} 137 | schema = graphene.Schema(query=Query) 138 | result = await schema.execute_async(query, context_value={"session": session}) 139 | assert not result.errors 140 | result = to_std_dicts(result.data) 141 | assert result == expected 142 | 143 | 144 | @pytest.mark.asyncio 145 | async def test_enum_as_argument(session): 146 | await add_test_data(session) 147 | 148 | class PetType(SQLAlchemyObjectType): 149 | class Meta: 150 | model = Pet 151 | 152 | class Query(graphene.ObjectType): 153 | pet = graphene.Field( 154 | PetType, kind=graphene.Argument(PetType.enum_for_field("pet_kind")) 155 | ) 156 | 157 | async def resolve_pet(self, info, kind=None): 158 | session = get_session(info.context) 159 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 160 | query = select(Pet) 161 | if kind: 162 | query = query.filter(Pet.pet_kind == kind.value) 163 | return (await session.scalars(query)).first() 164 | query = session.query(Pet) 165 | if kind: 166 | query = query.filter(Pet.pet_kind == kind.value) 167 | return query.first() 168 | 169 | query = """ 170 | query PetQuery($kind: PetKind) { 171 | pet(kind: $kind) { 172 | name, 173 | petKind 174 | hairKind 175 | } 176 | } 177 | """ 178 | 179 | schema = graphene.Schema(query=Query) 180 | result = await schema.execute_async( 181 | query, variables={"kind": "CAT"}, context_value={"session": session} 182 | ) 183 | assert not result.errors 184 | expected = {"pet": {"name": "Garfield", "petKind": "CAT", "hairKind": "SHORT"}} 185 | assert result.data == expected 186 | result = await schema.execute_async( 187 | query, variables={"kind": "DOG"}, context_value={"session": session} 188 | ) 189 | assert not result.errors 190 | expected = {"pet": {"name": "Lassie", "petKind": "DOG", "hairKind": "LONG"}} 191 | result = to_std_dicts(result.data) 192 | assert result == expected 193 | 194 | 195 | @pytest.mark.asyncio 196 | async def test_py_enum_as_argument(session): 197 | await add_test_data(session) 198 | 199 | class PetType(SQLAlchemyObjectType): 200 | class Meta: 201 | model = Pet 202 | 203 | class Query(graphene.ObjectType): 204 | pet = graphene.Field( 205 | PetType, 206 | 
kind=graphene.Argument(PetType._meta.fields["hair_kind"].type.of_type), 207 | ) 208 | 209 | async def resolve_pet(self, _info, kind=None): 210 | session = get_session(_info.context) 211 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 212 | return ( 213 | await session.scalars( 214 | select(Pet).filter(Pet.hair_kind == HairKind(kind)) 215 | ) 216 | ).first() 217 | query = session.query(Pet) 218 | if kind: 219 | # enum arguments are expected to be strings, not PyEnums 220 | query = query.filter(Pet.hair_kind == HairKind(kind)) 221 | return query.first() 222 | 223 | query = """ 224 | query PetQuery($kind: HairKind) { 225 | pet(kind: $kind) { 226 | name, 227 | petKind 228 | hairKind 229 | } 230 | } 231 | """ 232 | 233 | schema = graphene.Schema(query=Query) 234 | result = await schema.execute_async( 235 | query, variables={"kind": "SHORT"}, context_value={"session": session} 236 | ) 237 | assert not result.errors 238 | expected = {"pet": {"name": "Garfield", "petKind": "CAT", "hairKind": "SHORT"}} 239 | assert result.data == expected 240 | result = await schema.execute_async( 241 | query, variables={"kind": "LONG"}, context_value={"session": session} 242 | ) 243 | assert not result.errors 244 | expected = {"pet": {"name": "Lassie", "petKind": "DOG", "hairKind": "LONG"}} 245 | result = to_std_dicts(result.data) 246 | assert result == expected 247 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/test_reflected.py: -------------------------------------------------------------------------------- 1 | from graphene import ObjectType 2 | 3 | from ..registry import Registry 4 | from ..types import SQLAlchemyObjectType 5 | from .models import ReflectedEditor 6 | 7 | registry = Registry() 8 | 9 | 10 | class Reflected(SQLAlchemyObjectType): 11 | class Meta: 12 | model = ReflectedEditor 13 | registry = registry 14 | 15 | 16 | def test_objecttype_registered(): 17 | assert issubclass(Reflected, ObjectType) 18 | assert Reflected._meta.model == ReflectedEditor 19 | assert list(Reflected._meta.fields.keys()) == ["editor_id", "name"] 20 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/test_registry.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from sqlalchemy.types import Enum as SQLAlchemyEnum 3 | 4 | import graphene 5 | from graphene import Enum as GrapheneEnum 6 | 7 | from ..registry import Registry 8 | from ..types import SQLAlchemyObjectType 9 | from ..utils import EnumValue 10 | from .models import Pet, Reporter 11 | 12 | 13 | def test_register_object_type(): 14 | reg = Registry() 15 | 16 | class PetType(SQLAlchemyObjectType): 17 | class Meta: 18 | model = Pet 19 | registry = reg 20 | 21 | reg.register(PetType) 22 | assert reg.get_type_for_model(Pet) is PetType 23 | 24 | 25 | def test_register_incorrect_object_type(): 26 | reg = Registry() 27 | 28 | class Spam: 29 | pass 30 | 31 | re_err = "Expected SQLAlchemyBase, but got: .*Spam" 32 | with pytest.raises(TypeError, match=re_err): 33 | reg.register(Spam) 34 | 35 | 36 | def test_register_orm_field(): 37 | reg = Registry() 38 | 39 | class PetType(SQLAlchemyObjectType): 40 | class Meta: 41 | model = Pet 42 | registry = reg 43 | 44 | reg.register_orm_field(PetType, "name", Pet.name) 45 | assert reg.get_orm_field_for_graphene_field(PetType, "name") is Pet.name 46 | 47 | 48 | def test_register_orm_field_incorrect_types(): 49 | reg = Registry() 50 | 51 
| class Spam: 52 | pass 53 | 54 | re_err = "Expected SQLAlchemyBase, but got: .*Spam" 55 | with pytest.raises(TypeError, match=re_err): 56 | reg.register_orm_field(Spam, "name", Pet.name) 57 | 58 | class PetType(SQLAlchemyObjectType): 59 | class Meta: 60 | model = Pet 61 | registry = reg 62 | 63 | re_err = "Expected a field name, but got: .*Spam" 64 | with pytest.raises(TypeError, match=re_err): 65 | reg.register_orm_field(PetType, Spam, Pet.name) 66 | 67 | 68 | def test_register_enum(): 69 | reg = Registry() 70 | 71 | sa_enum = SQLAlchemyEnum("cat", "dog") 72 | graphene_enum = GrapheneEnum("PetKind", [("CAT", 1), ("DOG", 2)]) 73 | 74 | reg.register_enum(sa_enum, graphene_enum) 75 | assert reg.get_graphene_enum_for_sa_enum(sa_enum) is graphene_enum 76 | 77 | 78 | def test_register_enum_incorrect_types(): 79 | reg = Registry() 80 | 81 | sa_enum = SQLAlchemyEnum("cat", "dog") 82 | graphene_enum = GrapheneEnum("PetKind", [("CAT", 1), ("DOG", 2)]) 83 | 84 | re_err = r"Expected Graphene Enum, but got: Enum\('cat', 'dog'\)" 85 | with pytest.raises(TypeError, match=re_err): 86 | reg.register_enum(sa_enum, sa_enum) 87 | 88 | re_err = r"Expected SQLAlchemyEnumType, but got: .*PetKind.*" 89 | with pytest.raises(TypeError, match=re_err): 90 | reg.register_enum(graphene_enum, graphene_enum) 91 | 92 | 93 | def test_register_sort_enum(): 94 | reg = Registry() 95 | 96 | class PetType(SQLAlchemyObjectType): 97 | class Meta: 98 | model = Pet 99 | registry = reg 100 | 101 | sort_enum = GrapheneEnum( 102 | "PetSort", 103 | [("ID", EnumValue("id", Pet.id)), ("NAME", EnumValue("name", Pet.name))], 104 | ) 105 | 106 | reg.register_sort_enum(PetType, sort_enum) 107 | assert reg.get_sort_enum_for_object_type(PetType) is sort_enum 108 | 109 | 110 | def test_register_sort_enum_incorrect_types(): 111 | reg = Registry() 112 | 113 | class PetType(SQLAlchemyObjectType): 114 | class Meta: 115 | model = Pet 116 | registry = reg 117 | 118 | sort_enum = GrapheneEnum( 119 | "PetSort", 120 | [("ID", EnumValue("id", Pet.id)), ("NAME", EnumValue("name", Pet.name))], 121 | ) 122 | 123 | re_err = r"Expected SQLAlchemyObjectType, but got: .*PetSort.*" 124 | with pytest.raises(TypeError, match=re_err): 125 | reg.register_sort_enum(sort_enum, sort_enum) 126 | 127 | re_err = r"Expected Graphene Enum, but got: .*PetType.*" 128 | with pytest.raises(TypeError, match=re_err): 129 | reg.register_sort_enum(PetType, PetType) 130 | 131 | 132 | def test_register_union(): 133 | reg = Registry() 134 | 135 | class PetType(SQLAlchemyObjectType): 136 | class Meta: 137 | model = Pet 138 | registry = reg 139 | 140 | class ReporterType(SQLAlchemyObjectType): 141 | class Meta: 142 | model = Reporter 143 | 144 | union_types = [PetType, ReporterType] 145 | union = graphene.Union.create_type("ReporterPet", types=tuple(union_types)) 146 | 147 | reg.register_union_type(union, union_types) 148 | 149 | assert reg.get_union_for_object_types(union_types) == union 150 | # Order should not matter 151 | assert reg.get_union_for_object_types([ReporterType, PetType]) == union 152 | 153 | 154 | def test_register_union_scalar(): 155 | reg = Registry() 156 | 157 | union_types = [graphene.String, graphene.Int] 158 | union = graphene.Union.create_type("StringInt", types=union_types) 159 | 160 | re_err = r"Expected Graphene ObjectType, but got: .*String.*" 161 | with pytest.raises(TypeError, match=re_err): 162 | reg.register_union_type(union, union_types) 163 | 164 | 165 | def test_register_union_incorrect_types(): 166 | reg = Registry() 167 | 168 | class 
PetType(SQLAlchemyObjectType): 169 | class Meta: 170 | model = Pet 171 | registry = reg 172 | 173 | class ReporterType(SQLAlchemyObjectType): 174 | class Meta: 175 | model = Reporter 176 | 177 | union_types = [PetType, ReporterType] 178 | union = PetType 179 | 180 | re_err = r"Expected graphene.Union, but got: .*PetType.*" 181 | with pytest.raises(TypeError, match=re_err): 182 | reg.register_union_type(union, union_types) 183 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/test_sort_enums.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import sqlalchemy as sa 3 | 4 | from graphene import Argument, Enum, List, ObjectType, Schema 5 | from graphene.relay import Node 6 | 7 | from ..fields import SQLAlchemyConnectionField 8 | from ..types import SQLAlchemyObjectType 9 | from ..utils import to_type_name 10 | from .models import Base, HairKind, KeyedModel, Pet 11 | from .test_query import to_std_dicts 12 | from .utils import eventually_await_session 13 | 14 | 15 | async def add_pets(session): 16 | pets = [ 17 | Pet(id=1, name="Lassie", pet_kind="dog", hair_kind=HairKind.LONG), 18 | Pet(id=2, name="Barf", pet_kind="dog", hair_kind=HairKind.LONG), 19 | Pet(id=3, name="Alf", pet_kind="cat", hair_kind=HairKind.LONG), 20 | ] 21 | session.add_all(pets) 22 | await eventually_await_session(session, "commit") 23 | 24 | 25 | def test_sort_enum(): 26 | class PetType(SQLAlchemyObjectType): 27 | class Meta: 28 | model = Pet 29 | 30 | sort_enum = PetType.sort_enum() 31 | assert isinstance(sort_enum, type(Enum)) 32 | assert sort_enum._meta.name == "PetTypeSortEnum" 33 | assert list(sort_enum._meta.enum.__members__) == [ 34 | "ID_ASC", 35 | "ID_DESC", 36 | "NAME_ASC", 37 | "NAME_DESC", 38 | "PET_KIND_ASC", 39 | "PET_KIND_DESC", 40 | "HAIR_KIND_ASC", 41 | "HAIR_KIND_DESC", 42 | "REPORTER_ID_ASC", 43 | "REPORTER_ID_DESC", 44 | "LEGS_ASC", 45 | "LEGS_DESC", 46 | ] 47 | assert str(sort_enum.ID_ASC.value.value) == "pets.id ASC" 48 | assert str(sort_enum.ID_DESC.value.value) == "pets.id DESC" 49 | assert str(sort_enum.HAIR_KIND_ASC.value.value) == "pets.hair_kind ASC" 50 | assert str(sort_enum.HAIR_KIND_DESC.value.value) == "pets.hair_kind DESC" 51 | 52 | 53 | def test_sort_enum_with_custom_name(): 54 | class PetType(SQLAlchemyObjectType): 55 | class Meta: 56 | model = Pet 57 | 58 | sort_enum = PetType.sort_enum(name="CustomSortName") 59 | assert isinstance(sort_enum, type(Enum)) 60 | assert sort_enum._meta.name == "CustomSortName" 61 | 62 | 63 | def test_sort_enum_cache(): 64 | class PetType(SQLAlchemyObjectType): 65 | class Meta: 66 | model = Pet 67 | 68 | sort_enum = PetType.sort_enum() 69 | sort_enum_2 = PetType.sort_enum() 70 | assert sort_enum_2 is sort_enum 71 | sort_enum_2 = PetType.sort_enum(name="PetTypeSortEnum") 72 | assert sort_enum_2 is sort_enum 73 | err_msg = "Sort enum for PetType has already been customized" 74 | with pytest.raises(ValueError, match=err_msg): 75 | PetType.sort_enum(name="CustomSortName") 76 | with pytest.raises(ValueError, match=err_msg): 77 | PetType.sort_enum(only_fields=["id"]) 78 | with pytest.raises(ValueError, match=err_msg): 79 | PetType.sort_enum(only_indexed=True) 80 | with pytest.raises(ValueError, match=err_msg): 81 | PetType.sort_enum(get_symbol_name=lambda: "foo") 82 | 83 | 84 | def test_sort_enum_with_excluded_field_in_object_type(): 85 | class PetType(SQLAlchemyObjectType): 86 | class Meta: 87 | model = Pet 88 | exclude_fields = ["reporter_id"] 89 | 
90 | sort_enum = PetType.sort_enum() 91 | assert list(sort_enum._meta.enum.__members__) == [ 92 | "ID_ASC", 93 | "ID_DESC", 94 | "NAME_ASC", 95 | "NAME_DESC", 96 | "PET_KIND_ASC", 97 | "PET_KIND_DESC", 98 | "HAIR_KIND_ASC", 99 | "HAIR_KIND_DESC", 100 | "LEGS_ASC", 101 | "LEGS_DESC", 102 | ] 103 | 104 | 105 | def test_sort_enum_only_fields(): 106 | class PetType(SQLAlchemyObjectType): 107 | class Meta: 108 | model = Pet 109 | 110 | sort_enum = PetType.sort_enum(only_fields=["id", "name"]) 111 | assert list(sort_enum._meta.enum.__members__) == [ 112 | "ID_ASC", 113 | "ID_DESC", 114 | "NAME_ASC", 115 | "NAME_DESC", 116 | ] 117 | 118 | 119 | def test_sort_argument(): 120 | class PetType(SQLAlchemyObjectType): 121 | class Meta: 122 | model = Pet 123 | 124 | sort_arg = PetType.sort_argument() 125 | assert isinstance(sort_arg, Argument) 126 | 127 | assert isinstance(sort_arg.type, List) 128 | sort_enum = sort_arg.type._of_type 129 | assert isinstance(sort_enum, type(Enum)) 130 | assert sort_enum._meta.name == "PetTypeSortEnum" 131 | assert list(sort_enum._meta.enum.__members__) == [ 132 | "ID_ASC", 133 | "ID_DESC", 134 | "NAME_ASC", 135 | "NAME_DESC", 136 | "PET_KIND_ASC", 137 | "PET_KIND_DESC", 138 | "HAIR_KIND_ASC", 139 | "HAIR_KIND_DESC", 140 | "REPORTER_ID_ASC", 141 | "REPORTER_ID_DESC", 142 | "LEGS_ASC", 143 | "LEGS_DESC", 144 | ] 145 | assert str(sort_enum.ID_ASC.value.value) == "pets.id ASC" 146 | assert str(sort_enum.ID_DESC.value.value) == "pets.id DESC" 147 | assert str(sort_enum.HAIR_KIND_ASC.value.value) == "pets.hair_kind ASC" 148 | assert str(sort_enum.HAIR_KIND_DESC.value.value) == "pets.hair_kind DESC" 149 | 150 | assert sort_arg.default_value == ["ID_ASC"] 151 | assert str(sort_enum.ID_ASC.value.value) == "pets.id ASC" 152 | 153 | 154 | def test_sort_argument_with_excluded_fields_in_object_type(): 155 | class PetType(SQLAlchemyObjectType): 156 | class Meta: 157 | model = Pet 158 | exclude_fields = ["hair_kind", "reporter_id", "legs"] 159 | 160 | sort_arg = PetType.sort_argument() 161 | sort_enum = sort_arg.type._of_type 162 | assert list(sort_enum._meta.enum.__members__) == [ 163 | "ID_ASC", 164 | "ID_DESC", 165 | "NAME_ASC", 166 | "NAME_DESC", 167 | "PET_KIND_ASC", 168 | "PET_KIND_DESC", 169 | ] 170 | assert sort_arg.default_value == ["ID_ASC"] 171 | 172 | 173 | def test_sort_argument_only_fields(): 174 | class PetType(SQLAlchemyObjectType): 175 | class Meta: 176 | model = Pet 177 | only_fields = ["id", "pet_kind"] 178 | 179 | sort_arg = PetType.sort_argument() 180 | sort_enum = sort_arg.type._of_type 181 | assert list(sort_enum._meta.enum.__members__) == [ 182 | "ID_ASC", 183 | "ID_DESC", 184 | "PET_KIND_ASC", 185 | "PET_KIND_DESC", 186 | ] 187 | assert sort_arg.default_value == ["ID_ASC"] 188 | 189 | 190 | def test_sort_argument_for_multi_column_pk(): 191 | class MultiPkTestModel(Base): 192 | __tablename__ = "multi_pk_test_table" 193 | foo = sa.Column(sa.Integer, primary_key=True) 194 | bar = sa.Column(sa.Integer, primary_key=True) 195 | 196 | class MultiPkTestType(SQLAlchemyObjectType): 197 | class Meta: 198 | model = MultiPkTestModel 199 | 200 | sort_arg = MultiPkTestType.sort_argument() 201 | assert sort_arg.default_value == ["FOO_ASC", "BAR_ASC"] 202 | 203 | 204 | def test_sort_argument_only_indexed(): 205 | class IndexedTestModel(Base): 206 | __tablename__ = "indexed_test_table" 207 | id = sa.Column(sa.Integer, primary_key=True) 208 | foo = sa.Column(sa.Integer, index=False) 209 | bar = sa.Column(sa.Integer, index=True) 210 | 211 | class 
IndexedTestType(SQLAlchemyObjectType): 212 | class Meta: 213 | model = IndexedTestModel 214 | 215 | sort_arg = IndexedTestType.sort_argument(only_indexed=True) 216 | sort_enum = sort_arg.type._of_type 217 | assert list(sort_enum._meta.enum.__members__) == [ 218 | "ID_ASC", 219 | "ID_DESC", 220 | "BAR_ASC", 221 | "BAR_DESC", 222 | ] 223 | assert sort_arg.default_value == ["ID_ASC"] 224 | 225 | 226 | def test_sort_argument_with_custom_symbol_names(): 227 | class PetType(SQLAlchemyObjectType): 228 | class Meta: 229 | model = Pet 230 | 231 | def get_symbol_name(column_name, sort_asc=True): 232 | return to_type_name(column_name) + ("Up" if sort_asc else "Down") 233 | 234 | sort_arg = PetType.sort_argument(get_symbol_name=get_symbol_name) 235 | sort_enum = sort_arg.type._of_type 236 | assert list(sort_enum._meta.enum.__members__) == [ 237 | "IdUp", 238 | "IdDown", 239 | "NameUp", 240 | "NameDown", 241 | "PetKindUp", 242 | "PetKindDown", 243 | "HairKindUp", 244 | "HairKindDown", 245 | "ReporterIdUp", 246 | "ReporterIdDown", 247 | "LegsUp", 248 | "LegsDown", 249 | ] 250 | assert sort_arg.default_value == ["IdUp"] 251 | 252 | 253 | @pytest.mark.asyncio 254 | async def test_sort_query(session): 255 | await add_pets(session) 256 | 257 | class PetNode(SQLAlchemyObjectType): 258 | class Meta: 259 | model = Pet 260 | interfaces = (Node,) 261 | 262 | class Query(ObjectType): 263 | defaultSort = SQLAlchemyConnectionField(PetNode.connection) 264 | nameSort = SQLAlchemyConnectionField(PetNode.connection) 265 | multipleSort = SQLAlchemyConnectionField(PetNode.connection) 266 | descSort = SQLAlchemyConnectionField(PetNode.connection) 267 | singleColumnSort = SQLAlchemyConnectionField( 268 | PetNode.connection, sort=Argument(PetNode.sort_enum()) 269 | ) 270 | noDefaultSort = SQLAlchemyConnectionField( 271 | PetNode.connection, sort=PetNode.sort_argument(has_default=False) 272 | ) 273 | noSort = SQLAlchemyConnectionField(PetNode.connection, sort=None) 274 | 275 | query = """ 276 | query sortTest { 277 | defaultSort { 278 | edges { 279 | node { 280 | name 281 | } 282 | } 283 | } 284 | nameSort(sort: NAME_ASC) { 285 | edges { 286 | node { 287 | name 288 | } 289 | } 290 | } 291 | multipleSort(sort: [PET_KIND_ASC, NAME_DESC]) { 292 | edges { 293 | node { 294 | name 295 | petKind 296 | } 297 | } 298 | } 299 | descSort(sort: [NAME_DESC]) { 300 | edges { 301 | node { 302 | name 303 | } 304 | } 305 | } 306 | singleColumnSort(sort: NAME_DESC) { 307 | edges { 308 | node { 309 | name 310 | } 311 | } 312 | } 313 | noDefaultSort(sort: NAME_ASC) { 314 | edges { 315 | node { 316 | name 317 | } 318 | } 319 | } 320 | } 321 | """ 322 | 323 | def makeNodes(nodeList): 324 | nodes = [{"node": item} for item in nodeList] 325 | return {"edges": nodes} 326 | 327 | expected = { 328 | "defaultSort": makeNodes( 329 | [{"name": "Lassie"}, {"name": "Barf"}, {"name": "Alf"}] 330 | ), 331 | "nameSort": makeNodes([{"name": "Alf"}, {"name": "Barf"}, {"name": "Lassie"}]), 332 | "noDefaultSort": makeNodes( 333 | [{"name": "Alf"}, {"name": "Barf"}, {"name": "Lassie"}] 334 | ), 335 | "multipleSort": makeNodes( 336 | [ 337 | {"name": "Alf", "petKind": "CAT"}, 338 | {"name": "Lassie", "petKind": "DOG"}, 339 | {"name": "Barf", "petKind": "DOG"}, 340 | ] 341 | ), 342 | "descSort": makeNodes([{"name": "Lassie"}, {"name": "Barf"}, {"name": "Alf"}]), 343 | "singleColumnSort": makeNodes( 344 | [{"name": "Lassie"}, {"name": "Barf"}, {"name": "Alf"}] 345 | ), 346 | } # yapf: disable 347 | 348 | schema = Schema(query=Query) 349 | result = await 
schema.execute_async(query, context_value={"session": session}) 350 | assert not result.errors 351 | result = to_std_dicts(result.data) 352 | assert result == expected 353 | 354 | queryError = """ 355 | query sortTest { 356 | singleColumnSort(sort: [PET_KIND_ASC, NAME_DESC]) { 357 | edges { 358 | node { 359 | name 360 | } 361 | } 362 | } 363 | } 364 | """ 365 | result = await schema.execute_async(queryError, context_value={"session": session}) 366 | assert result.errors is not None 367 | assert "cannot represent non-enum value" in result.errors[0].message 368 | 369 | queryNoSort = """ 370 | query sortTest { 371 | noDefaultSort { 372 | edges { 373 | node { 374 | name 375 | } 376 | } 377 | } 378 | noSort { 379 | edges { 380 | node { 381 | name 382 | } 383 | } 384 | } 385 | } 386 | """ 387 | 388 | result = await schema.execute_async(queryNoSort, context_value={"session": session}) 389 | assert not result.errors 390 | # TODO: SQLite usually returns the results ordered by primary key, 391 | # so we cannot test this way whether sorting actually happens or not. 392 | # Also, no sort order is guaranteed by SQLite if "no order" by is used. 393 | assert [node["node"]["name"] for node in result.data["noSort"]["edges"]] == [ 394 | node["node"]["name"] for node in result.data["noDefaultSort"]["edges"] 395 | ] 396 | 397 | 398 | def test_sort_enum_from_key_issue_330(): 399 | """ 400 | Verifies that the sort enum name is generated from the column key instead of the name, 401 | in case the column has an invalid enum name. See #330 402 | """ 403 | 404 | class KeyedType(SQLAlchemyObjectType): 405 | class Meta: 406 | model = KeyedModel 407 | 408 | sort_enum = KeyedType.sort_enum() 409 | assert isinstance(sort_enum, type(Enum)) 410 | assert sort_enum._meta.name == "KeyedTypeSortEnum" 411 | assert list(sort_enum._meta.enum.__members__) == [ 412 | "ID_ASC", 413 | "ID_DESC", 414 | "REPORTER_NUMBER_ASC", 415 | "REPORTER_NUMBER_DESC", 416 | ] 417 | assert ( 418 | str(sort_enum.REPORTER_NUMBER_ASC.value.value) 419 | == 'test330."% reporter_number" ASC' 420 | ) 421 | assert ( 422 | str(sort_enum.REPORTER_NUMBER_DESC.value.value) 423 | == 'test330."% reporter_number" DESC' 424 | ) 425 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import sqlalchemy as sa 3 | 4 | from graphene import Enum, List, ObjectType, Schema, String 5 | 6 | from ..utils import ( 7 | DummyImport, 8 | get_session, 9 | sort_argument_for_model, 10 | sort_enum_for_model, 11 | to_enum_value_name, 12 | to_type_name, 13 | ) 14 | from .models import Base, Editor, Pet 15 | 16 | 17 | def test_get_session(): 18 | session = "My SQLAlchemy session" 19 | 20 | class Query(ObjectType): 21 | x = String() 22 | 23 | def resolve_x(self, info): 24 | return get_session(info.context) 25 | 26 | query = """ 27 | query ReporterQuery { 28 | x 29 | } 30 | """ 31 | 32 | schema = Schema(query=Query) 33 | result = schema.execute(query, context_value={"session": session}) 34 | assert not result.errors 35 | assert result.data["x"] == session 36 | 37 | 38 | def test_to_type_name(): 39 | assert to_type_name("make_camel_case") == "MakeCamelCase" 40 | assert to_type_name("AlreadyCamelCase") == "AlreadyCamelCase" 41 | assert to_type_name("A_Snake_and_a_Camel") == "ASnakeAndACamel" 42 | 43 | 44 | def test_to_enum_value_name(): 45 | assert to_enum_value_name("make_enum_value_name") == "MAKE_ENUM_VALUE_NAME" 
46 | assert to_enum_value_name("makeEnumValueName") == "MAKE_ENUM_VALUE_NAME" 47 | assert to_enum_value_name("HTTPStatus400Message") == "HTTP_STATUS400_MESSAGE" 48 | assert to_enum_value_name("ALREADY_ENUM_VALUE_NAME") == "ALREADY_ENUM_VALUE_NAME" 49 | 50 | 51 | # test deprecated sort enum utility functions 52 | 53 | 54 | def test_sort_enum_for_model(): 55 | with pytest.warns(DeprecationWarning): 56 | enum = sort_enum_for_model(Pet) 57 | assert isinstance(enum, type(Enum)) 58 | assert str(enum) == "PetSortEnum" 59 | for col in sa.inspect(Pet).columns: 60 | assert hasattr(enum, col.name + "_asc") 61 | assert hasattr(enum, col.name + "_desc") 62 | 63 | 64 | def test_sort_enum_for_model_custom_naming(): 65 | with pytest.warns(DeprecationWarning): 66 | enum = sort_enum_for_model( 67 | Pet, "Foo", lambda n, d: n.upper() + ("A" if d else "D") 68 | ) 69 | assert str(enum) == "Foo" 70 | for col in sa.inspect(Pet).columns: 71 | assert hasattr(enum, col.name.upper() + "A") 72 | assert hasattr(enum, col.name.upper() + "D") 73 | 74 | 75 | def test_enum_cache(): 76 | with pytest.warns(DeprecationWarning): 77 | assert sort_enum_for_model(Editor) is sort_enum_for_model(Editor) 78 | 79 | 80 | def test_sort_argument_for_model(): 81 | with pytest.warns(DeprecationWarning): 82 | arg = sort_argument_for_model(Pet) 83 | 84 | assert isinstance(arg.type, List) 85 | assert arg.default_value == [Pet.id.name + "_asc"] 86 | with pytest.warns(DeprecationWarning): 87 | assert arg.type.of_type is sort_enum_for_model(Pet) 88 | 89 | 90 | def test_sort_argument_for_model_no_default(): 91 | with pytest.warns(DeprecationWarning): 92 | arg = sort_argument_for_model(Pet, False) 93 | 94 | assert arg.default_value is None 95 | 96 | 97 | def test_sort_argument_for_model_multiple_pk(): 98 | class MultiplePK(Base): 99 | foo = sa.Column(sa.Integer, primary_key=True) 100 | bar = sa.Column(sa.Integer, primary_key=True) 101 | __tablename__ = "MultiplePK" 102 | 103 | with pytest.warns(DeprecationWarning): 104 | arg = sort_argument_for_model(MultiplePK) 105 | assert set(arg.default_value) == { 106 | MultiplePK.foo.name + "_asc", 107 | MultiplePK.bar.name + "_asc", 108 | } 109 | 110 | 111 | def test_dummy_import(): 112 | dummy_module = DummyImport() 113 | assert dummy_module.foo == object 114 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/tests/utils.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import re 3 | 4 | from sqlalchemy import select 5 | 6 | from graphene_sqlalchemy.utils import SQL_VERSION_HIGHER_EQUAL_THAN_1_4 7 | 8 | 9 | def to_std_dicts(value): 10 | """Convert nested ordered dicts to normal dicts for better comparison.""" 11 | if isinstance(value, dict): 12 | return {k: to_std_dicts(v) for k, v in value.items()} 13 | elif isinstance(value, list): 14 | return [to_std_dicts(v) for v in value] 15 | else: 16 | return value 17 | 18 | 19 | def remove_cache_miss_stat(message): 20 | """Remove the stat from the echoed query message when the cache is missed for sqlalchemy version >= 1.4""" 21 | # https://github.com/sqlalchemy/sqlalchemy/blob/990eb3d8813369d3b8a7776ae85fb33627443d30/lib/sqlalchemy/engine/default.py#L1177 22 | return re.sub(r"\[generated in \d+.?\d*s\]\s", "", message) 23 | 24 | 25 | def wrap_select_func(query): 26 | # TODO remove this when we drop support for sqa < 2.0 27 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4: 28 | return select(query) 29 | else: 30 | return select([query]) 31 | 32 | 33 | async def 
eventually_await_session(session, func, *args): 34 | if inspect.iscoroutinefunction(getattr(session, func)): 35 | await getattr(session, func)(*args) 36 | else: 37 | getattr(session, func)(*args) 38 | -------------------------------------------------------------------------------- /graphene_sqlalchemy/utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | import typing 3 | import warnings 4 | from collections import OrderedDict 5 | from functools import _c3_mro 6 | from importlib.metadata import version as get_version 7 | from typing import Any, Callable, Dict, Optional 8 | 9 | from packaging import version 10 | from sqlalchemy import select 11 | from sqlalchemy.exc import ArgumentError 12 | from sqlalchemy.orm import class_mapper, object_mapper 13 | from sqlalchemy.orm.exc import UnmappedClassError, UnmappedInstanceError 14 | 15 | from graphene import NonNull 16 | 17 | 18 | def get_nullable_type(_type): 19 | if isinstance(_type, NonNull): 20 | return _type.of_type 21 | return _type 22 | 23 | 24 | def is_sqlalchemy_version_less_than(version_string): 25 | """Check the installed SQLAlchemy version""" 26 | return version.parse(get_version("SQLAlchemy")) < version.parse(version_string) 27 | 28 | 29 | def is_graphene_version_less_than(version_string): # pragma: no cover 30 | """Check the installed graphene version""" 31 | return version.parse(get_version("graphene")) < version.parse(version_string) 32 | 33 | 34 | SQL_VERSION_HIGHER_EQUAL_THAN_1_4 = False 35 | 36 | if not is_sqlalchemy_version_less_than("1.4"): # pragma: no cover 37 | from sqlalchemy.ext.asyncio import AsyncSession 38 | 39 | SQL_VERSION_HIGHER_EQUAL_THAN_1_4 = True 40 | 41 | 42 | SQL_VERSION_HIGHER_EQUAL_THAN_2 = False 43 | 44 | if not is_sqlalchemy_version_less_than("2.0.0b1"): # pragma: no cover 45 | SQL_VERSION_HIGHER_EQUAL_THAN_2 = True 46 | 47 | 48 | def get_session(context): 49 | return context.get("session") 50 | 51 | 52 | def get_query(model, context): 53 | query = getattr(model, "query", None) 54 | if not query: 55 | session = get_session(context) 56 | if not session: 57 | raise Exception( 58 | "A query in the model Base or a session in the schema is required for querying.\n" 59 | "Read more http://docs.graphene-python.org/projects/sqlalchemy/en/latest/tips/#querying" 60 | ) 61 | if SQL_VERSION_HIGHER_EQUAL_THAN_1_4 and isinstance(session, AsyncSession): 62 | return select(model) 63 | query = session.query(model) 64 | return query 65 | 66 | 67 | def is_mapped_class(cls): 68 | try: 69 | class_mapper(cls) 70 | except ArgumentError as error: 71 | # Only handle ArgumentErrors for non-class objects 72 | if "Class object expected" in str(error): 73 | return False 74 | raise 75 | except UnmappedClassError: 76 | # Unmapped classes return false 77 | return False 78 | else: 79 | return True 80 | 81 | 82 | def is_mapped_instance(cls): 83 | try: 84 | object_mapper(cls) 85 | except (ArgumentError, UnmappedInstanceError): 86 | return False 87 | else: 88 | return True 89 | 90 | 91 | def to_type_name(name): 92 | """Convert the given name to a GraphQL type name.""" 93 | return "".join(part[:1].upper() + part[1:] for part in name.split("_")) 94 | 95 | 96 | _re_enum_value_name_1 = re.compile("(.)([A-Z][a-z]+)") 97 | _re_enum_value_name_2 = re.compile("([a-z0-9])([A-Z])") 98 | 99 | 100 | def to_enum_value_name(name): 101 | """Convert the given name to a GraphQL enum value name.""" 102 | return _re_enum_value_name_2.sub( 103 | r"\1_\2", _re_enum_value_name_1.sub(r"\1_\2", name) 104 | 
).upper() 105 | 106 | 107 | class EnumValue(str): 108 | """String that has an additional value attached. 109 | 110 | This is used to attach SQLAlchemy model columns to Enum symbols. 111 | """ 112 | 113 | def __new__(cls, s, value): 114 | return super(EnumValue, cls).__new__(cls, s) 115 | 116 | def __init__(self, _s, value): 117 | super(EnumValue, self).__init__() 118 | self.value = value 119 | 120 | 121 | def _deprecated_default_symbol_name(column_name, sort_asc): 122 | return column_name + ("_asc" if sort_asc else "_desc") 123 | 124 | 125 | # unfortunately, we cannot use lru_cache because we still support Python 2 126 | _deprecated_object_type_cache = {} 127 | 128 | 129 | def _deprecated_object_type_for_model(cls, name): 130 | try: 131 | return _deprecated_object_type_cache[cls, name] 132 | except KeyError: 133 | from .types import SQLAlchemyObjectType 134 | 135 | obj_type_name = name or cls.__name__ 136 | 137 | class ObjType(SQLAlchemyObjectType): 138 | class Meta: 139 | name = obj_type_name 140 | model = cls 141 | 142 | _deprecated_object_type_cache[cls, name] = ObjType 143 | return ObjType 144 | 145 | 146 | def sort_enum_for_model(cls, name=None, symbol_name=None): 147 | """Get a Graphene Enum for sorting the given model class. 148 | 149 | This is deprecated, please use object_type.sort_enum() instead. 150 | """ 151 | warnings.warn( 152 | "sort_enum_for_model() is deprecated; use object_type.sort_enum() instead.", 153 | DeprecationWarning, 154 | stacklevel=2, 155 | ) 156 | 157 | from .enums import sort_enum_for_object_type 158 | 159 | return sort_enum_for_object_type( 160 | _deprecated_object_type_for_model(cls, name), 161 | name, 162 | get_symbol_name=symbol_name or _deprecated_default_symbol_name, 163 | ) 164 | 165 | 166 | def sort_argument_for_model(cls, has_default=True): 167 | """Get a Graphene Argument for sorting the given model class. 168 | 169 | This is deprecated, please use object_type.sort_argument() instead. 170 | """ 171 | warnings.warn( 172 | "sort_argument_for_model() is deprecated;" 173 | " use object_type.sort_argument() instead.", 174 | DeprecationWarning, 175 | stacklevel=2, 176 | ) 177 | 178 | from graphene import Argument, List 179 | 180 | from .enums import sort_enum_for_object_type 181 | 182 | enum = sort_enum_for_object_type( 183 | _deprecated_object_type_for_model(cls, None), 184 | get_symbol_name=_deprecated_default_symbol_name, 185 | ) 186 | if not has_default: 187 | enum.default = None 188 | 189 | return Argument(List(enum), default_value=enum.default) 190 | 191 | 192 | class singledispatchbymatchfunction: 193 | """ 194 | Inspired by @singledispatch, this is a variant that works using a matcher function 195 | instead of relying on the type of the first argument. 196 | The register method can be used to register a new matcher, which is passed as the first argument: 197 | """ 198 | 199 | def __init__(self, default: Callable): 200 | self.registry: Dict[Callable, Callable] = OrderedDict() 201 | self.default = default 202 | 203 | def __call__(self, *args, **kwargs): 204 | matched_arg = args[0] 205 | try: 206 | mro = _c3_mro(matched_arg) 207 | except Exception: 208 | # In case of tuples or similar types, we can't use the MRO. 209 | # Fall back to just matching the original argument. 210 | mro = [matched_arg] 211 | 212 | for cls in mro: 213 | for matcher_function, final_method in self.registry.items(): 214 | # Register order is important. First one that matches, runs. 
215 | if matcher_function(cls): 216 | return final_method(*args, **kwargs) 217 | 218 | # No match, using default. 219 | return self.default(*args, **kwargs) 220 | 221 | def register(self, matcher_function: Callable[[Any], bool], func=None): 222 | if func is None: 223 | return lambda f: self.register(matcher_function, f) 224 | self.registry[matcher_function] = func 225 | return func 226 | 227 | 228 | def column_type_eq(value: Any) -> Callable[[Any], bool]: 229 | """A simple function that makes the equality based matcher functions for 230 | SingleDispatchByMatchFunction prettier""" 231 | return lambda x: (x == value) 232 | 233 | 234 | def safe_isinstance(cls): 235 | def safe_isinstance_checker(arg): 236 | try: 237 | return isinstance(arg, cls) 238 | except TypeError: 239 | pass 240 | 241 | return safe_isinstance_checker 242 | 243 | 244 | def safe_issubclass(cls): 245 | def safe_issubclass_checker(arg): 246 | try: 247 | return issubclass(arg, cls) 248 | except TypeError: 249 | pass 250 | 251 | return safe_issubclass_checker 252 | 253 | 254 | def registry_sqlalchemy_model_from_str(model_name: str) -> Optional[Any]: 255 | from graphene_sqlalchemy.registry import get_global_registry 256 | 257 | try: 258 | return next( 259 | filter( 260 | lambda x: x.__name__ == model_name, 261 | list(get_global_registry()._registry.keys()), 262 | ) 263 | ) 264 | except StopIteration: 265 | pass 266 | 267 | 268 | def is_list(x): 269 | return getattr(x, "__origin__", None) in [list, typing.List] 270 | 271 | 272 | class DummyImport: 273 | """The dummy module returns 'object' for a query for any member""" 274 | 275 | def __getattr__(self, name): 276 | return object 277 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | 4 | [flake8] 5 | ignore = E203,W503 6 | exclude = .git,.mypy_cache,.pytest_cache,.tox,.venv,__pycache__,build,dist,docs,setup.py,docs/*,examples/*,tests 7 | max-line-length = 120 8 | 9 | [isort] 10 | profile = black 11 | no_lines_before=FIRSTPARTY 12 | known_graphene=graphene,graphql_relay,flask_graphql,graphql_server,sphinx_graphene_theme 13 | known_first_party=graphene_sqlalchemy 14 | known_third_party=aiodataloader,app,database,flask,models,nameko,pkg_resources,promise,pytest,schema,setuptools,sqlalchemy,sqlalchemy_utils 15 | sections=FUTURE,STDLIB,THIRDPARTY,GRAPHENE,FIRSTPARTY,LOCALFOLDER 16 | skip_glob=examples/nameko_sqlalchemy 17 | 18 | [bdist_wheel] 19 | universal=1 20 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import re 3 | import sys 4 | 5 | from setuptools import find_packages, setup 6 | 7 | _version_re = re.compile(r"__version__\s+=\s+(.*)") 8 | 9 | with open("graphene_sqlalchemy/__init__.py", "rb") as f: 10 | version = str( 11 | ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1)) 12 | ) 13 | 14 | requirements = [ 15 | # To keep things simple, we only support newer versions of Graphene 16 | "graphene>=3.0.0b7", 17 | "promise>=2.3", 18 | "SQLAlchemy>=1.1", 19 | "aiodataloader>=0.2.0,<1.0", 20 | "packaging>=23.0", 21 | ] 22 | 23 | tests_require = [ 24 | "pytest>=6.2.0,<7.0", 25 | "pytest-asyncio>=0.18.3", 26 | "pytest-cov>=2.11.0,<3.0", 27 | "sqlalchemy_utils>=0.37.0,<1.0", 28 | "pytest-benchmark>=3.4.0,<4.0", 29 | "aiosqlite>=0.17.0", 30 | "nest-asyncio", 31 | 
"greenlet", 32 | ] 33 | 34 | setup( 35 | name="graphene-sqlalchemy", 36 | version=version, 37 | description="Graphene SQLAlchemy integration", 38 | long_description=open("README.md").read(), 39 | long_description_content_type="text/markdown", 40 | url="https://github.com/graphql-python/graphene-sqlalchemy", 41 | project_urls={ 42 | "Documentation": "https://docs.graphene-python.org/projects/sqlalchemy/en/latest", 43 | }, 44 | author="Syrus Akbary", 45 | author_email="me@syrusakbary.com", 46 | license="MIT", 47 | classifiers=[ 48 | "Development Status :: 3 - Alpha", 49 | "Intended Audience :: Developers", 50 | "Topic :: Software Development :: Libraries", 51 | "Programming Language :: Python :: 3", 52 | "Programming Language :: Python :: 3.9", 53 | "Programming Language :: Python :: 3.10", 54 | "Programming Language :: Python :: 3.11", 55 | "Programming Language :: Python :: 3.12", 56 | "Programming Language :: Python :: 3.13", 57 | "Programming Language :: Python :: Implementation :: PyPy", 58 | ], 59 | keywords="api graphql protocol rest relay graphene sqlalchemy", 60 | packages=find_packages(exclude=["tests"]), 61 | install_requires=requirements, 62 | extras_require={ 63 | "dev": [ 64 | "tox==3.7.0", # Should be kept in sync with tox.ini 65 | "pre-commit==2.19", 66 | "flake8==4.0.0", 67 | ], 68 | "test": tests_require, 69 | }, 70 | tests_require=tests_require, 71 | ) 72 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = pre-commit,py{39,310,311,312,313}-sql{12,13,14,20} 3 | skipsdist = true 4 | minversion = 3.7.0 5 | 6 | [gh-actions] 7 | python = 8 | 3.9: py39 9 | 3.10: py310 10 | 3.11: py311 11 | 3.12: py312 12 | 3.13: py313 13 | 14 | [gh-actions:env] 15 | SQLALCHEMY = 16 | 1.2: sql12 17 | 1.3: sql13 18 | 1.4: sql14 19 | 2.0: sql20 20 | 21 | [testenv] 22 | passenv = GITHUB_* 23 | deps = 24 | .[test] 25 | sql12: sqlalchemy>=1.2,<1.3 26 | sql13: sqlalchemy>=1.3,<1.4 27 | sql14: sqlalchemy>=1.4,<1.5 28 | sql20: sqlalchemy>=2.0.0b3 29 | setenv = 30 | SQLALCHEMY_WARN_20 = 1 31 | commands = 32 | python -W always -m pytest graphene_sqlalchemy --cov=graphene_sqlalchemy --cov-report=term --cov-report=xml {posargs} 33 | 34 | [testenv:pre-commit] 35 | basepython=python3.10 36 | deps = 37 | .[dev] 38 | commands = 39 | pre-commit {posargs:run --all-files} 40 | 41 | [testenv:flake8] 42 | basepython = python3.10 43 | deps = -e.[dev] 44 | commands = 45 | flake8 --exclude setup.py,docs,examples,tests,.tox --max-line-length 120 46 | --------------------------------------------------------------------------------