├── .coveragerc ├── .flake8 ├── .github └── workflows │ ├── python-package-daily.yml │ ├── python-package.yml │ └── python-publish.yml ├── .gitignore ├── .isort.cfg ├── .pre-commit-config.yaml ├── .pylintrc ├── .readthedocs.yml ├── AUTHORS.rst ├── CHANGELOG.rst ├── CONTRIBUTING.rst ├── LICENSE.txt ├── Makefile ├── README.rst ├── dev-requirements.in ├── dev-requirements.txt ├── docs ├── Makefile ├── _static │ └── .gitignore ├── authors.rst ├── changelog.rst ├── conf.py ├── contributing.rst ├── images │ └── export_dashboards.png ├── index.rst ├── license.rst ├── readme.rst └── requirements.txt ├── examples └── exports │ ├── charts │ └── Total_count_134.yaml │ ├── dashboards │ └── White_label_test.yaml │ ├── databases │ └── Google_Sheets.yaml │ ├── datasets │ └── Google_Sheets │ │ └── country_cnt.yaml │ ├── functions │ └── demo.py │ └── metadata.yaml ├── pyproject.toml ├── requirements.in ├── requirements.txt ├── setup.cfg ├── setup.py ├── src └── preset_cli │ ├── __init__.py │ ├── api │ ├── __init__.py │ ├── clients │ │ ├── __init__.py │ │ ├── dbt.py │ │ ├── preset.py │ │ └── superset.py │ └── operators.py │ ├── auth │ ├── __init__.py │ ├── jwt.py │ ├── lib.py │ ├── main.py │ ├── preset.py │ ├── superset.py │ └── token.py │ ├── cli │ ├── __init__.py │ ├── main.py │ └── superset │ │ ├── __init__.py │ │ ├── export.py │ │ ├── import_.py │ │ ├── lib.py │ │ ├── main.py │ │ ├── sql.py │ │ └── sync │ │ ├── __init__.py │ │ ├── dbt │ │ ├── __init__.py │ │ ├── command.py │ │ ├── databases.py │ │ ├── datasets.py │ │ ├── exposures.py │ │ ├── lib.py │ │ └── metrics.py │ │ ├── main.py │ │ └── native │ │ ├── __init__.py │ │ └── command.py │ ├── exceptions.py │ ├── lib.py │ └── typing.py ├── tests ├── __init__.py ├── api │ ├── __init__.py │ └── clients │ │ ├── __init__.py │ │ ├── dbt_test.py │ │ ├── preset_test.py │ │ └── superset_test.py ├── auth │ ├── __init__.py │ ├── jwt_test.py │ ├── lib_test.py │ ├── main_test.py │ ├── preset_test.py │ └── superset_test.py ├── cli │ ├── __init__.py │ ├── main_test.py │ └── superset │ │ ├── __init__.py │ │ ├── export_test.py │ │ ├── import_test.py │ │ ├── lib_test.py │ │ ├── main_test.py │ │ ├── sql_test.py │ │ └── sync │ │ ├── __init__.py │ │ ├── dbt │ │ ├── __init__.py │ │ ├── command_test.py │ │ ├── databases_test.py │ │ ├── datasets_test.py │ │ ├── exposures_test.py │ │ ├── lib_test.py │ │ ├── manifest-metricflow.json │ │ ├── manifest.json │ │ └── metrics_test.py │ │ └── native │ │ ├── __init__.py │ │ └── command_test.py ├── conftest.py └── lib_test.py └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | # .coveragerc to control coverage.py 2 | [run] 3 | branch = True 4 | source = preset_cli 5 | # omit = bad_file.py 6 | 7 | [paths] 8 | source = 9 | src/ 10 | */site-packages/ 11 | 12 | [report] 13 | # Regexes for lines to exclude from consideration 14 | exclude_lines = 15 | # Have to re-enable the standard pragma 16 | pragma: no cover 17 | 18 | # Don't complain about missing debug-only code: 19 | def __repr__ 20 | if self\.debug 21 | 22 | # Don't complain if tests don't hit defensive assertion code: 23 | raise AssertionError 24 | raise NotImplementedError 25 | 26 | # Don't complain if non-runnable code isn't run: 27 | if 0: 28 | if __name__ == .__main__.: 29 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E203, E266, E501, W503, F403, F401, W293 3 | 
max-line-length = 79 4 | max-complexity = 18 5 | select = B,C,E,F,W,T4,B9 6 | -------------------------------------------------------------------------------- /.github/workflows/python-package-daily.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Run tests with latest dependencies 5 | 6 | on: 7 | schedule: 8 | - cron: '0 6 * * *' 9 | workflow_dispatch: 10 | 11 | jobs: 12 | build: 13 | 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | python-version: [3.8, 3.9, '3.10', '3.11'] 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip wheel 29 | python -m pip install -e '.[testing]' 30 | - name: Test with pytest 31 | run: | 32 | pytest --cov-fail-under=100 --cov=src/preset_cli -vv tests/ 33 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Run tests with pinned dependencies 5 | 6 | on: 7 | push: 8 | branches: [ main ] 9 | pull_request: 10 | branches: [ main ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | python-version: [3.8, 3.9, '3.10', '3.11'] 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip wheel 30 | python -m pip install -r dev-requirements.txt 31 | - name: Test with pytest 32 | run: | 33 | pre-commit run --all-files 34 | pytest --cov-fail-under=100 --cov=src/preset_cli -vv tests/ 35 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | jobs: 16 | deploy: 17 | 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: '3.x' 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install build 30 | - name: Build package 31 | run: python -m build 32 | - name: Publish package 33 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 34 | with: 35 | user: __token__ 36 | password: ${{ secrets.PYPI_API_TOKEN }} 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary and binary files 2 | *~ 3 | *.py[cod] 4 | *.so 5 | *.cfg 6 | !.isort.cfg 7 | !setup.cfg 8 | *.orig 9 | *.log 10 | *.pot 11 | __pycache__/* 12 | .cache/* 13 | .*.swp 14 | */.ipynb_checkpoints/* 15 | .DS_Store 16 | 17 | # Project files 18 | .ropeproject 19 | .project 20 | .pydevproject 21 | .settings 22 | .idea 23 | .vscode 24 | tags 25 | 26 | # Package files 27 | *.egg 28 | *.eggs/ 29 | .installed.cfg 30 | *.egg-info 31 | 32 | # Unittest and coverage 33 | htmlcov/* 34 | .coverage 35 | .coverage.* 36 | .tox 37 | junit*.xml 38 | coverage.xml 39 | .pytest_cache/ 40 | 41 | # Build and docs folder/files 42 | build/* 43 | dist/* 44 | sdist/* 45 | docs/api/* 46 | docs/_rst/* 47 | docs/_build/* 48 | cover/* 49 | MANIFEST 50 | 51 | # Per-project virtualenvs 52 | .venv*/ 53 | .conda*/ 54 | .python-version 55 | 56 | .pyre/ 57 | .pyre_configuration 58 | .watchmanconfig 59 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | profile = black 3 | known_first_party = preset_cli 4 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: '^docs/conf.py' 2 | 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v3.4.0 6 | hooks: 7 | #- id: check-added-large-files 8 | - id: check-ast 9 | - id: check-json 10 | - id: check-merge-conflict 11 | - id: check-xml 12 | - id: check-yaml 13 | exclude: ^examples 14 | - id: debug-statements 15 | - id: end-of-file-fixer 16 | - id: requirements-txt-fixer 17 | - id: mixed-line-ending 18 | args: ['--fix=auto'] # replace 'auto' with 'lf' to enforce Linux/Mac line endings or 'crlf' for Windows 19 | 20 | ## If you want to avoid flake8 errors due to unused vars or imports: 21 | # - repo: https://github.com/myint/autoflake.git 22 | # rev: v1.4 23 | # hooks: 24 | # - id: autoflake 25 | # args: [ 26 | # --in-place, 27 | # --remove-all-unused-imports, 28 | # --remove-unused-variables, 29 | # ] 30 | 31 | - repo: https://github.com/pycqa/isort 32 | rev: 5.11.5 33 | hooks: 34 | - id: isort 35 | 36 | - repo: https://github.com/psf/black 37 | rev: 22.10.0 38 | hooks: 39 | - id: black 40 | language_version: python3 41 | 42 | ## If like to embrace black styles even in the docs: 43 | # - repo: https://github.com/asottile/blacken-docs 44 | # rev: v1.9.1 45 | # hooks: 46 | # - id: blacken-docs 47 | # additional_dependencies: [black] 48 | 49 | - repo: https://github.com/PyCQA/flake8 50 | rev: 3.9.2 51 | hooks: 52 | - id: flake8 53 | ## You can add flake8 
plugins via `additional_dependencies`: 54 | # additional_dependencies: [flake8-bugbear] 55 | 56 | - repo: https://github.com/pre-commit/mirrors-mypy 57 | rev: 'v0.981' # Use the sha / tag you want to point at 58 | hooks: 59 | - id: mypy 60 | additional_dependencies: 61 | - types-requests 62 | - types-freezegun 63 | - types-python-dateutil 64 | - types-setuptools 65 | - types-PyYAML 66 | - types-tabulate 67 | - repo: https://github.com/asottile/add-trailing-comma 68 | rev: v2.1.0 69 | hooks: 70 | - id: add-trailing-comma 71 | #- repo: https://github.com/asottile/reorder_python_imports 72 | # rev: v2.5.0 73 | # hooks: 74 | # - id: reorder-python-imports 75 | # args: [--application-directories=.:src] 76 | - repo: https://github.com/hadialqattan/pycln 77 | rev: v2.5.0 # Possible releases: https://github.com/hadialqattan/pycln/tags 78 | hooks: 79 | - id: pycln 80 | args: [--config=pyproject.toml] 81 | - repo: local 82 | hooks: 83 | - id: pylint 84 | name: pylint 85 | entry: pylint --disable=use-implicit-booleaness-not-comparison,fixme,duplicate-code 86 | language: system 87 | types: [python] 88 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MESSAGES CONTROL] 2 | disable = duplicate-code,use-implicit-booleaness-not-comparison,fixme 3 | 4 | [MASTER] 5 | ignore=templates,docs 6 | disable = duplicate-code,use-implicit-booleaness-not-comparison,fixme 7 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Build documentation in the docs/ directory with Sphinx 8 | sphinx: 9 | configuration: docs/conf.py 10 | 11 | # Build documentation with MkDocs 12 | #mkdocs: 13 | # configuration: mkdocs.yml 14 | 15 | # Optionally build your docs in additional formats such as PDF 16 | formats: 17 | - pdf 18 | 19 | python: 20 | version: 3.8 21 | install: 22 | - requirements: docs/requirements.txt 23 | - {path: ., method: pip} 24 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Contributors 3 | ============ 4 | 5 | * Beto Dealmeida 6 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | 2 | ============ 3 | Contributing 4 | ============ 5 | 6 | The ``preset-cli`` project is an all-rights-reserved project. If you would like 7 | to contribute to the project, and you are not a member of the core development team 8 | or a contributor with write access to the repository, you will be given the option 9 | to sign a Contributor License Agreement (CLA) when you submit any pull requests. 10 | 11 | Issue Reports 12 | ============= 13 | 14 | If you experience bugs or general issues with ``preset-cli``, please report them in the 15 | `Preset Support Portal `_. 16 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2022 Preset Inc. 2 | 3 | All rights reserved. 
4 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | pyenv: .python-version 2 | 3 | .python-version: setup.cfg 4 | if [ -z "`pyenv virtualenvs | grep backend-sdk`" ]; then\ 5 | pyenv virtualenv backend-sdk;\ 6 | fi 7 | if [ ! -f .python-version ]; then\ 8 | pyenv local backend-sdk;\ 9 | fi 10 | pip install -e '.[testing]' 11 | touch .python-version 12 | 13 | test: pyenv 14 | pytest --cov=src/preset_cli -vv tests/ --doctest-modules src/preset_cli 15 | 16 | clean: 17 | pyenv virtualenv-delete backend-sdk 18 | 19 | spellcheck: 20 | codespell -S "*.json" src/preset_cli docs/*rst tests templates 21 | 22 | requirements.txt: .python-version requirements.in setup.cfg 23 | pip install --upgrade pip 24 | pip-compile --no-annotate 25 | 26 | dev-requirements.txt: dev-requirements.in setup.cfg 27 | pip-compile dev-requirements.in --no-annotate 28 | 29 | check: 30 | pre-commit run --all-files 31 | -------------------------------------------------------------------------------- /dev-requirements.in: -------------------------------------------------------------------------------- 1 | -e file:.[testing] 2 | -------------------------------------------------------------------------------- /dev-requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.11 3 | # by the following command: 4 | # 5 | # pip-compile --no-annotate dev-requirements.in 6 | # 7 | -e file:. 8 | aiohttp==3.8.3 9 | aiosignal==1.2.0 10 | appdirs==1.4.4 11 | astroid==2.12.12 12 | async-timeout==4.0.2 13 | attrs==22.1.0 14 | backoff==2.2.1 15 | beautifulsoup4==4.11.1 16 | build==0.8.0 17 | certifi==2021.10.8 18 | cfgv==3.3.1 19 | charset-normalizer==2.0.12 20 | click==8.1.2 21 | codespell==2.1.0 22 | commonmark==0.9.1 23 | coverage[toml]==6.4.3 24 | cython==0.29.32 25 | dill==0.3.6 26 | distlib==0.3.5 27 | filelock==3.8.0 28 | freezegun==1.2.2 29 | frozenlist==1.3.1 30 | greenlet==1.1.3.post0 31 | identify==2.5.3 32 | idna==3.3 33 | iniconfig==1.1.1 34 | isort==5.10.1 35 | jinja2==3.1.2 36 | lazy-object-proxy==1.8.0 37 | markupsafe==2.1.1 38 | marshmallow==3.17.0 39 | mccabe==0.7.0 40 | multidict==6.0.2 41 | nodeenv==1.7.0 42 | numpy==1.23.1 43 | packaging==21.3 44 | pandas==1.4.3 45 | pep517==0.13.0 46 | pip-tools==6.13.0 47 | platformdirs==2.5.2 48 | pluggy==1.0.0 49 | pre-commit==2.20.0 50 | prison==0.2.1 51 | prompt-toolkit==3.0.30 52 | py==1.11.0 53 | pyfakefs==4.6.3 54 | pygments==2.12.0 55 | pylint==2.15.5 56 | pyparsing==3.0.9 57 | pytest==7.1.2 58 | pytest-cov==3.0.0 59 | pytest-mock==3.8.2 60 | python-dateutil==2.8.2 61 | python-graphql-client==0.4.3 62 | pytz==2022.2 63 | pyyaml==6.0 64 | requests==2.27.1 65 | requests-mock==1.9.3 66 | rich==12.5.1 67 | six==1.16.0 68 | soupsieve==2.3.2.post1 69 | sqlalchemy==1.4.40 70 | sqlglot==26.23.0 71 | tabulate==0.8.10 72 | toml==0.10.2 73 | tomli==2.0.1 74 | tomlkit==0.11.6 75 | typing-extensions==4.3.0 76 | urllib3==1.26.9 77 | virtualenv==20.16.3 78 | wcwidth==0.2.5 79 | websockets==10.3 80 | wheel==0.37.1 81 | wrapt==1.14.1 82 | yarl==1.8.1 83 | 84 | # The following packages are considered to be unsafe in a requirements file: 85 | # pip 86 | # setuptools 87 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for 
Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | AUTODOCDIR = api 11 | 12 | # User-friendly check for sphinx-build 13 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $?), 1) 14 | $(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from https://sphinx-doc.org/") 15 | endif 16 | 17 | .PHONY: help clean Makefile 18 | 19 | # Put it first so that "make" without argument is like "make help". 20 | help: 21 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 22 | 23 | clean: 24 | rm -rf $(BUILDDIR)/* $(AUTODOCDIR) 25 | 26 | # Catch-all target: route all unknown targets to Sphinx using the new 27 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 28 | %: Makefile 29 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 30 | -------------------------------------------------------------------------------- /docs/_static/.gitignore: -------------------------------------------------------------------------------- 1 | # Empty directory 2 | -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. _authors: 2 | .. include:: ../AUTHORS.rst 3 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. _changes: 2 | .. include:: ../CHANGELOG.rst 3 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # This file is execfile()d with the current directory set to its containing dir. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | # 7 | # All configuration values have a default; values that are commented out 8 | # serve to show the default. 9 | 10 | import os 11 | import sys 12 | import shutil 13 | 14 | # -- Path setup -------------------------------------------------------------- 15 | 16 | __location__ = os.path.dirname(__file__) 17 | 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | sys.path.insert(0, os.path.join(__location__, "../src")) 22 | 23 | # -- Run sphinx-apidoc ------------------------------------------------------- 24 | # This hack is necessary since RTD does not issue `sphinx-apidoc` before running 25 | # `sphinx-build -b html . _build/html`. See Issue: 26 | # https://github.com/readthedocs/readthedocs.org/issues/1139 27 | # DON'T FORGET: Check the box "Install your project inside a virtualenv using 28 | # setup.py install" in the RTD Advanced Settings. 
29 | # Additionally it helps us to avoid running apidoc manually 30 | 31 | try: # for Sphinx >= 1.7 32 | from sphinx.ext import apidoc 33 | except ImportError: 34 | from sphinx import apidoc 35 | 36 | output_dir = os.path.join(__location__, "api") 37 | module_dir = os.path.join(__location__, "../src/preset_cli") 38 | try: 39 | shutil.rmtree(output_dir) 40 | except FileNotFoundError: 41 | pass 42 | 43 | try: 44 | import sphinx 45 | 46 | cmd_line = f"sphinx-apidoc --implicit-namespaces -f -o {output_dir} {module_dir}" 47 | 48 | args = cmd_line.split(" ") 49 | if tuple(sphinx.__version__.split(".")) >= ("1", "7"): 50 | # This is a rudimentary parse_version to avoid external dependencies 51 | args = args[1:] 52 | 53 | apidoc.main(args) 54 | except Exception as e: 55 | print("Running `sphinx-apidoc` failed!\n{}".format(e)) 56 | 57 | # -- General configuration --------------------------------------------------- 58 | 59 | # If your documentation needs a minimal Sphinx version, state it here. 60 | # needs_sphinx = '1.0' 61 | 62 | # Add any Sphinx extension module names here, as strings. They can be extensions 63 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 64 | extensions = [ 65 | "sphinx.ext.autodoc", 66 | "sphinx.ext.intersphinx", 67 | "sphinx.ext.todo", 68 | "sphinx.ext.autosummary", 69 | "sphinx.ext.viewcode", 70 | "sphinx.ext.coverage", 71 | "sphinx.ext.doctest", 72 | "sphinx.ext.ifconfig", 73 | "sphinx.ext.mathjax", 74 | "sphinx.ext.napoleon", 75 | ] 76 | 77 | # Add any paths that contain templates here, relative to this directory. 78 | templates_path = ["_templates"] 79 | 80 | # The suffix of source filenames. 81 | source_suffix = ".rst" 82 | 83 | # The encoding of source files. 84 | # source_encoding = 'utf-8-sig' 85 | 86 | # The master toctree document. 87 | master_doc = "index" 88 | 89 | # General information about the project. 90 | project = "preset-cli" 91 | copyright = "2022, Beto Dealmeida" 92 | 93 | # The version info for the project you're documenting, acts as replacement for 94 | # |version| and |release|, also used in various other places throughout the 95 | # built documents. 96 | # 97 | # version: The short X.Y version. 98 | # release: The full version, including alpha/beta/rc tags. 99 | # If you don’t need the separation provided between version and release, 100 | # just set them both to the same value. 101 | try: 102 | from preset_cli import __version__ as version 103 | except ImportError: 104 | version = "" 105 | 106 | if not version or version.lower() == "unknown": 107 | version = os.getenv("READTHEDOCS_VERSION", "unknown") # automatically set by RTD 108 | 109 | release = version 110 | 111 | # The language for content autogenerated by Sphinx. Refer to documentation 112 | # for a list of supported languages. 113 | # language = None 114 | 115 | # There are two options for replacing |today|: either, you set today to some 116 | # non-false value, then it is used: 117 | # today = '' 118 | # Else, today_fmt is used as the format for a strftime call. 119 | # today_fmt = '%B %d, %Y' 120 | 121 | # List of patterns, relative to source directory, that match files and 122 | # directories to ignore when looking for source files. 123 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv"] 124 | 125 | # The reST default role (used for this markup: `text`) to use for all documents. 126 | # default_role = None 127 | 128 | # If true, '()' will be appended to :func: etc. cross-reference text. 
129 | # add_function_parentheses = True 130 | 131 | # If true, the current module name will be prepended to all description 132 | # unit titles (such as .. function::). 133 | # add_module_names = True 134 | 135 | # If true, sectionauthor and moduleauthor directives will be shown in the 136 | # output. They are ignored by default. 137 | # show_authors = False 138 | 139 | # The name of the Pygments (syntax highlighting) style to use. 140 | pygments_style = "sphinx" 141 | 142 | # A list of ignored prefixes for module index sorting. 143 | # modindex_common_prefix = [] 144 | 145 | # If true, keep warnings as "system message" paragraphs in the built documents. 146 | # keep_warnings = False 147 | 148 | # If this is True, todo emits a warning for each TODO entries. The default is False. 149 | todo_emit_warnings = True 150 | 151 | 152 | # -- Options for HTML output ------------------------------------------------- 153 | 154 | # The theme to use for HTML and HTML Help pages. See the documentation for 155 | # a list of builtin themes. 156 | html_theme = "alabaster" 157 | 158 | # Theme options are theme-specific and customize the look and feel of a theme 159 | # further. For a list of options available for each theme, see the 160 | # documentation. 161 | html_theme_options = { 162 | "sidebar_width": "300px", 163 | "page_width": "1200px" 164 | } 165 | 166 | # Add any paths that contain custom themes here, relative to this directory. 167 | # html_theme_path = [] 168 | 169 | # The name for this set of Sphinx documents. If None, it defaults to 170 | # " v documentation". 171 | # html_title = None 172 | 173 | # A shorter title for the navigation bar. Default is the same as html_title. 174 | # html_short_title = None 175 | 176 | # The name of an image file (relative to this directory) to place at the top 177 | # of the sidebar. 178 | # html_logo = "" 179 | 180 | # The name of an image file (within the static path) to use as favicon of the 181 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 182 | # pixels large. 183 | # html_favicon = None 184 | 185 | # Add any paths that contain custom static files (such as style sheets) here, 186 | # relative to this directory. They are copied after the builtin static files, 187 | # so a file named "default.css" will overwrite the builtin "default.css". 188 | html_static_path = ["_static"] 189 | 190 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 191 | # using the given strftime format. 192 | # html_last_updated_fmt = '%b %d, %Y' 193 | 194 | # If true, SmartyPants will be used to convert quotes and dashes to 195 | # typographically correct entities. 196 | # html_use_smartypants = True 197 | 198 | # Custom sidebar templates, maps document names to template names. 199 | # html_sidebars = {} 200 | 201 | # Additional templates that should be rendered to pages, maps page names to 202 | # template names. 203 | # html_additional_pages = {} 204 | 205 | # If false, no module index is generated. 206 | # html_domain_indices = True 207 | 208 | # If false, no index is generated. 209 | # html_use_index = True 210 | 211 | # If true, the index is split into individual pages for each letter. 212 | # html_split_index = False 213 | 214 | # If true, links to the reST sources are added to the pages. 215 | # html_show_sourcelink = True 216 | 217 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 218 | # html_show_sphinx = True 219 | 220 | # If true, "(C) Copyright ..." is shown in the HTML footer. 
Default is True. 221 | # html_show_copyright = True 222 | 223 | # If true, an OpenSearch description file will be output, and all pages will 224 | # contain a tag referring to it. The value of this option must be the 225 | # base URL from which the finished HTML is served. 226 | # html_use_opensearch = '' 227 | 228 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 229 | # html_file_suffix = None 230 | 231 | # Output file base name for HTML help builder. 232 | htmlhelp_basename = "preset-cli-doc" 233 | 234 | 235 | # -- Options for LaTeX output ------------------------------------------------ 236 | 237 | latex_elements = { 238 | # The paper size ("letterpaper" or "a4paper"). 239 | # "papersize": "letterpaper", 240 | # The font size ("10pt", "11pt" or "12pt"). 241 | # "pointsize": "10pt", 242 | # Additional stuff for the LaTeX preamble. 243 | # "preamble": "", 244 | } 245 | 246 | # Grouping the document tree into LaTeX files. List of tuples 247 | # (source start file, target name, title, author, documentclass [howto/manual]). 248 | latex_documents = [ 249 | ("index", "user_guide.tex", "preset-cli Documentation", "Beto Dealmeida", "manual") 250 | ] 251 | 252 | # The name of an image file (relative to this directory) to place at the top of 253 | # the title page. 254 | # latex_logo = "" 255 | 256 | # For "manual" documents, if this is true, then toplevel headings are parts, 257 | # not chapters. 258 | # latex_use_parts = False 259 | 260 | # If true, show page references after internal links. 261 | # latex_show_pagerefs = False 262 | 263 | # If true, show URL addresses after external links. 264 | # latex_show_urls = False 265 | 266 | # Documents to append as an appendix to all manuals. 267 | # latex_appendices = [] 268 | 269 | # If false, no module index is generated. 270 | # latex_domain_indices = True 271 | 272 | # -- External mapping -------------------------------------------------------- 273 | python_version = ".".join(map(str, sys.version_info[0:2])) 274 | intersphinx_mapping = { 275 | "sphinx": ("https://www.sphinx-doc.org/en/master", None), 276 | "python": ("https://docs.python.org/" + python_version, None), 277 | "matplotlib": ("https://matplotlib.org", None), 278 | "numpy": ("https://numpy.org/doc/stable", None), 279 | "sklearn": ("https://scikit-learn.org/stable", None), 280 | "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), 281 | "scipy": ("https://docs.scipy.org/doc/scipy/reference", None), 282 | "setuptools": ("https://setuptools.pypa.io/en/stable/", None), 283 | "pyscaffold": ("https://pyscaffold.org/en/stable", None), 284 | } 285 | 286 | print(f"loading configurations for {project} {version} ...", file=sys.stderr) 287 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /docs/images/export_dashboards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/docs/images/export_dashboards.png -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | preset-cli 3 | ========== 4 | 5 | This is the documentation of **preset-cli**. 6 | 7 | .. 
note:: 8 | 9 | This is the main page of your project's `Sphinx`_ documentation. 10 | It is formatted in `reStructuredText`_. Add additional pages 11 | by creating rst-files in ``docs`` and adding them to the `toctree`_ below. 12 | Use then `references`_ in order to link them from this page, e.g. 13 | :ref:`authors` and :ref:`changes`. 14 | 15 | It is also possible to refer to the documentation of other Python packages 16 | with the `Python domain syntax`_. By default you can reference the 17 | documentation of `Sphinx`_, `Python`_, `NumPy`_, `SciPy`_, `matplotlib`_, 18 | `Pandas`_, `Scikit-Learn`_. You can add more by extending the 19 | ``intersphinx_mapping`` in your Sphinx's ``conf.py``. 20 | 21 | The pretty useful extension `autodoc`_ is activated by default and lets 22 | you include documentation from docstrings. Docstrings can be written in 23 | `Google style`_ (recommended!), `NumPy style`_ and `classical style`_. 24 | 25 | 26 | Contents 27 | ======== 28 | 29 | .. toctree:: 30 | :maxdepth: 2 31 | 32 | Overview 33 | Contributions & Help 34 | License 35 | Authors 36 | Changelog 37 | Module Reference 38 | 39 | 40 | Indices and tables 41 | ================== 42 | 43 | * :ref:`genindex` 44 | * :ref:`modindex` 45 | * :ref:`search` 46 | 47 | .. _toctree: https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html 48 | .. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html 49 | .. _references: https://www.sphinx-doc.org/en/stable/markup/inline.html 50 | .. _Python domain syntax: https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#the-python-domain 51 | .. _Sphinx: https://www.sphinx-doc.org/ 52 | .. _Python: https://docs.python.org/ 53 | .. _Numpy: https://numpy.org/doc/stable 54 | .. _SciPy: https://docs.scipy.org/doc/scipy/reference/ 55 | .. _matplotlib: https://matplotlib.org/contents.html# 56 | .. _Pandas: https://pandas.pydata.org/pandas-docs/stable 57 | .. _Scikit-Learn: https://scikit-learn.org/stable 58 | .. _autodoc: https://www.sphinx-doc.org/en/master/ext/autodoc.html 59 | .. _Google style: https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings 60 | .. _NumPy style: https://numpydoc.readthedocs.io/en/latest/format.html 61 | .. _classical style: https://www.sphinx-doc.org/en/master/domains.html#info-field-lists 62 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | .. _license: 2 | 3 | ======= 4 | License 5 | ======= 6 | 7 | .. include:: ../LICENSE.txt 8 | -------------------------------------------------------------------------------- /docs/readme.rst: -------------------------------------------------------------------------------- 1 | .. _readme: 2 | .. include:: ../README.rst 3 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | # Requirements file for ReadTheDocs, check .readthedocs.yml. 2 | # To build the module reference correctly, make sure every external package 3 | # under `install_requires` in `setup.cfg` is also listed here! 
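# For example, setup.cfg lists sqlalchemy>=1.4,<2 under install_requires, so a
# standalone docs build would need sqlalchemy pinned here as well; this project
# sidesteps that by also installing itself on RTD via .readthedocs.yml
# ({path: ., method: pip}).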
4 | sphinx>=3.2.1 5 | # sphinx_rtd_theme 6 | -------------------------------------------------------------------------------- /examples/exports/charts/Total_count_134.yaml: -------------------------------------------------------------------------------- 1 | {% if country %} 2 | slice_name: {{ functions.demo.hello_world() }} Count in {{ country }} 3 | {% else %} 4 | slice_name: Total count 5 | {% endif %} 6 | viz_type: big_number_total 7 | params: 8 | {% if country %} 9 | adhoc_filters: 10 | - clause: WHERE 11 | comparator: null 12 | expressionType: SQL 13 | filterOptionName: filter_osfx5u3a5ks_qu7tlefn04j 14 | isExtra: false 15 | isNew: false 16 | operator: null 17 | sqlExpression: country = '{{ country }}' 18 | subject: null 19 | {% else %} 20 | adhoc_filters: [] 21 | {% endif %} 22 | datasource: 27__table 23 | extra_form_data: {} 24 | header_font_size: 0.4 25 | metric: count 26 | slice_id: 134 27 | subheader_font_size: 0.15 28 | time_format: smart_date 29 | time_grain_sqla: P1D 30 | time_range: No filter 31 | time_range_endpoints: 32 | - inclusive 33 | - exclusive 34 | url_params: {} 35 | viz_type: big_number_total 36 | y_axis_format: SMART_NUMBER 37 | cache_timeout: null 38 | uuid: 3f966611-8afc-4841-abdc-fa4361ff69f8 39 | version: 1.0.0 40 | dataset_uuid: fe3bd066-ab04-4ab8-a89a-30bf3d8423b4 41 | -------------------------------------------------------------------------------- /examples/exports/dashboards/White_label_test.yaml: -------------------------------------------------------------------------------- 1 | dashboard_title: White label test {{ country }} 2 | description: null 3 | css: null 4 | slug: null 5 | uuid: c64ca0c9-5cdf-46d8-a0b2-7b269b10f36c 6 | position: 7 | DASHBOARD_VERSION_KEY: v2 8 | ROOT_ID: 9 | children: 10 | - GRID_ID 11 | id: ROOT_ID 12 | type: ROOT 13 | GRID_ID: 14 | children: 15 | - ROW-N-OZVI6W96 16 | id: GRID_ID 17 | parents: 18 | - ROOT_ID 19 | type: GRID 20 | HEADER_ID: 21 | id: HEADER_ID 22 | meta: 23 | text: White label test 24 | type: HEADER 25 | ROW-N-OZVI6W96: 26 | children: 27 | - CHART-BVI44PWH 28 | id: ROW-N-OZVI6W96 29 | meta: 30 | '0': ROOT_ID 31 | background: BACKGROUND_TRANSPARENT 32 | type: ROW 33 | parents: 34 | - ROOT_ID 35 | - GRID_ID 36 | CHART-BVI44PWH: 37 | children: [] 38 | id: CHART-BVI44PWH 39 | meta: 40 | chartId: 134 41 | height: 50 42 | sliceName: Total count 43 | uuid: 3f966611-8afc-4841-abdc-fa4361ff69f8 44 | width: 4 45 | type: CHART 46 | parents: 47 | - ROOT_ID 48 | - GRID_ID 49 | - ROW-N-OZVI6W96 50 | version: 1.0.0 51 | -------------------------------------------------------------------------------- /examples/exports/databases/Google_Sheets.yaml: -------------------------------------------------------------------------------- 1 | database_name: Google Sheets 2 | sqlalchemy_uri: gsheets:// 3 | cache_timeout: null 4 | expose_in_sqllab: true 5 | allow_run_async: false 6 | allow_ctas: false 7 | allow_cvas: false 8 | allow_file_upload: false 9 | extra: 10 | engine_params: 11 | catalog: 12 | country_cnt: https://docs.google.com/spreadsheets/d/1LcWZMsdCl92g7nA-D6qGRqg1T5TiHyuKJUY1u9XAnsk/edit#gid=0 13 | metadata_params: {} 14 | schemas_allowed_for_file_upload: [] 15 | uuid: 7737158f-bce3-4350-a2dc-85ca6a2998a0 16 | version: 1.0.0 17 | -------------------------------------------------------------------------------- /examples/exports/datasets/Google_Sheets/country_cnt.yaml: -------------------------------------------------------------------------------- 1 | table_name: country_cnt 2 | main_dttm_col: null 3 | description: null 4 | 
default_endpoint: null 5 | offset: 0 6 | cache_timeout: null 7 | schema: main 8 | sql: null 9 | params: null 10 | template_params: null 11 | filter_select_enabled: false 12 | fetch_values_predicate: null 13 | extra: null 14 | uuid: fe3bd066-ab04-4ab8-a89a-30bf3d8423b4 15 | metrics: 16 | - metric_name: count 17 | verbose_name: COUNT(*) 18 | metric_type: count 19 | expression: COUNT(*) 20 | description: null 21 | d3format: null 22 | extra: null 23 | warning_text: null 24 | columns: 25 | - column_name: cnt 26 | verbose_name: null 27 | is_dttm: false 28 | is_active: true 29 | type: REAL 30 | groupby: true 31 | filterable: true 32 | expression: null 33 | description: null 34 | python_date_format: null 35 | extra: null 36 | - column_name: country 37 | verbose_name: null 38 | is_dttm: false 39 | is_active: true 40 | type: TEXT 41 | groupby: true 42 | filterable: true 43 | expression: null 44 | description: null 45 | python_date_format: null 46 | extra: null 47 | version: 1.0.0 48 | database_uuid: 7737158f-bce3-4350-a2dc-85ca6a2998a0 49 | -------------------------------------------------------------------------------- /examples/exports/functions/demo.py: -------------------------------------------------------------------------------- 1 | """ 2 | Demo functions. 3 | """ 4 | 5 | 6 | def hello_world() -> str: 7 | """ 8 | Simple demo function. 9 | """ 10 | return "Hello, world!" 11 | -------------------------------------------------------------------------------- /examples/exports/metadata.yaml: -------------------------------------------------------------------------------- 1 | version: 1.0.0 2 | timestamp: '2020-12-11T22:52:56.534241+00:00' 3 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | # AVOID CHANGING REQUIRES: IT WILL BE UPDATED BY PYSCAFFOLD! 3 | requires = ["setuptools>=46.1.0", "setuptools_scm[toml]>=5", "wheel"] 4 | build-backend = "setuptools.build_meta" 5 | 6 | [tool.setuptools_scm] 7 | # For smarter version schemes and other configuration options, 8 | # check out https://github.com/pypa/setuptools_scm 9 | version_scheme = "no-guess-dev" 10 | -------------------------------------------------------------------------------- /requirements.in: -------------------------------------------------------------------------------- 1 | -e file:. 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.11 3 | # by the following command: 4 | # 5 | # pip-compile --no-annotate 6 | # 7 | -e file:. 
8 | aiohttp==3.8.3 9 | aiosignal==1.2.0 10 | appdirs==1.4.4 11 | async-timeout==4.0.2 12 | attrs==22.1.0 13 | backoff==2.2.1 14 | beautifulsoup4==4.11.1 15 | certifi==2021.10.8 16 | charset-normalizer==2.0.12 17 | click==8.1.2 18 | commonmark==0.9.1 19 | cython==0.29.28 20 | frozenlist==1.3.1 21 | greenlet==1.1.3.post0 22 | idna==3.3 23 | jinja2==3.1.1 24 | markupsafe==2.1.1 25 | marshmallow==3.17.0 26 | multidict==6.0.2 27 | numpy==1.22.3 28 | packaging==21.3 29 | pandas==1.4.2 30 | prison==0.2.1 31 | prompt-toolkit==3.0.29 32 | pygments==2.12.0 33 | pyparsing==3.0.9 34 | python-dateutil==2.8.2 35 | python-graphql-client==0.4.3 36 | pytz==2022.1 37 | pyyaml==6.0 38 | requests==2.27.1 39 | rich==12.3.0 40 | six==1.16.0 41 | soupsieve==2.3.2.post1 42 | sqlalchemy==1.4.35 43 | sqlglot==26.23.0 44 | tabulate==0.8.9 45 | typing-extensions==4.2.0 46 | urllib3==1.26.9 47 | wcwidth==0.2.5 48 | websockets==10.3 49 | yarl==1.7.2 50 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | # This file is used to configure your project. 2 | # Read more about the various options under: 3 | # https://setuptools.pypa.io/en/latest/userguide/declarative_config.html 4 | # https://setuptools.pypa.io/en/latest/references/keywords.html 5 | 6 | [metadata] 7 | name = preset-cli 8 | description = A CLI to interact with Preset (https://preset.io/) workspaces. 9 | author = Beto Dealmeida 10 | author_email = beto@preset.io 11 | license = Other/Proprietary License 12 | license_files = LICENSE.txt 13 | long_description = file: README.rst 14 | long_description_content_type = text/x-rst; charset=UTF-8 15 | url = https://github.com/preset-io/backend-sdk 16 | # Add here related links, for example: 17 | project_urls = 18 | Documentation = https://github.com/preset-io/backend-sdk/blob/master/README.rst 19 | # Source = https://github.com/pyscaffold/pyscaffold/ 20 | # Changelog = https://pyscaffold.org/en/latest/changelog.html 21 | # Tracker = https://github.com/pyscaffold/pyscaffold/issues 22 | # Conda-Forge = https://anaconda.org/conda-forge/pyscaffold 23 | # Download = https://pypi.org/project/PyScaffold/#files 24 | # Twitter = https://twitter.com/PyScaffold 25 | 26 | # Change if running only on Windows, Mac or Linux (comma-separated) 27 | platforms = any 28 | 29 | # Add here all kinds of additional classifiers as defined under 30 | # https://pypi.org/classifiers/ 31 | classifiers = 32 | Development Status :: 4 - Beta 33 | Programming Language :: Python 34 | Programming Language :: Python :: 3.8 35 | Programming Language :: Python :: 3.9 36 | Programming Language :: Python :: 3.10 37 | Programming Language :: Python :: 3.11 38 | License :: Other/Proprietary License 39 | 40 | 41 | [options] 42 | zip_safe = False 43 | packages = find_namespace: 44 | include_package_data = True 45 | package_dir = 46 | =src 47 | 48 | # Require a min/specific Python version (comma-separated conditions) 49 | # python_requires = >=3.8 50 | 51 | # Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0. 52 | # Version specifiers like >=2.2,<3.0 avoid problems due to API changes in 53 | # new major versions. This works if the required packages follow Semantic Versioning. 54 | # For more information, check out https://semver.org/. 
55 | install_requires = 56 | importlib-metadata; python_version<"3.8" 57 | Cython>=0.29.26 58 | PyYAML>=6.0 59 | appdirs>=1.4.4 60 | backoff>=1.10.0 61 | beautifulsoup4>=4.10.0 62 | click>=8.0.3 63 | jinja2>=3.0.3 64 | marshmallow>=3.17.0 65 | numpy>=1.21.5 66 | pandas>=1.3.5 67 | prison>=0.2.1 68 | prompt-toolkit>=3.0.24 69 | pygments>=2.11.2 70 | python-graphql-client>=0.4.3 71 | requests>=2.26.0 72 | rich>=12.3.0 73 | sqlalchemy>=1.4,<2 74 | sqlglot>=26 75 | tabulate>=0.8.9 76 | typing-extensions>=4.0.1 77 | yarl>=1.7.2 78 | greenlet>=1.1.3 # required for Python 3.11 79 | aiohttp>=3.8.3 80 | 81 | [options.packages.find] 82 | where = src 83 | exclude = 84 | tests 85 | 86 | [options.extras_require] 87 | # TODO: Implement additional optional dependencies 88 | snowflake = snowflake-sqlalchemy==1.4.4 89 | 90 | # Add here test requirements (semicolon/line-separated) 91 | testing = 92 | setuptools 93 | freezegun 94 | pytest 95 | pytest-cov 96 | pytest-mock 97 | pyfakefs 98 | requests-mock 99 | codespell 100 | pre-commit 101 | pip-tools>=6.6.0 102 | pylint==2.15.5 103 | 104 | [options.entry_points] 105 | # Add here console scripts like: 106 | # console_scripts = 107 | # script_name = preset_cli.module:function 108 | # For example: 109 | # console_scripts = 110 | # fibonacci = preset_cli.skeleton:run 111 | # And any other entry points, for example: 112 | # pyscaffold.cli = 113 | # awesome = pyscaffoldext.awesome.extension:AwesomeExtension 114 | console_scripts = 115 | preset-cli = preset_cli.cli.main:preset_cli 116 | superset-cli = preset_cli.cli.superset.main:superset_cli 117 | 118 | [tool:pytest] 119 | # Specify command line options as you would do when invoking pytest directly. 120 | # e.g. --cov-report html (or xml) for html/xml output or --junitxml junit.xml 121 | # in order to write a coverage file that can be read by Jenkins. 122 | # CAUTION: --cov flags may prohibit setting breakpoints while debugging. 123 | # Comment those flags to avoid this pytest issue. 124 | addopts = 125 | --cov preset_cli --cov-report term-missing 126 | --verbose 127 | norecursedirs = 128 | dist 129 | build 130 | .tox 131 | testpaths = tests 132 | # Use pytest markers to select/deselect specific tests 133 | # markers = 134 | # slow: mark tests as slow (deselect with '-m "not slow"') 135 | # system: mark end-to-end system tests 136 | 137 | [devpi:upload] 138 | # Options for the devpi: PyPI server and packaging tool 139 | # VCS export must be deactivated since we are using setuptools-scm 140 | no_vcs = 1 141 | formats = bdist_wheel 142 | 143 | [flake8] 144 | # Some sane defaults for the code style checker flake8 145 | max_line_length = 88 146 | extend_ignore = E203, W503 147 | # ^ Black-compatible 148 | # E203 and W503 have edge cases handled by black 149 | exclude = 150 | .tox 151 | build 152 | dist 153 | .eggs 154 | docs/conf.py 155 | 156 | [pyscaffold] 157 | # PyScaffold's parameters when the project was created. 158 | # This will be used when updating. Do not change! 159 | version = 4.1.4 160 | package = preset_cli 161 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | Setup file for preset-cli. 3 | Use setup.cfg to configure your project. 4 | 5 | This file was generated with PyScaffold 4.1.4. 6 | PyScaffold helps you to put up the scaffold of your new Python project. 
7 | Learn more under: https://pyscaffold.org/ 8 | """ 9 | from setuptools import setup 10 | 11 | if __name__ == "__main__": 12 | try: 13 | setup(use_scm_version={"version_scheme": "no-guess-dev"}) 14 | except: # noqa 15 | print( 16 | "\n\nAn error occurred while building the project, " 17 | "please ensure you have the most updated version of setuptools, " 18 | "setuptools_scm and wheel with:\n" 19 | " pip install -U setuptools setuptools_scm wheel\n\n", 20 | ) 21 | raise 22 | -------------------------------------------------------------------------------- /src/preset_cli/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Package version. 3 | """ 4 | 5 | from importlib.metadata import PackageNotFoundError, version # pragma: no cover 6 | 7 | try: 8 | # Change here if project is renamed and does not equal the package name 9 | dist_name = "preset-cli" # pylint: disable=C0103 10 | __version__ = version(dist_name) 11 | except PackageNotFoundError: # pragma: no cover 12 | __version__ = "unknown" 13 | finally: 14 | del version, PackageNotFoundError 15 | -------------------------------------------------------------------------------- /src/preset_cli/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/src/preset_cli/api/__init__.py -------------------------------------------------------------------------------- /src/preset_cli/api/clients/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/src/preset_cli/api/clients/__init__.py -------------------------------------------------------------------------------- /src/preset_cli/api/clients/preset.py: -------------------------------------------------------------------------------- 1 | """ 2 | A simple client for interacting with the Preset API. 3 | """ 4 | 5 | import json 6 | import logging 7 | from enum import Enum 8 | from typing import Any, Dict, Iterator, List, Optional, Union 9 | 10 | import prison 11 | from yarl import URL 12 | 13 | from preset_cli import __version__ 14 | from preset_cli.auth.main import Auth 15 | from preset_cli.lib import validate_response 16 | from preset_cli.typing import UserType 17 | 18 | _logger = logging.getLogger(__name__) 19 | 20 | MANAGER_MAX_PAGE_SIZE = 250 21 | SUPERSET_MAX_PAGE_SIZE = 100 22 | 23 | 24 | class Role(int, Enum): 25 | """ 26 | Roles for users. 27 | """ 28 | 29 | ADMIN = 1 30 | USER = 2 31 | 32 | 33 | class PresetClient: # pylint: disable=too-few-public-methods 34 | 35 | """ 36 | A client for the Preset API. 37 | """ 38 | 39 | def __init__(self, baseurl: Union[str, URL], auth: Auth): 40 | # convert to URL if necessary 41 | self.baseurl = URL(baseurl) 42 | self.auth = auth 43 | 44 | self.session = auth.session 45 | self.session.headers.update(auth.get_headers()) 46 | self.session.headers["User-Agent"] = "Preset CLI" 47 | self.session.headers["X-Client-Version"] = __version__ 48 | 49 | def get_teams(self) -> List[Any]: 50 | """ 51 | Retrieve all teams based on membership. 
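# The Bearer JWT from auth.get_headers() is already attached to the shared
# session in __init__, so the GET to {baseurl}/v1/teams below needs no
# per-request authentication handling.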
52 | """ 53 | url = self.get_base_url() / "teams" 54 | _logger.debug("GET %s", url) 55 | response = self.session.get(url) 56 | validate_response(response) 57 | 58 | payload = response.json() 59 | teams = payload["payload"] 60 | 61 | return teams 62 | 63 | def get_team_members(self, team_name: str) -> List[Any]: 64 | """ 65 | Retrieve all users for a given team. 66 | """ 67 | users = [] 68 | page_number = 1 69 | while True: 70 | params = {"page_number": page_number, "page_size": MANAGER_MAX_PAGE_SIZE} 71 | url = self.get_base_url() / "teams" / team_name / "memberships" % params 72 | _logger.debug("GET %s", url) 73 | response = self.session.get(url) 74 | validate_response(response) 75 | 76 | payload = response.json() 77 | users.extend(payload["payload"]) 78 | 79 | if payload["meta"]["count"] <= page_number * MANAGER_MAX_PAGE_SIZE: 80 | break 81 | 82 | page_number += 1 83 | 84 | return users 85 | 86 | def get_workspaces(self, team_name: str) -> List[Any]: 87 | """ 88 | Retrieve all workspaces for a given team. 89 | """ 90 | url = self.get_base_url() / "teams" / team_name / "workspaces" 91 | _logger.debug("GET %s", url) 92 | response = self.session.get(url) 93 | validate_response(response) 94 | 95 | payload = response.json() 96 | workspaces = payload["payload"] 97 | 98 | return workspaces 99 | 100 | def invite_users( 101 | self, 102 | teams: List[str], 103 | emails: List[str], 104 | role_id=Role.USER, 105 | ) -> None: 106 | """ 107 | Invite users to teams. 108 | """ 109 | for team in teams: 110 | url = self.get_base_url() / "teams" / team / "invites/many" 111 | payload = { 112 | "invites": [ 113 | {"team_role_id": role_id, "email": email} for email in emails 114 | ], 115 | } 116 | _logger.debug("POST %s\n%s", url, json.dumps(payload, indent=4)) 117 | response = self.session.post(url, json=payload) 118 | validate_response(response) 119 | 120 | # pylint: disable=too-many-locals 121 | def export_users(self, workspace_url: URL) -> Iterator[UserType]: 122 | """ 123 | Return all users from a given workspace. 
124 | """ 125 | team_name: Optional[str] = None 126 | workspace_id: Optional[int] = None 127 | 128 | for team in self.get_teams(): 129 | for workspace in self.get_workspaces(team["name"]): 130 | if workspace_url.host == workspace["hostname"]: 131 | team_name = team["name"] 132 | workspace_id = workspace["id"] 133 | break 134 | 135 | if team_name is None or workspace_id is None: 136 | raise Exception("Unable to find workspace and/or team") 137 | 138 | workspace_membership: List[UserType] = [] 139 | page_number = 1 140 | while True: 141 | params = {"page_number": page_number, "page_size": MANAGER_MAX_PAGE_SIZE} 142 | url = ( 143 | self.get_base_url() 144 | / "teams" 145 | / team_name 146 | / "workspaces" 147 | / str(workspace_id) 148 | / "memberships" 149 | % params 150 | ) 151 | _logger.debug("GET %s", url) 152 | response = self.session.get(url) 153 | validate_response(response) 154 | payload = response.json() 155 | 156 | # Teams with SAML SSO might have emails with uppercase characters 157 | team_members: List[UserType] = [ 158 | { 159 | "id": 0, 160 | "username": payload["user"]["username"], 161 | "role": [], # TODO (betodealmeida) 162 | "first_name": payload["user"]["first_name"], 163 | "last_name": payload["user"]["last_name"], 164 | "email": payload["user"]["email"].lower(), 165 | } 166 | for payload in payload["payload"] 167 | ] 168 | workspace_membership.extend(team_members) 169 | 170 | if payload["meta"]["count"] <= page_number * MANAGER_MAX_PAGE_SIZE: 171 | break 172 | 173 | page_number += 1 174 | 175 | ids = {} 176 | page = 0 177 | while True: 178 | query = prison.dumps( 179 | { 180 | "page": page, 181 | "page_size": SUPERSET_MAX_PAGE_SIZE, 182 | }, 183 | ) 184 | url = workspace_url / "api/v1/chart/related/owners" % {"q": query} 185 | _logger.debug("GET %s", url) 186 | response = self.session.get(url) 187 | 188 | validate_response(response) 189 | payload = response.json() 190 | if not payload["result"]: 191 | break 192 | 193 | # Teams with SAML SSO might have emails with uppercase characters 194 | ids.update( 195 | { 196 | user["extra"]["email"].lower(): user["value"] 197 | for user in payload["result"] 198 | }, 199 | ) 200 | 201 | page += 1 202 | 203 | for team_member in workspace_membership: 204 | # pylint: disable=consider-using-f-string 205 | if team_member["email"] in ids: 206 | team_member["id"] = ids[team_member["email"]] 207 | yield team_member 208 | 209 | def import_users(self, teams: List[str], users: List[UserType]) -> None: 210 | """ 211 | Import users by adding them via SCIM. 
212 | """ 213 | for team in teams: 214 | url = self.get_base_url() / "teams" / team / "scim/v2/Users" 215 | for user in users: 216 | payload = { 217 | "schemas": [ 218 | "urn:ietf:params:scim:schemas:core:2.0:User", 219 | "urn:ietf:params:scim:schemas:extension:enterprise:2.0:User", 220 | ], 221 | "active": True, 222 | "displayName": f'{user["first_name"]} {user["last_name"]}', 223 | "emails": [ 224 | { 225 | "primary": True, 226 | "type": "work", 227 | "value": user["email"], 228 | }, 229 | ], 230 | "meta": {"resourceType": "User"}, 231 | "userName": user["email"], 232 | "name": { 233 | "formatted": f'{user["first_name"]} {user["last_name"]}', 234 | "familyName": user["last_name"], 235 | "givenName": user["first_name"], 236 | }, 237 | } 238 | self.session.headers["Content-Type"] = "application/scim+json" 239 | self.session.headers["Accept"] = "application/scim+json" 240 | _logger.info("Importing %s", user["email"]) 241 | _logger.debug("POST %s\n%s", url, json.dumps(payload, indent=4)) 242 | response = self.session.post(url, json=payload) 243 | 244 | # ignore existing users 245 | if response.status_code == 409: 246 | payload = response.json() 247 | _logger.info(payload["detail"]) 248 | continue 249 | 250 | validate_response(response) 251 | 252 | def change_team_role(self, team_name: str, user_id: int, role_id: int) -> None: 253 | """ 254 | Change the team role of a given user. 255 | """ 256 | url = self.get_base_url() / "teams" / team_name / "memberships" / str(user_id) 257 | payload = {"team_role_id": role_id} 258 | _logger.debug("PATCH %s\n%s", url, json.dumps(payload, indent=4)) 259 | self.session.patch(url, json=payload) 260 | 261 | def change_workspace_role( 262 | self, 263 | team_name: str, 264 | workspace_id: int, 265 | user_id: int, 266 | role_identifier: str, 267 | ) -> None: 268 | """ 269 | Change the workspace role of a given user. 270 | """ 271 | url = ( 272 | self.get_base_url() 273 | / "teams" 274 | / team_name 275 | / "workspaces" 276 | / str(workspace_id) 277 | / "membership" 278 | ) 279 | payload = {"role_identifier": role_identifier, "user_id": user_id} 280 | _logger.debug("PUT %s\n%s", url, json.dumps(payload, indent=4)) 281 | self.session.put(url, json=payload) 282 | 283 | def get_base_url(self, version: Optional[str] = "v1") -> URL: 284 | """ 285 | Return the base URL for API calls. 286 | """ 287 | return self.baseurl / version 288 | 289 | def get_group_membership( 290 | self, 291 | team_name: str, 292 | page: int, 293 | ) -> Dict[str, Any]: 294 | """ 295 | Lists all user/SCIM groups associated with a team 296 | """ 297 | url = ( 298 | self.get_base_url() 299 | / "teams" 300 | / team_name 301 | / "scim/v2/Groups" 302 | % {"startIndex": str(page)} 303 | ) 304 | self.session.headers["Accept"] = "application/scim+json" 305 | _logger.debug("GET %s", url) 306 | response = self.session.get(url) 307 | return response.json() 308 | -------------------------------------------------------------------------------- /src/preset_cli/api/operators.py: -------------------------------------------------------------------------------- 1 | """ 2 | Operators for filtering the API. 3 | """ 4 | 5 | # pylint: disable=too-few-public-methods 6 | 7 | from typing import Any 8 | 9 | 10 | class Operator: 11 | """ 12 | A filter operator. 13 | """ 14 | 15 | operator = "invalid" 16 | 17 | def __init__(self, value: Any): 18 | self.value = value 19 | 20 | 21 | class Equal(Operator): 22 | """ 23 | Equality operator. 
24 | """ 25 | 26 | operator = "eq" 27 | 28 | 29 | class OneToMany(Operator): 30 | """ 31 | Operator for one-to-many relationships. 32 | """ 33 | 34 | operator = "rel_o_m" 35 | -------------------------------------------------------------------------------- /src/preset_cli/auth/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/src/preset_cli/auth/__init__.py -------------------------------------------------------------------------------- /src/preset_cli/auth/jwt.py: -------------------------------------------------------------------------------- 1 | """ 2 | JWT auth. 3 | """ 4 | 5 | import yaml 6 | 7 | from preset_cli.auth.lib import get_access_token, get_credentials_path 8 | from preset_cli.auth.token import TokenAuth 9 | 10 | 11 | class JWTAuth(TokenAuth): # pylint: disable=too-few-public-methods, abstract-method 12 | """ 13 | Auth via JWT. 14 | """ 15 | 16 | @classmethod 17 | def from_stored_credentials(cls) -> "JWTAuth": 18 | """ 19 | Build auth from stored credentials. 20 | """ 21 | credentials_path = get_credentials_path() 22 | if not credentials_path.exists(): 23 | raise Exception(f"Could not load credentials from {credentials_path}") 24 | 25 | with open(credentials_path, encoding="utf-8") as input_: 26 | credentials = yaml.load(input_, Loader=yaml.SafeLoader) 27 | 28 | token = get_access_token(**credentials) 29 | 30 | return JWTAuth(token) 31 | -------------------------------------------------------------------------------- /src/preset_cli/auth/lib.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helped functions for authentication. 3 | """ 4 | 5 | from pathlib import Path 6 | from typing import Union 7 | 8 | import requests 9 | import yaml 10 | from appdirs import user_config_dir 11 | from yarl import URL 12 | 13 | CREDENTIALS_FILE = "credentials.yaml" 14 | 15 | 16 | def get_access_token(baseurl: Union[str, URL], api_token: str, api_secret: str) -> str: 17 | """ 18 | Fetch the JWT access token. 19 | """ 20 | if isinstance(baseurl, str): 21 | baseurl = URL(baseurl) 22 | 23 | response = requests.post( 24 | baseurl / "v1/auth/", 25 | json={"name": api_token, "secret": api_secret}, 26 | headers={"Content-Type": "application/json"}, 27 | timeout=60, 28 | ) 29 | response.raise_for_status() 30 | payload = response.json() 31 | return payload["payload"]["access_token"] 32 | 33 | 34 | def get_credentials_path() -> Path: 35 | """ 36 | Return the system-dependent location of the credentials. 37 | """ 38 | config_dir = Path(user_config_dir("preset-cli", "Preset")) 39 | return config_dir / CREDENTIALS_FILE 40 | 41 | 42 | def store_credentials( 43 | api_token: str, 44 | api_secret: str, 45 | manager_url: URL, 46 | credentials_path: Path, 47 | ) -> None: 48 | """ 49 | Store credentials. 50 | """ 51 | credentials_path.parent.mkdir(parents=True, exist_ok=True) 52 | 53 | credentials = { 54 | "api_token": api_token, 55 | "api_secret": api_secret, 56 | "baseurl": str(manager_url), 57 | } 58 | 59 | while True: 60 | store = input(f"Store the credentials in {credentials_path}? 
[y/N] ") 61 | if store.strip().lower() == "y": 62 | with open(credentials_path, "w", encoding="utf-8") as output: 63 | yaml.safe_dump(credentials, output) 64 | credentials_path.chmod(0o600) 65 | break 66 | 67 | if store.strip().lower() in ("n", ""): 68 | break 69 | -------------------------------------------------------------------------------- /src/preset_cli/auth/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mechanisms for authentication and authorization. 3 | """ 4 | 5 | from typing import Any, Dict 6 | 7 | from requests import Response, Session 8 | from requests.adapters import HTTPAdapter 9 | from urllib3.util import Retry 10 | 11 | 12 | class Auth: # pylint: disable=too-few-public-methods 13 | """ 14 | An authentication/authorization mechanism. 15 | """ 16 | 17 | def __init__(self): 18 | self.session = Session() 19 | self.session.hooks["response"].append(self.reauth) 20 | 21 | retries = Retry( 22 | total=3, # max retries count 23 | backoff_factor=1, # delay factor between attempts 24 | respect_retry_after_header=True, 25 | ) 26 | 27 | self.session.mount("https://", HTTPAdapter(max_retries=retries)) 28 | 29 | def get_headers(self) -> Dict[str, str]: 30 | """ 31 | Return headers for auth. 32 | """ 33 | return {} 34 | 35 | def auth(self) -> None: 36 | """ 37 | Perform authentication, fetching JWT tokens, CSRF tokens, cookies, etc. 38 | """ 39 | raise NotImplementedError("Must be implemented for reauthorizing") 40 | 41 | # pylint: disable=invalid-name, unused-argument 42 | def reauth(self, r: Response, *args: Any, **kwargs: Any) -> Response: 43 | """ 44 | Catch 401 and re-auth. 45 | """ 46 | if r.status_code != 401: 47 | return r 48 | 49 | try: 50 | self.auth() 51 | except NotImplementedError: 52 | return r 53 | 54 | self.session.headers.update(self.get_headers()) 55 | r.request.headers.update(self.get_headers()) 56 | return self.session.send(r.request, verify=False) 57 | -------------------------------------------------------------------------------- /src/preset_cli/auth/preset.py: -------------------------------------------------------------------------------- 1 | """ 2 | Preset auth. 3 | """ 4 | 5 | from typing import Dict 6 | 7 | import yaml 8 | from yarl import URL 9 | 10 | from preset_cli.auth.lib import get_access_token, get_credentials_path 11 | from preset_cli.auth.main import Auth 12 | 13 | 14 | class JWTTokenError(Exception): 15 | """ 16 | Exception raised when fetching the JWT fails. 17 | """ 18 | 19 | 20 | class PresetAuth(Auth): # pylint: disable=too-few-public-methods 21 | """ 22 | Auth via Preset access token and secret. 23 | 24 | Automatically refreshes the JWT as needed. 25 | """ 26 | 27 | def __init__(self, baseurl: URL, api_token: str, api_secret: str): 28 | super().__init__() 29 | 30 | self.baseurl = baseurl 31 | self.api_token = api_token 32 | self.api_secret = api_secret 33 | self.auth() 34 | 35 | def get_headers(self) -> Dict[str, str]: 36 | return {"Authorization": f"Bearer {self.token}"} 37 | 38 | def auth(self) -> None: 39 | """ 40 | Fetch the JWT and store it. 41 | """ 42 | try: 43 | self.token = get_access_token(self.baseurl, self.api_token, self.api_secret) 44 | except Exception as ex: # pylint: disable=broad-except 45 | raise JWTTokenError("Unable to fetch JWT") from ex 46 | 47 | @classmethod 48 | def from_stored_credentials(cls) -> "PresetAuth": 49 | """ 50 | Build auth from stored credentials. 
51 | """ 52 | credentials_path = get_credentials_path() 53 | if not credentials_path.exists(): 54 | raise Exception(f"Could not load credentials from {credentials_path}") 55 | 56 | with open(credentials_path, encoding="utf-8") as input_: 57 | credentials = yaml.load(input_, Loader=yaml.SafeLoader) 58 | 59 | return PresetAuth(**credentials) 60 | -------------------------------------------------------------------------------- /src/preset_cli/auth/superset.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mechanisms for authentication and authorization for Superset instances. 3 | """ 4 | 5 | from typing import Dict, Optional 6 | 7 | from bs4 import BeautifulSoup 8 | from yarl import URL 9 | 10 | from preset_cli.auth.main import Auth 11 | from preset_cli.auth.token import TokenAuth 12 | 13 | 14 | class UsernamePasswordAuth(Auth): # pylint: disable=too-few-public-methods 15 | """ 16 | Auth to Superset via username/password. 17 | """ 18 | 19 | def __init__(self, baseurl: URL, username: str, password: Optional[str] = None): 20 | super().__init__() 21 | 22 | self.csrf_token: Optional[str] = None 23 | self.baseurl = baseurl 24 | self.username = username 25 | self.password = password 26 | self.auth() 27 | 28 | def get_headers(self) -> Dict[str, str]: 29 | return {"X-CSRFToken": self.csrf_token} if self.csrf_token else {} 30 | 31 | def auth(self) -> None: 32 | """ 33 | Login to get CSRF token and cookies. 34 | """ 35 | data = {"username": self.username, "password": self.password} 36 | 37 | response = self.session.get(self.baseurl / "login/") 38 | soup = BeautifulSoup(response.text, "html.parser") 39 | input_ = soup.find("input", {"id": "csrf_token"}) 40 | csrf_token = input_["value"] if input_ else None 41 | if csrf_token: 42 | self.session.headers["X-CSRFToken"] = csrf_token 43 | data["csrf_token"] = csrf_token 44 | self.csrf_token = csrf_token 45 | 46 | # set cookies 47 | self.session.post(self.baseurl / "login/", data=data) 48 | 49 | 50 | class SupersetJWTAuth(TokenAuth): # pylint: disable=abstract-method 51 | """ 52 | Auth to Superset via JWT token. 53 | """ 54 | 55 | def __init__(self, token: str, baseurl: URL): 56 | super().__init__(token) 57 | self.baseurl = baseurl 58 | 59 | def get_csrf_token(self, jwt: str) -> str: 60 | """ 61 | Get a CSRF token. 62 | """ 63 | response = self.session.get( 64 | self.baseurl / "api/v1/security/csrf_token/", # type: ignore 65 | headers={"Authorization": f"Bearer {jwt}"}, 66 | ) 67 | response.raise_for_status() 68 | payload = response.json() 69 | return payload["result"] 70 | 71 | def get_headers(self) -> Dict[str, str]: 72 | return { 73 | "Authorization": f"Bearer {self.token}", 74 | "X-CSRFToken": self.get_csrf_token(self.token), 75 | } 76 | -------------------------------------------------------------------------------- /src/preset_cli/auth/token.py: -------------------------------------------------------------------------------- 1 | """ 2 | Token auth. 3 | """ 4 | 5 | from typing import Dict 6 | 7 | from preset_cli.auth.main import Auth 8 | 9 | 10 | class TokenAuth(Auth): # pylint: disable=too-few-public-methods, abstract-method 11 | """ 12 | Auth via a token. 
13 | """ 14 | 15 | def __init__(self, token: str): 16 | super().__init__() 17 | self.token = token 18 | 19 | def get_headers(self) -> Dict[str, str]: 20 | return {"Authorization": f"Bearer {self.token}"} 21 | -------------------------------------------------------------------------------- /src/preset_cli/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/src/preset_cli/cli/__init__.py -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/src/preset_cli/cli/superset/__init__.py -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/export.py: -------------------------------------------------------------------------------- 1 | """ 2 | A command to export Superset resources into a directory. 3 | """ 4 | 5 | import json 6 | import re 7 | from collections import defaultdict 8 | from pathlib import Path 9 | from typing import Any, Callable, List, Set, Tuple, Union 10 | from zipfile import ZipFile 11 | 12 | import click 13 | import yaml 14 | from yarl import URL 15 | 16 | from preset_cli.api.clients.superset import SupersetClient 17 | from preset_cli.lib import remove_root, split_comma 18 | 19 | JINJA2_OPEN_MARKER = "__JINJA2_OPEN__" 20 | JINJA2_CLOSE_MARKER = "__JINJA2_CLOSE__" 21 | assert JINJA2_OPEN_MARKER != JINJA2_CLOSE_MARKER 22 | 23 | 24 | def get_newline_char(force_unix_eol: bool = False) -> Union[str, None]: 25 | """Returns the newline character used by the open function""" 26 | return "\n" if force_unix_eol else None 27 | 28 | 29 | @click.command() 30 | @click.argument("directory", type=click.Path(exists=True, resolve_path=True)) 31 | @click.option( 32 | "--overwrite", 33 | is_flag=True, 34 | default=False, 35 | help="Overwrite existing resources", 36 | ) 37 | @click.option( 38 | "--disable-jinja-escaping", 39 | is_flag=True, 40 | default=False, 41 | help="Disable Jinja template escaping", 42 | ) 43 | @click.option( 44 | "--force-unix-eol", 45 | is_flag=True, 46 | default=False, 47 | help="Force Unix end-of-line characters, otherwise use system default", 48 | ) 49 | @click.option( 50 | "--asset-type", 51 | help="Asset type", 52 | multiple=True, 53 | ) 54 | @click.option( 55 | "--database-ids", 56 | callback=split_comma, 57 | help="Comma separated list of database IDs to export", 58 | ) 59 | @click.option( 60 | "--dataset-ids", 61 | callback=split_comma, 62 | help="Comma separated list of dataset IDs to export", 63 | ) 64 | @click.option( 65 | "--chart-ids", 66 | callback=split_comma, 67 | help="Comma separated list of chart IDs to export", 68 | ) 69 | @click.option( 70 | "--dashboard-ids", 71 | callback=split_comma, 72 | help="Comma separated list of dashboard IDs to export", 73 | ) 74 | @click.pass_context 75 | def export_assets( # pylint: disable=too-many-locals, too-many-arguments 76 | ctx: click.core.Context, 77 | directory: str, 78 | asset_type: Tuple[str, ...], 79 | database_ids: List[str], 80 | dataset_ids: List[str], 81 | chart_ids: List[str], 82 | dashboard_ids: List[str], 83 | overwrite: bool = False, 84 | disable_jinja_escaping: bool = False, 85 | force_unix_eol: bool = False, 86 | ) -> None: 87 | """ 88 | Export 
DBs/datasets/charts/dashboards to a directory. 89 | """ 90 | auth = ctx.obj["AUTH"] 91 | url = URL(ctx.obj["INSTANCE"]) 92 | client = SupersetClient(url, auth) 93 | root = Path(directory) 94 | asset_types = set(asset_type) 95 | ids = { 96 | "database": {int(id_) for id_ in database_ids}, 97 | "dataset": {int(id_) for id_ in dataset_ids}, 98 | "chart": {int(id_) for id_ in chart_ids}, 99 | "dashboard": {int(id_) for id_ in dashboard_ids}, 100 | } 101 | ids_requested = any([database_ids, dataset_ids, chart_ids, dashboard_ids]) 102 | 103 | for resource_name in ["database", "dataset", "chart", "dashboard"]: 104 | if (not asset_types or resource_name in asset_types) and ( 105 | ids[resource_name] or not ids_requested 106 | ): 107 | export_resource( 108 | resource_name, 109 | ids[resource_name], 110 | root, 111 | client, 112 | overwrite, 113 | disable_jinja_escaping, 114 | skip_related=not ids_requested, 115 | force_unix_eol=force_unix_eol, 116 | ) 117 | 118 | 119 | def export_resource( # pylint: disable=too-many-arguments, too-many-locals 120 | resource_name: str, 121 | requested_ids: Set[int], 122 | root: Path, 123 | client: SupersetClient, 124 | overwrite: bool, 125 | disable_jinja_escaping: bool, 126 | skip_related: bool = True, 127 | force_unix_eol: bool = False, 128 | ) -> None: 129 | """ 130 | Export a given resource and unzip it in a directory. 131 | """ 132 | resources = client.get_resources(resource_name) 133 | ids = [ 134 | resource["id"] 135 | for resource in resources 136 | if resource["id"] in requested_ids or not requested_ids 137 | ] 138 | buf = client.export_zip(resource_name, ids) 139 | 140 | with ZipFile(buf) as bundle: 141 | contents = { 142 | remove_root(file_name): bundle.read(file_name).decode() 143 | for file_name in bundle.namelist() 144 | } 145 | 146 | for file_name, file_contents in contents.items(): 147 | if skip_related and not file_name.startswith(resource_name): 148 | continue 149 | 150 | target = root / file_name 151 | if target.exists() and not overwrite: 152 | raise Exception( 153 | f"File already exists and ``--overwrite`` was not specified: {target}", 154 | ) 155 | if not target.parent.exists(): 156 | target.parent.mkdir(parents=True, exist_ok=True) 157 | 158 | # escape any pre-existing Jinja2 templates 159 | if not disable_jinja_escaping: 160 | asset_content = yaml.load(file_contents, Loader=yaml.SafeLoader) 161 | for key, value in asset_content.items(): 162 | asset_content[key] = traverse_data(value, handle_string) 163 | 164 | file_contents = yaml.dump(asset_content, sort_keys=False) 165 | 166 | newline = get_newline_char(force_unix_eol) 167 | with open(target, "w", encoding="utf-8", newline=newline) as output: 168 | output.write(file_contents) 169 | 170 | 171 | def traverse_data(value: Any, handler: Callable) -> Any: 172 | """ 173 | Process value according to its data type. 174 | """ 175 | if isinstance(value, str): 176 | return handler(value) 177 | if isinstance(value, dict) and value: 178 | return {k: traverse_data(v, handler) for k, v in value.items()} 179 | if isinstance(value, list) and value: 180 | return [traverse_data(item, handler) for item in value] 181 | return value 182 | 183 | 184 | def handle_string(value): 185 | """ 186 | Try to load a string as JSON to traverse its content for proper Jinja template escaping. 
187 | Required for fields like ``query_context`` 188 | """ 189 | try: 190 | asset_dict = json.loads(value) 191 | return ( 192 | json.dumps(traverse_data(asset_dict, jinja_escaper)) if asset_dict else "{}" 193 | ) 194 | except json.JSONDecodeError: 195 | return jinja_escaper(value) 196 | 197 | 198 | def jinja_escaper(value: str) -> str: 199 | """ 200 | Escape Jinja macros and logical statements that shouldn't be handled by CLI 201 | """ 202 | logical_statements_patterns = [ 203 | r"(\{%-?\s*if)", # {%if || {% if || {%-if || {%- if 204 | r"(\{%-?\s*elif)", # {%elif || {% elif || {%-elif || {%- elif 205 | r"(\{%-?\s*else)", # {%else || {% else || {%-else || {%- else 206 | r"(\{%-?\s*endif)", # {%endif || {% endif || {%-endif || {%- endif 207 | r"(\{%-?\s*for)", # {%for || {% for || {%-for || {%- for 208 | r"(\{%-?\s*endfor)", # {%endfor || {% endfor || {%-endfor || {%- endfor 209 | r"(%})", # %} 210 | r"(-%})", # -%} 211 | ] 212 | 213 | for syntax in logical_statements_patterns: 214 | replacement = JINJA2_OPEN_MARKER + " '" + r"\1" + "' " + JINJA2_CLOSE_MARKER 215 | value = re.sub(syntax, replacement, value) 216 | 217 | # escaping macros 218 | value = value.replace( 219 | "{{", 220 | f"{JINJA2_OPEN_MARKER} '{{{{' {JINJA2_CLOSE_MARKER}", 221 | ) 222 | value = value.replace( 223 | "}}", 224 | f"{JINJA2_OPEN_MARKER} '}}}}' {JINJA2_CLOSE_MARKER}", 225 | ) 226 | value = value.replace(JINJA2_OPEN_MARKER, "{{") 227 | value = value.replace(JINJA2_CLOSE_MARKER, "}}") 228 | value = re.sub(r"' }} {{ '", " ", value) 229 | 230 | return value 231 | 232 | 233 | @click.command() 234 | @click.argument( 235 | "path", 236 | type=click.Path(resolve_path=True), 237 | default="users.yaml", 238 | ) 239 | @click.option( 240 | "--force-unix-eol", 241 | is_flag=True, 242 | default=False, 243 | help="Force Unix end-of-line characters, otherwise use system default", 244 | ) 245 | @click.pass_context 246 | def export_users( 247 | ctx: click.core.Context, 248 | path: str, 249 | force_unix_eol: bool = False, 250 | ) -> None: 251 | """ 252 | Export users and their roles to a YAML file. 253 | """ 254 | auth = ctx.obj["AUTH"] 255 | url = URL(ctx.obj["INSTANCE"]) 256 | client = SupersetClient(url, auth) 257 | 258 | users = [ 259 | {k: v for k, v in user.items() if k != "id"} for user in client.export_users() 260 | ] 261 | 262 | newline = get_newline_char(force_unix_eol) 263 | with open(path, "w", encoding="utf-8", newline=newline) as output: 264 | yaml.dump(users, output) 265 | 266 | 267 | @click.command() 268 | @click.argument( 269 | "path", 270 | type=click.Path(resolve_path=True), 271 | default="roles.yaml", 272 | ) 273 | @click.option( 274 | "--force-unix-eol", 275 | is_flag=True, 276 | default=False, 277 | help="Force Unix end-of-line characters, otherwise use system default", 278 | ) 279 | @click.pass_context 280 | def export_roles( 281 | ctx: click.core.Context, 282 | path: str, 283 | force_unix_eol: bool = False, 284 | ) -> None: 285 | """ 286 | Export roles to a YAML file. 
287 | """ 288 | auth = ctx.obj["AUTH"] 289 | url = URL(ctx.obj["INSTANCE"]) 290 | client = SupersetClient(url, auth) 291 | 292 | newline = get_newline_char(force_unix_eol) 293 | with open(path, "w", encoding="utf-8", newline=newline) as output: 294 | yaml.dump(list(client.export_roles()), output) 295 | 296 | 297 | @click.command() 298 | @click.argument( 299 | "path", 300 | type=click.Path(resolve_path=True), 301 | default="rls.yaml", 302 | ) 303 | @click.option( 304 | "--force-unix-eol", 305 | is_flag=True, 306 | default=False, 307 | help="Force Unix end-of-line characters, otherwise use system default", 308 | ) 309 | @click.pass_context 310 | def export_rls( 311 | ctx: click.core.Context, 312 | path: str, 313 | force_unix_eol: bool = False, 314 | ) -> None: 315 | """ 316 | Export RLS rules to a YAML file. 317 | """ 318 | auth = ctx.obj["AUTH"] 319 | url = URL(ctx.obj["INSTANCE"]) 320 | client = SupersetClient(url, auth) 321 | 322 | newline = get_newline_char(force_unix_eol) 323 | with open(path, "w", encoding="utf-8", newline=newline) as output: 324 | yaml.dump(list(client.export_rls()), output, sort_keys=False) 325 | 326 | 327 | @click.command() 328 | @click.argument( 329 | "path", 330 | type=click.Path(resolve_path=True), 331 | default="ownership.yaml", 332 | ) 333 | @click.option( 334 | "--force-unix-eol", 335 | is_flag=True, 336 | default=False, 337 | help="Force Unix end-of-line characters, otherwise use system default", 338 | ) 339 | @click.pass_context 340 | def export_ownership( 341 | ctx: click.core.Context, 342 | path: str, 343 | force_unix_eol: bool = False, 344 | ) -> None: 345 | """ 346 | Export DBs/datasets/charts/dashboards ownership to a YAML file. 347 | """ 348 | auth = ctx.obj["AUTH"] 349 | url = URL(ctx.obj["INSTANCE"]) 350 | client = SupersetClient(url, auth) 351 | 352 | ownership = defaultdict(list) 353 | for resource_name in ["dataset", "chart", "dashboard"]: 354 | for resource in client.export_ownership(resource_name): 355 | ownership[resource_name].append( 356 | { 357 | "name": resource["name"], 358 | "uuid": str(resource["uuid"]), 359 | "owners": resource["owners"], 360 | }, 361 | ) 362 | 363 | newline = get_newline_char(force_unix_eol) 364 | with open(path, "w", encoding="utf-8", newline=newline) as output: 365 | yaml.dump(dict(ownership), output) 366 | -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/import_.py: -------------------------------------------------------------------------------- 1 | """ 2 | Commands to import RLS rules, ownership, and more. 3 | """ 4 | 5 | import logging 6 | 7 | import click 8 | import yaml 9 | from yarl import URL 10 | 11 | from preset_cli.api.clients.superset import SupersetClient 12 | from preset_cli.cli.superset.lib import ( 13 | LogType, 14 | clean_logs, 15 | get_logs, 16 | write_logs_to_file, 17 | ) 18 | 19 | _logger = logging.getLogger(__name__) 20 | 21 | 22 | @click.command() 23 | @click.argument( 24 | "path", 25 | type=click.Path(resolve_path=True), 26 | default="rls.yaml", 27 | ) 28 | @click.pass_context 29 | def import_rls(ctx: click.core.Context, path: str) -> None: 30 | """ 31 | Import RLS rules from a YAML file. 
32 | """ 33 | auth = ctx.obj["AUTH"] 34 | url = URL(ctx.obj["INSTANCE"]) 35 | client = SupersetClient(url, auth) 36 | 37 | with open(path, encoding="utf-8") as input_: 38 | config = yaml.load(input_, Loader=yaml.SafeLoader) 39 | for rls in config: 40 | client.import_rls(rls) 41 | 42 | 43 | @click.command() 44 | @click.argument( 45 | "path", 46 | type=click.Path(resolve_path=True), 47 | default="roles.yaml", 48 | ) 49 | @click.pass_context 50 | def import_roles(ctx: click.core.Context, path: str) -> None: 51 | """ 52 | Import roles from a YAML file. 53 | """ 54 | auth = ctx.obj["AUTH"] 55 | url = URL(ctx.obj["INSTANCE"]) 56 | client = SupersetClient(url, auth) 57 | 58 | with open(path, encoding="utf-8") as input_: 59 | config = yaml.load(input_, Loader=yaml.SafeLoader) 60 | for role in config: 61 | client.import_role(role) 62 | 63 | 64 | @click.command() 65 | @click.argument( 66 | "path", 67 | type=click.Path(resolve_path=True), 68 | default="ownership.yaml", 69 | ) 70 | @click.option( 71 | "--continue-on-error", 72 | "-c", 73 | is_flag=True, 74 | default=False, 75 | help="Continue the import if an asset fails to import ownership", 76 | ) 77 | @click.pass_context 78 | def import_ownership( # pylint: disable=too-many-locals 79 | ctx: click.core.Context, 80 | path: str, 81 | continue_on_error: bool = False, 82 | ) -> None: 83 | """ 84 | Import resource ownership from a YAML file. 85 | """ 86 | client = SupersetClient(baseurl=URL(ctx.obj["INSTANCE"]), auth=ctx.obj["AUTH"]) 87 | 88 | log_file_path, logs = get_logs(LogType.OWNERSHIP) 89 | assets_to_skip = {log["uuid"] for log in logs[LogType.OWNERSHIP]} | { 90 | log["uuid"] for log in logs[LogType.ASSETS] if log["status"] == "FAILED" 91 | } 92 | 93 | with open(path, encoding="utf-8") as input_: 94 | config = yaml.load(input_, Loader=yaml.SafeLoader) 95 | 96 | users = {user["email"]: user["id"] for user in client.export_users()} 97 | with open(log_file_path, "w", encoding="utf-8") as log_file: 98 | for resource_name, resources in config.items(): 99 | resource_ids = { 100 | str(v): k for k, v in client.get_uuids(resource_name).items() 101 | } 102 | for ownership in resources: 103 | if ownership["uuid"] not in assets_to_skip: 104 | 105 | _logger.info( 106 | "Importing ownership for %s %s", 107 | resource_name, 108 | ownership["name"], 109 | ) 110 | asset_log = {"uuid": ownership["uuid"], "status": "SUCCESS"} 111 | 112 | try: 113 | client.import_ownership( 114 | resource_name, 115 | ownership, 116 | users, 117 | resource_ids, 118 | ) 119 | except Exception as exc: # pylint: disable=broad-except 120 | _logger.debug( 121 | "Failed to import ownership for %s %s: %s", 122 | resource_name, 123 | ownership["name"], 124 | str(exc), 125 | ) 126 | if not continue_on_error: 127 | raise 128 | asset_log["status"] = "FAILED" 129 | 130 | logs[LogType.OWNERSHIP].append(asset_log) 131 | write_logs_to_file(log_file, logs) 132 | 133 | if not continue_on_error or not any( 134 | log["status"] == "FAILED" for log in logs[LogType.OWNERSHIP] 135 | ): 136 | clean_logs(LogType.OWNERSHIP, logs) 137 | -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/lib.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper functions for the Superset commands 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | from enum import Enum 8 | from pathlib import Path 9 | from typing import IO, Any, Dict, Tuple 10 | 11 | import yaml 12 | 13 | from preset_cli.lib import dict_merge 14 | 
15 | LOG_FILE_PATH = Path("progress.log") 16 | 17 | 18 | class LogType(str, Enum): 19 | """ 20 | Types of operations tracked in the progress log file. 21 | """ 22 | 23 | ASSETS = "assets" 24 | OWNERSHIP = "ownership" 25 | 26 | 27 | def get_logs(log_type: LogType) -> Tuple[Path, Dict[LogType, Any]]: 28 | """ 29 | Returns the path and content of the progress log file. 30 | 31 | Creates the file if it does not exist yet. Filters out FAILED 32 | entries for the particular log type. Defaults to an empty list. 33 | """ 34 | base_logs: Dict[LogType, Any] = {log_type_: [] for log_type_ in LogType} 35 | 36 | if not LOG_FILE_PATH.exists(): 37 | LOG_FILE_PATH.touch() 38 | return LOG_FILE_PATH, base_logs 39 | 40 | with open(LOG_FILE_PATH, "r", encoding="utf-8") as log_file: 41 | logs = yaml.load(log_file, Loader=yaml.SafeLoader) or {} 42 | 43 | logs = {LogType(log_type_): log_entries for log_type_, log_entries in logs.items()} 44 | dict_merge(base_logs, logs) 45 | base_logs[log_type] = [ 46 | log for log in base_logs[log_type] if log["status"] != "FAILED" 47 | ] 48 | return LOG_FILE_PATH, base_logs 49 | 50 | 51 | def serialize_enum_logs_to_string(logs: Dict[LogType, Any]) -> Dict[str, Any]: 52 | """ 53 | Helper method to serialize the enum keys in the logs dict to str. 54 | """ 55 | return {log_type.value: log_entries for log_type, log_entries in logs.items()} 56 | 57 | 58 | def write_logs_to_file(log_file: IO[str], logs: Dict[LogType, Any]) -> None: 59 | """ 60 | Write the logs dict to the progress log file. 61 | """ 62 | logs_ = serialize_enum_logs_to_string(logs) 63 | log_file.seek(0) 64 | yaml.dump(logs_, log_file) 65 | log_file.truncate() 66 | 67 | 68 | def clean_logs(log_type: LogType, logs: Dict[LogType, Any]) -> None: 69 | """ 70 | Cleans the progress log file for the specific log type. 71 | 72 | If there are no other log types, the file is deleted. 73 | """ 74 | logs.pop(log_type, None) 75 | if any(logs.values()): 76 | with open(LOG_FILE_PATH, "w", encoding="utf-8") as log_file: 77 | logs_ = serialize_enum_logs_to_string(logs) 78 | yaml.dump(logs_, log_file) 79 | else: 80 | LOG_FILE_PATH.unlink() 81 | -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Main entry point for Superset commands. 
3 | """ 4 | from typing import Any, Optional 5 | 6 | import click 7 | from yarl import URL 8 | 9 | from preset_cli.auth.superset import SupersetJWTAuth, UsernamePasswordAuth 10 | from preset_cli.cli.superset.export import ( 11 | export_assets, 12 | export_ownership, 13 | export_rls, 14 | export_roles, 15 | export_users, 16 | ) 17 | from preset_cli.cli.superset.import_ import import_ownership, import_rls, import_roles 18 | from preset_cli.cli.superset.sql import sql 19 | from preset_cli.cli.superset.sync.main import sync 20 | from preset_cli.cli.superset.sync.native.command import native 21 | from preset_cli.lib import setup_logging 22 | 23 | 24 | @click.group() 25 | @click.argument("instance") 26 | @click.option("--jwt-token", default=None, help="JWT token") 27 | @click.option("-u", "--username", default="admin", help="Username") 28 | @click.option( 29 | "-p", 30 | "--password", 31 | prompt=True, 32 | prompt_required=False, 33 | default="admin", 34 | hide_input=True, 35 | help="Password (leave empty for prompt)", 36 | ) 37 | @click.option("--loglevel", default="INFO") 38 | @click.version_option() 39 | @click.pass_context 40 | def superset_cli( # pylint: disable=too-many-arguments 41 | ctx: click.core.Context, 42 | instance: str, 43 | jwt_token: Optional[str], 44 | username: str, 45 | password: str, 46 | loglevel: str, 47 | ): 48 | """ 49 | An Apache Superset CLI. 50 | """ 51 | setup_logging(loglevel) 52 | 53 | ctx.ensure_object(dict) 54 | 55 | ctx.obj["INSTANCE"] = instance 56 | 57 | # allow a custom authenticator to be passed via the context 58 | if "AUTH" not in ctx.obj: 59 | if jwt_token: 60 | ctx.obj["AUTH"] = SupersetJWTAuth(jwt_token, URL(instance)) 61 | else: 62 | ctx.obj["AUTH"] = UsernamePasswordAuth(URL(instance), username, password) 63 | 64 | 65 | superset_cli.add_command(sql) 66 | superset_cli.add_command(sync) 67 | superset_cli.add_command(export_assets) 68 | superset_cli.add_command(export_assets, name="export") # for backwards compatibility 69 | superset_cli.add_command(export_users) 70 | superset_cli.add_command(export_rls) 71 | superset_cli.add_command(export_roles) 72 | superset_cli.add_command(export_ownership) 73 | superset_cli.add_command(import_rls) 74 | superset_cli.add_command(import_roles) 75 | superset_cli.add_command(import_ownership) 76 | superset_cli.add_command(native, name="import-assets") 77 | 78 | 79 | @click.group() 80 | @click.pass_context 81 | def superset(ctx: click.core.Context) -> None: 82 | """ 83 | Send commands to one or more Superset instances. 84 | """ 85 | ctx.ensure_object(dict) 86 | 87 | 88 | def mutate_commands(source: click.core.Group, target: click.core.Group) -> None: 89 | """ 90 | Programmatically modify commands so they work with workspaces. 
91 | """ 92 | for name, command in source.commands.items(): 93 | 94 | if isinstance(command, click.core.Group): 95 | 96 | @click.group() 97 | @click.pass_context 98 | def new_group( 99 | ctx: click.core.Context, *args: Any, command=command, **kwargs: Any 100 | ) -> None: 101 | ctx.invoke(command, *args, **kwargs) 102 | 103 | mutate_commands(command, new_group) 104 | new_group.params = command.params[:] 105 | target.add_command(new_group, name) 106 | 107 | else: 108 | 109 | @click.command() 110 | @click.pass_context 111 | def new_command( 112 | ctx: click.core.Context, *args: Any, command=command, **kwargs: Any 113 | ) -> None: 114 | for instance in ctx.obj["WORKSPACES"]: 115 | click.echo(f"\n{instance}") 116 | ctx.obj["INSTANCE"] = instance 117 | ctx.invoke(command, *args, **kwargs) 118 | 119 | new_command.params = command.params[:] 120 | target.add_command(new_command, name) 121 | 122 | 123 | mutate_commands(superset_cli, superset) 124 | -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/sql.py: -------------------------------------------------------------------------------- 1 | """ 2 | Run SQL queries on Superset. 3 | """ 4 | import os.path 5 | import traceback 6 | from operator import itemgetter 7 | from pathlib import Path 8 | from typing import List, Optional, Tuple 9 | 10 | import click 11 | from prompt_toolkit import PromptSession 12 | from prompt_toolkit.completion import WordCompleter 13 | from prompt_toolkit.history import FileHistory 14 | from prompt_toolkit.lexers import PygmentsLexer 15 | from prompt_toolkit.styles.pygments import style_from_pygments_cls 16 | from pygments.lexers.sql import SqlLexer 17 | from pygments.styles import get_style_by_name 18 | from sqlglot.tokens import Tokenizer 19 | from tabulate import tabulate 20 | from yarl import URL 21 | 22 | from preset_cli.api.clients.superset import SupersetClient 23 | from preset_cli.exceptions import SupersetError 24 | 25 | sql_completer = WordCompleter(list(Tokenizer.KEYWORDS)) 26 | style = style_from_pygments_cls(get_style_by_name("stata-dark")) 27 | 28 | 29 | @click.command() 30 | @click.option( 31 | "--database-id", 32 | default=None, 33 | help="Database ID (leave empty for options)", 34 | type=click.INT, 35 | ) 36 | @click.option( 37 | "--schema", 38 | default=None, 39 | help="Schema", 40 | ) 41 | @click.option("-e", "--execute", default=None, help="Run query non-interactively") 42 | @click.pass_context 43 | def sql( # pylint: disable=too-many-arguments 44 | ctx: click.core.Context, 45 | database_id: Optional[int], 46 | schema: Optional[str] = None, 47 | execute: Optional[str] = None, 48 | ) -> None: 49 | """ 50 | Run SQL against an Apache Superset database. 
51 | """ 52 | auth = ctx.obj["AUTH"] 53 | url = URL(ctx.obj["INSTANCE"]) 54 | client = SupersetClient(url, auth) 55 | 56 | databases = client.get_databases() 57 | if not databases: 58 | click.echo("No databases available") 59 | return None 60 | 61 | if len(databases) == 1 and database_id is None: 62 | database_id = databases[0]["id"] 63 | 64 | if database_id is None: 65 | click.echo("Choose the ID of a database to connect to:") 66 | for database in sorted(databases, key=itemgetter("id")): 67 | click.echo(f'({database["id"]}) {database["database_name"]}') 68 | while database_id is None: 69 | try: 70 | choice = int(input("> ")) 71 | if any(database["id"] == choice for database in databases): 72 | database_id = choice 73 | break 74 | except ValueError: 75 | pass 76 | click.echo("Invalid choice") 77 | 78 | database_name = [ 79 | database["database_name"] 80 | for database in databases 81 | if database["id"] == database_id 82 | ][0] 83 | 84 | if execute: 85 | return run_query(client, database_id, schema, execute) 86 | 87 | return run_session(client, database_id, database_name, schema, url) 88 | 89 | 90 | def run_query( 91 | client: SupersetClient, 92 | database_id: int, 93 | schema: Optional[str], 94 | query: str, 95 | ) -> None: 96 | """ 97 | Run a query in a given database. 98 | """ 99 | try: 100 | results = client.run_query(database_id, query, schema) 101 | click.echo(tabulate(results, headers=results.columns, showindex=False)) 102 | except SupersetError as ex: 103 | click.echo( 104 | click.style( 105 | "\n".join(error["message"] for error in ex.errors), 106 | fg="bright_red", 107 | ), 108 | ) 109 | except Exception: # pylint: disable=broad-except 110 | traceback.print_exc() 111 | 112 | 113 | def run_session( 114 | client: SupersetClient, 115 | database_id: int, 116 | database_name: str, 117 | schema: Optional[str], 118 | url: URL, 119 | ) -> None: 120 | """ 121 | Run SQL queries in an interactive session. 122 | """ 123 | history = Path(os.path.expanduser("~/.config/preset-cli/")) 124 | if not history.exists(): 125 | history.mkdir(parents=True) 126 | 127 | session = PromptSession( 128 | lexer=PygmentsLexer(SqlLexer), 129 | completer=sql_completer, 130 | style=style, 131 | history=FileHistory(history / f"sql-{url.host}-{database_id}.history"), 132 | ) 133 | 134 | lines: List[str] = [] 135 | quote_context = " " 136 | padding = " " * (len(database_name) - 1) 137 | while True: 138 | prompt = f"{database_name}> " if not lines else f"{padding}{quote_context}. " 139 | try: 140 | line = session.prompt(prompt) 141 | except KeyboardInterrupt: 142 | lines = [] 143 | quote_context = " " 144 | continue # Control-C pressed. Try again. 145 | except EOFError: 146 | break # Control-D pressed. 147 | 148 | lines.append(line) 149 | query = "\n".join(lines) 150 | 151 | is_terminated, quote_context = get_query_termination(query) 152 | if is_terminated: 153 | run_query(client, database_id, schema, query) 154 | lines = [] 155 | quote_context = " " 156 | 157 | click.echo("Goodbye!") 158 | 159 | 160 | def get_query_termination(query: str) -> Tuple[bool, str]: 161 | """ 162 | Check if a query is ended or if a new line should be created. 163 | 164 | This function looks for a semicolon at the end, making sure no quotation mark must be 165 | closed. 
166 | """ 167 | quote_context = " " 168 | quote_chars = ('"', "'", "`") 169 | 170 | for query_char in query: 171 | if quote_context == query_char: 172 | quote_context = " " 173 | else: 174 | for quote in quote_chars: 175 | if quote_context == " " and quote == query_char: 176 | quote_context = quote 177 | 178 | is_terminated = quote_context == " " and query.endswith(";") 179 | 180 | return is_terminated, quote_context 181 | -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/sync/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/src/preset_cli/cli/superset/sync/__init__.py -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/sync/dbt/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/src/preset_cli/cli/superset/sync/dbt/__init__.py -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/sync/dbt/databases.py: -------------------------------------------------------------------------------- 1 | """ 2 | Sync dbt database to Superset. 3 | """ 4 | 5 | import logging 6 | from pathlib import Path 7 | from typing import Any, Optional 8 | 9 | from yarl import URL 10 | 11 | from preset_cli.api.clients.superset import SupersetClient 12 | from preset_cli.cli.superset.sync.dbt.lib import build_sqlalchemy_params, load_profiles 13 | from preset_cli.exceptions import DatabaseNotFoundError 14 | 15 | _logger = logging.getLogger(__name__) 16 | 17 | 18 | def sync_database( # pylint: disable=too-many-locals, too-many-arguments 19 | client: SupersetClient, 20 | profiles_path: Path, 21 | project_name: str, 22 | profile_name: str, 23 | target_name: Optional[str], 24 | import_db: bool, 25 | disallow_edits: bool, # pylint: disable=unused-argument 26 | external_url_prefix: str, 27 | ) -> Any: 28 | """ 29 | Read target database from a dbt profiles.yml and sync to Superset. 
30 | """ 31 | base_url = URL(external_url_prefix) if external_url_prefix else None 32 | 33 | profiles = load_profiles(profiles_path, project_name, profile_name, target_name) 34 | project = profiles[profile_name] 35 | outputs = project["outputs"] 36 | if target_name is None: 37 | target_name = project["target"] 38 | target = outputs[target_name] 39 | 40 | # read additional metadata that should be applied to the DB 41 | meta = target.get("meta", {}).get("superset", {}) 42 | 43 | database_name = meta.pop("database_name", f"{project_name}_{target_name}") 44 | databases = client.get_databases(database_name=database_name) 45 | if len(databases) > 1: 46 | raise Exception("More than one database with the same name found") 47 | 48 | if base_url and "external_url" not in meta: 49 | meta["external_url"] = str(base_url.with_fragment("!/overview")) 50 | 51 | if import_db: 52 | connection_params = meta.pop( 53 | "connection_params", 54 | build_sqlalchemy_params(target), 55 | ) 56 | 57 | if databases: 58 | _logger.info("Found an existing database connection, updating it") 59 | database = databases[0] 60 | meta.pop("uuid", None) 61 | 62 | database = client.update_database( 63 | database_id=database["id"], 64 | database_name=database_name, 65 | is_managed_externally=disallow_edits, 66 | masked_encrypted_extra=connection_params.get("encrypted_extra"), 67 | sqlalchemy_uri=connection_params["sqlalchemy_uri"], 68 | **meta, 69 | ) 70 | 71 | else: 72 | _logger.info("No database connection found, creating it") 73 | 74 | database = client.create_database( 75 | database_name=database_name, 76 | is_managed_externally=disallow_edits, 77 | masked_encrypted_extra=connection_params.get("encrypted_extra"), 78 | **connection_params, 79 | **meta, 80 | ) 81 | 82 | database["sqlalchemy_uri"] = connection_params["sqlalchemy_uri"] 83 | 84 | elif databases: 85 | _logger.info("Found an existing database connection, using it") 86 | database = databases[0] 87 | database["sqlalchemy_uri"] = client.get_database(database["id"])[ 88 | "sqlalchemy_uri" 89 | ] 90 | 91 | else: 92 | raise DatabaseNotFoundError() 93 | 94 | return database 95 | -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/sync/dbt/exposures.py: -------------------------------------------------------------------------------- 1 | """ 2 | Sync Superset dashboards as dbt exposures. 3 | """ 4 | 5 | import json 6 | import re 7 | from pathlib import Path 8 | from typing import Any, Dict, List, NamedTuple, Optional 9 | 10 | import yaml 11 | 12 | from preset_cli.api.clients.dbt import ModelSchema 13 | from preset_cli.api.clients.superset import SupersetClient 14 | 15 | # XXX: DashboardResponseType and DatasetResponseType 16 | 17 | 18 | class ModelKey(NamedTuple): 19 | """ 20 | Model key, so they can be mapped from datasets. 21 | """ 22 | 23 | schema: Optional[str] 24 | table: str 25 | 26 | 27 | def get_chart_depends_on( 28 | client: SupersetClient, 29 | chart: Any, 30 | model_map: Dict[ModelKey, ModelSchema], 31 | ) -> List[str]: 32 | """ 33 | Get all the dbt dependencies for a given chart. 34 | """ 35 | 36 | # imported charts have a null query context until loaded in Explore for the first time. 
37 | # in that case, we can get the dataset id from the params 38 | if chart["query_context"]: 39 | dataset_id = json.loads(chart["query_context"])["datasource"]["id"] 40 | elif chart["params"]: 41 | dataset_id = json.loads(chart["params"])["datasource"].split("__")[0] 42 | else: 43 | raise Exception( 44 | f'Unable to find dataset information for Chart {chart["slice_name"]}', 45 | ) 46 | 47 | dataset = client.get_dataset(dataset_id) 48 | extra = json.loads(dataset["extra"] or "{}") 49 | if "depends_on" in extra: 50 | return [extra["depends_on"]] 51 | 52 | key = ModelKey(dataset["schema"], dataset["table_name"]) 53 | if dataset["datasource_type"] == "table" and key in model_map: 54 | model = model_map[key] 55 | return [f"ref('{model['name']}')"] 56 | 57 | return [] 58 | 59 | 60 | def get_dashboard_depends_on( 61 | client: SupersetClient, 62 | dashboard: Any, 63 | model_map: Dict[ModelKey, ModelSchema], 64 | ) -> List[str]: 65 | """ 66 | Get all the dbt dependencies for a given dashboard. 67 | """ 68 | 69 | url = client.baseurl / "api/v1/dashboard" / str(dashboard["id"]) / "datasets" 70 | 71 | session = client.auth.session 72 | headers = client.auth.get_headers() 73 | response = session.get(url, headers=headers) 74 | response.raise_for_status() 75 | 76 | payload = response.json() 77 | 78 | depends_on = [] 79 | for dataset in payload["result"]: 80 | full_dataset = client.get_dataset(int(dataset["id"])) 81 | try: 82 | extra = json.loads(full_dataset["extra"] or "{}") 83 | except json.decoder.JSONDecodeError: 84 | extra = {} 85 | 86 | key = ModelKey(full_dataset["schema"], full_dataset["table_name"]) 87 | if "depends_on" in extra: 88 | depends_on.append(extra["depends_on"]) 89 | elif full_dataset["datasource_type"] == "table" and key in model_map: 90 | model = model_map[key] 91 | depends_on.append(f"ref('{model['name']}')") 92 | 93 | return depends_on 94 | 95 | 96 | def sync_exposures( # pylint: disable=too-many-locals 97 | client: SupersetClient, 98 | exposures_path: Path, 99 | datasets: List[Any], 100 | model_map: Dict[ModelKey, ModelSchema], 101 | ) -> None: 102 | """ 103 | Write dashboards back to dbt as exposures. 
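The file written to ``exposures_path`` looks roughly like this (values are
illustrative):

    version: 2
    exposures:
    - name: Sales_dashboard_42
      label: Sales [dashboard]
      type: dashboard
      maturity: low
      url: https://superset.example.org/superset/dashboard/42/
      description: ''
      depends_on:
      - ref('orders')
      owner:
        name: Jane Doe
        email: jane@example.org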
104 | """ 105 | exposures = [] 106 | charts_ids = set() 107 | dashboards_ids = set() 108 | 109 | for dataset in datasets: 110 | url = client.baseurl / "api/v1/dataset" / str(dataset["id"]) / "related_objects" 111 | 112 | session = client.auth.session 113 | headers = client.auth.get_headers() 114 | response = session.get(url, headers=headers) 115 | response.raise_for_status() 116 | 117 | payload = response.json() 118 | for chart in payload["charts"]["result"]: 119 | charts_ids.add(chart["id"]) 120 | for dashboard in payload["dashboards"]["result"]: 121 | dashboards_ids.add(dashboard["id"]) 122 | 123 | for chart_id in charts_ids: 124 | chart = client.get_chart(chart_id) 125 | first_owner = chart["owners"][0] 126 | 127 | # remove unsupported characters for dbt exposures name 128 | asset_title = re.sub(" ", "_", chart["slice_name"]) 129 | asset_title = re.sub(r"\W", "", asset_title) 130 | 131 | exposure = { 132 | "name": asset_title + "_chart_" + str(chart_id), 133 | "label": chart["slice_name"] + " [chart]", 134 | "type": "analysis", 135 | "maturity": "high" if chart["certified_by"] else "low", 136 | "url": str( 137 | client.baseurl 138 | / "superset/explore/" 139 | % {"form_data": json.dumps({"slice_id": chart_id})}, 140 | ), 141 | "description": chart["description"] or "", 142 | "depends_on": get_chart_depends_on(client, chart, model_map), 143 | "owner": { 144 | "name": first_owner["first_name"] + " " + first_owner["last_name"], 145 | "email": first_owner.get("email", "unknown"), 146 | }, 147 | } 148 | exposures.append(exposure) 149 | 150 | for dashboard_id in dashboards_ids: 151 | dashboard = client.get_dashboard(dashboard_id) 152 | first_owner = dashboard["owners"][0] 153 | 154 | asset_title = re.sub(" ", "_", dashboard["dashboard_title"]) 155 | asset_title = re.sub(r"\W", "", asset_title) 156 | 157 | exposure = { 158 | "name": asset_title + "_dashboard_" + str(dashboard_id), 159 | "label": dashboard["dashboard_title"] + " [dashboard]", 160 | "type": "dashboard", 161 | "maturity": "high" 162 | if dashboard["published"] or dashboard["certified_by"] 163 | else "low", 164 | "url": str(client.baseurl / dashboard["url"].lstrip("/")), 165 | "description": "", 166 | "depends_on": get_dashboard_depends_on(client, dashboard, model_map), 167 | "owner": { 168 | "name": first_owner["first_name"] + " " + first_owner["last_name"], 169 | "email": first_owner.get("email", "unknown"), 170 | }, 171 | } 172 | exposures.append(exposure) 173 | 174 | with open(exposures_path, "w", encoding="utf-8") as output: 175 | yaml.safe_dump({"version": 2, "exposures": exposures}, output, sort_keys=False) 176 | -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/sync/dbt/metrics.py: -------------------------------------------------------------------------------- 1 | """ 2 | Metric conversion. 3 | 4 | This module is used to convert dbt metrics into Superset metrics. 
5 | """ 6 | 7 | # pylint: disable=consider-using-f-string 8 | 9 | import json 10 | import logging 11 | import re 12 | from collections import defaultdict 13 | from typing import Dict, List, Optional, Set 14 | 15 | import sqlglot 16 | from sqlglot import Expression, ParseError, exp, parse_one 17 | from sqlglot.expressions import ( 18 | Alias, 19 | Case, 20 | Distinct, 21 | Identifier, 22 | If, 23 | Join, 24 | Select, 25 | Table, 26 | Where, 27 | ) 28 | from sqlglot.optimizer import traverse_scope 29 | 30 | from preset_cli.api.clients.dbt import ( 31 | FilterSchema, 32 | MFMetricWithSQLSchema, 33 | MFSQLEngine, 34 | ModelSchema, 35 | OGMetricSchema, 36 | ) 37 | from preset_cli.api.clients.superset import SupersetMetricDefinition 38 | from preset_cli.cli.superset.sync.dbt.exposures import ModelKey 39 | from preset_cli.cli.superset.sync.dbt.lib import parse_metric_meta 40 | 41 | _logger = logging.getLogger(__name__) 42 | 43 | # dbt => sqlglot 44 | DIALECT_MAP = { 45 | MFSQLEngine.BIGQUERY: "bigquery", 46 | MFSQLEngine.DUCKDB: "duckdb", 47 | MFSQLEngine.REDSHIFT: "redshift", 48 | MFSQLEngine.POSTGRES: "postgres", 49 | MFSQLEngine.SNOWFLAKE: "snowflake", 50 | MFSQLEngine.DATABRICKS: "databricks", 51 | } 52 | 53 | 54 | JINJAPATTERN = re.compile( 55 | r"(\s*(?:\{\{[\s\S]*?\}\}|\{%[\s\S]*?%\}))+", 56 | re.DOTALL, 57 | ) 58 | 59 | 60 | # pylint: disable=too-many-locals, too-many-branches 61 | def get_metric_expression(metric_name: str, metrics: Dict[str, OGMetricSchema]) -> str: 62 | """ 63 | Return a SQL expression for a given dbt metric using sqlglot. 64 | """ 65 | if metric_name not in metrics: 66 | raise Exception(f"Invalid metric {metric_name}") 67 | 68 | metric = metrics[metric_name] 69 | if "calculation_method" in metric: 70 | # dbt >= 1.3 71 | type_ = metric["calculation_method"] 72 | sql = metric["expression"] 73 | elif "sql" in metric: 74 | # dbt < 1.3 75 | type_ = metric["type"] 76 | sql = metric["sql"] 77 | else: 78 | raise Exception(f"Unable to generate metric expression from: {metric}") 79 | 80 | if metric.get("filters"): 81 | sql = apply_filters(sql, metric["filters"]) 82 | 83 | simple_mappings = { 84 | "count": "COUNT", 85 | "sum": "SUM", 86 | "average": "AVG", 87 | "min": "MIN", 88 | "max": "MAX", 89 | } 90 | 91 | if type_ in simple_mappings: 92 | function = simple_mappings[type_] 93 | return f"{function}({sql})" 94 | 95 | if type_ == "count_distinct": 96 | return f"COUNT(DISTINCT {sql})" 97 | 98 | if type_ in {"expression", "derived"}: 99 | if metric.get("skip_parsing"): 100 | return sql.strip() 101 | 102 | # if the metric expression contains Jinja syntax, we can't parse it as SQL; 103 | # instead we fallback to the regex method 104 | if re.search(JINJAPATTERN, sql): 105 | return replace_metric_syntax(sql, metric["depends_on"], metrics) 106 | 107 | try: 108 | expression = sqlglot.parse_one(sql, dialect=metric["dialect"]) 109 | tokens = expression.find_all(exp.Column) 110 | 111 | for token in tokens: 112 | if token.sql() in metrics: 113 | parent_sql = get_metric_expression(token.sql(), metrics) 114 | parent_expression = sqlglot.parse_one( 115 | parent_sql, 116 | dialect=metric["dialect"], 117 | ) 118 | token.replace(parent_expression) 119 | 120 | return expression.sql(dialect=metric["dialect"]) 121 | except ParseError: 122 | return replace_metric_syntax(sql, metric["depends_on"], metrics) 123 | 124 | sorted_metric = dict(sorted(metric.items())) 125 | raise Exception(f"Unable to generate metric expression from: {sorted_metric}") 126 | 127 | 128 | def apply_filters(sql: str, filters: 
List[FilterSchema]) -> str: 129 | """ 130 | Apply filters to SQL expression. 131 | """ 132 | condition = " AND ".join( 133 | "{field} {operator} {value}".format(**filter_) for filter_ in filters 134 | ) 135 | return f"CASE WHEN {condition} THEN {sql} END" 136 | 137 | 138 | def is_derived(metric: OGMetricSchema) -> bool: 139 | """ 140 | Return if the metric is derived. 141 | """ 142 | return ( 143 | metric.get("calculation_method") == "derived" # dbt >= 1.3 144 | or metric.get("type") == "expression" # dbt < 1.3 145 | or metric.get("type") == "derived" # WTF dbt Cloud 146 | ) 147 | 148 | 149 | def get_metrics_for_model( 150 | model: ModelSchema, 151 | metrics: List[OGMetricSchema], 152 | ) -> List[OGMetricSchema]: 153 | """ 154 | Given a list of metrics, return those that are based on a given model. 155 | """ 156 | metric_map = {metric["unique_id"]: metric for metric in metrics} 157 | related_metrics = [] 158 | 159 | for metric in metrics: 160 | parents = set() 161 | queue = [metric] 162 | while queue: 163 | node = queue.pop() 164 | depends_on = node["depends_on"] 165 | if is_derived(node): 166 | queue.extend(metric_map[parent] for parent in depends_on) 167 | else: 168 | parents.update(depends_on) 169 | 170 | if len(parents) > 1: 171 | _logger.warning( 172 | "Metric %s cannot be calculated because it depends on multiple models: %s", 173 | metric["name"], 174 | ", ".join(sorted(parents)), 175 | ) 176 | continue 177 | 178 | if parents == {model["unique_id"]}: 179 | related_metrics.append(metric) 180 | 181 | return related_metrics 182 | 183 | 184 | def get_metric_models(unique_id: str, metrics: List[OGMetricSchema]) -> Set[str]: 185 | """ 186 | Given a metric, return the models it depends on. 187 | """ 188 | metric_map = {metric["unique_id"]: metric for metric in metrics} 189 | metric = metric_map[unique_id] 190 | depends_on = metric["depends_on"] 191 | 192 | if is_derived(metric): 193 | return { 194 | model 195 | for parent in depends_on 196 | for model in get_metric_models(parent, metrics) 197 | } 198 | 199 | return set(depends_on) 200 | 201 | 202 | def get_metric_definition( 203 | metric_name: str, 204 | metrics: List[OGMetricSchema], 205 | ) -> SupersetMetricDefinition: 206 | """ 207 | Build a Superset metric definition from an OG (< 1.6) dbt metric. 208 | """ 209 | metric_map = {metric["name"]: metric for metric in metrics} 210 | metric = metric_map[metric_name] 211 | metric_meta = parse_metric_meta(metric) 212 | final_metric_name = metric_meta["metric_name_override"] or metric_name 213 | 214 | return { 215 | "expression": get_metric_expression(metric_name, metric_map), 216 | "metric_name": final_metric_name, 217 | "metric_type": (metric.get("type") or metric.get("calculation_method")), 218 | "verbose_name": metric.get("label", final_metric_name), 219 | "description": metric.get("description", ""), 220 | "extra": json.dumps(metric_meta["meta"]), 221 | **metric_meta["kwargs"], # type: ignore 222 | } 223 | 224 | 225 | def get_superset_metrics_per_model( 226 | og_metrics: List[OGMetricSchema], 227 | sl_metrics: Optional[List[MFMetricWithSQLSchema]] = None, 228 | ) -> Dict[str, List[SupersetMetricDefinition]]: 229 | """ 230 | Build a dictionary of Superset metrics for each dbt model. 231 | """ 232 | superset_metrics = defaultdict(list) 233 | for metric in og_metrics: 234 | # dbt supports creating derived metrics with raw syntax. 
In case the metric doesn't 235 | # rely on other metrics (or rely on other metrics that aren't associated with any 236 | # model), it's required to specify the dataset the metric should be associated with 237 | # under the ``meta.superset.model`` key. If the derived metric is just an expression 238 | # with no dependency, it's not required to parse the metric SQL. 239 | if model := metric.get("meta", {}).get("superset", {}).pop("model", None): 240 | if len(metric["depends_on"]) == 0: 241 | metric["skip_parsing"] = True 242 | else: 243 | metric_models = get_metric_models(metric["unique_id"], og_metrics) 244 | if len(metric_models) == 0: 245 | _logger.warning( 246 | "Metric %s cannot be calculated because it's not associated with any model." 247 | " Please specify the model under metric.meta.superset.model.", 248 | metric["name"], 249 | ) 250 | continue 251 | 252 | if len(metric_models) != 1: 253 | _logger.warning( 254 | "Metric %s cannot be calculated because it depends on multiple models: %s", 255 | metric["name"], 256 | ", ".join(sorted(metric_models)), 257 | ) 258 | continue 259 | model = metric_models.pop() 260 | 261 | metric_definition = get_metric_definition( 262 | metric["name"], 263 | og_metrics, 264 | ) 265 | superset_metrics[model].append(metric_definition) 266 | 267 | for sl_metric in sl_metrics or []: 268 | metric_definition = convert_metric_flow_to_superset(sl_metric) 269 | model = sl_metric["model"] 270 | superset_metrics[model].append(metric_definition) 271 | 272 | return superset_metrics 273 | 274 | 275 | def extract_aliases(parsed_query: Expression) -> Dict[str, str]: 276 | """ 277 | Extract column aliases from a SQL query. 278 | """ 279 | aliases = {} 280 | for expression in parsed_query.find_all(Alias): 281 | alias_name = expression.alias 282 | expression_text = expression.this.sql() 283 | aliases[alias_name] = expression_text 284 | 285 | return aliases 286 | 287 | 288 | def convert_query_to_projection(sql: str, dialect: MFSQLEngine) -> str: 289 | """ 290 | Convert a MetricFlow compiled SQL to a projection. 
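A minimal illustrative example (table and column names are made up):

    >>> convert_query_to_projection(
    ...     "SELECT SUM(order_total) AS total FROM orders",
    ...     MFSQLEngine.POSTGRES,
    ... )
    'SUM(order_total)'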
291 | """ 292 | parsed_query = parse_one(sql, dialect=DIALECT_MAP.get(dialect)) 293 | 294 | # extract aliases from inner query 295 | scopes = traverse_scope(parsed_query) 296 | has_subquery = len(scopes) > 1 297 | aliases = extract_aliases(scopes[0].expression) if has_subquery else {} 298 | 299 | # find the metric expression 300 | select_expression = parsed_query.find(Select) 301 | if select_expression.find(Join): 302 | raise ValueError("Unable to convert metrics with JOINs") 303 | 304 | projection = select_expression.args["expressions"] 305 | if len(projection) > 1: 306 | raise ValueError("Unable to convert metrics with multiple selected expressions") 307 | 308 | metric_expression = ( 309 | projection[0].this if isinstance(projection[0], Alias) else projection[0] 310 | ) 311 | 312 | # replace aliases with their original expressions 313 | for node in metric_expression.walk(): 314 | if isinstance(node, Identifier) and node.sql() in aliases: 315 | node.replace(parse_one(aliases[node.sql()])) 316 | 317 | # convert WHERE predicate to a CASE statement 318 | where_expression = parsed_query.find(Where) 319 | if where_expression: 320 | 321 | # Remove DISTINCT from metric to avoid conficting with CASE 322 | distinct = False 323 | for node in metric_expression.this.walk(): 324 | if isinstance(node, Distinct): 325 | distinct = True 326 | node.replace(node.expressions[0]) 327 | 328 | for node in where_expression.walk(): 329 | if isinstance(node, Identifier) and node.sql() in aliases: 330 | node.replace(parse_one(aliases[node.sql()])) 331 | 332 | case_expression = Case( 333 | ifs=[If(this=where_expression.this, true=metric_expression.this)], 334 | ) 335 | 336 | if distinct: 337 | case_expression = Distinct(expressions=[case_expression]) 338 | 339 | metric_expression.set("this", case_expression) 340 | 341 | return metric_expression.sql(dialect=DIALECT_MAP.get(dialect)) 342 | 343 | 344 | def convert_metric_flow_to_superset( 345 | sl_metric: MFMetricWithSQLSchema, 346 | ) -> SupersetMetricDefinition: 347 | """ 348 | Convert a MetricFlow metric to a Superset metric. 349 | 350 | Before MetricFlow we could build the metrics based on the metadata returned by the 351 | GraphQL API. With MetricFlow we only have access to the compiled SQL used to 352 | compute the metric, so we need to parse it and build a single projection for 353 | Superset. 354 | 355 | For example, this: 356 | 357 | SELECT 358 | SUM(order_count) AS large_order 359 | FROM ( 360 | SELECT 361 | order_total AS order_id__order_total_dim 362 | , 1 AS order_count 363 | FROM `dbt-tutorial-347100`.`dbt_beto`.`orders` orders_src_106 364 | ) subq_796 365 | WHERE order_id__order_total_dim >= 20 366 | 367 | Becomes: 368 | 369 | SUM(CASE WHEN order_total > 20 THEN 1 END) 370 | 371 | """ 372 | metric_meta = parse_metric_meta(sl_metric) 373 | return { 374 | "expression": convert_query_to_projection( 375 | sl_metric["sql"], 376 | sl_metric["dialect"], 377 | ), 378 | "metric_name": metric_meta["metric_name_override"] or sl_metric["name"], 379 | "metric_type": sl_metric["type"], 380 | "verbose_name": sl_metric["label"], 381 | "description": sl_metric["description"], 382 | "extra": json.dumps(metric_meta["meta"]), 383 | **metric_meta["kwargs"], # type: ignore 384 | } 385 | 386 | 387 | def get_models_from_sql( 388 | sql: str, 389 | dialect: MFSQLEngine, 390 | model_map: Dict[ModelKey, ModelSchema], 391 | ) -> Optional[List[ModelSchema]]: 392 | """ 393 | Return the model associated with a SQL query. 
394 | """ 395 | parsed_query = parse_one(sql, dialect=DIALECT_MAP.get(dialect)) 396 | sources = list(parsed_query.find_all(Table)) 397 | 398 | for table in sources: 399 | if ModelKey(table.db, table.name) not in model_map: 400 | return None 401 | 402 | return [model_map[ModelKey(table.db, table.name)] for table in sources] 403 | 404 | 405 | def replace_metric_syntax( 406 | sql: str, 407 | dependencies: List[str], 408 | metrics: Dict[str, OGMetricSchema], 409 | ) -> str: 410 | """ 411 | Replace metric keys with their SQL syntax. 412 | This method is a fallback in case ``sqlglot`` raises a ``ParseError``. 413 | """ 414 | for parent_metric in dependencies: 415 | parent_metric_name = parent_metric.split(".")[-1] 416 | pattern = r"\b" + re.escape(parent_metric_name) + r"\b" 417 | parent_metric_syntax = get_metric_expression( 418 | parent_metric_name, 419 | metrics, 420 | ) 421 | sql = re.sub(pattern, parent_metric_syntax, sql) 422 | 423 | return sql.strip() 424 | -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/sync/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Commands for syncing metastores to and from Superset. 3 | """ 4 | 5 | import click 6 | 7 | from preset_cli.cli.superset.sync.dbt.command import dbt_cloud, dbt_core 8 | from preset_cli.cli.superset.sync.native.command import native 9 | 10 | 11 | @click.group() 12 | def sync() -> None: 13 | """ 14 | Sync metadata between Superset and an external repository. 15 | """ 16 | 17 | 18 | sync.add_command(native) 19 | sync.add_command(dbt_cloud, name="dbt-cloud") 20 | sync.add_command(dbt_core, name="dbt-core") 21 | # for backwards compatibility 22 | sync.add_command(dbt_core, name="dbt") 23 | -------------------------------------------------------------------------------- /src/preset_cli/cli/superset/sync/native/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/src/preset_cli/cli/superset/sync/native/__init__.py -------------------------------------------------------------------------------- /src/preset_cli/exceptions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom exceptions. 3 | """ 4 | 5 | from enum import Enum 6 | from typing import Any, Dict, List 7 | 8 | from typing_extensions import TypedDict 9 | 10 | 11 | class ErrorLevel(str, Enum): 12 | """ 13 | Levels of errors that can exist within Superset. 14 | """ 15 | 16 | INFO = "info" 17 | WARNING = "warning" 18 | ERROR = "error" 19 | 20 | 21 | class ErrorPayload(TypedDict, total=False): 22 | """ 23 | A SIP-40 error payload. 24 | """ 25 | 26 | message: str 27 | error_type: str # import SupersetErrorType from Superset? 28 | level: ErrorLevel 29 | extra: Dict[str, Any] 30 | 31 | 32 | class SupersetError(Exception): 33 | """ 34 | A SIP-40 compliant exception. 35 | """ 36 | 37 | def __init__(self, errors: List[ErrorPayload]): 38 | super().__init__() 39 | self.errors = errors 40 | 41 | 42 | class DatabaseNotFoundError(SupersetError): 43 | """ 44 | Exception when no database is found. 
45 | """ 46 | 47 | def __init__(self): 48 | super().__init__( 49 | [ 50 | { 51 | "message": "Database not found", 52 | "error_type": "DATABASE_NOT_FOUND_ERROR", 53 | "level": ErrorLevel.ERROR, 54 | }, 55 | ], 56 | ) 57 | 58 | 59 | class CLIError(Exception): 60 | """ 61 | Exception raised for errors that occur during the CLI execution that should 62 | stop the execution with an exit code. 63 | """ 64 | 65 | def __init__(self, message: str, exit_code: int): 66 | super().__init__(message) 67 | self.exit_code = exit_code 68 | -------------------------------------------------------------------------------- /src/preset_cli/lib.py: -------------------------------------------------------------------------------- 1 | """ 2 | Basic helper functions. 3 | """ 4 | 5 | import json 6 | import logging 7 | import sys 8 | from pathlib import Path 9 | from typing import Any, Callable, Dict, List, Optional, cast 10 | 11 | import click 12 | from requests import Response 13 | from rich.logging import RichHandler 14 | 15 | from preset_cli.exceptions import CLIError, ErrorLevel, ErrorPayload, SupersetError 16 | 17 | _logger = logging.getLogger(__name__) 18 | 19 | 20 | def remove_root(file_path: str) -> str: 21 | """ 22 | Remove the first directory of a path. 23 | """ 24 | full_path = Path(file_path) 25 | return str(Path(*full_path.parts[1:])) 26 | 27 | 28 | def setup_logging(loglevel: str) -> None: 29 | """ 30 | Setup basic logging. 31 | """ 32 | level = getattr(logging, loglevel.upper(), None) 33 | if not isinstance(level, int): 34 | raise ValueError(f"Invalid log level: {loglevel}") 35 | 36 | logformat = "[%(asctime)s] %(levelname)s: %(name)s: %(message)s" 37 | logging.basicConfig( 38 | level=level, 39 | format=logformat, 40 | datefmt="[%X]", 41 | handlers=[RichHandler()], 42 | force=True, 43 | ) 44 | logging.captureWarnings(True) 45 | 46 | 47 | def deserialize_error_level(errors: List[Dict[str, Any]]) -> List[ErrorPayload]: 48 | """ 49 | Convert error level from string to enum. 50 | """ 51 | for error in errors: 52 | if isinstance(error, dict) and isinstance(error.get("level"), str): 53 | error["level"] = ErrorLevel(error["level"]) 54 | return cast(List[ErrorPayload], errors) 55 | 56 | 57 | def is_sip_40_payload(errors: List[Dict[str, Any]]) -> bool: 58 | """ 59 | Return if a given error payload comforms with SIP-40. 60 | """ 61 | return isinstance(errors, list) and all( 62 | isinstance(error, dict) 63 | and set(error.keys()) <= {"message", "error_type", "level", "extra"} 64 | for error in errors 65 | ) 66 | 67 | 68 | def validate_response(response: Response) -> None: 69 | """ 70 | Check for errors in a response. 
71 | """ 72 | if response.ok: 73 | return 74 | 75 | if response.headers.get("content-type") == "application/json": 76 | payload = response.json() 77 | message = json.dumps(payload, indent=4) 78 | 79 | if "errors" in payload and is_sip_40_payload(payload["errors"]): 80 | errors = deserialize_error_level(payload["errors"]) 81 | else: 82 | errors = [ 83 | { 84 | "message": "Unknown error", 85 | "error_type": "UNKNOWN_ERROR", 86 | "level": ErrorLevel.ERROR, 87 | "extra": payload, 88 | }, 89 | ] 90 | else: 91 | message = response.text 92 | errors = [ 93 | { 94 | "message": message, 95 | "error_type": "UNKNOWN_ERROR", 96 | "level": ErrorLevel.ERROR, 97 | }, 98 | ] 99 | 100 | _logger.error(message) 101 | raise SupersetError(errors=errors) 102 | 103 | 104 | def split_comma( # pylint: disable=unused-argument 105 | ctx: click.core.Context, 106 | param: str, 107 | value: Optional[str], 108 | ) -> List[str]: 109 | """ 110 | Split CLI option into multiple values. 111 | """ 112 | if value is None: 113 | return [] 114 | 115 | return [option.strip() for option in value.split(",")] 116 | 117 | 118 | def dict_merge(base: Dict[Any, Any], overrides: Dict[Any, Any]) -> None: 119 | """ 120 | Recursive dict merge. 121 | """ 122 | for k in overrides: # pylint: disable=invalid-name 123 | if k in base and isinstance(base[k], dict) and isinstance(overrides[k], dict): 124 | dict_merge(base[k], overrides[k]) 125 | else: 126 | base[k] = overrides[k] 127 | 128 | 129 | def raise_cli_errors(function: Callable[..., Any]) -> Callable[..., Any]: 130 | """ 131 | Decorator to catch any CLIError raised and exits the execution with an error code. 132 | """ 133 | 134 | def wrapper(*args, **kwargs): 135 | try: 136 | return function(*args, **kwargs) 137 | except CLIError as excinfo: 138 | click.echo( 139 | click.style( 140 | str(excinfo), 141 | fg="bright_red", 142 | ), 143 | ) 144 | sys.exit(excinfo.exit_code) 145 | 146 | return wrapper 147 | -------------------------------------------------------------------------------- /src/preset_cli/typing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom types. 3 | """ 4 | 5 | from typing import List, TypedDict 6 | 7 | 8 | class UserType(TypedDict): 9 | """ 10 | Schema for a user. 
11 | """ 12 | 13 | id: int 14 | username: str 15 | role: List[str] 16 | first_name: str 17 | last_name: str 18 | email: str 19 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/tests/__init__.py -------------------------------------------------------------------------------- /tests/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/tests/api/__init__.py -------------------------------------------------------------------------------- /tests/api/clients/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/tests/api/clients/__init__.py -------------------------------------------------------------------------------- /tests/auth/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/tests/auth/__init__.py -------------------------------------------------------------------------------- /tests/auth/jwt_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test JWT auth. 3 | """ 4 | 5 | import pytest 6 | from pytest_mock import MockerFixture 7 | 8 | from preset_cli.auth.jwt import JWTAuth 9 | 10 | 11 | def test_jwt_auth() -> None: 12 | """ 13 | Test the ``JWTAuth`` authentication mechanism. 14 | """ 15 | auth = JWTAuth("my-token") 16 | assert auth.get_headers() == {"Authorization": "Bearer my-token"} 17 | 18 | 19 | def test_jwt_auth_from_stored_credentials(mocker: MockerFixture) -> None: 20 | """ 21 | Test instantiating the object from stored credentials 22 | """ 23 | mocker.patch("preset_cli.auth.jwt.open") 24 | 25 | get_credentials_path = mocker.patch("preset_cli.auth.jwt.get_credentials_path") 26 | get_credentials_path().exists.return_value = True 27 | get_credentials_path().__str__.return_value = "/path/to/credentials.yaml" 28 | 29 | yaml = mocker.patch("preset_cli.auth.jwt.yaml") 30 | yaml.load.return_value = { 31 | "api_token": "TOKEN", 32 | "api_secret": "SECRET", 33 | "baseurl": "https://api.app.preset.io/", 34 | } 35 | 36 | get_access_token = mocker.patch("preset_cli.auth.jwt.get_access_token") 37 | get_access_token.return_value = "JWT_TOKEN" 38 | 39 | auth = JWTAuth.from_stored_credentials() 40 | assert auth.token == "JWT_TOKEN" 41 | get_access_token.assert_called_with( 42 | baseurl="https://api.app.preset.io/", 43 | api_token="TOKEN", 44 | api_secret="SECRET", 45 | ) 46 | 47 | # test for error 48 | get_credentials_path().exists.return_value = False 49 | with pytest.raises(Exception) as excinfo: 50 | JWTAuth.from_stored_credentials() 51 | assert ( 52 | str(excinfo.value) 53 | == "Could not load credentials from /path/to/credentials.yaml" 54 | ) 55 | -------------------------------------------------------------------------------- /tests/auth/lib_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for ``preset_cli.auth.lib``. 
3 | """ 4 | 5 | from pathlib import Path 6 | 7 | import yaml 8 | from pyfakefs.fake_filesystem import FakeFilesystem 9 | from pytest_mock import MockerFixture 10 | from requests_mock.mocker import Mocker 11 | from yarl import URL 12 | 13 | from preset_cli.auth.lib import ( 14 | get_access_token, 15 | get_credentials_path, 16 | store_credentials, 17 | ) 18 | 19 | 20 | def test_get_access_token(requests_mock: Mocker) -> None: 21 | """ 22 | Test ``get_access_token``. 23 | """ 24 | requests_mock.post( 25 | "https://api.app.preset.io/v1/auth/", 26 | json={"payload": {"access_token": "TOKEN"}}, 27 | ) 28 | 29 | access_token = get_access_token( 30 | URL("https://api.app.preset.io/"), 31 | "API_TOKEN", 32 | "API_SECRET", 33 | ) 34 | assert access_token == "TOKEN" 35 | 36 | access_token = get_access_token( 37 | "https://api.app.preset.io/", 38 | "API_TOKEN", 39 | "API_SECRET", 40 | ) 41 | assert access_token == "TOKEN" 42 | 43 | 44 | def test_get_credentials_path(mocker: MockerFixture) -> None: 45 | """ 46 | Test ``get_credentials_path``. 47 | """ 48 | mocker.patch("preset_cli.auth.lib.user_config_dir", return_value="/path/to/config") 49 | assert get_credentials_path() == Path("/path/to/config/credentials.yaml") 50 | 51 | 52 | # pylint: disable=unused-argument, invalid-name 53 | def test_store_credentials(mocker: MockerFixture, fs: FakeFilesystem) -> None: 54 | """ 55 | Test ``store_credentials``. 56 | """ 57 | credentials_path = Path("/path/to/config/credentials.yaml") 58 | 59 | mocker.patch("preset_cli.auth.lib.input", side_effect=["invalid", "n"]) 60 | store_credentials( 61 | "API_TOKEN", 62 | "API_SECRET", 63 | URL("https://api.app.preset.io/"), 64 | credentials_path, 65 | ) 66 | assert not credentials_path.exists() 67 | 68 | mocker.patch("preset_cli.auth.lib.input", return_value="y") 69 | store_credentials( 70 | "API_TOKEN", 71 | "API_SECRET", 72 | URL("https://api.app.preset.io/"), 73 | credentials_path, 74 | ) 75 | assert credentials_path.exists() 76 | with open(credentials_path, encoding="utf-8") as input_: 77 | contents = yaml.load(input_, Loader=yaml.SafeLoader) 78 | assert contents == { 79 | "api_secret": "API_SECRET", 80 | "api_token": "API_TOKEN", 81 | "baseurl": "https://api.app.preset.io/", 82 | } 83 | -------------------------------------------------------------------------------- /tests/auth/main_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test authentication mechanisms. 3 | """ 4 | 5 | from pytest_mock import MockerFixture 6 | from requests_mock.mocker import Mocker 7 | 8 | from preset_cli.auth.main import Auth 9 | 10 | 11 | def test_auth(mocker: MockerFixture) -> None: 12 | """ 13 | Tests for the base class ``Auth``. 14 | """ 15 | # pylint: disable=invalid-name 16 | Session = mocker.patch("preset_cli.auth.main.Session") 17 | 18 | auth = Auth() 19 | assert auth.session == Session() 20 | 21 | 22 | def test_reauth(requests_mock: Mocker) -> None: 23 | """ 24 | Test the ``reauth`` hook when authentication fails. 25 | """ 26 | requests_mock.get("http://example.org/", status_code=401) 27 | 28 | # the base class has no reauth 29 | auth = Auth() 30 | response = auth.session.get("http://example.org/") 31 | assert response.status_code == 401 32 | -------------------------------------------------------------------------------- /tests/auth/preset_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test Preset auth. 
3 | """ 4 | 5 | import pytest 6 | from pytest_mock import MockerFixture 7 | from requests_mock.mocker import Mocker 8 | from yarl import URL 9 | 10 | from preset_cli.auth.preset import PresetAuth 11 | 12 | 13 | def test_preset_auth(mocker: MockerFixture) -> None: 14 | """ 15 | Test the ``PresetAuth`` authentication mechanism. 16 | """ 17 | mocker.patch("preset_cli.auth.preset.get_access_token", return_value="JWT_TOKEN") 18 | 19 | auth = PresetAuth(URL("http:/api.app.preset.io/"), "TOKEN", "SECRET") 20 | assert auth.get_headers() == {"Authorization": "Bearer JWT_TOKEN"} 21 | 22 | 23 | def test_preset_auth_reauth(mocker: MockerFixture, requests_mock: Mocker) -> None: 24 | """ 25 | Test reauthorizing on a 401. 26 | """ 27 | mocker.patch( 28 | "preset_cli.auth.preset.get_access_token", 29 | side_effect=["JWT_TOKEN1", "JWT_TOKEN2"], 30 | ) 31 | requests_mock.get( 32 | "https://api.app.preset.io/", 33 | status_code=401, 34 | ) 35 | requests_mock.get( 36 | "https://api.app.preset.io/", 37 | request_headers={"Authorization": "Bearer JWT_TOKEN1"}, 38 | status_code=401, 39 | ) 40 | requests_mock.get( 41 | "https://api.app.preset.io/", 42 | request_headers={"Authorization": "Bearer JWT_TOKEN2"}, 43 | status_code=200, 44 | ) 45 | 46 | auth = PresetAuth(URL("https:/api.app.preset.io/"), "TOKEN", "SECRET") 47 | assert auth.get_headers() == {"Authorization": "Bearer JWT_TOKEN1"} 48 | response = auth.session.get("https://api.app.preset.io/") 49 | assert response.status_code == 200 50 | assert auth.get_headers() == {"Authorization": "Bearer JWT_TOKEN2"} 51 | 52 | 53 | def test_preset_auth_from_stored_credentials(mocker: MockerFixture) -> None: 54 | """ 55 | Test instantiating the object from stored credentials 56 | """ 57 | mocker.patch("preset_cli.auth.preset.open") 58 | 59 | get_credentials_path = mocker.patch("preset_cli.auth.preset.get_credentials_path") 60 | get_credentials_path().exists.return_value = True 61 | get_credentials_path().__str__.return_value = "/path/to/credentials.yaml" 62 | 63 | yaml = mocker.patch("preset_cli.auth.preset.yaml") 64 | yaml.load.return_value = { 65 | "api_token": "TOKEN", 66 | "api_secret": "SECRET", 67 | "baseurl": "https://api.app.preset.io/", 68 | } 69 | 70 | get_access_token = mocker.patch("preset_cli.auth.preset.get_access_token") 71 | get_access_token.return_value = "JWT_TOKEN" 72 | 73 | auth = PresetAuth.from_stored_credentials() 74 | assert auth.token == "JWT_TOKEN" 75 | get_access_token.assert_called_with( 76 | "https://api.app.preset.io/", 77 | "TOKEN", 78 | "SECRET", 79 | ) 80 | 81 | # test for error 82 | get_credentials_path().exists.return_value = False 83 | with pytest.raises(Exception) as excinfo: 84 | PresetAuth.from_stored_credentials() 85 | assert ( 86 | str(excinfo.value) 87 | == "Could not load credentials from /path/to/credentials.yaml" 88 | ) 89 | -------------------------------------------------------------------------------- /tests/auth/superset_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test username:password authentication mechanism. 3 | """ 4 | 5 | from pytest_mock import MockerFixture 6 | from requests_mock.mocker import Mocker 7 | from yarl import URL 8 | 9 | from preset_cli.auth.superset import SupersetJWTAuth, UsernamePasswordAuth 10 | 11 | 12 | def test_username_password_auth(requests_mock: Mocker) -> None: 13 | """ 14 | Tests for the username/password authentication mechanism. 
15 | """ 16 | csrf_token = "CSFR_TOKEN" 17 | requests_mock.get( 18 | "https://superset.example.org/login/", 19 | text=f'', 20 | ) 21 | requests_mock.post("https://superset.example.org/login/") 22 | 23 | auth = UsernamePasswordAuth( 24 | URL("https://superset.example.org/"), 25 | "admin", 26 | "password123", 27 | ) 28 | assert auth.get_headers() == { 29 | "X-CSRFToken": csrf_token, 30 | } 31 | 32 | assert ( 33 | requests_mock.last_request.text 34 | == "username=admin&password=password123&csrf_token=CSFR_TOKEN" 35 | ) 36 | 37 | 38 | def test_username_password_auth_no_csrf(requests_mock: Mocker) -> None: 39 | """ 40 | Tests for the username/password authentication mechanism. 41 | """ 42 | requests_mock.get( 43 | "https://superset.example.org/login/", 44 | text="WTF_CSRF_ENABLED = False", 45 | ) 46 | requests_mock.post("https://superset.example.org/login/") 47 | 48 | auth = UsernamePasswordAuth( 49 | URL("https://superset.example.org/"), 50 | "admin", 51 | "password123", 52 | ) 53 | # pylint: disable=use-implicit-booleaness-not-comparison 54 | assert auth.get_headers() == {} 55 | 56 | assert requests_mock.last_request.text == "username=admin&password=password123" 57 | 58 | 59 | def test_jwt_auth_superset(mocker: MockerFixture) -> None: 60 | """ 61 | Test the ``JWTAuth`` authentication mechanism for Superset tenant. 62 | """ 63 | auth = SupersetJWTAuth("my-token", URL("https://example.org/")) 64 | mocker.patch.object(auth, "get_csrf_token", return_value="myCSRFToken") 65 | 66 | assert auth.get_headers() == { 67 | "Authorization": "Bearer my-token", 68 | "X-CSRFToken": "myCSRFToken", 69 | } 70 | 71 | 72 | def test_get_csrf_token(requests_mock: Mocker) -> None: 73 | """ 74 | Test the get_csrf_token method. 75 | """ 76 | auth = SupersetJWTAuth("my-token", URL("https://example.org/")) 77 | requests_mock.get( 78 | "https://example.org/api/v1/security/csrf_token/", 79 | json={"result": "myCSRFToken"}, 80 | ) 81 | 82 | assert auth.get_csrf_token("my-token") == "myCSRFToken" 83 | -------------------------------------------------------------------------------- /tests/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/tests/cli/__init__.py -------------------------------------------------------------------------------- /tests/cli/superset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/tests/cli/superset/__init__.py -------------------------------------------------------------------------------- /tests/cli/superset/import_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the import commands. 3 | """ 4 | 5 | # pylint: disable=invalid-name 6 | 7 | from pathlib import Path 8 | from unittest import mock 9 | from uuid import UUID 10 | 11 | import pytest 12 | import yaml 13 | from click.testing import CliRunner 14 | from pyfakefs.fake_filesystem import FakeFilesystem 15 | from pytest_mock import MockerFixture 16 | 17 | from preset_cli.cli.superset.main import superset_cli 18 | 19 | 20 | def test_import_rls(mocker: MockerFixture, fs: FakeFilesystem) -> None: 21 | """ 22 | Test the ``import_rls`` command. 
23 | """ 24 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 25 | SupersetClient = mocker.patch("preset_cli.cli.superset.import_.SupersetClient") 26 | client = SupersetClient() 27 | rls = [ 28 | { 29 | "clause": "client_id = 9", 30 | "description": "Rule description", 31 | "filter_type": "Regular", 32 | "group_key": "department", 33 | "name": "Rule name", 34 | "roles": ["Gamma"], 35 | "tables": ["main.test_table"], 36 | }, 37 | ] 38 | fs.create_file("rls.yaml", contents=yaml.dump(rls)) 39 | 40 | runner = CliRunner() 41 | result = runner.invoke( 42 | superset_cli, 43 | ["https://superset.example.org/", "import-rls"], 44 | catch_exceptions=False, 45 | ) 46 | assert result.exit_code == 0 47 | 48 | client.import_rls.assert_called_with(rls[0]) 49 | 50 | 51 | def test_import_roles(mocker: MockerFixture, fs: FakeFilesystem) -> None: 52 | """ 53 | Test the ``import_roles`` command. 54 | """ 55 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 56 | SupersetClient = mocker.patch("preset_cli.cli.superset.import_.SupersetClient") 57 | client = SupersetClient() 58 | roles = [ 59 | { 60 | "name": "Role name", 61 | "permissions": ["can do this", "can do that"], 62 | }, 63 | ] 64 | fs.create_file("roles.yaml", contents=yaml.dump(roles)) 65 | 66 | runner = CliRunner() 67 | result = runner.invoke( 68 | superset_cli, 69 | ["https://superset.example.org/", "import-roles"], 70 | catch_exceptions=False, 71 | ) 72 | assert result.exit_code == 0 73 | 74 | client.import_role.assert_called_with(roles[0]) 75 | 76 | 77 | def test_import_ownership(mocker: MockerFixture, fs: FakeFilesystem) -> None: 78 | """ 79 | Test the ``import_ownership`` command. 80 | """ 81 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 82 | SupersetClient = mocker.patch("preset_cli.cli.superset.import_.SupersetClient") 83 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", Path("progress.log")) 84 | client = SupersetClient() 85 | client.export_users.return_value = [{"id": 1, "email": "admin@example.com"}] 86 | client.get_uuids.return_value = {1: UUID("e4e6a14b-c3e8-4fdf-a850-183ba6ce15e0")} 87 | ownership = { 88 | "dataset": [ 89 | { 90 | "name": "test_table", 91 | "owners": ["admin@example.com"], 92 | "uuid": "e4e6a14b-c3e8-4fdf-a850-183ba6ce15e0", 93 | }, 94 | ], 95 | } 96 | fs.create_file("ownership.yaml", contents=yaml.dump(ownership)) 97 | 98 | runner = CliRunner() 99 | result = runner.invoke( 100 | superset_cli, 101 | ["https://superset.example.org/", "import-ownership"], 102 | catch_exceptions=False, 103 | ) 104 | assert result.exit_code == 0 105 | 106 | client.import_ownership.assert_called_once_with( 107 | "dataset", 108 | ownership["dataset"][0], 109 | {"admin@example.com": 1}, 110 | {"e4e6a14b-c3e8-4fdf-a850-183ba6ce15e0": 1}, 111 | ) 112 | 113 | 114 | def test_import_ownership_progress_log( 115 | mocker: MockerFixture, 116 | fs: FakeFilesystem, 117 | ) -> None: 118 | """ 119 | Test the ``import_ownership`` command with an existing log file. 
120 | """ 121 | logs_content = { 122 | "assets": [ 123 | { 124 | "path": "/path/to/root/first_path", 125 | "status": "SUCCESS", 126 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e6", 127 | }, 128 | { 129 | "path": "/path/to/root/second_path", 130 | "status": "FAILED", 131 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e7", 132 | }, 133 | ], 134 | "ownership": [ 135 | { 136 | "status": "SUCCESS", 137 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e8", 138 | }, 139 | { 140 | "status": "FAILED", 141 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e9", 142 | }, 143 | ], 144 | } 145 | fs.create_file("progress.log", contents=yaml.dump(logs_content)) 146 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", Path("progress.log")) 147 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 148 | SupersetClient = mocker.patch("preset_cli.cli.superset.import_.SupersetClient") 149 | client = SupersetClient() 150 | client.export_users.return_value = [ 151 | {"id": 1, "email": "admin@example.com"}, 152 | {"id": 2, "email": "viewer@example.com"}, 153 | ] 154 | users = { 155 | "admin@example.com": 1, 156 | "viewer@example.com": 2, 157 | } 158 | client.get_uuids.return_value = { 159 | 1: UUID("18ddf8ab-68f9-4c15-ba9f-c75921b019e6"), 160 | 2: UUID("18ddf8ab-68f9-4c15-ba9f-c75921b019e7"), 161 | 3: UUID("18ddf8ab-68f9-4c15-ba9f-c75921b019e8"), 162 | 4: UUID("18ddf8ab-68f9-4c15-ba9f-c75921b019e9"), 163 | 5: UUID("e4e6a14b-c3e8-4fdf-a850-183ba6ce15e0"), 164 | } 165 | uuids = { 166 | "18ddf8ab-68f9-4c15-ba9f-c75921b019e6": 1, 167 | "18ddf8ab-68f9-4c15-ba9f-c75921b019e7": 2, 168 | "18ddf8ab-68f9-4c15-ba9f-c75921b019e8": 3, 169 | "18ddf8ab-68f9-4c15-ba9f-c75921b019e9": 4, 170 | "e4e6a14b-c3e8-4fdf-a850-183ba6ce15e0": 5, 171 | } 172 | ownership = { 173 | "dataset": [ 174 | { 175 | "name": "test_table", 176 | "owners": ["admin@example.com"], 177 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e8", 178 | }, 179 | { 180 | "name": "other_table", 181 | "owners": ["viewer@example.com"], 182 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e9", 183 | }, 184 | { 185 | "name": "yet_another_table", 186 | "owners": ["admin@example.com", "viewer@example.com"], 187 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e7", 188 | }, 189 | { 190 | "name": "just_another_test_table", 191 | "owners": ["viewer@example.com", "admin@example.com"], 192 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e6", 193 | }, 194 | { 195 | "name": "last_table", 196 | "owners": ["viewer@example.com"], 197 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e0", 198 | }, 199 | ], 200 | } 201 | fs.create_file("ownership.yaml", contents=yaml.dump(ownership)) 202 | 203 | runner = CliRunner() 204 | result = runner.invoke( 205 | superset_cli, 206 | ["https://superset.example.org/", "import-ownership"], 207 | catch_exceptions=False, 208 | ) 209 | assert result.exit_code == 0 210 | 211 | # Should skip `18ddf8ab-68f9-4c15-ba9f-c75921b019e7` as its import 212 | # failed. Should also skip `18ddf8ab-68f9-4c15-ba9f-c75921b019e8` 213 | # because its ownership was sucessfully imported, but retry 214 | # `18ddf8ab-68f9-4c15-ba9f-c75921b019e9`. 
215 | client.import_ownership.assert_has_calls( 216 | [ 217 | mock.call("dataset", ownership["dataset"][1], users, uuids), 218 | mock.call("dataset", ownership["dataset"][3], users, uuids), 219 | mock.call("dataset", ownership["dataset"][4], users, uuids), 220 | ], 221 | ) 222 | 223 | 224 | def test_import_ownership_failure(mocker: MockerFixture, fs: FakeFilesystem) -> None: 225 | """ 226 | Test the ``import_ownership`` command when a failure happens without 227 | the ``continue-on-error`` flag. 228 | """ 229 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 230 | SupersetClient = mocker.patch("preset_cli.cli.superset.import_.SupersetClient") 231 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", Path("progress.log")) 232 | client = SupersetClient() 233 | client.export_users.return_value = [{"id": 1, "email": "admin@example.com"}] 234 | client.get_uuids.return_value = { 235 | 1: UUID("18ddf8ab-68f9-4c15-ba9f-c75921b019e6"), 236 | 2: UUID("18ddf8ab-68f9-4c15-ba9f-c75921b019e7"), 237 | } 238 | ownership = { 239 | "dataset": [ 240 | { 241 | "name": "test_table", 242 | "owners": ["admin@example.com"], 243 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e6", 244 | }, 245 | { 246 | "name": "test_table_two", 247 | "owners": ["admin@example.com"], 248 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e7", 249 | }, 250 | ], 251 | } 252 | fs.create_file("ownership.yaml", contents=yaml.dump(ownership)) 253 | client.import_ownership.side_effect = [ 254 | None, 255 | Exception("An error occurred!"), 256 | ] 257 | 258 | assert not Path("progress.log").exists() 259 | 260 | runner = CliRunner() 261 | with pytest.raises(Exception): 262 | runner.invoke( 263 | superset_cli, 264 | ["https://superset.example.org/", "import-ownership"], 265 | catch_exceptions=False, 266 | ) 267 | 268 | assert Path("progress.log").exists() 269 | with open("progress.log", encoding="utf-8") as log: 270 | content = yaml.load(log, Loader=yaml.SafeLoader) 271 | 272 | assert content == { 273 | "assets": [], 274 | "ownership": [ 275 | { 276 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e6", 277 | "status": "SUCCESS", 278 | }, 279 | ], 280 | } 281 | 282 | 283 | def test_import_ownership_failure_continue( 284 | mocker: MockerFixture, 285 | fs: FakeFilesystem, 286 | ) -> None: 287 | """ 288 | Test the ``import_ownership`` command when a failure happens 289 | with the ``continue-on-error`` flag. 
290 | """ 291 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 292 | SupersetClient = mocker.patch("preset_cli.cli.superset.import_.SupersetClient") 293 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", Path("progress.log")) 294 | client = SupersetClient() 295 | client.export_users.return_value = [{"id": 1, "email": "admin@example.com"}] 296 | client.get_uuids.return_value = { 297 | 1: UUID("18ddf8ab-68f9-4c15-ba9f-c75921b019e6"), 298 | 2: UUID("18ddf8ab-68f9-4c15-ba9f-c75921b019e7"), 299 | } 300 | ownership = { 301 | "dataset": [ 302 | { 303 | "name": "test_table", 304 | "owners": ["admin@example.com"], 305 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e6", 306 | }, 307 | { 308 | "name": "test_table_two", 309 | "owners": ["admin@example.com"], 310 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e7", 311 | }, 312 | ], 313 | } 314 | fs.create_file("ownership.yaml", contents=yaml.dump(ownership)) 315 | client.import_ownership.side_effect = [ 316 | Exception("An error occurred!"), 317 | None, 318 | ] 319 | 320 | assert not Path("progress.log").exists() 321 | 322 | runner = CliRunner() 323 | result = runner.invoke( 324 | superset_cli, 325 | ["https://superset.example.org/", "import-ownership", "--continue-on-error"], 326 | catch_exceptions=False, 327 | ) 328 | assert result.exit_code == 0 329 | 330 | assert Path("progress.log").exists() 331 | with open("progress.log", encoding="utf-8") as log: 332 | content = yaml.load(log, Loader=yaml.SafeLoader) 333 | 334 | assert content == { 335 | "assets": [], 336 | "ownership": [ 337 | { 338 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e6", 339 | "status": "FAILED", 340 | }, 341 | { 342 | "uuid": "18ddf8ab-68f9-4c15-ba9f-c75921b019e7", 343 | "status": "SUCCESS", 344 | }, 345 | ], 346 | } 347 | 348 | 349 | def test_import_ownership_continue_on_errors( 350 | mocker: MockerFixture, 351 | fs: FakeFilesystem, 352 | ) -> None: 353 | """ 354 | Test the ``import_ownership`` command with the ``continue-on-error`` flag. 355 | """ 356 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 357 | mocker.patch("preset_cli.cli.superset.import_.SupersetClient") 358 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", Path("progress.log")) 359 | ownership = { 360 | "dataset": [ 361 | { 362 | "name": "test_table", 363 | "owners": ["admin@example.com"], 364 | "uuid": "uuid1", 365 | }, 366 | { 367 | "name": "test_table_two", 368 | "owners": ["admin@example.com"], 369 | "uuid": "uuid2", 370 | }, 371 | ], 372 | } 373 | fs.create_file("ownership.yaml", contents=yaml.dump(ownership)) 374 | 375 | assert not Path("progress.log").exists() 376 | 377 | runner = CliRunner() 378 | result = runner.invoke( 379 | superset_cli, 380 | ["https://superset.example.org/", "import-ownership", "--continue-on-error"], 381 | catch_exceptions=False, 382 | ) 383 | assert result.exit_code == 0 384 | assert not Path("progress.log").exists() 385 | -------------------------------------------------------------------------------- /tests/cli/superset/lib_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for ``preset_cli.cli.superset.lib``. 
3 | """ 4 | # pylint: disable=unused-argument, invalid-name 5 | 6 | from pathlib import Path 7 | 8 | import yaml 9 | from pyfakefs.fake_filesystem import FakeFilesystem 10 | from pytest_mock import MockerFixture 11 | 12 | from preset_cli.cli.superset.lib import ( 13 | LogType, 14 | clean_logs, 15 | get_logs, 16 | write_logs_to_file, 17 | ) 18 | 19 | 20 | def test_get_logs_new_file(mocker: MockerFixture, fs: FakeFilesystem) -> None: 21 | """ 22 | Test the ``get_logs`` helper when the log file does not exist. 23 | """ 24 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", Path("progress.log")) 25 | assert get_logs(LogType.ASSETS) == ( 26 | Path("progress.log"), 27 | {"assets": [], "ownership": []}, 28 | ) 29 | 30 | 31 | def test_get_logs_existing_file(mocker: MockerFixture, fs: FakeFilesystem) -> None: 32 | """ 33 | Test the ``get_logs`` helper when the log file does not exist. 34 | """ 35 | root = Path("/path/to/root") 36 | fs.create_dir(root) 37 | 38 | logs_content = { 39 | "assets": [ 40 | { 41 | "path": "/path/to/root/first_path", 42 | "status": "SUCCESS", 43 | "uuid": "uuid1", 44 | }, 45 | { 46 | "path": "/path/to/root/first_path", 47 | "status": "FAILED", 48 | "uuid": "uuid3", 49 | }, 50 | ], 51 | "ownership": [ 52 | { 53 | "status": "SUCCESS", 54 | "uuid": "uuid2", 55 | }, 56 | ], 57 | } 58 | fs.create_file( 59 | root / "progress.log", 60 | contents=yaml.dump(logs_content), 61 | ) 62 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", root / "progress.log") 63 | 64 | assert get_logs(LogType.ASSETS) == ( 65 | root / "progress.log", 66 | { 67 | "assets": [ 68 | { 69 | "path": "/path/to/root/first_path", 70 | "status": "SUCCESS", 71 | "uuid": "uuid1", 72 | }, 73 | ], 74 | "ownership": [ 75 | { 76 | "status": "SUCCESS", 77 | "uuid": "uuid2", 78 | }, 79 | ], 80 | }, 81 | ) 82 | 83 | assert get_logs(LogType.OWNERSHIP) == (root / "progress.log", logs_content) 84 | 85 | 86 | def test_write_logs_to_file(mocker: MockerFixture, fs: FakeFilesystem) -> None: 87 | """ 88 | Test the ``write_logs_to_file`` helper. 89 | """ 90 | root = Path("/path/to/root") 91 | fs.create_dir(root) 92 | fs.create_file( 93 | root / "progress.log", 94 | contents=yaml.dump( 95 | { 96 | "assets": [ 97 | { 98 | "path": "/path/to/root/first_path", 99 | "status": "FAILED", 100 | "uuid": "uuid1", 101 | }, 102 | ], 103 | }, 104 | ), 105 | ) 106 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", root / "progress.log") 107 | 108 | new_logs = { 109 | LogType.ASSETS: [ 110 | { 111 | "path": "/path/to/root/second_path", 112 | "status": "SUCCESS", 113 | "uuid": "uuid2", 114 | }, 115 | { 116 | "path": "/path/to/root/third_path", 117 | "status": "SUCCESS", 118 | "uuid": "uuid3", 119 | }, 120 | ], 121 | LogType.OWNERSHIP: [ 122 | { 123 | "status": "SUCCESS", 124 | "uuid": "uuid4", 125 | }, 126 | ], 127 | } 128 | 129 | with open(root / "progress.log", "r+", encoding="utf-8") as file: 130 | write_logs_to_file(file, new_logs) 131 | 132 | with open(root / "progress.log", encoding="utf-8") as file: 133 | content = yaml.load(file, Loader=yaml.SafeLoader) 134 | 135 | assert content == new_logs 136 | 137 | 138 | def test_clean_logs_delete_file(mocker: MockerFixture, fs: FakeFilesystem) -> None: 139 | """ 140 | Test the ``clean_logs`` helper when the log file should be deleted. 
141 | """ 142 | root = Path("/path/to/root") 143 | logs_path = root / "progress.log" 144 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", logs_path) 145 | fs.create_dir(root) 146 | fs.create_file( 147 | root / "progress.log", 148 | contents=yaml.dump( 149 | { 150 | "assets": [ 151 | { 152 | "path": "/path/to/root/first_path", 153 | "status": "SUCCESS", 154 | "uuid": "uuid1", 155 | }, 156 | ], 157 | }, 158 | ), 159 | ) 160 | assert logs_path.exists() 161 | 162 | current_logs = { 163 | LogType.ASSETS: [ 164 | { 165 | "path": "/path/to/root/first_path", 166 | "status": "SUCCESS", 167 | "uuid": "uuid1", 168 | }, 169 | ], 170 | } 171 | 172 | clean_logs(LogType.ASSETS, current_logs) 173 | assert not logs_path.exists() 174 | 175 | 176 | def test_clean_logs_keep_file(mocker: MockerFixture, fs: FakeFilesystem) -> None: 177 | """ 178 | Test the ``clean_logs`` helper when the log file should be kept. 179 | """ 180 | root = Path("/path/to/root") 181 | logs_path = root / "progress.log" 182 | mocker.patch("preset_cli.cli.superset.lib.LOG_FILE_PATH", logs_path) 183 | fs.create_dir(root) 184 | fs.create_file( 185 | root / "progress.log", 186 | contents=yaml.dump( 187 | { 188 | "assets": [ 189 | { 190 | "path": "/path/to/root/first_path", 191 | "status": "SUCCESS", 192 | "uuid": "uuid1", 193 | }, 194 | ], 195 | "ownership": [ 196 | { 197 | "status": "SUCCESS", 198 | "uuid": "uuid2", 199 | }, 200 | ], 201 | }, 202 | ), 203 | ) 204 | assert logs_path.exists() 205 | 206 | current_logs = { 207 | LogType.ASSETS: [ 208 | { 209 | "path": "/path/to/root/first_path", 210 | "status": "SUCCESS", 211 | "uuid": "uuid1", 212 | }, 213 | ], 214 | LogType.OWNERSHIP: [ 215 | { 216 | "status": "SUCCESS", 217 | "uuid": "uuid2", 218 | }, 219 | ], 220 | } 221 | clean_logs(LogType.ASSETS, current_logs) 222 | 223 | with open(logs_path, encoding="utf-8") as log: 224 | content = yaml.load(log, Loader=yaml.SafeLoader) 225 | 226 | assert content == {"ownership": [{"status": "SUCCESS", "uuid": "uuid2"}]} 227 | -------------------------------------------------------------------------------- /tests/cli/superset/main_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the Superset dispatcher. 3 | """ 4 | 5 | import click 6 | from click.testing import CliRunner 7 | from pytest_mock import MockerFixture 8 | from yarl import URL 9 | 10 | from preset_cli.cli.superset.main import mutate_commands, superset, superset_cli 11 | 12 | 13 | def test_mutate_commands() -> None: 14 | """ 15 | Test ``mutate_commands``. 16 | """ 17 | 18 | @click.group() 19 | def source_group() -> None: 20 | """ 21 | A simple group of commands. 22 | """ 23 | 24 | @click.command() 25 | @click.argument("name") 26 | def source_command(name: str) -> None: 27 | """ 28 | Say hello. 29 | """ 30 | click.echo(f"Hello, {name}!") 31 | 32 | source_group.add_command(source_command) 33 | 34 | @click.group() 35 | def source_subgroup() -> None: 36 | """ 37 | A simple subgroup. 38 | """ 39 | 40 | @click.command() 41 | @click.argument("name") 42 | def source_subcommand(name: str) -> None: 43 | """ 44 | Say goodbye 45 | """ 46 | click.echo(f"Goodbye, {name}!") 47 | 48 | source_subgroup.add_command(source_subcommand) 49 | source_group.add_command(source_subgroup) 50 | 51 | @click.group() 52 | @click.pass_context 53 | def target_group(ctx: click.core.Context) -> None: 54 | """ 55 | The target group to which commands will be added to. 
56 | """ 57 | ctx.ensure_object(dict) 58 | ctx.obj["WORKSPACES"] = ["instance1", "instance2"] 59 | 60 | mutate_commands(source_group, target_group) 61 | 62 | runner = CliRunner() 63 | 64 | result = runner.invoke( 65 | target_group, 66 | ["source-command", "Alice"], 67 | catch_exceptions=False, 68 | ) 69 | assert result.exit_code == 0 70 | assert ( 71 | result.output 72 | == """ 73 | instance1 74 | Hello, Alice! 75 | 76 | instance2 77 | Hello, Alice! 78 | """ 79 | ) 80 | 81 | result = runner.invoke( 82 | target_group, 83 | ["source-subgroup", "source-subcommand", "Alice"], 84 | catch_exceptions=False, 85 | ) 86 | assert result.exit_code == 0 87 | assert ( 88 | result.output 89 | == """ 90 | instance1 91 | Goodbye, Alice! 92 | 93 | instance2 94 | Goodbye, Alice! 95 | """ 96 | ) 97 | 98 | 99 | def test_superset() -> None: 100 | """ 101 | Test the ``superset`` command. 102 | """ 103 | runner = CliRunner() 104 | 105 | result = runner.invoke(superset, ["--help"], catch_exceptions=False) 106 | assert result.exit_code == 0 107 | assert ( 108 | result.output 109 | == """Usage: superset [OPTIONS] COMMAND [ARGS]... 110 | 111 | Send commands to one or more Superset instances. 112 | 113 | Options: 114 | --help Show this message and exit. 115 | 116 | Commands: 117 | export 118 | export-assets 119 | export-ownership 120 | export-rls 121 | export-roles 122 | export-users 123 | import-assets 124 | import-ownership 125 | import-rls 126 | import-roles 127 | sql 128 | sync 129 | """ 130 | ) 131 | 132 | result = runner.invoke(superset, ["export", "--help"], catch_exceptions=False) 133 | assert result.exit_code == 0 134 | assert ( 135 | result.output 136 | == """Usage: superset export [OPTIONS] DIRECTORY 137 | 138 | Options: 139 | --overwrite Overwrite existing resources 140 | --disable-jinja-escaping Disable Jinja template escaping 141 | --force-unix-eol Force Unix end-of-line characters, otherwise use 142 | system default 143 | --asset-type TEXT Asset type 144 | --database-ids TEXT Comma separated list of database IDs to export 145 | --dataset-ids TEXT Comma separated list of dataset IDs to export 146 | --chart-ids TEXT Comma separated list of chart IDs to export 147 | --dashboard-ids TEXT Comma separated list of dashboard IDs to export 148 | --help Show this message and exit. 149 | """ 150 | ) 151 | 152 | 153 | def test_superset_jwt_auth(mocker: MockerFixture) -> None: 154 | """ 155 | Test passing a JWT to authenticate with Superset. 156 | """ 157 | # pylint: disable=invalid-name 158 | SupersetJWTAuth = mocker.patch("preset_cli.cli.superset.main.SupersetJWTAuth") 159 | 160 | runner = CliRunner() 161 | runner.invoke( 162 | superset_cli, 163 | ["--jwt-token=SECRET", "http://localhost:8088/", "export"], 164 | catch_exceptions=False, 165 | ) 166 | 167 | SupersetJWTAuth.assert_called_with("SECRET", URL("http://localhost:8088/")) 168 | -------------------------------------------------------------------------------- /tests/cli/superset/sql_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the ``sql`` command. 
3 | """ 4 | # pylint: disable=invalid-name, unused-argument, redefined-outer-name 5 | 6 | from io import StringIO 7 | from pathlib import Path 8 | 9 | import pandas as pd 10 | from click.testing import CliRunner 11 | from pyfakefs.fake_filesystem import FakeFilesystem 12 | from pytest_mock import MockerFixture 13 | from yarl import URL 14 | 15 | from preset_cli.cli.superset.main import superset_cli 16 | from preset_cli.cli.superset.sql import run_query, run_session 17 | from preset_cli.exceptions import ErrorLevel, SupersetError 18 | 19 | 20 | def test_run_query(mocker: MockerFixture) -> None: 21 | """ 22 | Test ``run_query``. 23 | """ 24 | client = mocker.MagicMock() 25 | client.run_query.return_value = pd.DataFrame([{"answer": 42}]) 26 | click = mocker.patch("preset_cli.cli.superset.sql.click") 27 | 28 | run_query(client=client, database_id=1, schema=None, query="SELECT 42 AS answer") 29 | client.run_query.assert_called_with(1, "SELECT 42 AS answer", None) 30 | click.echo.assert_called_with(" answer\n--------\n 42") 31 | 32 | 33 | def test_run_query_superset_error(mocker: MockerFixture) -> None: 34 | """ 35 | Test ``run_query`` when a ``SupersetError`` happens. 36 | """ 37 | client = mocker.MagicMock() 38 | client.run_query.side_effect = SupersetError( 39 | [ 40 | { 41 | "message": "Only SELECT statements are allowed against this database.", 42 | "error_type": "DML_NOT_ALLOWED_ERROR", 43 | "level": ErrorLevel.ERROR, 44 | "extra": { 45 | "issue_codes": [ 46 | { 47 | "code": 1022, 48 | "message": "Issue 1022 - Database does not allow data manipulation.", 49 | }, 50 | ], 51 | }, 52 | }, 53 | ], 54 | ) 55 | click = mocker.patch("preset_cli.cli.superset.sql.click") 56 | 57 | run_query(client=client, database_id=1, schema=None, query="SSELECT 1") 58 | click.style.assert_called_with( 59 | "Only SELECT statements are allowed against this database.", 60 | fg="bright_red", 61 | ) 62 | 63 | 64 | def test_run_query_exception(mocker: MockerFixture) -> None: 65 | """ 66 | Test ``run_query`` when a different exception happens. 67 | """ 68 | client = mocker.MagicMock() 69 | client.run_query.side_effect = Exception("Unexpected error") 70 | traceback = mocker.patch("preset_cli.cli.superset.sql.traceback") 71 | 72 | run_query(client=client, database_id=1, schema=None, query="SSELECT 1") 73 | traceback.print_exc.assert_called_with() 74 | 75 | 76 | def test_run_session(mocker: MockerFixture, fs: FakeFilesystem) -> None: 77 | """ 78 | Test ``run_session``. 79 | """ 80 | history = Path("/path/to/.config/preset-cli/") 81 | os = mocker.patch("preset_cli.cli.superset.sql.os") 82 | os.path.expanduser.return_value = str(history) 83 | 84 | client = mocker.MagicMock() 85 | client.run_query.return_value = pd.DataFrame([{"answer": 42}]) 86 | 87 | stdout = mocker.patch("sys.stdout", new_callable=StringIO) 88 | PromptSession = mocker.patch("preset_cli.cli.superset.sql.PromptSession") 89 | session = PromptSession() 90 | session.prompt.side_effect = ["SELECT 42 AS answer;", "", EOFError()] 91 | 92 | run_session( 93 | client=client, 94 | database_id=1, 95 | database_name="GSheets", 96 | schema=None, 97 | url=URL("https://superset.example.org/"), 98 | ) 99 | result = stdout.getvalue() 100 | assert ( 101 | result 102 | == """ answer 103 | -------- 104 | 42 105 | Goodbye! 106 | """ 107 | ) 108 | 109 | 110 | def test_run_session_multiple_commands( 111 | mocker: MockerFixture, 112 | fs: FakeFilesystem, 113 | ) -> None: 114 | """ 115 | Test ``run_session``. 
116 | """ 117 | history = Path("/path/to/.config/preset-cli/") 118 | os = mocker.patch("preset_cli.cli.superset.sql.os") 119 | os.path.expanduser.return_value = str(history) 120 | 121 | client = mocker.MagicMock() 122 | client.run_query.side_effect = [ 123 | pd.DataFrame([{"answer": 42}]), 124 | pd.DataFrame([{"question": "Life, universe, everything"}]), 125 | ] 126 | 127 | stdout = mocker.patch("sys.stdout", new_callable=StringIO) 128 | PromptSession = mocker.patch("preset_cli.cli.superset.sql.PromptSession") 129 | session = PromptSession() 130 | session.prompt.side_effect = [ 131 | "SELECT 42 AS answer;", 132 | "SELECT 'Life, universe, everything' AS question;", 133 | "", 134 | EOFError(), 135 | ] 136 | 137 | run_session( 138 | client=client, 139 | database_id=1, 140 | database_name="GSheets", 141 | schema=None, 142 | url=URL("https://superset.example.org/"), 143 | ) 144 | result = stdout.getvalue() 145 | assert ( 146 | result 147 | == """ answer 148 | -------- 149 | 42 150 | question 151 | -------------------------- 152 | Life, universe, everything 153 | Goodbye! 154 | """ 155 | ) 156 | 157 | 158 | def test_run_session_multiline(mocker: MockerFixture, fs: FakeFilesystem) -> None: 159 | """ 160 | Test ``run_session`` with multilines. 161 | """ 162 | history = Path("/path/to/.config/preset-cli/") 163 | os = mocker.patch("preset_cli.cli.superset.sql.os") 164 | os.path.expanduser.return_value = str(history) 165 | 166 | client = mocker.MagicMock() 167 | client.run_query.return_value = pd.DataFrame([{"the\nanswer": "foo\nbar"}]) 168 | 169 | stdout = mocker.patch("sys.stdout", new_callable=StringIO) 170 | PromptSession = mocker.patch("preset_cli.cli.superset.sql.PromptSession") 171 | session = PromptSession() 172 | session.prompt.side_effect = [ 173 | """SELECT 'foo\nbar' AS "the\nanswer";""", 174 | "", 175 | EOFError(), 176 | ] 177 | 178 | run_session( 179 | client=client, 180 | database_id=1, 181 | database_name="GSheets", 182 | schema=None, 183 | url=URL("https://superset.example.org/"), 184 | ) 185 | result = stdout.getvalue() 186 | assert ( 187 | result 188 | == """the 189 | answer 190 | -------- 191 | foo 192 | bar 193 | Goodbye! 194 | """ 195 | ) 196 | 197 | 198 | def test_run_session_ctrl_c(mocker: MockerFixture, fs: FakeFilesystem) -> None: 199 | """ 200 | Test that ``CTRL-C`` does not exit the REPL. 201 | """ 202 | history = Path("/path/to/.config/preset-cli/") 203 | os = mocker.patch("preset_cli.cli.superset.sql.os") 204 | os.path.expanduser.return_value = str(history) 205 | 206 | client = mocker.MagicMock() 207 | client.run_query.return_value = pd.DataFrame([{"answer": 42}]) 208 | 209 | stdout = mocker.patch("sys.stdout", new_callable=StringIO) 210 | PromptSession = mocker.patch("preset_cli.cli.superset.sql.PromptSession") 211 | session = PromptSession() 212 | session.prompt.side_effect = [KeyboardInterrupt(), "SELECT 1;", EOFError()] 213 | 214 | run_session( 215 | client=client, 216 | database_id=1, 217 | database_name="GSheets", 218 | schema=None, 219 | url=URL("https://superset.example.org/"), 220 | ) 221 | result = stdout.getvalue() 222 | assert ( 223 | result 224 | == """ answer 225 | -------- 226 | 42 227 | Goodbye! 228 | """ 229 | ) 230 | 231 | 232 | def test_run_session_history_exists(mocker: MockerFixture, fs: FakeFilesystem) -> None: 233 | """ 234 | Test ``run_session``. 
235 | """ 236 | history = Path("/path/to/.config/preset-cli/") 237 | os = mocker.patch("preset_cli.cli.superset.sql.os") 238 | os.path.expanduser.return_value = str(history) 239 | history.mkdir(parents=True) 240 | 241 | client = mocker.MagicMock() 242 | client.run_query.return_value = pd.DataFrame([{"answer": 42}]) 243 | 244 | stdout = mocker.patch("sys.stdout", new_callable=StringIO) 245 | PromptSession = mocker.patch("preset_cli.cli.superset.sql.PromptSession") 246 | session = PromptSession() 247 | session.prompt.side_effect = ["SELECT 42 AS answer;", "", EOFError()] 248 | 249 | run_session( 250 | client=client, 251 | database_id=1, 252 | database_name="GSheets", 253 | schema=None, 254 | url=URL("https://superset.example.org/"), 255 | ) 256 | result = stdout.getvalue() 257 | assert ( 258 | result 259 | == """ answer 260 | -------- 261 | 42 262 | Goodbye! 263 | """ 264 | ) 265 | 266 | 267 | def test_sql_run_query(mocker: MockerFixture) -> None: 268 | """ 269 | Test the ``sql`` command in programmatic mode (run single query). 270 | """ 271 | SupersetClient = mocker.patch("preset_cli.cli.superset.sql.SupersetClient") 272 | client = SupersetClient() 273 | client.get_databases.return_value = [{"id": 1, "database_name": "GSheets"}] 274 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 275 | run_query = mocker.patch("preset_cli.cli.superset.sql.run_query") 276 | 277 | runner = CliRunner() 278 | result = runner.invoke( 279 | superset_cli, 280 | [ 281 | "https://superset.example.org/", 282 | "sql", 283 | "-e", 284 | "SELECT 1", 285 | "--database-id", 286 | "1", 287 | ], 288 | catch_exceptions=False, 289 | ) 290 | assert result.exit_code == 0 291 | run_query.assert_called_with(client, 1, None, "SELECT 1") 292 | 293 | 294 | def test_sql_run_session(mocker: MockerFixture) -> None: 295 | """ 296 | Test the ``sql`` command in session mode (REPL). 297 | """ 298 | SupersetClient = mocker.patch("preset_cli.cli.superset.sql.SupersetClient") 299 | client = SupersetClient() 300 | client.get_databases.return_value = [{"id": 1, "database_name": "GSheets"}] 301 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 302 | run_session = mocker.patch("preset_cli.cli.superset.sql.run_session") 303 | 304 | runner = CliRunner() 305 | result = runner.invoke( 306 | superset_cli, 307 | [ 308 | "https://superset.example.org/", 309 | "sql", 310 | "--database-id", 311 | "1", 312 | ], 313 | catch_exceptions=False, 314 | ) 315 | assert result.exit_code == 0 316 | run_session.assert_called_with( 317 | client, 318 | 1, 319 | "GSheets", 320 | None, 321 | URL("https://superset.example.org/"), 322 | ) 323 | 324 | 325 | def test_sql_run_query_no_databases(mocker: MockerFixture) -> None: 326 | """ 327 | Test the ``sql`` command when no databases are found. 
328 | """ 329 | SupersetClient = mocker.patch("preset_cli.cli.superset.sql.SupersetClient") 330 | client = SupersetClient() 331 | client.get_databases.return_value = [] 332 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 333 | mocker.patch("preset_cli.cli.superset.sql.run_query") 334 | 335 | runner = CliRunner() 336 | result = runner.invoke( 337 | superset_cli, 338 | [ 339 | "https://superset.example.org/", 340 | "sql", 341 | "-e", 342 | "SELECT 1", 343 | "--database-id", 344 | "1", 345 | ], 346 | catch_exceptions=False, 347 | ) 348 | assert result.exit_code == 0 349 | assert result.output == "No databases available\n" 350 | 351 | 352 | def test_sql_choose_database(mocker: MockerFixture) -> None: 353 | """ 354 | Test the ``sql`` command choosing a DB interactively. 355 | """ 356 | SupersetClient = mocker.patch("preset_cli.cli.superset.sql.SupersetClient") 357 | client = SupersetClient() 358 | client.get_databases.return_value = [ 359 | {"id": 1, "database_name": "GSheets"}, 360 | {"id": 2, "database_name": "Trino"}, 361 | ] 362 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 363 | mocker.patch("preset_cli.cli.superset.sql.input", side_effect=["3", "invalid", "1"]) 364 | run_query = mocker.patch("preset_cli.cli.superset.sql.run_query") 365 | 366 | runner = CliRunner() 367 | result = runner.invoke( 368 | superset_cli, 369 | [ 370 | "https://superset.example.org/", 371 | "sql", 372 | "-e", 373 | "SELECT 1", 374 | ], 375 | catch_exceptions=False, 376 | ) 377 | assert result.exit_code == 0 378 | run_query.assert_called_with(client, 1, None, "SELECT 1") 379 | 380 | 381 | def test_sql_single_database(mocker: MockerFixture) -> None: 382 | """ 383 | Test the ``sql`` command when there's a single database available. 384 | """ 385 | SupersetClient = mocker.patch("preset_cli.cli.superset.sql.SupersetClient") 386 | client = SupersetClient() 387 | client.get_databases.return_value = [ 388 | {"id": 1, "database_name": "GSheets"}, 389 | ] 390 | mocker.patch("preset_cli.cli.superset.main.UsernamePasswordAuth") 391 | run_query = mocker.patch("preset_cli.cli.superset.sql.run_query") 392 | 393 | runner = CliRunner() 394 | result = runner.invoke( 395 | superset_cli, 396 | [ 397 | "https://superset.example.org/", 398 | "sql", 399 | "-e", 400 | "SELECT 1", 401 | ], 402 | catch_exceptions=False, 403 | ) 404 | assert result.exit_code == 0 405 | run_query.assert_called_with(client, 1, None, "SELECT 1") 406 | -------------------------------------------------------------------------------- /tests/cli/superset/sync/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/tests/cli/superset/sync/__init__.py -------------------------------------------------------------------------------- /tests/cli/superset/sync/dbt/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/tests/cli/superset/sync/dbt/__init__.py -------------------------------------------------------------------------------- /tests/cli/superset/sync/dbt/databases_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for ``preset_cli.cli.superset.sync.dbt.databases``. 
3 | """ 4 | # pylint: disable=invalid-name 5 | 6 | from pathlib import Path 7 | 8 | import pytest 9 | import yaml 10 | from pyfakefs.fake_filesystem import FakeFilesystem 11 | from pytest_mock import MockerFixture 12 | 13 | from preset_cli.cli.superset.sync.dbt.databases import sync_database 14 | from preset_cli.exceptions import DatabaseNotFoundError 15 | 16 | 17 | def test_sync_database_new(mocker: MockerFixture, fs: FakeFilesystem) -> None: 18 | """ 19 | Test ``sync_database`` when we want to import a new DB. 20 | """ 21 | fs.create_file( 22 | "/path/to/.dbt/profiles.yml", 23 | contents=yaml.dump({"default": {"outputs": {"dev": {}}}}), 24 | ) 25 | mocker.patch( 26 | "preset_cli.cli.superset.sync.dbt.databases.build_sqlalchemy_params", 27 | return_value={"sqlalchemy_uri": "dummy://"}, 28 | ) 29 | client = mocker.MagicMock() 30 | client.get_databases.return_value = [] 31 | 32 | sync_database( 33 | client=client, 34 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 35 | project_name="my_project", 36 | profile_name="default", 37 | target_name="dev", 38 | import_db=True, 39 | disallow_edits=False, 40 | external_url_prefix="", 41 | ) 42 | 43 | client.create_database.assert_called_with( 44 | database_name="my_project_dev", 45 | is_managed_externally=False, 46 | masked_encrypted_extra=None, 47 | sqlalchemy_uri="dummy://", 48 | ) 49 | 50 | 51 | def test_sync_database_new_default_target( 52 | mocker: MockerFixture, 53 | fs: FakeFilesystem, 54 | ) -> None: 55 | """ 56 | Test ``sync_database`` when we want to import a new DB using the default target. 57 | """ 58 | fs.create_file( 59 | "/path/to/.dbt/profiles.yml", 60 | contents=yaml.dump({"default": {"outputs": {"dev": {}}, "target": "dev"}}), 61 | ) 62 | mocker.patch( 63 | "preset_cli.cli.superset.sync.dbt.databases.build_sqlalchemy_params", 64 | return_value={"sqlalchemy_uri": "dummy://"}, 65 | ) 66 | client = mocker.MagicMock() 67 | client.get_databases.return_value = [] 68 | 69 | sync_database( 70 | client=client, 71 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 72 | project_name="my_project", 73 | profile_name="default", 74 | target_name=None, 75 | import_db=True, 76 | disallow_edits=False, 77 | external_url_prefix="", 78 | ) 79 | 80 | client.create_database.assert_called_with( 81 | database_name="my_project_dev", 82 | is_managed_externally=False, 83 | masked_encrypted_extra=None, 84 | sqlalchemy_uri="dummy://", 85 | ) 86 | 87 | 88 | def test_sync_database_new_custom_sqlalchemy_uri( 89 | mocker: MockerFixture, 90 | fs: FakeFilesystem, 91 | ) -> None: 92 | """ 93 | Test ``sync_database`` when we want to import a new DB. 
94 | """ 95 | fs.create_file( 96 | "/path/to/.dbt/profiles.yml", 97 | contents=yaml.dump( 98 | { 99 | "default": { 100 | "outputs": { 101 | "dev": { 102 | "meta": { 103 | "superset": { 104 | "connection_params": { 105 | "sqlalchemy_uri": "sqlite://", 106 | }, 107 | "database_name": "my_database", 108 | }, 109 | }, 110 | }, 111 | }, 112 | }, 113 | }, 114 | ), 115 | ) 116 | mocker.patch( 117 | "preset_cli.cli.superset.sync.dbt.databases.build_sqlalchemy_params", 118 | return_value={"sqlalchemy_uri": "dummy://"}, 119 | ) 120 | client = mocker.MagicMock() 121 | client.get_databases.return_value = [] 122 | 123 | sync_database( 124 | client=client, 125 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 126 | project_name="my_project", 127 | profile_name="default", 128 | target_name="dev", 129 | import_db=True, 130 | disallow_edits=False, 131 | external_url_prefix="", 132 | ) 133 | 134 | client.create_database.assert_called_with( 135 | database_name="my_database", 136 | is_managed_externally=False, 137 | masked_encrypted_extra=None, 138 | sqlalchemy_uri="sqlite://", 139 | ) 140 | 141 | 142 | def test_sync_database_env_var( 143 | mocker: MockerFixture, 144 | fs: FakeFilesystem, 145 | monkeypatch: pytest.MonkeyPatch, 146 | ) -> None: 147 | """ 148 | Test ``sync_database`` when the profiles file uses ``env_var``. 149 | """ 150 | monkeypatch.setenv("dsn", "sqlite://") 151 | 152 | fs.create_file( 153 | "/path/to/.dbt/profiles.yml", 154 | contents=yaml.dump( 155 | { 156 | "default": { 157 | "outputs": { 158 | "dev": { 159 | "meta": { 160 | "superset": { 161 | "connection_params": { 162 | "sqlalchemy_uri": '{{ env_var("dsn") }}', 163 | }, 164 | "database_name": "my_database", 165 | }, 166 | }, 167 | }, 168 | }, 169 | }, 170 | }, 171 | ), 172 | ) 173 | mocker.patch( 174 | "preset_cli.cli.superset.sync.dbt.databases.build_sqlalchemy_params", 175 | return_value={"sqlalchemy_uri": "dummy://"}, 176 | ) 177 | client = mocker.MagicMock() 178 | client.get_databases.return_value = [] 179 | 180 | sync_database( 181 | client=client, 182 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 183 | project_name="my_project", 184 | profile_name="default", 185 | target_name="dev", 186 | import_db=True, 187 | disallow_edits=False, 188 | external_url_prefix="", 189 | ) 190 | 191 | client.create_database.assert_called_with( 192 | database_name="my_database", 193 | is_managed_externally=False, 194 | masked_encrypted_extra=None, 195 | sqlalchemy_uri="sqlite://", 196 | ) 197 | 198 | 199 | def test_sync_database_no_project(mocker: MockerFixture, fs: FakeFilesystem) -> None: 200 | """ 201 | Test ``sync_database`` when the project is invalid. 202 | """ 203 | fs.create_file( 204 | "/path/to/.dbt/profiles.yml", 205 | contents=yaml.dump({"default": {"outputs": {"dev": {}}}}), 206 | ) 207 | client = mocker.MagicMock() 208 | client.get_databases.return_value = [] 209 | 210 | with pytest.raises(Exception) as excinfo: 211 | sync_database( 212 | client=client, 213 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 214 | project_name="my_project", 215 | profile_name="my_other_project", 216 | target_name="dev", 217 | import_db=True, 218 | disallow_edits=False, 219 | external_url_prefix="", 220 | ) 221 | assert ( 222 | str(excinfo.value) 223 | == "Project my_other_project not found in /path/to/.dbt/profiles.yml" 224 | ) 225 | 226 | 227 | def test_sync_database_no_target(mocker: MockerFixture, fs: FakeFilesystem) -> None: 228 | """ 229 | Test ``sync_database`` when the target is invalid. 
230 | """ 231 | fs.create_file( 232 | "/path/to/.dbt/profiles.yml", 233 | contents=yaml.dump({"default": {"outputs": {"dev": {}}}}), 234 | ) 235 | client = mocker.MagicMock() 236 | client.get_databases.return_value = [] 237 | 238 | with pytest.raises(Exception) as excinfo: 239 | sync_database( 240 | client=client, 241 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 242 | project_name="my_project", 243 | profile_name="default", 244 | target_name="prod", 245 | import_db=True, 246 | disallow_edits=False, 247 | external_url_prefix="", 248 | ) 249 | assert ( 250 | str(excinfo.value) 251 | == "Target prod not found in the outputs of /path/to/.dbt/profiles.yml" 252 | ) 253 | 254 | 255 | def test_sync_database_multiple_databases( 256 | mocker: MockerFixture, 257 | fs: FakeFilesystem, 258 | ) -> None: 259 | """ 260 | Test ``sync_database`` when multiple databases are found. 261 | 262 | This should not happen, since database names are unique. 263 | """ 264 | fs.create_file( 265 | "/path/to/.dbt/profiles.yml", 266 | contents=yaml.dump({"default": {"outputs": {"dev": {}}}}), 267 | ) 268 | mocker.patch( 269 | "preset_cli.cli.superset.sync.dbt.databases.build_sqlalchemy_params", 270 | return_value={"sqlalchemy_uri": "dummy://"}, 271 | ) 272 | client = mocker.MagicMock() 273 | client.get_databases.return_value = [ 274 | {"id": 1, "database_name": "my_project_dev", "sqlalchemy_uri": "dummy://"}, 275 | {"id": 2, "database_name": "my_project_dev", "sqlalchemy_uri": "dummy://"}, 276 | ] 277 | 278 | with pytest.raises(Exception) as excinfo: 279 | sync_database( 280 | client=client, 281 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 282 | project_name="my_project", 283 | profile_name="default", 284 | target_name="dev", 285 | import_db=True, 286 | disallow_edits=False, 287 | external_url_prefix="", 288 | ) 289 | assert str(excinfo.value) == "More than one database with the same name found" 290 | 291 | 292 | def test_sync_database_external_url_prefix_disallow_edits( 293 | mocker: MockerFixture, 294 | fs: FakeFilesystem, 295 | ) -> None: 296 | """ 297 | Test ``sync_database`` with an external URL prefix and disallow-edits. 298 | """ 299 | fs.create_file( 300 | "/path/to/.dbt/profiles.yml", 301 | contents=yaml.dump({"default": {"outputs": {"dev": {}}}}), 302 | ) 303 | mocker.patch( 304 | "preset_cli.cli.superset.sync.dbt.databases.build_sqlalchemy_params", 305 | return_value={"sqlalchemy_uri": "dummy://"}, 306 | ) 307 | client = mocker.MagicMock() 308 | client.get_databases.return_value = [] 309 | 310 | sync_database( 311 | client=client, 312 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 313 | project_name="my_project", 314 | profile_name="default", 315 | target_name="dev", 316 | import_db=True, 317 | disallow_edits=True, 318 | external_url_prefix="https://dbt.example.org/", 319 | ) 320 | 321 | client.create_database.assert_called_with( 322 | database_name="my_project_dev", 323 | is_managed_externally=True, 324 | external_url="https://dbt.example.org/#!/overview", 325 | sqlalchemy_uri="dummy://", 326 | masked_encrypted_extra=None, 327 | ) 328 | 329 | 330 | def test_sync_database_existing(mocker: MockerFixture, fs: FakeFilesystem) -> None: 331 | """ 332 | Test ``sync_database`` when we want to import an existing DB. 
333 | """ 334 | fs.create_file( 335 | "/path/to/.dbt/profiles.yml", 336 | contents=yaml.dump({"default": {"outputs": {"dev": {}}}}), 337 | ) 338 | mocker.patch( 339 | "preset_cli.cli.superset.sync.dbt.databases.build_sqlalchemy_params", 340 | return_value={"sqlalchemy_uri": "dummy://"}, 341 | ) 342 | client = mocker.MagicMock() 343 | client.get_databases.return_value = [ 344 | {"id": 1, "database_name": "my_project_dev", "sqlalchemy_uri": "dummy://"}, 345 | ] 346 | 347 | sync_database( 348 | client=client, 349 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 350 | project_name="my_project", 351 | profile_name="default", 352 | target_name="dev", 353 | import_db=True, 354 | disallow_edits=False, 355 | external_url_prefix="", 356 | ) 357 | 358 | client.update_database.assert_called_with( 359 | database_id=1, 360 | database_name="my_project_dev", 361 | is_managed_externally=False, 362 | masked_encrypted_extra=None, 363 | sqlalchemy_uri="dummy://", 364 | ) 365 | 366 | 367 | def test_sync_database_new_no_import(mocker: MockerFixture, fs: FakeFilesystem) -> None: 368 | """ 369 | Test ``sync_database`` when we want to import a new DB. 370 | """ 371 | fs.create_file( 372 | "/path/to/.dbt/profiles.yml", 373 | contents=yaml.dump({"default": {"outputs": {"dev": {}}}}), 374 | ) 375 | mocker.patch( 376 | "preset_cli.cli.superset.sync.dbt.databases.build_sqlalchemy_params", 377 | return_value={"sqlalchemy_uri": "dummy://"}, 378 | ) 379 | client = mocker.MagicMock() 380 | client.get_databases.return_value = [] 381 | 382 | with pytest.raises(DatabaseNotFoundError): 383 | sync_database( 384 | client=client, 385 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 386 | project_name="my_project", 387 | profile_name="default", 388 | target_name="dev", 389 | import_db=False, 390 | disallow_edits=False, 391 | external_url_prefix="", 392 | ) 393 | 394 | 395 | def test_sync_database_reuse_connection( 396 | mocker: MockerFixture, 397 | fs: FakeFilesystem, 398 | ) -> None: 399 | """ 400 | Test ``sync_database`` when the connection already exists and ``--import-db`` wasn't passed. 
401 | """ 402 | fs.create_file( 403 | "/path/to/.dbt/profiles.yml", 404 | contents=yaml.dump({"default": {"outputs": {"dev": {}}}}), 405 | ) 406 | mocker.patch( 407 | "preset_cli.cli.superset.sync.dbt.databases.build_sqlalchemy_params", 408 | return_value={"sqlalchemy_uri": "dummy://"}, 409 | ) 410 | client = mocker.MagicMock() 411 | client.get_databases.return_value = [ 412 | {"id": 1, "database_name": "my_project_dev", "sqlalchemy_uri": "dummy://"}, 413 | ] 414 | 415 | sync_database( 416 | client=client, 417 | profiles_path=Path("/path/to/.dbt/profiles.yml"), 418 | project_name="my_project", 419 | profile_name="default", 420 | target_name="dev", 421 | import_db=False, 422 | disallow_edits=False, 423 | external_url_prefix="", 424 | ) 425 | 426 | client.get_database.assert_called_with(1) 427 | -------------------------------------------------------------------------------- /tests/cli/superset/sync/dbt/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "c": { 4 | "depends_on": {"nodes": ["a", "b"]}, 5 | "description": "", 6 | "filters": [], 7 | "label": "", 8 | "meta": {}, 9 | "name": "multiple parents", 10 | "sql": "*", 11 | "type": "count", 12 | "unique_id": "c" 13 | }, 14 | "metric.superset_examples.cnt": { 15 | "fqn": [ 16 | "superset_examples", 17 | "slack", 18 | "cnt" 19 | ], 20 | "unique_id": "metric.superset_examples.cnt", 21 | "package_name": "superset_examples", 22 | "root_path": "/Users/beto/Projects/dbt-examples/superset_examples", 23 | "path": "slack/schema.yml", 24 | "original_file_path": "models/slack/schema.yml", 25 | "model": "ref('messages_channels')", 26 | "name": "cnt", 27 | "description": "", 28 | "label": "", 29 | "type": "count", 30 | "sql": "*", 31 | "timestamp": null, 32 | "filters": [], 33 | "time_grains": [], 34 | "dimensions": [], 35 | "resource_type": "metric", 36 | "meta": {}, 37 | "tags": [], 38 | "sources": [], 39 | "depends_on": { 40 | "macros": [], 41 | "nodes": [ 42 | "model.superset_examples.messages_channels" 43 | ] 44 | }, 45 | "refs": [ 46 | [ 47 | "messages_channels" 48 | ] 49 | ], 50 | "created_at": 1642630986.1942852 51 | } 52 | }, 53 | "sources": { 54 | "source.superset_examples.public.messages": { 55 | "fqn": [ 56 | "superset_examples", 57 | "slack", 58 | "public", 59 | "messages" 60 | ], 61 | "database": "examples_dev", 62 | "schema": "public", 63 | "unique_id": "source.superset_examples.public.messages", 64 | "package_name": "superset_examples", 65 | "root_path": "/Users/beto/Projects/dbt-examples/superset_examples", 66 | "path": "models/slack/schema.yml", 67 | "original_file_path": "models/slack/schema.yml", 68 | "name": "messages", 69 | "source_name": "public", 70 | "source_description": "", 71 | "loader": "", 72 | "identifier": "messages", 73 | "resource_type": "source", 74 | "quoting": { 75 | "database": null, 76 | "schema": null, 77 | "identifier": null, 78 | "column": null 79 | }, 80 | "loaded_at_field": null, 81 | "freshness": { 82 | "warn_after": { 83 | "count": null, 84 | "period": null 85 | }, 86 | "error_after": { 87 | "count": null, 88 | "period": null 89 | }, 90 | "filter": null 91 | }, 92 | "external": null, 93 | "description": "Messages in the Slack channel", 94 | "columns": {}, 95 | "meta": {}, 96 | "source_meta": {}, 97 | "tags": [], 98 | "config": { 99 | "enabled": true 100 | }, 101 | "patch_path": null, 102 | "unrendered_config": {}, 103 | "relation_name": "\"examples_dev\".\"public\".\"messages\"", 104 | "created_at": 1642628933.0432189 105 | }, 106 | 
"source.superset_examples.public.channels": { 107 | "fqn": [ 108 | "superset_examples", 109 | "slack", 110 | "public", 111 | "channels" 112 | ], 113 | "database": "examples_dev", 114 | "schema": "public", 115 | "unique_id": "source.superset_examples.public.channels", 116 | "package_name": "superset_examples", 117 | "root_path": "/Users/beto/Projects/dbt-examples/superset_examples", 118 | "path": "models/slack/schema.yml", 119 | "original_file_path": "models/slack/schema.yml", 120 | "name": "channels", 121 | "source_name": "public", 122 | "source_description": "", 123 | "loader": "", 124 | "identifier": "channels", 125 | "resource_type": "source", 126 | "quoting": { 127 | "database": null, 128 | "schema": null, 129 | "identifier": null, 130 | "column": null 131 | }, 132 | "loaded_at_field": null, 133 | "freshness": { 134 | "warn_after": { 135 | "count": null, 136 | "period": null 137 | }, 138 | "error_after": { 139 | "count": null, 140 | "period": null 141 | }, 142 | "filter": null 143 | }, 144 | "external": null, 145 | "description": "Information about Slack channels", 146 | "columns": {}, 147 | "meta": {}, 148 | "source_meta": {}, 149 | "tags": [], 150 | "config": { 151 | "enabled": true 152 | }, 153 | "patch_path": null, 154 | "unrendered_config": {}, 155 | "relation_name": "\"examples_dev\".\"public\".\"channels\"", 156 | "created_at": 1642628933.043388 157 | } 158 | }, 159 | "nodes": { 160 | "model.superset_examples.messages_channels": { 161 | "raw_sql": "SELECT messages.ts, channels.name, messages.text FROM {{ source ('public', 'messages') }} messages JOIN {{ source ('public', 'channels') }} channels ON messages.channel_id = channels.id", 162 | "compiled": true, 163 | "resource_type": "model", 164 | "depends_on": { 165 | "macros": [], 166 | "nodes": [ 167 | "source.superset_examples.public.channels", 168 | "source.superset_examples.public.messages" 169 | ] 170 | }, 171 | "config": { 172 | "enabled": true, 173 | "alias": null, 174 | "schema": null, 175 | "database": null, 176 | "tags": [], 177 | "meta": {}, 178 | "materialized": "view", 179 | "persist_docs": {}, 180 | "quoting": {}, 181 | "column_types": {}, 182 | "full_refresh": null, 183 | "on_schema_change": "ignore", 184 | "post-hook": [], 185 | "pre-hook": [] 186 | }, 187 | "database": "examples_dev", 188 | "schema": "public", 189 | "fqn": [ 190 | "superset_examples", 191 | "slack", 192 | "messages_channels" 193 | ], 194 | "unique_id": "model.superset_examples.messages_channels", 195 | "package_name": "superset_examples", 196 | "root_path": "/Users/beto/Projects/dbt-examples/superset_examples", 197 | "path": "slack/messages_channels.sql", 198 | "original_file_path": "models/slack/messages_channels.sql", 199 | "name": "messages_channels", 200 | "alias": "messages_channels", 201 | "checksum": { 202 | "name": "sha256", 203 | "checksum": "b4ce232b28280daa522b37e12c36b67911e2a98456b8a3b99440075ec5564609" 204 | }, 205 | "tags": [], 206 | "refs": [], 207 | "sources": [ 208 | [ 209 | "public", 210 | "channels" 211 | ], 212 | [ 213 | "public", 214 | "messages" 215 | ] 216 | ], 217 | "description": "", 218 | "columns": {}, 219 | "meta": {}, 220 | "docs": { 221 | "show": true 222 | }, 223 | "patch_path": null, 224 | "compiled_path": "target/compiled/superset_examples/models/slack/messages_channels.sql", 225 | "build_path": null, 226 | "deferred": false, 227 | "unrendered_config": { 228 | "materialized": "view" 229 | }, 230 | "created_at": 1642628933.004452, 231 | "compiled_sql": "SELECT messages.ts, channels.name, messages.text FROM 
\"examples_dev\".\"public\".\"messages\" messages JOIN \"examples_dev\".\"public\".\"channels\" channels ON messages.channel_id = channels.id", 232 | "extra_ctes_injected": true, 233 | "extra_ctes": [], 234 | "relation_name": "\"examples_dev\".\"public\".\"messages_channels\"" 235 | }, 236 | "test.jaffle_shop.unique_stg_customers_customer_id.c7614daada": { 237 | "raw_sql": "{{ test_unique(**_dbt_generic_test_kwargs) }}", 238 | "test_metadata": { 239 | "name": "unique", 240 | "kwargs": { 241 | "column_name": "customer_id", 242 | "model": "{{ get_where_subquery(ref('stg_customers')) }}" 243 | }, 244 | "namespace": null 245 | }, 246 | "compiled": true, 247 | "resource_type": "test", 248 | "depends_on": { 249 | "macros": [ 250 | "macro.dbt.test_unique", 251 | "macro.dbt.get_where_subquery" 252 | ], 253 | "nodes": [ 254 | "model.jaffle_shop.stg_customers" 255 | ] 256 | }, 257 | "config": { 258 | "enabled": true, 259 | "alias": null, 260 | "schema": "dbt_test__audit", 261 | "database": null, 262 | "tags": [], 263 | "meta": {}, 264 | "materialized": "test", 265 | "severity": "ERROR", 266 | "store_failures": null, 267 | "where": null, 268 | "limit": null, 269 | "fail_calc": "count(*)", 270 | "warn_if": "!= 0", 271 | "error_if": "!= 0" 272 | }, 273 | "database": "dbt-tutorial-347100", 274 | "schema": "dbt_beto_dbt_test__audit", 275 | "fqn": [ 276 | "jaffle_shop", 277 | "unique_stg_customers_customer_id" 278 | ], 279 | "unique_id": "test.jaffle_shop.unique_stg_customers_customer_id.c7614daada", 280 | "package_name": "jaffle_shop", 281 | "root_path": ".", 282 | "path": "unique_stg_customers_customer_id.sql", 283 | "original_file_path": "models/schema.yml", 284 | "name": "unique_stg_customers_customer_id", 285 | "alias": "unique_stg_customers_customer_id", 286 | "checksum": { 287 | "name": "none", 288 | "checksum": "" 289 | }, 290 | "tags": [], 291 | "refs": [ 292 | [ 293 | "stg_customers" 294 | ] 295 | ], 296 | "sources": [], 297 | "metrics": [], 298 | "description": "", 299 | "columns": {}, 300 | "meta": {}, 301 | "docs": { 302 | "show": true 303 | }, 304 | "patch_path": null, 305 | "compiled_path": "target/compiled/jaffle_shop/models/schema.yml/unique_stg_customers_customer_id.sql", 306 | "build_path": null, 307 | "deferred": false, 308 | "unrendered_config": {}, 309 | "created_at": 1657295698.2953868, 310 | "compiled_sql": "\\n \\n \\n\\nwith dbt_test__target as (\\n \\n select customer_id as unique_field\\n from `dbt-tutorial-347100`.`dbt_beto`.`stg_customers`\\n where customer_id is not null\\n \\n)\\n\\nselect\\n unique_field,\\n count(*) as n_records\\n\\nfrom dbt_test__target\\ngroup by unique_field\\nhaving count(*) > 1\\n\\n\\n", 311 | "extra_ctes_injected": true, 312 | "extra_ctes": [], 313 | "relation_name": null, 314 | "column_name": "customer_id", 315 | "file_key_name": "models.stg_customers" 316 | } 317 | }, 318 | "child_map": { 319 | "model.superset_examples.messages_channels": [ 320 | "metric.superset_examples.cnt" 321 | ], 322 | "source.superset_examples.public.messages": [ 323 | "model.superset_examples.messages_channels" 324 | ], 325 | "source.superset_examples.public.channels": [ 326 | "model.superset_examples.messages_channels" 327 | ], 328 | "metric.superset_examples.cnt": [] 329 | } 330 | } 331 | -------------------------------------------------------------------------------- /tests/cli/superset/sync/native/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/preset-io/backend-sdk/693156e8439bc729a7cbea820c7465f043ff878c/tests/cli/superset/sync/native/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Dummy conftest.py for preset_cli. 3 | 4 | If you don't know what this is for, just leave it empty. 5 | Read more about conftest.py under: 6 | - https://docs.pytest.org/en/stable/fixture.html 7 | - https://docs.pytest.org/en/stable/writing_plugins.html 8 | """ 9 | 10 | # import pytest 11 | -------------------------------------------------------------------------------- /tests/lib_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for ``preset_cli.lib``. 3 | """ 4 | 5 | import logging 6 | 7 | import pytest 8 | from pytest_mock import MockerFixture 9 | 10 | from preset_cli.exceptions import CLIError, ErrorLevel, SupersetError 11 | from preset_cli.lib import ( 12 | dict_merge, 13 | raise_cli_errors, 14 | remove_root, 15 | setup_logging, 16 | validate_response, 17 | ) 18 | 19 | 20 | def test_remove_root() -> None: 21 | """ 22 | Test ``remove_root``. 23 | """ 24 | assert remove_root("bundle/database/examples.yaml") == "database/examples.yaml" 25 | 26 | 27 | def test_setup_logging() -> None: 28 | """ 29 | Test ``setup_logging``. 30 | """ 31 | setup_logging("debug") 32 | assert logging.root.level == logging.DEBUG 33 | 34 | with pytest.raises(ValueError) as excinfo: 35 | setup_logging("invalid") 36 | assert str(excinfo.value) == "Invalid log level: invalid" 37 | 38 | 39 | def test_validate_response(mocker: MockerFixture) -> None: 40 | """ 41 | Test ``validate_response``. 42 | """ 43 | response = mocker.MagicMock() 44 | 45 | response.ok = True 46 | validate_response(response) 47 | 48 | # SIP-40 payload 49 | response.ok = False 50 | response.headers.get.return_value = "application/json" 51 | response.json.return_value = { 52 | "errors": [{"message": "Some message", "level": "error"}], 53 | } 54 | with pytest.raises(SupersetError) as excinfo: 55 | validate_response(response) 56 | assert excinfo.value.errors == [ 57 | {"message": "Some message", "level": ErrorLevel.ERROR}, 58 | ] 59 | 60 | # SIP-40-ish payload 61 | response.ok = False 62 | response.headers.get.return_value = "application/json" 63 | response.json.return_value = {"errors": {"message": "Some message"}} 64 | with pytest.raises(SupersetError) as excinfo: 65 | validate_response(response) 66 | assert excinfo.value.errors == [ 67 | { 68 | "message": "Unknown error", 69 | "error_type": "UNKNOWN_ERROR", 70 | "level": ErrorLevel.ERROR, 71 | "extra": {"errors": {"message": "Some message"}}, 72 | }, 73 | ] 74 | 75 | # Old error 76 | response.ok = False 77 | response.headers.get.return_value = "application/json" 78 | response.json.return_value = {"message": "Some message"} 79 | with pytest.raises(SupersetError) as excinfo: 80 | validate_response(response) 81 | assert excinfo.value.errors == [ 82 | { 83 | "message": "Unknown error", 84 | "error_type": "UNKNOWN_ERROR", 85 | "level": ErrorLevel.ERROR, 86 | "extra": {"message": "Some message"}, 87 | }, 88 | ] 89 | 90 | # Non JSON error 91 | response.ok = False 92 | response.headers.get.return_value = "text/plain" 93 | response.text = "An error occurred" 94 | with pytest.raises(SupersetError) as excinfo: 95 | validate_response(response) 96 | assert excinfo.value.errors == [ 97 | { 98 | "message": "An error occurred", 99 
| "error_type": "UNKNOWN_ERROR", 100 | "level": ErrorLevel.ERROR, 101 | }, 102 | ] 103 | 104 | 105 | def test_dict_merge() -> None: 106 | """ 107 | Test ``dict_merge``. 108 | """ 109 | base = {"a": {"b": 42, "c": 43}, "d": 1, "e": 3} 110 | overrides = {"a": {"c": 44}, "d": 2, "f": 3} 111 | dict_merge(base, overrides) 112 | assert base == {"a": {"b": 42, "c": 44}, "d": 2, "e": 3, "f": 3} 113 | 114 | 115 | def test_raise_cli_errors_decorator_when_raising( 116 | capsys: pytest.CaptureFixture[str], 117 | ) -> None: 118 | """ 119 | Test the ``raise_cli_errors`` decorator when a CLI error is raised. 120 | """ 121 | 122 | @raise_cli_errors 123 | def mock_function(): 124 | raise CLIError("Error", 1) 125 | 126 | with pytest.raises(SystemExit) as excinfo: 127 | mock_function() 128 | 129 | assert excinfo.type == SystemExit 130 | assert excinfo.value.code == 1 131 | 132 | output_content = capsys.readouterr() 133 | assert "Error" in output_content.out 134 | 135 | 136 | def test_raise_cli_errors_decorator_when_not_raising() -> None: 137 | """ 138 | Test the ``raise_cli_errors`` decorator when no error is raised. 139 | """ 140 | 141 | @raise_cli_errors 142 | def mock_function(): 143 | return "All good!" 144 | 145 | result = mock_function() 146 | assert result == "All good!" 147 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Tox configuration file 2 | # Read more under https://tox.wiki/ 3 | # THIS SCRIPT IS SUPPOSED TO BE AN EXAMPLE. MODIFY IT ACCORDING TO YOUR NEEDS! 4 | 5 | [tox] 6 | minversion = 3.15 7 | envlist = default 8 | isolated_build = True 9 | 10 | 11 | [testenv] 12 | description = Invoke pytest to run automated tests 13 | setenv = 14 | TOXINIDIR = {toxinidir} 15 | passenv = 16 | HOME 17 | extras = 18 | testing 19 | commands = 20 | pytest {posargs} 21 | 22 | 23 | [testenv:{build,clean}] 24 | description = 25 | build: Build the package in isolation according to PEP517, see https://github.com/pypa/build 26 | clean: Remove old distribution files and temporary build artifacts (./build and ./dist) 27 | # https://setuptools.pypa.io/en/stable/build_meta.html#how-to-use-it 28 | skip_install = True 29 | changedir = {toxinidir} 30 | deps = 31 | build: build[virtualenv] 32 | commands = 33 | clean: python -c 'from shutil import rmtree; rmtree("build", True); rmtree("dist", True)' 34 | build: python -m build {posargs} 35 | 36 | 37 | [testenv:{docs,doctests,linkcheck}] 38 | description = 39 | docs: Invoke sphinx-build to build the docs 40 | doctests: Invoke sphinx-build to run doctests 41 | linkcheck: Check for broken links in the documentation 42 | setenv = 43 | DOCSDIR = {toxinidir}/docs 44 | BUILDDIR = {toxinidir}/docs/_build 45 | docs: BUILD = html 46 | doctests: BUILD = doctest 47 | linkcheck: BUILD = linkcheck 48 | deps = 49 | -r {toxinidir}/docs/requirements.txt 50 | # ^ requirements.txt shared with Read The Docs 51 | commands = 52 | sphinx-build --color -b {env:BUILD} -d "{env:BUILDDIR}/doctrees" "{env:DOCSDIR}" "{env:BUILDDIR}/{env:BUILD}" {posargs} 53 | 54 | 55 | [testenv:publish] 56 | description = 57 | Publish the package you have been developing to a package index server. 58 | By default, it uses testpypi. If you really want to publish your package 59 | to be publicly accessible in PyPI, use the `-- --repository pypi` option. 
60 | skip_install = True 61 | changedir = {toxinidir} 62 | passenv = 63 | TWINE_USERNAME 64 | TWINE_PASSWORD 65 | TWINE_REPOSITORY 66 | deps = twine 67 | commands = 68 | python -m twine check dist/* 69 | python -m twine upload {posargs:--repository testpypi} dist/* 70 | --------------------------------------------------------------------------------
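
Read together, the ``sql`` tests in tests/cli/superset/sql_test.py pin down how the command picks a database before dispatching a query: print ``No databases available`` and stop when there are none, honor an explicit ``--database-id``, auto-select a single candidate, and otherwise prompt until a listed ID is typed. The sketch below restates that flow in plain Python; it is reconstructed from the test assertions alone, so the prompt text and the helper name are assumptions, not the shipped ``preset_cli.cli.superset.sql`` code.

from typing import Any, Dict, List, Optional


def choose_database(
    databases: List[Dict[str, Any]],
    database_id: Optional[int] = None,
) -> Optional[Dict[str, Any]]:
    """
    Hypothetical helper mirroring the selection logic the tests assert on.
    """
    if not databases:
        print("No databases available")
        return None
    if database_id is not None:
        # An explicit --database-id wins (test_sql_run_query).
        return next(db for db in databases if db["id"] == database_id)
    if len(databases) == 1:
        # A single candidate is picked automatically (test_sql_single_database).
        return databases[0]

    # Multiple candidates and no --database-id: prompt until a listed ID is
    # typed ("3" and "invalid" are rejected in test_sql_choose_database).
    by_id = {db["id"]: db for db in databases}
    while True:
        for db in databases:
            print(f"({db['id']}) {db['database_name']}")
        try:
            choice = int(input("> "))
        except ValueError:
            continue
        if choice in by_id:
            return by_id[choice]

Once a database is chosen, ``-e QUERY`` routes to ``run_query(client, database_id, schema, query)``, while the bare command opens a REPL via ``run_session(client, database_id, database_name, schema, url)``; those are exactly the calls the ``assert_called_with`` checks verify.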
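
The ``databases_test.py`` cases pin down a similar contract for ``sync_database``: resolve the profile and target from ``profiles.yml``, look up existing connections by name, and then create, update, reuse, or fail. A minimal sketch of that decision tree follows, inferred from the assertions only; the error messages, the ``DatabaseNotFoundError`` import, and the keyword arguments come from the tests, while ``build_params_stub`` stands in for the patched ``build_sqlalchemy_params`` helper and Jinja rendering of ``env_var`` values in the profile is omitted.

from pathlib import Path
from typing import Any, Dict, Optional

import yaml

from preset_cli.exceptions import DatabaseNotFoundError


def build_params_stub(target: Dict[str, Any]) -> Dict[str, Any]:
    # Stand-in for the ``build_sqlalchemy_params`` helper the tests patch out.
    return {"sqlalchemy_uri": "dummy://"}


def sync_database_sketch(  # hypothetical restatement, not the shipped function
    client: Any,
    profiles_path: Path,
    project_name: str,
    profile_name: str,
    target_name: Optional[str],
    import_db: bool,
    disallow_edits: bool,
    external_url_prefix: str,
) -> Any:
    profiles = yaml.safe_load(profiles_path.read_text(encoding="utf-8"))
    if profile_name not in profiles:
        raise Exception(f"Project {profile_name} not found in {profiles_path}")
    profile = profiles[profile_name]

    # Fall back to the profile's default target when none was passed.
    target_name = target_name or profile.get("target")
    if target_name not in profile["outputs"]:
        raise Exception(
            f"Target {target_name} not found in the outputs of {profiles_path}",
        )

    # ``meta.superset`` in the target may override the connection parameters
    # and the database name; otherwise the name is ``<project>_<target>``.
    meta = profile["outputs"][target_name].get("meta", {}).get("superset", {})
    database_name = meta.get("database_name", f"{project_name}_{target_name}")
    params = meta.get("connection_params") or build_params_stub(
        profile["outputs"][target_name],
    )

    kwargs: Dict[str, Any] = {
        "database_name": database_name,
        "is_managed_externally": disallow_edits,
        "masked_encrypted_extra": None,  # always None in these tests
        **params,
    }
    if external_url_prefix:
        kwargs["external_url"] = f"{external_url_prefix}#!/overview"

    matches = [
        database
        for database in client.get_databases()
        if database["database_name"] == database_name
    ]
    if len(matches) > 1:
        raise Exception("More than one database with the same name found")

    if matches:
        if not import_db:
            # Reuse the existing connection untouched.
            return client.get_database(matches[0]["id"])
        return client.update_database(database_id=matches[0]["id"], **kwargs)

    if not import_db:
        raise DatabaseNotFoundError()
    return client.create_database(**kwargs)

Note how ``import_db`` only gates creation and reuse: an existing connection is updated when ``--import-db`` is passed (``test_sync_database_existing``) but merely fetched when it isn't (``test_sync_database_reuse_connection``).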