├── .gitattributes ├── .github └── workflows │ ├── ci.yml │ └── publish.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── README.md ├── ci ├── array-api-skips.txt ├── array-api-tests-rev.txt └── clone_array_api_tests.sh ├── develop.py ├── pixi.toml ├── pyproject.toml ├── pytest.ini ├── src └── finch │ ├── __init__.py │ ├── _array_api_info.py │ ├── compiled.py │ ├── dtypes.py │ ├── errors.py │ ├── io.py │ ├── julia.py │ ├── juliapkg.json │ ├── juliapkg_dev.json │ ├── levels.py │ ├── linalg │ ├── __init__.py │ └── _linalg.py │ ├── tensor.py │ └── typing.py └── tests ├── __init__.py ├── conftest.py ├── data └── matrix_1.ttx ├── test_indexing.py ├── test_io.py ├── test_linalg.py ├── test_ops.py ├── test_scipy_constructors.py └── test_sparse.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # SCM syntax highlighting 2 | pixi.lock linguist-language=YAML linguist-generated=true 3 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | jobs: 3 | test: 4 | defaults: 5 | run: 6 | shell: bash -el {0} 7 | strategy: 8 | matrix: 9 | os: [ubuntu-latest] 10 | python: ['3.10', '3.11', '3.12'] 11 | include: 12 | - os: macos-latest 13 | python: '3.10' 14 | - os: windows-latest 15 | python: '3.10' 16 | fail-fast: false 17 | runs-on: ${{ matrix.os }} 18 | steps: 19 | - name: Checkout Repo 20 | uses: actions/checkout@v3 21 | - uses: actions/setup-python@v5 22 | with: 23 | python-version: ${{ matrix.python }} 24 | - name: Install Poetry 25 | uses: snok/install-poetry@v1 26 | - name: Install package 27 | run: | 28 | poetry install --with test 29 | - name: Run tests 30 | run: | 31 | poetry run pytest --junit-xml=test-${{ matrix.os }}-Python-${{ matrix.python }}.xml 32 | - uses: codecov/codecov-action@v3 33 | 34 | array_api_tests: 35 | env: 36 | ARRAY_API_TESTS_DIR: ${{ github.workspace }}/array-api-tests 37 | runs-on: ubuntu-latest 38 | steps: 39 | - name: Checkout Repo 40 | uses: actions/checkout@v4 41 | - name: Checkout array-api-tests 42 | run: ci/clone_array_api_tests.sh 43 | - name: Set up Python 44 | uses: actions/setup-python@v5 45 | with: 46 | python-version: '3.11' 47 | cache: 'pip' 48 | - name: Install Poetry 49 | run: | 50 | pip install poetry 51 | - name: Build wheel 52 | run: | 53 | python -m poetry build --format wheel 54 | - name: Install build and test dependencies from PyPI 55 | run: | 56 | pip install dist/*.whl 57 | pip install -U setuptools wheel 58 | pip install pytest-xdist hypothesis==6.131.0 -r "$ARRAY_API_TESTS_DIR/requirements.txt" 59 | - name: Run the test suite 60 | env: 61 | ARRAY_API_TESTS_MODULE: finch 62 | run: | 63 | python -c 'import finch' 64 | pytest "$ARRAY_API_TESTS_DIR/array_api_tests/" -v -c "$ARRAY_API_TESTS_DIR/pytest.ini" --ci --max-examples=2 --derandomize --disable-deadline --disable-warnings -n auto --skips-file ci/array-api-skips.txt 65 | 66 | on: 67 | # Trigger the workflow on push or pull request, 68 | # but only for the main branch 69 | push: 70 | branches: 71 | - main 72 | pull_request: 73 | branches: 74 | - main 75 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | on: 3 | workflow_dispatch: 4 | jobs: 5 | build: 6 | #if: github.ref == 'refs/heads/master' 7 | runs-on: ubuntu-latest 8 
| steps: 9 | - uses: actions/checkout@v3 10 | - name: Set up Python 11 | uses: actions/setup-python@v2 12 | with: 13 | python-version: '3.x' 14 | - name: Install dependencies 15 | run: pip install toml 16 | - name: Get version 17 | run: | 18 | VERSION=$(python -c 'import toml; print("v" + toml.load("pyproject.toml")["tool"]["poetry"]["version"])') 19 | if [ $? -ne 0 ]; then exit 1; fi 20 | echo "VERSION=$VERSION" >> $GITHUB_ENV 21 | - name: Build and publish to pypi 22 | uses: JRubics/poetry-publish@v1.17 23 | with: 24 | pypi_token: ${{ secrets.PYPI_TOKEN }} 25 | - name: Create Release 26 | uses: actions/create-release@v1 27 | if: success() 28 | env: 29 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | with: 31 | tag_name: ${{ env.VERSION }} 32 | release_name: Release ${{ env.VERSION }} 33 | draft: false 34 | prerelease: false 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | junit/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | db.sqlite3-journal 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | .pybuilder/ 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | # For a library or package, you might want to ignore these files since the code is 88 | # intended to run in multiple environments; otherwise, check them in: 89 | # .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # poetry 99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 100 | # This is especially recommended for binary packages to ensure reproducibility, and is more 101 | # commonly ignored for libraries. 102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 103 | poetry.lock 104 | 105 | # pdm 106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
107 | #pdm.lock 108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 109 | # in version control. 110 | # https://pdm.fming.dev/#use-with-ide 111 | .pdm.toml 112 | 113 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 114 | __pypackages__/ 115 | 116 | # Celery stuff 117 | celerybeat-schedule 118 | celerybeat.pid 119 | 120 | # SageMath parsed files 121 | *.sage.py 122 | 123 | # Environments 124 | .env 125 | .venv 126 | env/ 127 | venv/ 128 | ENV/ 129 | env.bak/ 130 | venv.bak/ 131 | 132 | # Spyder project settings 133 | .spyderproject 134 | .spyproject 135 | 136 | # Rope project settings 137 | .ropeproject 138 | 139 | # mkdocs documentation 140 | /site 141 | 142 | # mypy 143 | .mypy_cache/ 144 | .dmypy.json 145 | dmypy.json 146 | 147 | # Pyre type checker 148 | .pyre/ 149 | 150 | # pytype static type analyzer 151 | .pytype/ 152 | 153 | # Cython debug symbols 154 | cython_debug/ 155 | 156 | # PyCharm 157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 159 | # and can be added to the global gitignore or merged into this file. For a more nuclear 160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 161 | .idea/ 162 | 163 | # mac os 164 | .DS_Store 165 | 166 | # pixi environments 167 | .pixi/ 168 | *.egg-info 169 | pixi.lock 170 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: check-yaml 6 | - id: end-of-file-fixer 7 | - id: trailing-whitespace 8 | - id: fix-byte-order-marker 9 | - id: destroyed-symlinks 10 | - id: fix-encoding-pragma 11 | args: ["--remove"] 12 | - id: mixed-line-ending 13 | - id: name-tests-test 14 | args: ["--pytest-test-first"] 15 | - id: no-commit-to-branch 16 | - id: pretty-format-json 17 | args: ["--autofix", "--no-ensure-ascii"] 18 | 19 | - repo: https://github.com/astral-sh/ruff-pre-commit 20 | rev: v0.11.2 21 | hooks: 22 | - id: ruff 23 | args: ["--fix"] 24 | types_or: [ python, pyi, jupyter ] 25 | - id: ruff-format 26 | types_or: [ python, pyi, jupyter ] 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Willow Ahrens 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # finch-tensor-python 2 | 3 | This is the beginnings of a sparse tensor library for Python, backed by the 4 | [Finch.jl](https://github.com/finch-tensor/Finch.jl) tensor compiler. 5 | 6 | ## Source 7 | 8 | The source code for `finch-tensor` is available on GitHub at [https://github.com/finch-tensor/finch-tensor-python](https://github.com/finch-tensor/finch-tensor-python). 9 | 10 | ## Installation 11 | 12 | `finch-tensor` is available on PyPI and can be installed with pip (see the [Example](#example) section below for basic usage): 13 | ```bash 14 | pip install finch-tensor 15 | ``` 16 | 17 | ## Contributing 18 | 19 | ### Packaging 20 | 21 | Finch uses [poetry](https://python-poetry.org/) for packaging. 22 | 23 | To install for development, clone the repository and run: 24 | ```bash 25 | poetry install --with test 26 | ``` 27 | to install the current project and its test dependencies. 28 | 29 | ### Working with a local copy of Finch.jl 30 | The `develop.py` script can be used to set up a local copy of Finch.jl for development. 31 | 32 | ``` 33 | Usage: 34 | develop.py [--restore] [--path <path>] 35 | 36 | Options: 37 | --restore Restore the original juliapkg.json file. 38 | --path <path> Path to the local copy of Finch.jl [default: ../Finch.jl]. 39 | ``` 40 | 41 | ### Publishing 42 | 43 | The "Publish" GitHub Action is a manual workflow for publishing Python packages to PyPI using Poetry. It derives the version from the `pyproject.toml` file and automates tagging and creating GitHub releases. 44 | 45 | #### Version Update 46 | 47 | Before initiating the "Publish" action, update the package's version number in `pyproject.toml`, following semantic versioning guidelines. 48 | 49 | #### Triggering the Action 50 | 51 | The action is triggered manually. Once the version in `pyproject.toml` is updated, start the "Publish" action from the GitHub repository's Actions tab. 52 | 53 | #### Process and Outcomes 54 | 55 | On successful execution, the action publishes the package to PyPI and tags the release in the GitHub repository. If the version number has not been updated, the action fails to publish to PyPI, and no tag or release is created. In that case, correct the version number and rerun the action. 56 | 57 | #### Best Practices 58 | 59 | - Ensure the version number in `pyproject.toml` is updated before triggering the action. 60 | - Regularly check the action logs for successful completion or to identify issues. 61 | 62 | ### Pre-commit hooks 63 | 64 | To add pre-commit hooks, run: 65 | ```bash 66 | poetry run pre-commit install 67 | ``` 68 | 69 | ### Testing 70 | 71 | Finch uses [pytest](https://docs.pytest.org/en/latest/) for testing. To run the 72 | tests: 73 | 74 | ```bash 75 | poetry run pytest 76 | ``` 77 |
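78 | ## Example 79 | 80 | A minimal sketch of the array API-style interface exported by `finch` (see `src/finch/__init__.py` for the full list of exports); the input values below are illustrative: 81 | 82 | ```python 83 | import numpy as np 84 | import finch 85 | 86 | # Construct a Finch tensor from NumPy data. 87 | a = finch.asarray(np.array([[0, 1, 0], [2, 0, 3]])) 88 | 89 | # Element-wise and reduction operations follow the array API standard. 90 | total = finch.sum(finch.multiply(a, a)) 91 | 92 | # Lazy mode: build an expression graph with lazy(), then let Finch optimize 93 | # and materialize the whole expression in one step with compute(). 94 | lazy_a = finch.lazy(a) 95 | result = finch.compute(finch.sum(finch.multiply(lazy_a, lazy_a))) 96 | ``` 97 |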
-------------------------------------------------------------------------------- /ci/array-api-skips.txt: -------------------------------------------------------------------------------- 1 | # `test_nonzero` name conflict 2 | array_api_tests/test_searching_functions.py::test_nonzero_zerodim_error 3 | # flaky test 4 | array_api_tests/test_special_cases.py::test_unary[sign((x_i is -0 or x_i == +0)) -> 0] 5 | # `broadcast_to` is not defined in Finch, hangs as xfail 6 | array_api_tests/test_searching_functions.py::test_where 7 | # `test_solve` is not defined in Finch, hangs as xfail 8 | array_api_tests/test_linalg.py::test_solve 9 | 10 | # test_signatures 11 | 12 | # not implemented 13 | # stats functions 14 | array_api_tests/test_signatures.py::test_func_signature[mean] 15 | array_api_tests/test_signatures.py::test_func_signature[std] 16 | array_api_tests/test_signatures.py::test_func_signature[var] 17 | # set functions 18 | array_api_tests/test_signatures.py::test_func_signature[unique_all] 19 | array_api_tests/test_signatures.py::test_func_signature[unique_counts] 20 | array_api_tests/test_signatures.py::test_func_signature[unique_inverse] 21 | array_api_tests/test_signatures.py::test_func_signature[unique_values] 22 | # creation functions 23 | array_api_tests/test_signatures.py::test_func_signature[meshgrid] 24 | array_api_tests/test_signatures.py::test_func_signature[tril] 25 | array_api_tests/test_signatures.py::test_func_signature[triu] 26 | # inspection functions 27 | array_api_tests/test_signatures.py::test_func_signature[isdtype] 28 | array_api_tests/test_signatures.py::test_func_signature[result_type] 29 | # other functions 30 | array_api_tests/test_signatures.py::test_func_signature[concat] 31 | array_api_tests/test_signatures.py::test_func_signature[argsort] 32 | array_api_tests/test_signatures.py::test_func_signature[sort] 33 | array_api_tests/test_signatures.py::test_func_signature[broadcast_arrays] 34 | array_api_tests/test_signatures.py::test_func_signature[broadcast_to] 35 | array_api_tests/test_signatures.py::test_func_signature[expand_dims] 36 | array_api_tests/test_signatures.py::test_func_signature[flip] 37 | array_api_tests/test_signatures.py::test_func_signature[roll] 38 | array_api_tests/test_signatures.py::test_func_signature[squeeze] 39 | array_api_tests/test_signatures.py::test_func_signature[stack] 40 | array_api_tests/test_signatures.py::test_func_signature[matrix_transpose] 41 | array_api_tests/test_signatures.py::test_func_signature[vecdot] 42 | array_api_tests/test_signatures.py::test_func_signature[take] 43 | array_api_tests/test_signatures.py::test_func_signature[argmax] 44 | array_api_tests/test_signatures.py::test_func_signature[argmin] 45 | array_api_tests/test_signatures.py::test_func_signature[from_dlpack] 46 | array_api_tests/test_signatures.py::test_func_signature[cumulative_sum] 47 | array_api_tests/test_signatures.py::test_func_signature[searchsorted] 48 | array_api_tests/test_signatures.py::test_func_signature[repeat] 49 | array_api_tests/test_signatures.py::test_func_signature[tile] 50 | array_api_tests/test_signatures.py::test_func_signature[unstack] 51 | array_api_tests/test_signatures.py::test_func_signature[clip] 52 | array_api_tests/test_signatures.py::test_func_signature[copysign] 53 | array_api_tests/test_signatures.py::test_func_signature[hypot] 54 | array_api_tests/test_signatures.py::test_func_signature[logical_not] 55 |
array_api_tests/test_signatures.py::test_func_signature[maximum] 56 | array_api_tests/test_signatures.py::test_func_signature[minimum] 57 | array_api_tests/test_signatures.py::test_func_signature[signbit] 58 | array_api_tests/test_signatures.py::test_func_signature[nextafter] 59 | array_api_tests/test_signatures.py::test_func_signature[reciprocal] 60 | array_api_tests/test_signatures.py::test_func_signature[count_nonzero] 61 | array_api_tests/test_signatures.py::test_func_signature[take_along_axis] 62 | array_api_tests/test_signatures.py::test_func_signature[astype] 63 | array_api_tests/test_signatures.py::test_func_signature[cumulative_prod] 64 | array_api_tests/test_signatures.py::test_func_signature[diff] 65 | # linalg namespace 66 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.cross] 67 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matmul] 68 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.cholesky] 69 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matrix_norm] 70 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matrix_rank] 71 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matrix_transpose] 72 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.outer] 73 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.pinv] 74 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.svdvals] 75 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.tensordot] 76 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.vecdot] 77 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.det] 78 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.diagonal] 79 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.eigh] 80 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.eigvalsh] 81 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.inv] 82 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matrix_power] 83 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.qr] 84 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.slogdet] 85 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.solve] 86 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.svd] 87 | array_api_tests/test_signatures.py::test_extension_func_signature[linalg.trace] 88 | # Array object namespace 89 | array_api_tests/test_signatures.py::test_array_method_signature[__dlpack__] 90 | array_api_tests/test_signatures.py::test_array_method_signature[__dlpack_device__] 91 | array_api_tests/test_signatures.py::test_array_method_signature[__setitem__] 92 | 93 | array_api_tests/test_creation_functions.py::test_eye 94 | # not implemented 95 | array_api_tests/test_creation_functions.py::test_meshgrid 96 | array_api_tests/test_creation_functions.py::test_empty_like 97 | 98 | # test_array_object 99 | 100 | array_api_tests/test_array_object.py::test_getitem 101 | array_api_tests/test_array_object.py::test_setitem 102 | array_api_tests/test_array_object.py::test_getitem_masking 103 | array_api_tests/test_array_object.py::test_setitem_masking 104 | array_api_tests/test_array_object.py::test_getitem_arrays_and_ints_2[None] 105 | 
array_api_tests/test_array_object.py::test_getitem_arrays_and_ints_2[1] 106 | array_api_tests/test_array_object.py::test_getitem_arrays_and_ints_1[None] 107 | array_api_tests/test_array_object.py::test_getitem_arrays_and_ints_1[1] 108 | 109 | # test_operators_and_elementwise_functions 110 | 111 | # throws for x < 1 instead of NaN 112 | array_api_tests/test_operators_and_elementwise_functions.py::test_acosh 113 | # not implemented 114 | array_api_tests/test_operators_and_elementwise_functions.py::test_logical_not 115 | array_api_tests/test_operators_and_elementwise_functions.py::test_binary_with_scalars_real[logaddexp] 116 | array_api_tests/test_operators_and_elementwise_functions.py::test_binary_with_scalars_real[maximum] 117 | array_api_tests/test_operators_and_elementwise_functions.py::test_binary_with_scalars_real[minimum] 118 | array_api_tests/test_operators_and_elementwise_functions.py::test_copysign 119 | array_api_tests/test_operators_and_elementwise_functions.py::test_clip 120 | array_api_tests/test_operators_and_elementwise_functions.py::test_hypot 121 | array_api_tests/test_operators_and_elementwise_functions.py::test_maximum 122 | array_api_tests/test_operators_and_elementwise_functions.py::test_minimum 123 | array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x1, x2)] 124 | array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)] 125 | array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x, s)] 126 | array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x, s)] 127 | array_api_tests/test_operators_and_elementwise_functions.py::test_reciprocal 128 | array_api_tests/test_operators_and_elementwise_functions.py::test_binary_with_scalars_real[copysign] 129 | array_api_tests/test_operators_and_elementwise_functions.py::test_binary_with_scalars_real[hypot] 130 | array_api_tests/test_operators_and_elementwise_functions.py::test_binary_with_scalars_int[remainder] 131 | array_api_tests/test_operators_and_elementwise_functions.py::test_binary_with_scalars_int[floor_divide] 132 | array_api_tests/test_operators_and_elementwise_functions.py::test_signbit 133 | 134 | # test_data_type_functions 135 | 136 | # not implemented 137 | array_api_tests/test_data_type_functions.py::test_broadcast_arrays 138 | array_api_tests/test_data_type_functions.py::test_broadcast_to 139 | array_api_tests/test_data_type_functions.py::test_isdtype 140 | array_api_tests/test_data_type_functions.py::test_result_type 141 | array_api_tests/test_data_type_functions.py::test_finfo[Float32] 142 | array_api_tests/test_data_type_functions.py::TestResultType::test_with_scalars 143 | array_api_tests/test_data_type_functions.py::TestResultType::test_result_type 144 | array_api_tests/test_data_type_functions.py::TestResultType::test_shuffled 145 | array_api_tests/test_searching_functions.py::test_searchsorted 146 | array_api_tests/test_data_type_functions.py::test_astype 147 | array_api_tests/test_data_type_functions.py::TestResultType::test_arrays_and_dtypes 148 | 149 | # test_has_names 150 | 151 | array_api_tests/test_has_names.py::test_has_names[linalg-cholesky] 152 | array_api_tests/test_has_names.py::test_has_names[linalg-cross] 153 | array_api_tests/test_has_names.py::test_has_names[linalg-det] 154 | array_api_tests/test_has_names.py::test_has_names[linalg-diagonal] 155 | array_api_tests/test_has_names.py::test_has_names[linalg-eigh] 156 | 
array_api_tests/test_has_names.py::test_has_names[linalg-eigvalsh] 157 | array_api_tests/test_has_names.py::test_has_names[linalg-inv] 158 | array_api_tests/test_has_names.py::test_has_names[linalg-matmul] 159 | array_api_tests/test_has_names.py::test_has_names[linalg-matrix_norm] 160 | array_api_tests/test_has_names.py::test_has_names[linalg-matrix_power] 161 | array_api_tests/test_has_names.py::test_has_names[linalg-matrix_rank] 162 | array_api_tests/test_has_names.py::test_has_names[linalg-matrix_transpose] 163 | array_api_tests/test_has_names.py::test_has_names[linalg-outer] 164 | array_api_tests/test_has_names.py::test_has_names[linalg-pinv] 165 | array_api_tests/test_has_names.py::test_has_names[linalg-qr] 166 | array_api_tests/test_has_names.py::test_has_names[linalg-slogdet] 167 | array_api_tests/test_has_names.py::test_has_names[linalg-solve] 168 | array_api_tests/test_has_names.py::test_has_names[linalg-svd] 169 | array_api_tests/test_has_names.py::test_has_names[linalg-svdvals] 170 | array_api_tests/test_has_names.py::test_has_names[linalg-tensordot] 171 | array_api_tests/test_has_names.py::test_has_names[linalg-trace] 172 | array_api_tests/test_has_names.py::test_has_names[linalg-vecdot] 173 | array_api_tests/test_has_names.py::test_has_names[statistical-cumulative_sum] 174 | array_api_tests/test_has_names.py::test_has_names[statistical-mean] 175 | array_api_tests/test_has_names.py::test_has_names[statistical-std] 176 | array_api_tests/test_has_names.py::test_has_names[statistical-var] 177 | array_api_tests/test_has_names.py::test_has_names[set-unique_all] 178 | array_api_tests/test_has_names.py::test_has_names[set-unique_counts] 179 | array_api_tests/test_has_names.py::test_has_names[set-unique_inverse] 180 | array_api_tests/test_has_names.py::test_has_names[set-unique_values] 181 | array_api_tests/test_has_names.py::test_has_names[searching-argmax] 182 | array_api_tests/test_has_names.py::test_has_names[searching-argmin] 183 | array_api_tests/test_has_names.py::test_has_names[searching-searchsorted] 184 | array_api_tests/test_has_names.py::test_has_names[creation-from_dlpack] 185 | array_api_tests/test_has_names.py::test_has_names[creation-meshgrid] 186 | array_api_tests/test_has_names.py::test_has_names[creation-tril] 187 | array_api_tests/test_has_names.py::test_has_names[creation-triu] 188 | array_api_tests/test_has_names.py::test_has_names[manipulation-broadcast_arrays] 189 | array_api_tests/test_has_names.py::test_has_names[manipulation-broadcast_to] 190 | array_api_tests/test_has_names.py::test_has_names[manipulation-concat] 191 | array_api_tests/test_has_names.py::test_has_names[manipulation-expand_dims] 192 | array_api_tests/test_has_names.py::test_has_names[manipulation-flip] 193 | array_api_tests/test_has_names.py::test_has_names[manipulation-repeat] 194 | array_api_tests/test_has_names.py::test_has_names[manipulation-roll] 195 | array_api_tests/test_has_names.py::test_has_names[manipulation-squeeze] 196 | array_api_tests/test_has_names.py::test_has_names[manipulation-stack] 197 | array_api_tests/test_has_names.py::test_has_names[manipulation-tile] 198 | array_api_tests/test_has_names.py::test_has_names[manipulation-unstack] 199 | array_api_tests/test_has_names.py::test_has_names[sorting-argsort] 200 | array_api_tests/test_has_names.py::test_has_names[sorting-sort] 201 | array_api_tests/test_has_names.py::test_has_names[data_type-isdtype] 202 | array_api_tests/test_has_names.py::test_has_names[data_type-result_type] 203 | 
array_api_tests/test_has_names.py::test_has_names[elementwise-clip] 204 | array_api_tests/test_has_names.py::test_has_names[elementwise-copysign] 205 | array_api_tests/test_has_names.py::test_has_names[elementwise-hypot] 206 | array_api_tests/test_has_names.py::test_has_names[elementwise-logical_not] 207 | array_api_tests/test_has_names.py::test_has_names[elementwise-maximum] 208 | array_api_tests/test_has_names.py::test_has_names[elementwise-minimum] 209 | array_api_tests/test_has_names.py::test_has_names[elementwise-signbit] 210 | array_api_tests/test_has_names.py::test_has_names[linear_algebra-matrix_transpose] 211 | array_api_tests/test_has_names.py::test_has_names[linear_algebra-vecdot] 212 | array_api_tests/test_has_names.py::test_has_names[indexing-take] 213 | array_api_tests/test_has_names.py::test_has_names[array_method-__dlpack__] 214 | array_api_tests/test_has_names.py::test_has_names[array_method-__dlpack_device__] 215 | array_api_tests/test_has_names.py::test_has_names[array_method-__setitem__] 216 | array_api_tests/test_has_names.py::test_has_names[array_attribute-T] 217 | array_api_tests/test_has_names.py::test_has_names[fft-irfft] 218 | array_api_tests/test_has_names.py::test_has_names[fft-ifft] 219 | array_api_tests/test_has_names.py::test_has_names[fft-rfftfreq] 220 | array_api_tests/test_has_names.py::test_has_names[fft-ifftshift] 221 | array_api_tests/test_has_names.py::test_has_names[fft-ihfft] 222 | array_api_tests/test_has_names.py::test_has_names[fft-ifftn] 223 | array_api_tests/test_has_names.py::test_has_names[fft-irfftn] 224 | array_api_tests/test_has_names.py::test_has_names[elementwise-nextafter] 225 | array_api_tests/test_has_names.py::test_has_names[fft-fft] 226 | array_api_tests/test_has_names.py::test_has_names[fft-rfftn] 227 | array_api_tests/test_has_names.py::test_has_names[fft-fftn] 228 | array_api_tests/test_has_names.py::test_has_names[fft-fftshift] 229 | array_api_tests/test_has_names.py::test_has_names[fft-fftfreq] 230 | array_api_tests/test_has_names.py::test_has_names[fft-hfft] 231 | array_api_tests/test_has_names.py::test_has_names[fft-rfft] 232 | array_api_tests/test_has_names.py::test_has_names[elementwise-reciprocal] 233 | array_api_tests/test_has_names.py::test_has_names[utility-diff] 234 | array_api_tests/test_has_names.py::test_has_names[statistical-cumulative_prod] 235 | array_api_tests/test_has_names.py::test_has_names[searching-count_nonzero] 236 | array_api_tests/test_has_names.py::test_has_names[indexing-take_along_axis] 237 | 238 | # test_indexing_functions 239 | 240 | # not implemented 241 | array_api_tests/test_indexing_functions.py::test_take 242 | 243 | # test_linalg 244 | 245 | # not implemented 246 | array_api_tests/test_linalg.py::test_matrix_transpose 247 | array_api_tests/test_linalg.py::test_vecdot 248 | array_api_tests/test_linalg.py::test_eigh 249 | array_api_tests/test_linalg.py::test_eigvalsh 250 | array_api_tests/test_linalg.py::test_inv 251 | array_api_tests/test_linalg.py::test_linalg_matmul 252 | array_api_tests/test_linalg.py::test_matrix_norm 253 | array_api_tests/test_linalg.py::test_matrix_power 254 | array_api_tests/test_linalg.py::test_matrix_rank 255 | array_api_tests/test_linalg.py::test_linalg_matrix_transpose 256 | array_api_tests/test_linalg.py::test_outer 257 | array_api_tests/test_linalg.py::test_pinv 258 | array_api_tests/test_linalg.py::test_qr 259 | array_api_tests/test_linalg.py::test_slogdet 260 | array_api_tests/test_linalg.py::test_cholesky 261 | array_api_tests/test_linalg.py::test_det 262 | 
array_api_tests/test_linalg.py::test_diagonal 263 | array_api_tests/test_linalg.py::test_vector_norm 264 | array_api_tests/test_linalg.py::test_svdvals 265 | array_api_tests/test_linalg.py::test_svd 266 | array_api_tests/test_linalg.py::test_trace 267 | array_api_tests/test_linalg.py::test_linalg_vecdot 268 | array_api_tests/test_linalg.py::test_linalg_tensordot 269 | array_api_tests/test_linalg.py::test_vecdot_conj 270 | array_api_tests/test_linalg.py::test_tensordot 271 | 272 | # test_manipulation_functions 273 | 274 | # not implemented 275 | array_api_tests/test_manipulation_functions.py::test_concat 276 | array_api_tests/test_manipulation_functions.py::test_expand_dims 277 | array_api_tests/test_manipulation_functions.py::test_squeeze 278 | array_api_tests/test_manipulation_functions.py::test_flip 279 | array_api_tests/test_manipulation_functions.py::test_roll 280 | array_api_tests/test_manipulation_functions.py::test_stack 281 | array_api_tests/test_manipulation_functions.py::test_unstack 282 | array_api_tests/test_manipulation_functions.py::test_repeat 283 | array_api_tests/test_manipulation_functions.py::test_tile 284 | 285 | # test_searching_functions 286 | 287 | # not implemented 288 | array_api_tests/test_searching_functions.py::test_argmax 289 | array_api_tests/test_searching_functions.py::test_argmin 290 | array_api_tests/test_searching_functions.py::test_count_nonzero 291 | # 0D issue 292 | array_api_tests/test_searching_functions.py::test_nonzero 293 | 294 | # test_set_functions 295 | 296 | # not implemented 297 | array_api_tests/test_set_functions.py::test_unique_all 298 | array_api_tests/test_set_functions.py::test_unique_counts 299 | array_api_tests/test_set_functions.py::test_unique_inverse 300 | array_api_tests/test_set_functions.py::test_unique_values 301 | 302 | # test_sorting_functions 303 | 304 | # not implemented 305 | array_api_tests/test_sorting_functions.py::test_argsort 306 | array_api_tests/test_sorting_functions.py::test_sort 307 | 308 | # test_special_cases 309 | 310 | array_api_tests/test_special_cases.py::test_unary[acos(x_i > 1) -> NaN] 311 | array_api_tests/test_special_cases.py::test_unary[acos(x_i < -1) -> NaN] 312 | array_api_tests/test_special_cases.py::test_unary[acosh(x_i is NaN) -> NaN] 313 | array_api_tests/test_special_cases.py::test_unary[acosh(x_i < 1) -> NaN] 314 | array_api_tests/test_special_cases.py::test_unary[acosh(x_i is 1) -> +0] 315 | array_api_tests/test_special_cases.py::test_unary[acosh(x_i is +infinity) -> +infinity] 316 | array_api_tests/test_special_cases.py::test_unary[asin(x_i > 1) -> NaN] 317 | array_api_tests/test_special_cases.py::test_unary[asin(x_i < -1) -> NaN] 318 | array_api_tests/test_special_cases.py::test_unary[atanh(x_i < -1) -> NaN] 319 | array_api_tests/test_special_cases.py::test_unary[atanh(x_i > 1) -> NaN] 320 | array_api_tests/test_special_cases.py::test_unary[cos(x_i is +infinity) -> NaN] 321 | array_api_tests/test_special_cases.py::test_unary[cos(x_i is -infinity) -> NaN] 322 | array_api_tests/test_special_cases.py::test_unary[log(x_i < 0) -> NaN] 323 | array_api_tests/test_special_cases.py::test_unary[log1p(x_i < -1) -> NaN] 324 | array_api_tests/test_special_cases.py::test_unary[log2(x_i < 0) -> NaN] 325 | array_api_tests/test_special_cases.py::test_unary[log10(x_i < 0) -> NaN] 326 | array_api_tests/test_special_cases.py::test_unary[signbit(x_i is +0) -> False] 327 | array_api_tests/test_special_cases.py::test_unary[signbit(x_i is -0) -> True] 328 | 
array_api_tests/test_special_cases.py::test_unary[signbit(x_i is +infinity) -> False] 329 | array_api_tests/test_special_cases.py::test_unary[signbit(x_i is -infinity) -> True] 330 | array_api_tests/test_special_cases.py::test_unary[signbit(isfinite(x_i) and x_i > 0) -> False] 331 | array_api_tests/test_special_cases.py::test_unary[signbit(isfinite(x_i) and x_i < 0) -> True] 332 | array_api_tests/test_special_cases.py::test_unary[signbit(x_i is NaN) -> False] 333 | array_api_tests/test_special_cases.py::test_unary[signbit(x_i is NaN) -> True] 334 | array_api_tests/test_special_cases.py::test_unary[sin((x_i is +infinity or x_i == -infinity)) -> NaN] 335 | array_api_tests/test_special_cases.py::test_unary[sqrt(x_i < 0) -> NaN] 336 | array_api_tests/test_special_cases.py::test_unary[tan((x_i is +infinity or x_i == -infinity)) -> NaN] 337 | array_api_tests/test_special_cases.py::test_binary[copysign(x2_i < 0) -> NaN] 338 | array_api_tests/test_special_cases.py::test_binary[copysign(x2_i is -0) -> NaN] 339 | array_api_tests/test_special_cases.py::test_binary[copysign(x2_i is +0) -> NaN] 340 | array_api_tests/test_special_cases.py::test_binary[copysign(x2_i > 0) -> NaN] 341 | array_api_tests/test_special_cases.py::test_binary[maximum(x1_i is NaN or x2_i is NaN) -> NaN] 342 | array_api_tests/test_special_cases.py::test_binary[minimum(x1_i is NaN or x2_i is NaN) -> NaN] 343 | array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] 344 | array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] 345 | array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] 346 | array_api_tests/test_special_cases.py::test_binary[pow(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN] 347 | array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN] 348 | array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] 349 | array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] 350 | array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] 351 | array_api_tests/test_special_cases.py::test_binary[copysign(x1_i is NaN and x2_i is -0) -> NaN] 352 | array_api_tests/test_special_cases.py::test_binary[copysign(x1_i is NaN and x2_i is +0) -> NaN] 353 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is +0 and x2_i is -0) -> roughly +pi] 354 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i > 0 and x2_i is +0) -> roughly +pi/2] 355 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i is -0) -> roughly -pi] 356 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is +infinity and x2_i is -infinity) -> roughly +3pi/4] 357 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -infinity and x2_i is +infinity) -> roughly -pi/4] 358 | array_api_tests/test_special_cases.py::test_binary[copysign(x1_i is NaN and x2_i > 0) -> NaN] 359 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -infinity and x2_i is -infinity) -> roughly -3pi/4] 360 | 
array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -infinity and isfinite(x2_i)) -> roughly -pi/2] 361 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i < 0) -> roughly -pi] 362 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i > 0 and isfinite(x1_i) and x2_i is -infinity) -> roughly +pi] 363 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i < 0 and isfinite(x1_i) and x2_i is -infinity) -> roughly -pi] 364 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is +infinity and isfinite(x2_i)) -> roughly +pi/2] 365 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is +infinity and x2_i is +infinity) -> roughly +pi/4] 366 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i > 0 and x2_i is -0) -> roughly +pi/2] 367 | array_api_tests/test_special_cases.py::test_binary[copysign(x1_i is NaN and x2_i < 0) -> NaN] 368 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i < 0 and x2_i is +0) -> roughly -pi/2] 369 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is +0 and x2_i < 0) -> roughly +pi] 370 | array_api_tests/test_special_cases.py::test_binary[atan2(x1_i < 0 and x2_i is -0) -> roughly -pi/2] 371 | array_api_tests/test_special_cases.py::test_binary[floor_divide((x1_i is +0 or x1_i == -0) and (x2_i is +0 or x2_i == -0)) -> NaN] 372 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] 373 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] 374 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] 375 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] 376 | array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] 377 | array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> +0] 378 | array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] 379 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i > 0 and x2_i is -0) -> -infinity] 380 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i < 0 and x2_i is -0) -> +infinity] 381 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i < 0 and x2_i is +0) -> -infinity] 382 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i > 0 and x2_i is +0) -> +infinity] 383 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -0 and x2_i < 0) -> +0] 384 | array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is NaN or x2_i is NaN) -> NaN] 385 | array_api_tests/test_special_cases.py::test_binary[remainder(x1_i > 0 and x2_i is +0) -> NaN] 386 | array_api_tests/test_special_cases.py::test_binary[remainder(x1_i < 0 and x2_i is -0) -> NaN] 387 | array_api_tests/test_special_cases.py::test_binary[remainder(x1_i > 0 and x2_i is -0) -> NaN] 388 | array_api_tests/test_special_cases.py::test_binary[remainder(x1_i < 0 and x2_i is +0) -> NaN] 389 | array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is NaN or x2_i is NaN) -> NaN] 390 | array_api_tests/test_special_cases.py::test_binary[remainder((x1_i is +0 or x1_i == -0) and (x2_i is 
+0 or x2_i == -0)) -> NaN] 391 | array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i < 0 and x2_i is +0) -> NaN] 392 | array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i < 0 and x2_i is -0) -> NaN] 393 | array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i > 0 and x2_i is -0) -> NaN] 394 | array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i > 0 and x2_i is +0) -> NaN] 395 | array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is NaN or x2_i is NaN) -> NaN] 396 | array_api_tests/test_special_cases.py::test_binary[__mod__((x1_i is +0 or x1_i == -0) and (x2_i is +0 or x2_i == -0)) -> NaN] 397 | array_api_tests/test_special_cases.py::test_binary[__floordiv__((x1_i is +0 or x1_i == -0) and (x2_i is +0 or x2_i == -0)) -> NaN] 398 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> +0] 399 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] 400 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] 401 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] 402 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] 403 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] 404 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] 405 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is +0) -> -infinity] 406 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is -0) -> +infinity] 407 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is -0) -> -infinity] 408 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is +0) -> +infinity] 409 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is NaN or x2_i is NaN) -> NaN] 410 | array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i < 0) -> +0] 411 | array_api_tests/test_special_cases.py::test_binary[nextafter(x1_i is +0 and x2_i is -0) -> -0] 412 | array_api_tests/test_special_cases.py::test_binary[nextafter(x1_i is -0 and x2_i is +0) -> +0] 413 | array_api_tests/test_special_cases.py::test_binary[nextafter(x1_i is NaN or x2_i is NaN) -> NaN] 414 | array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] 415 | array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] 416 | array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] 417 | array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN] 418 | array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i < 0 and x2_i is +0) -> NaN] 419 | array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i > 0 and x2_i is -0) -> NaN] 420 | array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i < 0 and x2_i is -0) -> NaN] 421 | 
array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i > 0 and x2_i is +0) -> NaN] 422 | array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is NaN or x2_i is NaN) -> NaN] 423 | array_api_tests/test_special_cases.py::test_iop[__imod__((x1_i is +0 or x1_i == -0) and (x2_i is +0 or x2_i == -0)) -> NaN] 424 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] 425 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] 426 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] 427 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] 428 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__((x1_i is +0 or x1_i == -0) and (x2_i is +0 or x2_i == -0)) -> NaN] 429 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] 430 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> +0] 431 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] 432 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i > 0 and x2_i is -0) -> -infinity] 433 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i < 0 and x2_i is -0) -> +infinity] 434 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i < 0 and x2_i is +0) -> -infinity] 435 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i > 0 and x2_i is +0) -> +infinity] 436 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is NaN or x2_i is NaN) -> NaN] 437 | array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -0 and x2_i < 0) -> +0] 438 | array_api_tests/test_special_cases.py::test_empty_arrays[mean] 439 | array_api_tests/test_special_cases.py::test_empty_arrays[std] 440 | array_api_tests/test_special_cases.py::test_empty_arrays[var] 441 | array_api_tests/test_special_cases.py::test_nan_propagation[cumulative_sum] 442 | array_api_tests/test_special_cases.py::test_nan_propagation[max] 443 | array_api_tests/test_special_cases.py::test_nan_propagation[mean] 444 | array_api_tests/test_special_cases.py::test_nan_propagation[min] 445 | array_api_tests/test_special_cases.py::test_nan_propagation[prod] 446 | array_api_tests/test_special_cases.py::test_nan_propagation[std] 447 | array_api_tests/test_special_cases.py::test_nan_propagation[sum] 448 | array_api_tests/test_special_cases.py::test_nan_propagation[var] 449 | array_api_tests/test_special_cases.py::test_unary[clip(x_i is NaN) -> NaN] 450 | array_api_tests/test_special_cases.py::test_unary[signbit(x_i is +NaN) -> False] 451 | array_api_tests/test_special_cases.py::test_unary[signbit(x_i is -NaN) -> True] 452 | 453 | # test_statistical_functions 454 | 455 | # check for errors 456 | array_api_tests/test_statistical_functions.py::test_sum 457 | 458 | # not implemented 459 | array_api_tests/test_statistical_functions.py::test_mean 460 | array_api_tests/test_statistical_functions.py::test_cumulative_prod 461 | array_api_tests/test_statistical_functions.py::test_cumulative_sum 462 | -------------------------------------------------------------------------------- /ci/array-api-tests-rev.txt: 
-------------------------------------------------------------------------------- 1 | c48410f96fc58e02eea844e6b7f6cc01680f77ce 2 | -------------------------------------------------------------------------------- /ci/clone_array_api_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euxo pipefail 3 | 4 | ARRAY_API_TESTS_DIR="${ARRAY_API_TESTS_DIR:-"../array-api-tests"}" 5 | if [ ! -d "$ARRAY_API_TESTS_DIR" ]; then 6 | git clone --recursive https://github.com/data-apis/array-api-tests.git "$ARRAY_API_TESTS_DIR" 7 | fi 8 | 9 | git --git-dir="$ARRAY_API_TESTS_DIR/.git" --work-tree "$ARRAY_API_TESTS_DIR" clean -xddf 10 | git --git-dir="$ARRAY_API_TESTS_DIR/.git" --work-tree "$ARRAY_API_TESTS_DIR" fetch 11 | git --git-dir="$ARRAY_API_TESTS_DIR/.git" --work-tree "$ARRAY_API_TESTS_DIR" reset --hard $(cat "ci/array-api-tests-rev.txt") 12 | -------------------------------------------------------------------------------- /develop.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env poetry run python 2 | import shutil 3 | import juliapkg 4 | import argparse 5 | import os 6 | 7 | 8 | script_dir = os.path.dirname(os.path.abspath(__file__)) 9 | source_file = os.path.join(script_dir, 'src/finch/juliapkg.json') 10 | backup_file = os.path.join(script_dir, 'src/finch/juliapkg.json.orig') 11 | 12 | # Parse command-line arguments 13 | usage = """ 14 | Usage: 15 | develop.py [--restore] [--path <path>] 16 | 17 | Options: 18 | --restore Restore the original juliapkg.json file. 19 | --path <path> Path to the local copy of Finch.jl [default: ../Finch.jl]. 20 | """ 21 | parser = argparse.ArgumentParser(description="Development script for Finch. This script allows you to specify the location of a local copy of Finch.jl.", usage=usage) 22 | parser.add_argument("--path", default=os.path.join(script_dir, "../Finch.jl"), help="Path to the Finch.jl package.") 23 | parser.add_argument("--restore", action="store_true", help="Restore the original juliapkg.json file.") 24 | args = parser.parse_args() 25 | 26 | # Handle the --restore flag 27 | if args.restore: 28 | try: 29 | shutil.copy(backup_file, source_file) 30 | print("Restored src/finch/juliapkg.json from backup.") 31 | except FileNotFoundError: 32 | print("Error: Backup file src/finch/juliapkg.json.orig does not exist.") 33 | except Exception as e: 34 | print(f"An error occurred: {e}") 35 | exit() 36 | 37 | # Set the Finch path 38 | finch_path = os.path.abspath(args.path) 39 | 40 | # Back up the original juliapkg.json before modifying it 41 | try: 42 | if not os.path.exists(backup_file): 43 | shutil.copy(source_file, backup_file) 44 | except Exception as e: 45 | print(f"An error occurred: {e}") 46 | 47 | # Check out the local Finch.jl for development 48 | 49 | juliapkg.rm("Finch", target='src/finch/juliapkg.json') 50 | juliapkg.add("Finch", "9177782c-1635-4eb9-9bfb-d9dfa25e6bce", dev=True, path=finch_path, target='src/finch/juliapkg.json') -------------------------------------------------------------------------------- /pixi.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | authors = ["Hameer Abbasi <2190658+hameerabbasi@users.noreply.github.com>"] 3 | channels = ["conda-forge"] 4 | description = "Sparse tensor library for Python, backed by the Finch.jl tensor compiler" 5 | name = "finch-tensor" 6 | platforms = ["osx-arm64"] 7 | version = "0.2.12" 8 | 9 | [tasks] 10 | compile = "python -c 'import finch'" 11 | 12 | [dependencies] 13 | python = ">=3.10,<3.13" 14
| juliaup = ">=1.17.10,<2" 15 | 16 | [pypi-dependencies] 17 | finch-tensor = { path = ".", editable = true } 18 | juliapkg = ">=0.1.16,<0.2" 19 | juliacall = ">=0.9.24,<0.10" 20 | numpy = ">=1.19" 21 | 22 | [feature.test.pypi-dependencies] 23 | pytest = "*" 24 | pytest-cov = "*" 25 | sparse = ">=0.16,<0.17" 26 | numba = ">=0.61" 27 | scipy = "*" 28 | numpy = "==2.*" 29 | pytest-xdist = ">=3.6.1,<4" 30 | 31 | [feature.test.tasks] 32 | test = { cmd = "pytest", depends-on = ["compile"] } 33 | 34 | [environments] 35 | test = ["test"] 36 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "finch-tensor" 3 | version = "0.2.12" 4 | description = "Sparse tensor library for Python, backed by the Finch.jl tensor compiler" 5 | authors = ["Willow Ahrens "] 6 | readme = "README.md" 7 | packages = [{include = "finch", from = "src"}] 8 | 9 | [tool.poetry.dependencies] 10 | python = "^3.10" 11 | juliapkg = "^0.1.16" 12 | numpy = ">=1.19" 13 | juliacall = [ 14 | { version = "0.9.24", platform = "darwin" }, 15 | { version = "^0.9.24" } 16 | ] 17 | 18 | [tool.poetry.group.test.dependencies] 19 | pytest = "^7.4.4" 20 | pre-commit = "^3.6.0" 21 | pytest-cov = "^4.1.0" 22 | sparse = "^0.16.0" 23 | scipy = "^1.7" 24 | numba = "^0.61.0" 25 | 26 | [build-system] 27 | requires = ["poetry-core>=1.0.8"] 28 | build-backend = "poetry.core.masonry.api" 29 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --cov-report term-missing --cov-report html --cov-report=xml --cov-report=term --cov finch --cov-config .coveragerc --junitxml=junit/test-results.xml 3 | filterwarnings = 4 | ignore::PendingDeprecationWarning 5 | testpaths = 6 | tests 7 | junit_family=xunit2 8 | xfail_strict=true 9 | -------------------------------------------------------------------------------- /src/finch/__init__.py: -------------------------------------------------------------------------------- 1 | from operator import ( 2 | add as add, 3 | sub as subtract, 4 | mul as multiply, 5 | floordiv as floor_divide, 6 | truediv as divide, 7 | matmul as matmul, 8 | neg as negative, 9 | pos as positive, 10 | abs as abs, 11 | pow as pow, 12 | invert as bitwise_invert, 13 | xor as bitwise_xor, 14 | or_ as bitwise_or, 15 | and_ as bitwise_and, 16 | lshift as bitwise_left_shift, 17 | rshift as bitwise_right_shift, 18 | eq as equal, 19 | ne as not_equal, 20 | lt as less, 21 | le as less_equal, 22 | gt as greater, 23 | ge as greater_equal, 24 | mod as remainder, 25 | ) 26 | from numpy import ( 27 | e as e, 28 | pi as pi, 29 | inf as inf, 30 | nan as nan, 31 | newaxis as newaxis, 32 | ) 33 | from .levels import ( 34 | Dense, 35 | Element, 36 | Pattern, 37 | SparseList, 38 | SparseByteMap, 39 | RepeatRLE, 40 | SparseVBL, 41 | SparseCOO, 42 | SparseHash, 43 | Storage, 44 | DenseStorage, 45 | ) 46 | from .tensor import ( 47 | Tensor, 48 | SparseArray, 49 | asarray, 50 | astype, 51 | random, 52 | eye, 53 | diagonal, 54 | tensordot, 55 | permute_dims, 56 | moveaxis, 57 | where, 58 | nonzero, 59 | sum, 60 | prod, 61 | max, 62 | min, 63 | all, 64 | any, 65 | mean, 66 | std, 67 | var, 68 | squeeze, 69 | expand_dims, 70 | argmin, 71 | argmax, 72 | cos, 73 | cosh, 74 | acos, 75 | acosh, 76 | sin, 77 | sinh, 78 | asin, 79 | asinh, 80 | tan, 81 | tanh, 82 | atan, 83 | atanh, 84 | atan2, 85 | log, 86 | log10, 87 | log1p, 88 | log2, 89 | sqrt,
90 | exp, 91 | expm1, 92 | sign, 93 | round, 94 | floor, 95 | ceil, 96 | full, 97 | full_like, 98 | ones, 99 | ones_like, 100 | zeros, 101 | zeros_like, 102 | isnan, 103 | isfinite, 104 | isinf, 105 | reshape, 106 | square, 107 | logaddexp, 108 | trunc, 109 | logical_and, 110 | logical_or, 111 | logical_xor, 112 | real, 113 | imag, 114 | conj, 115 | empty, 116 | empty_like, 117 | arange, 118 | linspace, 119 | ) 120 | from .compiled import ( 121 | lazy, 122 | compiled, 123 | compute, 124 | set_optimizer, 125 | DefaultScheduler, 126 | GalleyScheduler, 127 | ) 128 | from .dtypes import ( 129 | int_, 130 | int8, 131 | int16, 132 | int32, 133 | int64, 134 | uint, 135 | uint8, 136 | uint16, 137 | uint32, 138 | uint64, 139 | float16, 140 | float32, 141 | float64, 142 | complex64, 143 | complex128, 144 | bool, 145 | finfo, 146 | iinfo, 147 | can_cast, 148 | ) 149 | from .io import ( 150 | read, 151 | write, 152 | ) 153 | from . import linalg 154 | from ._array_api_info import __array_namespace_info__ 155 | 156 | __all__ = [ 157 | "Tensor", 158 | "SparseArray", 159 | "Dense", 160 | "Element", 161 | "Pattern", 162 | "SparseList", 163 | "SparseByteMap", 164 | "RepeatRLE", 165 | "SparseVBL", 166 | "SparseCOO", 167 | "SparseHash", 168 | "Storage", 169 | "DenseStorage", 170 | "DefaultScheduler", 171 | "GalleyScheduler", 172 | "asarray", 173 | "astype", 174 | "random", 175 | "eye", 176 | "diagonal", 177 | "tensordot", 178 | "matmul", 179 | "permute_dims", 180 | "moveaxis", 181 | "where", 182 | "nonzero", 183 | "int_", 184 | "int8", 185 | "int16", 186 | "int32", 187 | "int64", 188 | "uint", 189 | "uint8", 190 | "uint16", 191 | "uint32", 192 | "uint64", 193 | "float16", 194 | "float32", 195 | "float64", 196 | "complex64", 197 | "complex128", 198 | "bool", 199 | "lazy", 200 | "compiled", 201 | "compute", 202 | "sum", 203 | "prod", 204 | "max", 205 | "min", 206 | "all", 207 | "any", 208 | "add", 209 | "subtract", 210 | "multiply", 211 | "divide", 212 | "floor_divide", 213 | "pow", 214 | "positive", 215 | "negative", 216 | "abs", 217 | "cos", 218 | "cosh", 219 | "acos", 220 | "acosh", 221 | "sin", 222 | "sinh", 223 | "asin", 224 | "asinh", 225 | "tan", 226 | "tanh", 227 | "atan", 228 | "atanh", 229 | "atan2", 230 | "log", 231 | "log10", 232 | "log1p", 233 | "log2", 234 | "sqrt", 235 | "exp", 236 | "expm1", 237 | "sign", 238 | "round", 239 | "floor", 240 | "ceil", 241 | "full", 242 | "full_like", 243 | "ones", 244 | "ones_like", 245 | "zeros", 246 | "zeros_like", 247 | "bitwise_and", 248 | "bitwise_or", 249 | "bitwise_left_shift", 250 | "bitwise_right_shift", 251 | "bitwise_xor", 252 | "bitwise_invert", 253 | "finfo", 254 | "iinfo", 255 | "isnan", 256 | "isinf", 257 | "isfinite", 258 | "reshape", 259 | "equal", 260 | "not_equal", 261 | "less", 262 | "less_equal", 263 | "greater", 264 | "greater_equal", 265 | "square", 266 | "logaddexp", 267 | "logical_and", 268 | "logical_or", 269 | "logical_xor", 270 | "trunc", 271 | "e", 272 | "pi", 273 | "inf", 274 | "nan", 275 | "newaxis", 276 | "can_cast", 277 | "remainder", 278 | "real", 279 | "imag", 280 | "conj", 281 | "read", 282 | "write", 283 | "empty", 284 | "empty_like", 285 | "arange", 286 | "linspace", 287 | "set_optimizer", 288 | "linalg", 289 | ] 290 | 291 | __array_api_version__: str = "2024.12" 292 | -------------------------------------------------------------------------------- /src/finch/_array_api_info.py: -------------------------------------------------------------------------------- 1 | from . 
import dtypes 2 | from .typing import DType 3 | 4 | 5 | class __array_namespace_info__: 6 | 7 | def capabilities(self) -> dict[str, bool]: 8 | return { 9 | "boolean indexing": True, "data-dependent shapes": True, 10 | } 11 | 12 | def default_device(self) -> str: 13 | return "cpu" 14 | 15 | def default_dtypes(self, *, device: str | None = None) -> dict[str, DType]: 16 | if device not in ["cpu", None]: 17 | raise ValueError( 18 | "Device not understood. Only \"cpu\" is allowed, but " 19 | f"received: {device}" 20 | ) 21 | return { 22 | "real floating": dtypes.float64, 23 | "complex floating": dtypes.complex128, 24 | "integral": dtypes.int_, 25 | "indexing": dtypes.int_, 26 | } 27 | 28 | _bool_dtypes = {"bool": dtypes.bool} 29 | _signed_integer_dtypes = { 30 | "int8": dtypes.int8, 31 | "int16": dtypes.int16, 32 | "int32": dtypes.int32, 33 | "int64": dtypes.int64, 34 | } 35 | _unsigned_integer_dtypes = { 36 | "uint8": dtypes.uint8, 37 | "uint16": dtypes.uint16, 38 | "uint32": dtypes.uint32, 39 | "uint64": dtypes.uint64, 40 | } 41 | _real_floating_dtypes = { 42 | "float32": dtypes.float32, 43 | "float64": dtypes.float64, 44 | } 45 | _complex_floating_dtypes = { 46 | "complex64": dtypes.complex64, 47 | "complex128": dtypes.complex128, 48 | } 49 | 50 | def dtypes( 51 | self, 52 | *, 53 | device: str | None = None, 54 | kind: str | tuple[str, ...] | None = None, 55 | ) -> dict[str, DType]: 56 | if device not in ["cpu", None]: 57 | raise ValueError( 58 | "Device not understood. Only \"cpu\" is allowed, but " 59 | f"received: {device}" 60 | ) 61 | if kind is None: 62 | return ( 63 | self._bool_dtypes | self._signed_integer_dtypes | 64 | self._unsigned_integer_dtypes | self._real_floating_dtypes | 65 | self._complex_floating_dtypes 66 | ) 67 | if kind == "bool": 68 | return self._bool_dtypes 69 | if kind == "signed integer": 70 | return self._signed_integer_dtypes 71 | if kind == "unsigned integer": 72 | return self._unsigned_integer_dtypes 73 | if kind == "integral": 74 | return self._signed_integer_dtypes | self._unsigned_integer_dtypes 75 | if kind == "real floating": 76 | return self._real_floating_dtypes 77 | if kind == "complex floating": 78 | return self._complex_floating_dtypes 79 | if kind == "numeric": 80 | return ( 81 | self._signed_integer_dtypes | self._unsigned_integer_dtypes | 82 | self._real_floating_dtypes | self._complex_floating_dtypes 83 | ) 84 | if isinstance(kind, tuple): 85 | res = {} 86 | for k in kind: 87 | res.update(self.dtypes(kind=k)) 88 | return res 89 | raise ValueError(f"unsupported kind: {kind!r}") 90 | 91 | def devices(self) -> list[str]: 92 | return ["cpu"] 93 | -------------------------------------------------------------------------------- /src/finch/compiled.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from abc import abstractmethod 4 | from functools import wraps 5 | from dataclasses import dataclass 6 | 7 | from .julia import jl 8 | from .typing import JuliaObj 9 | from typing import Any, Iterator, Callable, TYPE_CHECKING 10 | 11 | if TYPE_CHECKING: 12 | from .tensor import Tensor 13 | 14 | IterObj = tuple | list | dict | Any 15 | 16 | 17 | def _recurse(x: IterObj, /, *, f: Callable[[Any], Any]) -> IterObj: 18 | if isinstance(x, tuple | list): 19 | return type(x)(_recurse(xi, f=f) for xi in x) 20 | if isinstance(x, dict): 21 | ret = {k: _recurse(v, f=f) for k, v in x.items()} 22 | if type(x) is not dict: 23 | ret = type(x)(ret) 24 | return ret 25 | return f(x) 26 | 27 | 28 | 
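# Illustrative doctest-style sketch (added for exposition, not part of the
# public API): `_recurse` maps `f` over every leaf of a nested
# tuple/list/dict container while preserving the container types.
# >>> _recurse((1, [2, {"a": 3}]), f=lambda v: v + 1)
# (2, [3, {'a': 4}])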
def _recurse_iter(x: IterObj, /) -> Iterator[Any]: 29 | if isinstance(x, tuple | list): 30 | for xi in x: 31 | yield from _recurse_iter(xi) 32 | return 33 | if isinstance(x, dict): 34 | for xi in x.values(): 35 | yield from _recurse_iter(xi) 36 | return 37 | yield x 38 | 39 | 40 | def _to_lazy_tensor(x: Tensor | Any, /) -> Tensor | Any: 41 | from .tensor import Tensor 42 | 43 | return x if not isinstance(x, Tensor) else lazy(x) 44 | 45 | 46 | @dataclass 47 | class _ArgumentIndexer: 48 | _idx: int = 0 49 | 50 | def index(self, _) -> int: 51 | ret = self._idx 52 | self._idx += 1 53 | return ret 54 | 55 | 56 | def _recurse_iter_compute(x: IterObj, /, *, compute_kwargs: dict[str, Any]) -> IterObj: 57 | from .tensor import Tensor 58 | 59 | # Make a recursive iterator of indices. 60 | idx_obj = _recurse(x, f=_ArgumentIndexer().index) 61 | jl_computed = [] 62 | py_computed = [] 63 | 64 | # Collect lazy tensors; use placeholder 65 | _placeholder = object() 66 | for xi in _recurse_iter(x): 67 | if isinstance(xi, Tensor) and not xi.is_computed(): 68 | jl_computed.append(xi._obj) 69 | py_computed.append(_placeholder) 70 | else: 71 | py_computed.append(xi) 72 | jl_len = len(jl_computed) 73 | # This doesn't return an iterable of arrays -- only a single array 74 | # for `len(jl_computed) == 1` 75 | jl_computed = jl.Finch.compute(*jl_computed, **compute_kwargs) 76 | if jl_len == 1: 77 | jl_computed = (jl_computed,) 78 | 79 | # Replace placeholders with computed tensors. 80 | jl_computed_iter = iter(jl_computed) 81 | for i in range(len(py_computed)): 82 | if py_computed[i] is _placeholder: 83 | py_computed[i] = Tensor(next(jl_computed_iter)) 84 | 85 | # Replace recursive indices by actual computed objects 86 | return _recurse(idx_obj, f=lambda idx: py_computed[idx]) 87 | 88 | 89 | def compiled(opt=None, *, force_materialization=False, tag: int | None = None): 90 | def inner(func): 91 | @wraps(func) 92 | def wrapper_func(*args, **kwargs): 93 | from .tensor import Tensor 94 | 95 | args = tuple(args) 96 | kwargs = dict(kwargs) 97 | compute_at_end = force_materialization or all( 98 | t.is_computed() 99 | for t in _recurse_iter((args, kwargs)) 100 | if isinstance(t, Tensor) 101 | ) 102 | args = _recurse(args, f=_to_lazy_tensor) 103 | kwargs = _recurse(kwargs, f=_to_lazy_tensor) 104 | result = func(*args, **kwargs) 105 | if not compute_at_end: 106 | return result 107 | compute_kwargs = ( 108 | {"ctx": opt.get_julia_scheduler()} if opt is not None else {} 109 | ) 110 | if tag is not None: 111 | compute_kwargs["tag"] = tag 112 | 113 | return _recurse_iter_compute(result, compute_kwargs=compute_kwargs) 114 | 115 | return wrapper_func 116 | 117 | return inner 118 | 119 | 120 | class AbstractScheduler: 121 | def __init__(self, verbose: bool = False): 122 | self.verbose = verbose 123 | 124 | @abstractmethod 125 | def get_julia_scheduler(self) -> JuliaObj: 126 | pass 127 | 128 | 129 | class GalleyScheduler(AbstractScheduler): 130 | def get_julia_scheduler(self) -> JuliaObj: 131 | return jl.Finch.galley_scheduler(verbose=self.verbose) 132 | 133 | 134 | class DefaultScheduler(AbstractScheduler): 135 | def get_julia_scheduler(self) -> JuliaObj: 136 | return jl.Finch.default_scheduler(verbose=self.verbose) 137 | 138 | 139 | def set_optimizer(opt: AbstractScheduler) -> None: 140 | jl.Finch.set_scheduler_b(opt.get_julia_scheduler()) 141 | 142 | 143 | def lazy(tensor: Tensor) -> Tensor: 144 | from .tensor import Tensor 145 | 146 | if tensor.is_computed(): 147 | return Tensor(jl.Finch.LazyTensor(tensor._obj)) 148 | return 
tensor 149 | 150 | 151 | def compute( 152 | tensor: Tensor, *, opt: AbstractScheduler | None = None, tag: int = -1 153 | ) -> Tensor: 154 | from .tensor import Tensor 155 | 156 | if not tensor.is_computed(): 157 | if opt is None: 158 | return Tensor(jl.Finch.compute(tensor._obj, tag=tag)) 159 | else: 160 | return Tensor( 161 | jl.Finch.compute( 162 | tensor._obj, 163 | verbose=opt.verbose, 164 | ctx=opt.get_julia_scheduler(), 165 | tag=tag, 166 | ) 167 | ) 168 | return tensor 169 | -------------------------------------------------------------------------------- /src/finch/dtypes.py: -------------------------------------------------------------------------------- 1 | import builtins 2 | 3 | import numpy as np 4 | 5 | from .julia import jl 6 | 7 | 8 | int_: jl.DataType = jl.Int 9 | int8: jl.DataType = jl.Int8 10 | int16: jl.DataType = jl.Int16 11 | int32: jl.DataType = jl.Int32 12 | int64: jl.DataType = jl.Int64 13 | uint: jl.DataType = jl.UInt 14 | uint8: jl.DataType = jl.UInt8 15 | uint16: jl.DataType = jl.UInt16 16 | uint32: jl.DataType = jl.UInt32 17 | uint64: jl.DataType = jl.UInt64 18 | float16: jl.DataType = jl.Float16 19 | float32: jl.DataType = jl.Float32 20 | float64: jl.DataType = jl.Float64 21 | complex64: jl.DataType = jl.ComplexF32 22 | complex128: jl.DataType = jl.ComplexF64 23 | bool: jl.DataType = jl.Bool 24 | 25 | number: jl.DataType = jl.Number 26 | complex: jl.DataType = jl.Complex 27 | integer: jl.DataType = jl.Integer 28 | abstract_float: jl.DataType = jl.AbstractFloat 29 | 30 | jl_to_np_dtype = { 31 | int_: np.int_, 32 | int8: np.int8, 33 | int16: np.int16, 34 | int32: np.int32, 35 | int64: np.int64, 36 | uint: np.uint, 37 | uint8: np.uint8, 38 | uint16: np.uint16, 39 | uint32: np.uint32, 40 | uint64: np.uint64, 41 | float16: np.float16, 42 | float32: np.float32, 43 | float64: np.float64, 44 | complex64: np.complex64, 45 | complex128: np.complex128, 46 | bool: builtins.bool, 47 | None: None, 48 | } 49 | 50 | def finfo(dtype): 51 | return np.finfo(jl_to_np_dtype[dtype]) 52 | 53 | 54 | def iinfo(dtype): 55 | return np.iinfo(jl_to_np_dtype[dtype]) 56 | 57 | 58 | def can_cast(from_, to, /) -> builtins.bool: 59 | if hasattr(from_, "dtype"): 60 | from_ = from_.dtype 61 | return np.can_cast(jl_to_np_dtype[from_], jl_to_np_dtype[to]) 62 | -------------------------------------------------------------------------------- /src/finch/errors.py: -------------------------------------------------------------------------------- 1 | class PerformanceWarning(Warning): 2 | pass 3 | -------------------------------------------------------------------------------- /src/finch/io.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from .julia import jl 4 | from .tensor import Tensor 5 | 6 | def read(filename: Path | str) -> Tensor: 7 | fn = str(filename) 8 | julia_obj = jl.fread(fn) 9 | return Tensor(julia_obj) 10 | 11 | 12 | def write(filename: Path | str, tns: Tensor) -> None: 13 | fn = str(filename) 14 | jl.fwrite(fn, tns._obj) -------------------------------------------------------------------------------- /src/finch/julia.py: -------------------------------------------------------------------------------- 1 | import juliapkg 2 | #To change the version of Finch used, see the documentation for pyjuliapkg here: https://github.com/JuliaPy/pyjuliapkg 3 | #Use pyjuliapkg to modify the `juliapkg.json` file in the root of this repo. 4 | #You can also run `develop.py` to quickly use a local copy of Finch.jl. 
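#For example, to pin a different Finch version before this module is first
#imported (a sketch using pyjuliapkg's `add` API; the UUID below is Finch's,
#copied from `juliapkg.json`):
#    import juliapkg
#    juliapkg.add("Finch", "9177782c-1635-4eb9-9bfb-d9dfa25e6bce", version="1.2.9")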
5 | #An example development json is found in `juliapkg_dev.json` 6 | import juliacall as jc # noqa 7 | 8 | juliapkg.resolve() 9 | 10 | from juliacall import Main as jl # noqa 11 | 12 | jl.seval("using Finch") 13 | jl.seval("using HDF5") 14 | jl.seval("using NPZ") 15 | jl.seval("using TensorMarket") 16 | jl.seval("using Random") 17 | jl.seval("using Statistics") 18 | -------------------------------------------------------------------------------- /src/finch/juliapkg.json: -------------------------------------------------------------------------------- 1 | { 2 | "packages": { 3 | "Finch": { 4 | "uuid": "9177782c-1635-4eb9-9bfb-d9dfa25e6bce", 5 | "version": "1.2.9" 6 | }, 7 | "HDF5": { 8 | "uuid": "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f", 9 | "version": "0.17.2" 10 | }, 11 | "NPZ": { 12 | "uuid": "15e1cf62-19b3-5cfa-8e77-841668bca605", 13 | "version": "0.4.3" 14 | }, 15 | "TensorMarket": { 16 | "uuid": "8b7d4fe7-0b45-4d0d-9dd8-5cc9b23b4b77", 17 | "version":"0.2.0" 18 | } 19 | }, 20 | "julia": "^1.10" 21 | } 22 | -------------------------------------------------------------------------------- /src/finch/juliapkg_dev.json: -------------------------------------------------------------------------------- 1 | { 2 | "packages": { 3 | "Finch": { 4 | "uuid": "9177782c-1635-4eb9-9bfb-d9dfa25e6bce", 5 | "dev": true, 6 | "path": "../Finch.jl" 7 | }, 8 | "HDF5": { 9 | "uuid": "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f", 10 | "version": "0.17.2" 11 | }, 12 | "NPZ": { 13 | "uuid": "15e1cf62-19b3-5cfa-8e77-841668bca605", 14 | "version": "0.4.3" 15 | }, 16 | "TensorMarket": { 17 | "uuid": "8b7d4fe7-0b45-4d0d-9dd8-5cc9b23b4b77", 18 | "version":"0.2.0" 19 | } 20 | }, 21 | "julia": "^1.10" 22 | } 23 | -------------------------------------------------------------------------------- /src/finch/levels.py: -------------------------------------------------------------------------------- 1 | from .julia import jl 2 | from .typing import OrderType, DType, JuliaObj 3 | 4 | 5 | class _Display: 6 | _obj: JuliaObj 7 | 8 | def __repr__(self): 9 | return jl.sprint(jl.show, self._obj) 10 | 11 | def __str__(self): 12 | return jl.sprint(jl.show, jl.MIME("text/plain"), self._obj) 13 | 14 | 15 | # LEVEL 16 | 17 | 18 | class AbstractLevel(_Display): 19 | pass 20 | 21 | 22 | # core levels 23 | 24 | 25 | class Dense(AbstractLevel): 26 | def __init__(self, lvl, shape=None): 27 | args = [lvl._obj] 28 | if shape is not None: 29 | args.append(shape) 30 | self._obj = jl.Dense(*args) 31 | 32 | 33 | class Element(AbstractLevel): 34 | def __init__(self, fill_value, data=None): 35 | args = [fill_value] 36 | if data is not None: 37 | args.append(data) 38 | self._obj = jl.Element(*args) 39 | 40 | 41 | class Pattern(AbstractLevel): 42 | def __init__(self): 43 | self._obj = jl.Pattern() 44 | 45 | 46 | # advanced levels 47 | 48 | 49 | class SparseList(AbstractLevel): 50 | def __init__(self, lvl): 51 | self._obj = jl.SparseList(lvl._obj) 52 | 53 | 54 | class SparseByteMap(AbstractLevel): 55 | def __init__(self, lvl): 56 | self._obj = jl.SparseByteMap(lvl._obj) 57 | 58 | 59 | class RepeatRLE(AbstractLevel): 60 | def __init__(self, lvl): 61 | self._obj = jl.RepeatRLE(lvl._obj) 62 | 63 | 64 | class SparseVBL(AbstractLevel): 65 | def __init__(self, lvl): 66 | self._obj = jl.SparseVBL(lvl._obj) 67 | 68 | 69 | class SparseCOO(AbstractLevel): 70 | def __init__(self, ndim, lvl): 71 | self._obj = jl.SparseCOO[ndim](lvl._obj) 72 | 73 | 74 | class SparseHash(AbstractLevel): 75 | def __init__(self, ndim, lvl): 76 | self._obj = jl.SparseHash[ndim](lvl._obj) 77 | 78 
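# Levels compose inside-out: the innermost `Element` holds the values and each
# wrapper level adds one dimension. A minimal sketch of a CSR-like 2-D
# description (the same pattern as the docstring example in `tensor.py`):
# >>> import finch
# >>> csr_like = finch.Storage(
# ...     finch.Dense(finch.SparseList(finch.Element(0))), order="C"
# ... )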
 79 | sparse_formats_names = (
 80 |     "SparseList",
 81 |     "Sparse",
 82 |     "SparseHash",
 83 |     "SparseCOO",
 84 |     "SparseRLE",
 85 |     "SparseVBL",
 86 |     "SparseBand",
 87 |     "SparsePoint",
 88 |     "SparseInterval",
 89 | )
 90 | 
 91 | 
 92 | # STORAGE
 93 | 
 94 | 
 95 | class Storage:
 96 |     def __init__(self, levels_descr: AbstractLevel, order: OrderType = None):
 97 |         self.levels_descr = levels_descr
 98 |         self.order = order if order is not None else "C"
 99 | 
100 |     def __str__(self) -> str:
101 |         return f"Storage(lvl={str(self.levels_descr)}, order={self.order})"
102 | 
103 | 
104 | class DenseStorage(Storage):
105 |     def __init__(self, ndim: int, dtype: DType, order: OrderType = None):
106 |         lvl = Element(dtype(0))
107 |         for _ in range(ndim):
108 |             lvl = Dense(lvl)
109 | 
110 |         super().__init__(levels_descr=lvl, order=order)
111 | 
--------------------------------------------------------------------------------
/src/finch/linalg/__init__.py:
--------------------------------------------------------------------------------
1 | from ._linalg import vector_norm
2 | 
3 | __all__ = ["vector_norm"]
4 | 
--------------------------------------------------------------------------------
/src/finch/linalg/_linalg.py:
--------------------------------------------------------------------------------
 1 | from numpy.core.numeric import normalize_axis_tuple
 2 | 
 3 | from ..julia import jl
 4 | from ..tensor import Tensor
 5 | 
 6 | 
 7 | def vector_norm(
 8 |     x: Tensor,
 9 |     /,
10 |     *,
11 |     axis: int | tuple[int, ...] | None = None,
12 |     keepdims: bool = False,
13 |     ord: int | float = 2,
14 | ) -> Tensor:
15 |     if axis is not None:
16 |         axis = normalize_axis_tuple(axis, x.ndim)
17 |         if axis != tuple(range(x.ndim)):
18 |             raise ValueError(
19 |                 "At the moment only `None` (vector norm of a flattened array) "
20 |                 f"is supported. Got: {axis}."
21 |             )
22 | 
23 |     result = Tensor(jl.Finch.norm(x._obj, ord))
24 |     if keepdims:
25 |         result = result[tuple(None for _ in range(x.ndim))]
26 |     return result
27 | 
--------------------------------------------------------------------------------
/src/finch/tensor.py:
--------------------------------------------------------------------------------
  1 | from __future__ import annotations
  2 | 
  3 | import builtins
  4 | from typing import Any, Callable, Optional, Iterable, Literal
  5 | import warnings
  6 | 
  7 | import numpy as np
  8 | from numpy.core.numeric import normalize_axis_index, normalize_axis_tuple
  9 | 
 10 | from . import dtypes as jl_dtypes
 11 | from .errors import PerformanceWarning
 12 | from .julia import jc, jl
 13 | from .levels import (
 14 |     _Display,
 15 |     Dense,
 16 |     Element,
 17 |     Storage,
 18 |     DenseStorage,
 19 |     SparseCOO,
 20 |     SparseList,
 21 |     sparse_formats_names,
 22 | )
 23 | from .typing import OrderType, JuliaObj, spmatrix, TupleOf3Arrays, DType, Device
 24 | from .compiled import compiled, lazy, compute
 25 | 
 26 | 
 27 | class SparseArray:
 28 |     """
 29 |     PyData/Sparse marker class
 30 |     """
 31 | 
 32 | 
 33 | class Tensor(_Display, SparseArray):
 34 |     """
 35 |     A wrapper class for Finch.Tensor and Finch.SwizzleArray.
 36 | 
 37 |     Constructors
 38 |     ------------
 39 |     Tensor(scipy.sparse.spmatrix)
 40 |         Construct a Tensor out of a `scipy.sparse` object. Supported formats are: `COO`, `CSC`, and `CSR`.
 41 |     Tensor(numpy.ndarray)
 42 |         Construct a Tensor out of a NumPy array object. This is a no-copy operation.
 43 |     Tensor(Storage)
 44 |         Initialize a Tensor with a `storage` description. `storage` can already hold data.
 45 |     Tensor(julia_object)
 46 |         Tensor created from a compatible raw Julia object. Must be a `SwizzleArray` or `LazyTensor`.
 47 |         This is a no-copy operation.
 48 | 
 49 |     Parameters
 50 |     ----------
 51 |     obj : np.ndarray or scipy.sparse or Storage or Finch.SwizzleArray
 52 |         Input to construct a Tensor. It's a no-copy operation for NumPy and SciPy input. For Storage
 53 |         it's levels' description with order. The order numbers the dimensions from the fastest to slowest.
 54 |         The leaf nodes have mode `0` and the root node has mode `n-1`. If the tensor were square of size `N`,
 55 |         then `N .^ order == strides`. Available options are "C" (row-major), "F" (column-major), or a custom
 56 |         order. Default: row-major.
 57 |     fill_value : np.number, optional
 58 |         Only used when `numpy.ndarray` or `scipy.sparse` is passed.
 59 |     copy : bool, optional
 60 |         If ``True``, then the object is copied. If ``None`` then the object is copied only if needed.
 61 |         For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. Default: ``None``.
 62 | 
 63 |     Returns
 64 |     -------
 65 |     Tensor
 66 |         Python wrapper for Finch Tensor.
 67 | 
 68 |     Examples
 69 |     --------
 70 |     >>> import numpy as np
 71 |     >>> import finch
 72 |     >>> arr2d = np.arange(6).reshape((2, 3))
 73 |     >>> t1 = finch.Tensor(arr2d)
 74 |     >>> t1.todense()
 75 |     array([[0, 1, 2],
 76 |            [3, 4, 5]])
 77 |     >>> np.shares_memory(t1.todense(), arr2d)
 78 |     True
 79 |     >>> storage = finch.Storage(finch.Dense(finch.SparseList(finch.Element(1))), order="C")
 80 |     >>> t2 = t1.to_storage(storage)
 81 |     >>> t2.todense()
 82 |     array([[0, 1, 2],
 83 |            [3, 4, 5]])
 84 |     """
 85 | 
 86 |     row_major: str = "C"
 87 |     column_major: str = "F"
 88 | 
 89 |     def __init__(
 90 |         self,
 91 |         obj: np.ndarray | spmatrix | Storage | JuliaObj,
 92 |         /,
 93 |         *,
 94 |         fill_value: np.number | None = None,
 95 |         copy: bool | None = None,
 96 |     ):
 97 |         if isinstance(obj, (int, float, complex, bool, list)):
 98 |             if copy is False:
 99 |                 raise ValueError(
100 |                     "copy=False isn't supported for scalar inputs and Python lists"
101 |                 )
102 |             obj = np.asarray(obj)
103 |         if fill_value is None:
104 |             fill_value = 0.0
105 | 
106 |         if _is_scipy_sparse_obj(obj):  # scipy constructor
107 |             jl_data = self._from_scipy_sparse(obj, fill_value=fill_value, copy=copy)
108 |             self._obj = jl_data
109 |         elif isinstance(obj, np.ndarray):  # numpy constructor
110 |             jl_data = self._from_numpy(obj, fill_value=fill_value, copy=copy)
111 |             self._obj = jl_data
112 |         elif isinstance(obj, Storage):  # from-storage constructor
113 |             if copy:
114 |                 self._raise_julia_copy_not_supported()
115 |             order = self.preprocess_order(
116 |                 obj.order, self.get_lvl_ndim(obj.levels_descr._obj)
117 |             )
118 |             self._obj = jl.swizzle(jl.Tensor(obj.levels_descr._obj), *order)
119 |         elif jl.isa(obj, jl.Finch.Tensor):  # raw-Julia-object constructors
120 |             if copy:
121 |                 self._raise_julia_copy_not_supported()
122 |             self._obj = jl.swizzle(obj, *tuple(range(1, jl.ndims(obj) + 1)))
123 |         elif jl.isa(obj, jl.Finch.SwizzleArray) or jl.isa(obj, jl.Finch.LazyTensor):
124 |             if copy:
125 |                 self._raise_julia_copy_not_supported()
126 |             self._obj = obj
127 |         elif isinstance(obj, Tensor):
128 |             self._obj = obj._obj
129 |         else:
130 |             raise ValueError(
131 |                 "Either scalar, numpy, scipy.sparse or a raw julia object should "
132 |                 f"be provided. 
Found: {type(obj)}" 133 | ) 134 | 135 | def __pos__(self): 136 | return self._elemwise_op("+") 137 | 138 | def __neg__(self): 139 | return self._elemwise_op("-") 140 | 141 | def __add__(self, other): 142 | return self._elemwise_op("+", other) 143 | 144 | def __mul__(self, other): 145 | return self._elemwise_op("*", other) 146 | 147 | def __sub__(self, other): 148 | return self._elemwise_op("-", other) 149 | 150 | def __truediv__(self, other): 151 | return self._elemwise_op("/", other) 152 | 153 | def __floordiv__(self, other): 154 | return self._elemwise_op("Finch.fld_nothrow", other) 155 | 156 | def __mod__(self, other): 157 | return self._elemwise_op("Finch.mod_nothrow", other) 158 | 159 | def __pow__(self, other): 160 | return self._elemwise_op("^", other) 161 | 162 | @compiled() 163 | def __matmul__(self, other: Tensor) -> Tensor: 164 | if self.ndim == 0 or other.ndim == 0: 165 | raise ValueError( 166 | f"`{self.ndim=}`, `{other.ndim=}`. Both must be greater than `0`." 167 | ) 168 | 169 | if other.ndim == 1: 170 | return sum(self * other, axis=-1) 171 | 172 | if self.ndim == 1: 173 | return sum(self * other.mT, axis=-1) 174 | 175 | return sum(self[..., :, None, :] * other.mT[..., None, :, :], axis=-1) 176 | 177 | def __abs__(self): 178 | return self._elemwise_op("abs") 179 | 180 | def __invert__(self): 181 | return self._elemwise_op("~") 182 | 183 | def __and__(self, other): 184 | return self._elemwise_op("&", other) 185 | 186 | def __or__(self, other): 187 | return self._elemwise_op("|", other) 188 | 189 | def __xor__(self, other): 190 | return self._elemwise_op("xor", other) 191 | 192 | def __lshift__(self, other): 193 | return self._elemwise_op("<<", other) 194 | 195 | def __rshift__(self, other): 196 | return self._elemwise_op(">>", other) 197 | 198 | def __lt__(self, other): 199 | return self._elemwise_op("<", other) 200 | 201 | def __le__(self, other): 202 | return self._elemwise_op("<=", other) 203 | 204 | def __gt__(self, other): 205 | return self._elemwise_op(">", other) 206 | 207 | def __ge__(self, other): 208 | return self._elemwise_op(">=", other) 209 | 210 | def __eq__(self, other): 211 | return self._elemwise_op("==", other) 212 | 213 | def __ne__(self, other): 214 | return self._elemwise_op("!=", other) 215 | 216 | def _elemwise_op(self, op: str, other: Optional[Tensor] = None) -> Tensor: 217 | if other is None: 218 | result = jl.broadcast(jl.seval(op), self._obj) 219 | else: 220 | if np.isscalar(other): 221 | other = jc.convert(self.dtype, other) 222 | else: 223 | other = jl.permutedims(other._obj, tuple(range(other.ndim, 0, -1))) 224 | # inverse swizzle, so `broadcast` appends new dims to the front 225 | result = jl.broadcast( 226 | jl.seval(op), 227 | jl.permutedims(self._obj, tuple(range(self.ndim, 0, -1))), 228 | other, 229 | ) 230 | # swizzle back to the original order 231 | result = jl.permutedims(result, tuple(range(jl.ndims(result), 0, -1))) 232 | 233 | return Tensor(result) 234 | 235 | def __bool__(self): 236 | return self._to_scalar(bool) 237 | 238 | def __float__(self): 239 | return self._to_scalar(float) 240 | 241 | def __int__(self): 242 | return self._to_scalar(int) 243 | 244 | def __index__(self): 245 | return self._to_scalar(int) 246 | 247 | def __complex__(self): 248 | return self._to_scalar(complex) 249 | 250 | def _to_scalar(self, builtin): 251 | if self.ndim != 0: 252 | raise ValueError(f"{builtin} can be computed for one-element tensors only.") 253 | return builtin(self.todense().flatten()[0]) 254 | 255 | def __getitem__(self, key): 256 | if not 
isinstance(key, tuple): 257 | key = (key,) 258 | 259 | if not self.is_computed(): 260 | # lazy indexing mode 261 | key = _process_lazy_indexing(key, self.ndim) 262 | else: 263 | # standard indexing mode 264 | key = _expand_ellipsis(key, self.shape) 265 | key = _add_missing_dims(key, self.shape) 266 | key = _add_plus_one(key, self.shape) 267 | 268 | result = self._obj[key] 269 | if jl.isa(result, jl.Finch.SwizzleArray) or jl.isa(result, jl.Finch.LazyTensor): 270 | return Tensor(result) 271 | elif jl.isa(result, jl.Finch.Tensor): 272 | return Tensor(jl.swizzle(result, *range(1, jl.ndims(result) + 1))) 273 | else: 274 | return result 275 | 276 | @property 277 | def dtype(self) -> DType: 278 | return jl.eltype(self._obj.body) 279 | 280 | @property 281 | def ndim(self) -> int: 282 | return jl.ndims(self._obj) 283 | 284 | @property 285 | def shape(self) -> tuple[int, ...]: 286 | return jl.size(self._obj) 287 | 288 | @property 289 | def size(self) -> int: 290 | return np.prod(self.shape) 291 | 292 | @property 293 | def fill_value(self) -> np.number: 294 | return jl.fill_value(self._obj) 295 | 296 | @property 297 | def _is_dense(self) -> bool: 298 | lvl = self._obj.body.lvl 299 | for _ in self.shape: 300 | if not jl.isa(lvl, jl.Finch.Dense): 301 | return False 302 | lvl = lvl.lvl 303 | return True 304 | 305 | @property 306 | def _order(self) -> tuple[int, ...]: 307 | return jl.typeof(self._obj).parameters[1] 308 | 309 | @property 310 | def mT(self) -> Tensor: 311 | axes = list(range(self.ndim)) 312 | axes[-2], axes[-1] = axes[-1], axes[-2] 313 | axes = tuple(axes) 314 | return self.permute_dims(axes) 315 | 316 | @property 317 | def device(self) -> str: 318 | return "cpu" 319 | 320 | def to_device( 321 | self, device: Device, /, *, stream: int | Any | None = None 322 | ) -> Tensor: 323 | if device != "cpu": 324 | raise ValueError("Only `device='cpu'` is supported.") 325 | 326 | return self 327 | 328 | def is_computed(self) -> bool: 329 | return not jl.isa(self._obj, jl.Finch.LazyTensor) 330 | 331 | @classmethod 332 | def preprocess_order(cls, order: OrderType, ndim: int) -> tuple[int, ...]: 333 | if order == cls.column_major: 334 | permutation = tuple(range(1, ndim + 1)) 335 | elif order == cls.row_major or order is None: 336 | permutation = tuple(range(1, ndim + 1)[::-1]) 337 | elif isinstance(order, tuple): 338 | if builtins.min(order) == 0: 339 | order = tuple(i + 1 for i in order) 340 | if len(order) == ndim and builtins.all( 341 | [i in order for i in range(1, ndim + 1)] 342 | ): 343 | permutation = order 344 | else: 345 | raise ValueError(f"Custom order is not a permutation: {order}.") 346 | else: 347 | raise ValueError( 348 | f"order must be 'C', 'F' or a tuple, but is: {type(order)}." 
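                # (a custom order must be a permutation of the dimensions,
                # given either zero-based, e.g. (1, 0), or one-based,
                # e.g. (2, 1), for a transposed 2-D layout)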
349 |             )
350 | 
351 |         return permutation
352 | 
353 |     @classmethod
354 |     def get_lvl_ndim(cls, lvl: JuliaObj) -> int:
355 |         ndim = 0
356 |         while True:
357 |             ndim += 1
358 |             lvl = lvl.lvl
359 |             if jl.isa(lvl, jl.Finch.Element):
360 |                 break
361 |         return ndim
362 | 
363 |     def get_order(self, zero_indexing: bool = True) -> tuple[int, ...]:
364 |         order = self._order
365 |         if zero_indexing:
366 |             order = tuple(i - 1 for i in order)
367 |         return order
368 | 
369 |     def get_inv_order(self, zero_indexing: bool = True) -> tuple[int, ...]:
370 |         inv_order = jl.invperm(self._order)
371 |         if zero_indexing:
372 |             inv_order = tuple(i - 1 for i in inv_order)
373 |         return inv_order
374 | 
375 |     def todense(self) -> np.ndarray:
376 |         obj = self._obj
377 | 
378 |         if self._is_dense:
379 |             # don't materialize a dense finch tensor
380 |             shape = jl.size(obj.body)
381 |             dense_tensor = obj.body.lvl
382 |         else:
383 |             # create materialized dense array
384 |             shape = jl.size(obj)
385 |             dense_lvls = jl.Element(jc.convert(self.dtype, jl.fill_value(obj)))
386 |             for _ in range(self.ndim):
387 |                 dense_lvls = jl.Dense(dense_lvls)
388 |             dense_tensor = jl.Tensor(dense_lvls, obj).lvl  # materialize
389 | 
390 |         for _ in range(self.ndim):
391 |             dense_tensor = dense_tensor.lvl
392 | 
393 |         result = np.asarray(jl.reshape(dense_tensor.val, shape))
394 |         return result.transpose(self.get_order()) if self._is_dense else result
395 | 
396 |     def permute_dims(self, axes: tuple[int, ...]) -> Tensor:
397 |         axes = tuple(i + 1 for i in axes)
398 |         new_obj = jl.permutedims(self._obj, axes)
399 |         new_tensor = Tensor(new_obj)
400 |         return new_tensor
401 | 
402 |     def to_storage(self, storage: Storage) -> Tensor:
403 |         return Tensor(self._from_other_tensor(self, storage=storage))
404 | 
405 |     @classmethod
406 |     def _from_other_tensor(cls, tensor: Tensor, storage: Storage) -> JuliaObj:
407 |         order = cls.preprocess_order(storage.order, tensor.ndim)
408 |         result = jl.copyto_b(
409 |             jl.swizzle(jl.Tensor(storage.levels_descr._obj), *order), tensor._obj
410 |         )
411 |         return jl.dropfills(result) if tensor._is_dense else result
412 | 
413 |     @classmethod
414 |     def _from_numpy(
415 |         cls, arr: np.ndarray, fill_value: np.number, copy: bool | None = None
416 |     ) -> JuliaObj:
417 |         if copy:
418 |             arr = arr.copy()
419 |         order_char = "F" if np.isfortran(arr) else "C"
420 |         order = cls.preprocess_order(order_char, arr.ndim)
421 |         inv_order = tuple(i - 1 for i in jl.invperm(order))
422 | 
423 |         dtype = arr.dtype.type
424 |         if (
425 |             dtype == np.bool_
426 |         ):  # Fails with: Finch currently only supports isbits defaults
427 |             dtype = jl_dtypes.bool
428 |         fill_value = dtype(fill_value)
429 |         lvl = Element(fill_value, arr.reshape(-1, order=order_char))
430 |         for i in inv_order:
431 |             lvl = Dense(lvl, arr.shape[i])
432 |         return jl.swizzle(jl.Tensor(lvl._obj), *order)
433 | 
434 |     @classmethod
435 |     def from_scipy_sparse(
436 |         cls,
437 |         x,
438 |         fill_value: np.number | None = None,
439 |         copy: bool | None = None,
440 |     ) -> Tensor:
441 |         if not _is_scipy_sparse_obj(x):
442 |             raise ValueError(f"{x} is not a SciPy sparse object.")
443 |         return Tensor(x, fill_value=fill_value, copy=copy)
444 | 
445 |     @classmethod
446 |     def _from_scipy_sparse(
447 |         cls,
448 |         x,
449 |         *,
450 |         fill_value: np.number | None = None,
451 |         copy: bool | None = None,
452 |     ) -> JuliaObj:
453 |         if copy is False and not (
454 |             x.format in ("coo", "csr", "csc") and x.has_canonical_format
455 |         ):
456 |             raise ValueError(
457 |                 "Unable to avoid copy while creating an array as requested."
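                # copy=False can only be honored for canonical COO/CSR/CSC
                # inputs, which Finch can wrap without conversion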
458 | ) 459 | if x.format not in ("coo", "csr", "csc"): 460 | x = x.asformat("coo") 461 | if copy: 462 | x = x.copy() 463 | if not x.has_canonical_format: 464 | x.sum_duplicates() 465 | assert x.has_canonical_format 466 | 467 | if x.format == "coo": 468 | return cls.construct_coo_jl_object( 469 | coords=(x.col, x.row), 470 | data=x.data, 471 | shape=x.shape[::-1], 472 | order=Tensor.row_major, 473 | fill_value=fill_value, 474 | ) 475 | elif x.format == "csc": 476 | return cls.construct_csc_jl_object( 477 | arg=(x.data, x.indices, x.indptr), 478 | shape=x.shape, 479 | fill_value=fill_value, 480 | ) 481 | elif x.format == "csr": 482 | return cls.construct_csr_jl_object( 483 | arg=(x.data, x.indices, x.indptr), 484 | shape=x.shape, 485 | fill_value=fill_value, 486 | ) 487 | else: 488 | raise ValueError(f"Unsupported SciPy format: {type(x)}") 489 | 490 | @classmethod 491 | def construct_coo_jl_object( 492 | cls, coords, data, shape, order, fill_value=0.0 493 | ) -> JuliaObj: 494 | assert len(coords) == 2 495 | ndim = len(shape) 496 | order = cls.preprocess_order(order, ndim) 497 | 498 | lvl = jl.Element(data.dtype.type(fill_value), data) 499 | ptr = jl.Vector[jl.Int]([1, len(data) + 1]) 500 | tbl = tuple(jl.PlusOneVector(arr) for arr in coords) 501 | 502 | jl_data = jl.swizzle( 503 | jl.Tensor(jl.SparseCOO[ndim](lvl, shape, ptr, tbl)), *order 504 | ) 505 | return jl_data 506 | 507 | @classmethod 508 | def construct_coo( 509 | cls, coords, data, shape, order=row_major, fill_value=0.0 510 | ) -> Tensor: 511 | return Tensor( 512 | cls.construct_coo_jl_object(coords, data, shape, order, fill_value) 513 | ) 514 | 515 | @staticmethod 516 | def _construct_compressed2d_jl_object( 517 | arg: TupleOf3Arrays, 518 | shape: tuple[int, ...], 519 | order: tuple[int, ...], 520 | fill_value: np.number = 0.0, 521 | ) -> JuliaObj: 522 | assert isinstance(arg, tuple) and len(arg) == 3 523 | assert len(shape) == 2 524 | 525 | data, indices, indptr = arg 526 | dtype = data.dtype.type 527 | indices = jl.PlusOneVector(indices) 528 | indptr = jl.PlusOneVector(indptr) 529 | 530 | lvl = jl.Element(dtype(fill_value), data) 531 | jl_data = jl.swizzle( 532 | jl.Tensor( 533 | jl.Dense(jl.SparseList(lvl, shape[0], indptr, indices), shape[1]) 534 | ), 535 | *order, 536 | ) 537 | return jl_data 538 | 539 | @classmethod 540 | def construct_csc_jl_object( 541 | cls, arg: TupleOf3Arrays, shape: tuple[int, ...], fill_value: np.number = 0.0 542 | ) -> JuliaObj: 543 | return cls._construct_compressed2d_jl_object( 544 | arg=arg, shape=shape, order=(1, 2), fill_value=fill_value 545 | ) 546 | 547 | @classmethod 548 | def construct_csc( 549 | cls, arg: TupleOf3Arrays, shape: tuple[int, ...], fill_value: np.number = 0.0 550 | ) -> Tensor: 551 | return Tensor(cls.construct_csc_jl_object(arg, shape, fill_value)) 552 | 553 | @classmethod 554 | def construct_csr_jl_object( 555 | cls, arg: TupleOf3Arrays, shape: tuple[int, ...], fill_value: np.number = 0.0 556 | ) -> JuliaObj: 557 | return cls._construct_compressed2d_jl_object( 558 | arg=arg, shape=shape[::-1], order=(2, 1), fill_value=fill_value 559 | ) 560 | 561 | @classmethod 562 | def construct_csr( 563 | cls, arg: TupleOf3Arrays, shape: tuple[int, ...], fill_value: np.number = 0.0 564 | ) -> Tensor: 565 | return Tensor(cls.construct_csr_jl_object(arg, shape, fill_value)) 566 | 567 | @staticmethod 568 | def construct_csf_jl_object( 569 | arg: TupleOf3Arrays, shape: tuple[int, ...], fill_value: np.number = 0.0 570 | ) -> JuliaObj: 571 | assert isinstance(arg, tuple) and len(arg) == 3 572 | 
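        # CSF layout: a Dense outermost dimension over nested SparseList
        # levels; indices_list[i]/indptr_list[i] describe the i-th compressed
        # level (innermost first), so there are len(shape) - 1 of each.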
573 | data, indices_list, indptr_list = arg 574 | dtype = data.dtype.type 575 | 576 | assert len(indices_list) == len(shape) - 1 577 | assert len(indptr_list) == len(shape) - 1 578 | 579 | indices_list = [jl.PlusOneVector(i) for i in indices_list] 580 | indptr_list = [jl.PlusOneVector(i) for i in indptr_list] 581 | 582 | lvl = jl.Element(dtype(fill_value), data) 583 | for size, indices, indptr in zip(shape[:-1], indices_list, indptr_list): 584 | lvl = jl.SparseList(lvl, size, indptr, indices) 585 | 586 | jl_data = jl.swizzle( 587 | jl.Tensor(jl.Dense(lvl, shape[-1])), *range(1, len(shape) + 1) 588 | ) 589 | return jl_data 590 | 591 | @classmethod 592 | def construct_csf( 593 | cls, arg: TupleOf3Arrays, shape: tuple[int, ...], fill_value: np.number = 0.0 594 | ) -> Tensor: 595 | return Tensor(cls.construct_csf_jl_object(arg, shape, fill_value)) 596 | 597 | def to_scipy_sparse(self, accept_fv=None): 598 | import scipy.sparse as sp 599 | 600 | if accept_fv is None: 601 | accept_fv = [0] 602 | elif not isinstance(accept_fv, Iterable): 603 | accept_fv = [accept_fv] 604 | 605 | if self.ndim != 2: 606 | raise ValueError( 607 | "Can only convert a 2-dimensional array to a Scipy sparse matrix." 608 | ) 609 | if not builtins.any(_eq_scalars(self.fill_value, fv) for fv in accept_fv): 610 | raise ValueError( 611 | f"Can only convert arrays with {accept_fv} fill-values " 612 | "to a Scipy sparse matrix." 613 | ) 614 | order = self.get_order() 615 | body = self._obj.body 616 | 617 | if str(jl.typeof(body.lvl).name.name) == "SparseCOOLevel": 618 | data = np.asarray(body.lvl.lvl.val) 619 | coords = body.lvl.tbl 620 | row, col = coords[::-1] if order == (1, 0) else coords 621 | row, col = np.asarray(row) - 1, np.asarray(col) - 1 622 | return sp.coo_matrix((data, (row, col)), shape=self.shape) 623 | 624 | if ( 625 | str(jl.typeof(body.lvl).name.name) == "DenseLevel" 626 | and str(jl.typeof(body.lvl.lvl).name.name) == "SparseListLevel" 627 | ): 628 | data = np.asarray(body.lvl.lvl.lvl.val) 629 | indices = np.asarray(body.lvl.lvl.idx) - 1 630 | indptr = np.asarray(body.lvl.lvl.ptr) - 1 631 | sp_class = sp.csr_matrix if order == (1, 0) else sp.csc_matrix 632 | return sp_class((data, indices, indptr), shape=self.shape) 633 | if ( 634 | jl.typeof(body.lvl).name.name in sparse_formats_names 635 | or jl.typeof(body.lvl.lvl).name.name in sparse_formats_names 636 | ): 637 | storage = Storage(SparseCOO(self.ndim, Element(self.fill_value)), order) 638 | return self.to_storage(storage).to_scipy_sparse() 639 | else: 640 | raise ValueError("Tensor can't be converted to scipy.sparse object.") 641 | 642 | @staticmethod 643 | def _raise_julia_copy_not_supported() -> None: 644 | raise ValueError("copy=True isn't supported for Julia object inputs") 645 | 646 | def __array_namespace__(self, *, api_version: str | None = None) -> Any: 647 | if api_version is None: 648 | api_version = "2024.12" 649 | 650 | if api_version not in {"2021.12", "2022.12", "2023.12", "2024.12"}: 651 | raise ValueError(f'"{api_version}" Array API version not supported.') 652 | import finch 653 | 654 | return finch 655 | 656 | 657 | def random(shape, density=0.01, random_state=None): 658 | args = [*shape, density] 659 | if random_state is not None: 660 | if isinstance(random_state, np.random.Generator): 661 | seed = random_state.integers(np.iinfo(np.int32).max) 662 | else: 663 | seed = random_state 664 | rng = jl.Random.default_rng() 665 | jl.Random.seed_b(rng, seed) 666 | args = [rng] + args 667 | return Tensor(jl.fsprand(*args)) 668 | 669 | 670 | def 
asarray( 671 | obj, 672 | /, 673 | *, 674 | dtype: DType | None = None, 675 | format: str | None = None, 676 | fill_value: np.number | None = None, 677 | device: Device | None = None, 678 | copy: bool | None = None, 679 | ) -> Tensor: 680 | if format not in {"coo", "csr", "csc", "csf", "dense", None}: 681 | raise ValueError(f"{format} format not supported.") 682 | _validate_device(device) 683 | tensor = ( 684 | obj 685 | if isinstance(obj, Tensor) 686 | else Tensor(obj, fill_value=fill_value, copy=copy) 687 | ) 688 | if format is not None: 689 | if copy is False: 690 | raise ValueError( 691 | "Unable to avoid copy while creating an array as requested." 692 | ) 693 | order = tensor.get_order() 694 | if format == "coo": 695 | storage = Storage(SparseCOO(tensor.ndim, Element(tensor.fill_value)), order) 696 | elif format == "csr": 697 | storage = Storage(Dense(SparseList(Element(tensor.fill_value))), (2, 1)) 698 | elif format == "csc": 699 | storage = Storage(Dense(SparseList(Element(tensor.fill_value))), (1, 2)) 700 | elif format == "csf": 701 | storage = Element(tensor.fill_value) 702 | for _ in range(tensor.ndim - 1): 703 | storage = SparseList(storage) 704 | storage = Storage(Dense(storage), order) 705 | elif format == "dense": 706 | storage = DenseStorage(tensor.ndim, tensor.dtype, order) 707 | tensor = tensor.to_storage(storage) 708 | 709 | if dtype is not None: 710 | return astype(tensor, dtype, copy=copy) 711 | else: 712 | return tensor 713 | 714 | 715 | def reshape( 716 | x: Tensor, /, shape: tuple[int, ...], *, copy: bool | None = None 717 | ) -> Tensor: 718 | if copy is False: 719 | raise ValueError("Unable to avoid copy during reshape.") 720 | # TODO: https://github.com/finch-tensor/Finch.jl/issues/743 721 | # Revert to `jl.reshape` implementation once aforementioned 722 | # issue is solved. 
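    # Densifying costs O(prod(shape)) extra memory, hence the
    # PerformanceWarning below.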
723 | warnings.warn("`reshape` densified the input tensor.", PerformanceWarning) 724 | arr = x.todense() 725 | arr = arr.reshape(shape) 726 | return Tensor(arr) 727 | 728 | def full( 729 | shape: int | tuple[int, ...], 730 | fill_value: jl_dtypes.number, 731 | *, 732 | dtype: DType | None = None, 733 | format: str = "coo", 734 | device: Device = None, 735 | ) -> Tensor: 736 | _validate_device(device) 737 | if not np.isscalar(fill_value): 738 | raise ValueError("`fill_value` must be a scalar") 739 | if format not in ("coo", "dense"): 740 | raise ValueError(f"{format} format not supported.") 741 | if isinstance(shape, int): 742 | shape = (shape,) 743 | dtype = ( 744 | np.asarray(fill_value).dtype.type 745 | if dtype is None 746 | else jl_dtypes.jl_to_np_dtype[dtype] 747 | ) 748 | if dtype == np.bool_: # Fails with: Finch currently only supports isbits defaults 749 | dtype = bool 750 | 751 | if format == "coo" and shape != (): 752 | return Tensor( 753 | jl.Tensor(jl.SparseCOO[len(shape)](jl.Element(dtype(fill_value))), *shape) 754 | ) 755 | else: # for dense format or () shape 756 | return Tensor(np.full(shape, fill_value, dtype=dtype)) 757 | 758 | 759 | def full_like( 760 | x: Tensor, 761 | /, 762 | fill_value: jl_dtypes.number, 763 | *, 764 | dtype: DType | None = None, 765 | format: str = "coo", 766 | device: Device = None, 767 | ) -> Tensor: 768 | return full(x.shape, fill_value, dtype=dtype, format=format, device=device) 769 | 770 | 771 | def ones( 772 | shape: int | tuple[int, ...], 773 | *, 774 | dtype: DType | None = None, 775 | format: str = "coo", 776 | device: Device = None, 777 | ) -> Tensor: 778 | return full(shape, np.float64(1), dtype=dtype, format=format, device=device) 779 | 780 | 781 | def ones_like( 782 | x: Tensor, 783 | /, 784 | *, 785 | dtype: DType | None = None, 786 | format: str = "coo", 787 | device: Device = None, 788 | ) -> Tensor: 789 | dtype = x.dtype if dtype is None else dtype 790 | return ones(x.shape, dtype=dtype, format=format, device=device) 791 | 792 | 793 | def zeros( 794 | shape: int | tuple[int, ...], 795 | *, 796 | dtype: DType | None = None, 797 | format: str = "coo", 798 | device: Device = None, 799 | ) -> Tensor: 800 | return full(shape, np.float64(0), dtype=dtype, format=format, device=device) 801 | 802 | 803 | def zeros_like( 804 | x: Tensor, 805 | /, 806 | *, 807 | dtype: DType | None = None, 808 | format: str = "coo", 809 | device: Device = None, 810 | ) -> Tensor: 811 | dtype = x.dtype if dtype is None else dtype 812 | return zeros(x.shape, dtype=dtype, format=format, device=device) 813 | 814 | 815 | def empty( 816 | shape: int | tuple[int, ...], 817 | *, 818 | dtype: DType | None = None, 819 | format: str = "coo", 820 | device: Device = None, 821 | ) -> Tensor: 822 | return full(shape, np.float64(0), dtype=dtype, format=format, device=device) 823 | 824 | 825 | def empty_like( 826 | x: Tensor, 827 | /, 828 | *, 829 | dtype: DType | None = None, 830 | format: str = "coo", 831 | device: Device = None, 832 | ) -> Tensor: 833 | dtype = x.dtype if dtype is None else dtype 834 | return empty(x.shape, dtype=dtype, format=format, device=device) 835 | 836 | 837 | def arange( 838 | start: int | float, 839 | /, 840 | stop: int | float | None = None, 841 | step: int | float = 1, 842 | *, 843 | dtype: DType | None = None, 844 | device: Device = None, 845 | ) -> Tensor: 846 | _validate_device(device) 847 | return Tensor(np.arange(start, stop, step, jl_dtypes.jl_to_np_dtype[dtype])) 848 | 849 | 850 | def linspace( 851 | start: int | float | complex, 852 | 
stop: int | float | complex, 853 | /, 854 | num: int, 855 | *, 856 | dtype: DType | None = None, 857 | device: Device = None, 858 | endpoint: bool = True, 859 | ) -> Tensor: 860 | _validate_device(device) 861 | return Tensor( 862 | np.linspace( 863 | start, 864 | stop, 865 | num=num, 866 | dtype=jl_dtypes.jl_to_np_dtype[dtype], 867 | endpoint=endpoint, 868 | ) 869 | ) 870 | 871 | 872 | def permute_dims(x: Tensor, axes: tuple[int, ...]) -> Tensor: 873 | return x.permute_dims(axes) 874 | 875 | 876 | def moveaxis(x: Tensor, source: int, destination: int) -> Tensor: 877 | axes = list(range(x.ndim)) 878 | norm_source = normalize_axis_index(source, x.ndim) 879 | norm_dest = normalize_axis_index(destination, x.ndim) 880 | axes.insert(norm_dest, axes.pop(norm_source)) 881 | return x.permute_dims(tuple(axes)) 882 | 883 | 884 | def astype(x: Tensor, dtype: DType, /, *, copy: bool = True) -> Tensor: 885 | if not copy: 886 | if x.dtype == dtype: 887 | return x 888 | if copy is False: 889 | raise ValueError("Unable to avoid a copy while casting in no-copy mode.") 890 | 891 | finch_tns = x._obj.body 892 | result = jl.copyto_b( 893 | jl.similar(finch_tns, jc.convert(dtype, jl.fill_value(finch_tns)), dtype), 894 | finch_tns, 895 | ) 896 | return Tensor(jl.swizzle(result, *x.get_order(zero_indexing=False))) 897 | 898 | 899 | def where(condition: Tensor, x1: Tensor, x2: Tensor, /) -> Tensor: 900 | axis_cond, axis_x1, axis_x2 = ( 901 | range(condition.ndim, 0, -1), 902 | range(x1.ndim, 0, -1), 903 | range(x2.ndim, 0, -1), 904 | ) 905 | # inverse swizzle, so `broadcast` appends new dims to the front 906 | result = jl.broadcast( 907 | jl.ifelse, 908 | jl.permutedims(condition._obj, tuple(axis_cond)), 909 | jl.permutedims(x1._obj, tuple(axis_x1)), 910 | jl.permutedims(x2._obj, tuple(axis_x2)), 911 | ) 912 | # swizzle back to the original order 913 | result = jl.permutedims(result, tuple(range(jl.ndims(result), 0, -1))) 914 | return Tensor(result) 915 | 916 | 917 | def nonzero(x: Tensor, /) -> tuple[np.ndarray, ...]: 918 | indices = jl.ffindnz(x._obj)[:-1] # return only indices, skip values 919 | indices = tuple(np.asarray(i) - 1 for i in indices) 920 | sort_order = np.lexsort(indices[::-1]) # sort to row-major, C-style order 921 | return tuple(Tensor(i[sort_order]) for i in indices) 922 | 923 | 924 | def _reduce_core(x: Tensor, fn: Callable, axis: int | tuple[int, ...] | None, keepdims: bool = False): 925 | if axis is None: 926 | axis = tuple(range(x.ndim)) 927 | axis = normalize_axis_tuple(axis, x.ndim) 928 | axis = tuple(i + 1 for i in axis) 929 | if keepdims: 930 | return fn(x._obj, dims=axis) 931 | else: 932 | if x.is_computed(): 933 | return jl.compute(jl.dropdims(fn(jl.lazy(x._obj), dims=axis), dims=axis)) 934 | else: 935 | return jl.dropdims(fn(x._obj, dims=axis), dims=axis) 936 | 937 | 938 | def _reduce_sum_prod( 939 | x: Tensor, 940 | fn: Callable, 941 | axis: int | tuple[int, ...] 
| None, 942 | dtype: DType | None, 943 | keepdims: bool = False, 944 | ) -> Tensor: 945 | result = _reduce_core(x, fn, axis, keepdims) 946 | 947 | if np.isscalar(result): 948 | if jl.seval(f"{x.dtype} <: Integer"): 949 | tmp_dtype = jl_dtypes.int_ 950 | else: 951 | tmp_dtype = x.dtype 952 | result = jl.Tensor( 953 | jl.Element( 954 | jc.convert(tmp_dtype, 0), 955 | np.array(result, dtype=jl_dtypes.jl_to_np_dtype[tmp_dtype]), 956 | ) 957 | ) 958 | 959 | result = Tensor(result) 960 | 961 | if jl.isa(result._obj, jl.Finch.LazyTensor): 962 | if dtype is not None: 963 | raise ValueError( 964 | "`dtype` keyword for `sum` and `prod` in the lazy mode isn't supported" 965 | ) 966 | # dtype casting rules 967 | elif dtype is not None: 968 | result = astype(result, dtype, copy=None) 969 | elif jl.seval(f"{x.dtype} <: Unsigned"): 970 | result = astype(result, jl_dtypes.uint, copy=None) 971 | elif jl.seval(f"{x.dtype} <: Signed"): 972 | result = astype(result, jl_dtypes.int_, copy=None) 973 | 974 | return result 975 | 976 | 977 | def _reduce(x: Tensor, fn: Callable, axis: int | tuple[int, ...] | None, keepdims: bool = False) -> Tensor: 978 | result = _reduce_core(x, fn, axis, keepdims) 979 | if np.isscalar(result): 980 | result = jl.Tensor( 981 | jl.Element( 982 | jc.convert(x.dtype, 0), 983 | np.array(result, dtype=jl_dtypes.jl_to_np_dtype[x.dtype]), 984 | ) 985 | ) 986 | return Tensor(result) 987 | 988 | 989 | def sum( 990 | x: Tensor, 991 | /, 992 | *, 993 | axis: int | tuple[int, ...] | None = None, 994 | dtype: DType | None = None, 995 | keepdims: bool = False, 996 | ) -> Tensor: 997 | return _reduce_sum_prod(x, jl.sum, axis, dtype, keepdims) 998 | 999 | 1000 | def prod( 1001 | x: Tensor, 1002 | /, 1003 | *, 1004 | axis: int | tuple[int, ...] | None = None, 1005 | dtype: DType | None = None, 1006 | keepdims: bool = False, 1007 | ) -> Tensor: 1008 | return _reduce_sum_prod(x, jl.prod, axis, dtype, keepdims) 1009 | 1010 | 1011 | def max( 1012 | x: Tensor, 1013 | /, 1014 | *, 1015 | axis: int | tuple[int, ...] | None = None, 1016 | keepdims: bool = False, 1017 | ) -> Tensor: 1018 | return _reduce(x, jl.maximum, axis, keepdims) 1019 | 1020 | 1021 | def min( 1022 | x: Tensor, 1023 | /, 1024 | *, 1025 | axis: int | tuple[int, ...] | None = None, 1026 | keepdims: bool = False, 1027 | ) -> Tensor: 1028 | return _reduce(x, jl.minimum, axis, keepdims) 1029 | 1030 | 1031 | def any( 1032 | x: Tensor, 1033 | /, 1034 | *, 1035 | axis: int | tuple[int, ...] | None = None, 1036 | keepdims: bool = False, 1037 | ) -> Tensor: 1038 | return _reduce(x != 0, jl.any, axis, keepdims) 1039 | 1040 | 1041 | def all( 1042 | x: Tensor, 1043 | /, 1044 | *, 1045 | axis: int | tuple[int, ...] | None = None, 1046 | keepdims: bool = False, 1047 | ) -> Tensor: 1048 | return _reduce(x != 0, jl.all, axis, keepdims) 1049 | 1050 | def mean( 1051 | x: Tensor, 1052 | /, 1053 | *, 1054 | axis: int | tuple[int, ...] | None = None, 1055 | keepdims: bool = False, 1056 | ) -> Tensor: 1057 | return _reduce(x, jl.mean, axis, keepdims) 1058 | 1059 | def std( 1060 | x: Tensor, 1061 | /, 1062 | *, 1063 | axis: int | tuple[int, ...] | None = None, 1064 | correction: int | float = 0.0, 1065 | keepdims: bool = False, 1066 | ) -> Tensor: 1067 | def _std(x): 1068 | return jl.std(x, correction=correction) 1069 | return _reduce(x, _std, axis, keepdims) 1070 | 1071 | def var( 1072 | x: Tensor, 1073 | /, 1074 | *, 1075 | axis: int | tuple[int, ...] 
| None = None, 1076 | correction: int | float = 0.0, 1077 | keepdims: bool = False, 1078 | ) -> Tensor: 1079 | def _var(x): 1080 | return jl.var(x, correction=correction) 1081 | return _reduce(x, _var, axis, keepdims) 1082 | 1083 | def argmin( 1084 | x: Tensor, 1085 | /, 1086 | *, 1087 | axis: int | None = None, 1088 | keepdims: bool = False, 1089 | ) -> Tensor: 1090 | return _reduce(x, jl.Finch.argmin_python, axis, keepdims) 1091 | 1092 | def argmax( 1093 | x: Tensor, 1094 | /, 1095 | *, 1096 | axis: int | None = None, 1097 | keepdims: bool = False, 1098 | ) -> Tensor: 1099 | return _reduce(x, jl.Finch.argmax_python, axis, keepdims) 1100 | 1101 | def squeeze( 1102 | x: Tensor, 1103 | /, 1104 | axis: int | tuple[int, ...], 1105 | ) -> Tensor: 1106 | if isinstance(axis, int): 1107 | axis = (axis,) 1108 | axis = normalize_axis_tuple(axis, x.ndim) 1109 | axis = tuple(i + 1 for i in axis) 1110 | result = jl.dropdims(x._obj, dims=axis) 1111 | return Tensor(result) 1112 | 1113 | def expand_dims( 1114 | x: Tensor, 1115 | /, 1116 | axis: int | tuple[int, ...] = 0, 1117 | ) -> Tensor: 1118 | if isinstance(axis, int): 1119 | axis = (axis,) 1120 | axis = normalize_axis_tuple(axis, x.ndim + len(axis)) 1121 | axis = tuple(i + 1 for i in axis) 1122 | result = jl.expanddims(x._obj, dims=axis) 1123 | return Tensor(result) 1124 | 1125 | 1126 | def diagonal( 1127 | x: Tensor, 1128 | /, 1129 | *, 1130 | offset: int = 0 1131 | ) -> Tensor: 1132 | m = x.shape[-2] 1133 | n = x.shape[-1] 1134 | mask = eye(m, n, k=offset, format="coo", dtype=bool) 1135 | res = compute(sum(where(mask, lazy(x), zeros(x.shape)), axis=-1)) 1136 | return res[..., 0:builtins.min(m, n, m + offset, n - offset)] 1137 | 1138 | def eye( 1139 | n_rows: int, 1140 | n_cols: int | None = None, 1141 | /, 1142 | *, 1143 | k: int = 0, 1144 | dtype: DType | None = None, 1145 | format: Literal["coo", "dense"] = "coo", 1146 | device: Device = None, 1147 | ) -> Tensor: 1148 | _validate_device(device) 1149 | n_cols = n_rows if n_cols is None else n_cols 1150 | dtype = jl_dtypes.float64 if dtype is None else dtype 1151 | tns = jl.Finch.eye_python(n_rows, n_cols, k, dtype(False)) 1152 | if format == "coo": 1153 | return Tensor(tns) 1154 | elif format == "dense": 1155 | return Tensor(jl.Tensor(jl.DenseFormat(2, dtype(False)), tns)) 1156 | else: 1157 | raise ValueError(f"{format} not supported, only 'coo' and 'dense' is allowed.") 1158 | 1159 | def tensordot(x1: Tensor, x2: Tensor, /, *, axes=2) -> Tensor: 1160 | if not isinstance(x1, Tensor): 1161 | x1 = Tensor(x1) 1162 | if not isinstance(x2, Tensor): 1163 | x2 = Tensor(x2) 1164 | if isinstance(axes, Iterable): 1165 | self_axes = normalize_axis_tuple(axes[0], x1.ndim) 1166 | other_axes = normalize_axis_tuple(axes[1], x2.ndim) 1167 | axes = (tuple(i + 1 for i in self_axes), tuple(i + 1 for i in other_axes)) 1168 | 1169 | result = jl.tensordot(x1._obj, x2._obj, axes) 1170 | return Tensor(result) 1171 | 1172 | 1173 | def log(x: Tensor, /) -> Tensor: 1174 | return x._elemwise_op("log") 1175 | 1176 | 1177 | def log10(x: Tensor, /) -> Tensor: 1178 | return x._elemwise_op("log10") 1179 | 1180 | 1181 | def log1p(x: Tensor, /) -> Tensor: 1182 | return x._elemwise_op("log1p") 1183 | 1184 | 1185 | def log2(x: Tensor, /) -> Tensor: 1186 | return x._elemwise_op("log2") 1187 | 1188 | 1189 | def sqrt(x: Tensor, /) -> Tensor: 1190 | return x._elemwise_op("sqrt") 1191 | 1192 | 1193 | def sign(x: Tensor, /) -> Tensor: 1194 | return x._elemwise_op("sign") 1195 | 1196 | 1197 | def round(x: Tensor, /) -> Tensor: 1198 | 
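    # Julia's `round` rounds half-way cases to the nearest even integer,
    # which matches the array API's `round` semantics.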
return x._elemwise_op("round")
1199 | 
1200 | 
1201 | def isnan(x: Tensor, /) -> Tensor:
1202 |     return x._elemwise_op("isnan")
1203 | 
1204 | 
1205 | def isinf(x: Tensor, /) -> Tensor:
1206 |     return x._elemwise_op("isinf")
1207 | 
1208 | 
1209 | def isfinite(x: Tensor, /) -> Tensor:
1210 |     return x._elemwise_op("isfinite")
1211 | 
1212 | 
1213 | def exp(x: Tensor, /) -> Tensor:
1214 |     return x._elemwise_op("exp")
1215 | 
1216 | 
1217 | def expm1(x: Tensor, /) -> Tensor:
1218 |     return x._elemwise_op("expm1")
1219 | 
1220 | 
1221 | def floor(x: Tensor, /) -> Tensor:
1222 |     return x._elemwise_op("floor")
1223 | 
1224 | 
1225 | def ceil(x: Tensor, /) -> Tensor:
1226 |     return x._elemwise_op("ceil")
1227 | 
1228 | 
1229 | def cos(x: Tensor, /) -> Tensor:
1230 |     return x._elemwise_op("cos")
1231 | 
1232 | 
1233 | def cosh(x: Tensor, /) -> Tensor:
1234 |     return x._elemwise_op("cosh")
1235 | 
1236 | 
1237 | def acos(x: Tensor, /) -> Tensor:
1238 |     return x._elemwise_op("acos")
1239 | 
1240 | 
1241 | def acosh(x: Tensor, /) -> Tensor:
1242 |     return x._elemwise_op("acosh")
1243 | 
1244 | 
1245 | def sin(x: Tensor, /) -> Tensor:
1246 |     return x._elemwise_op("sin")
1247 | 
1248 | 
1249 | def sinh(x: Tensor, /) -> Tensor:
1250 |     return x._elemwise_op("sinh")
1251 | 
1252 | 
1253 | def asin(x: Tensor, /) -> Tensor:
1254 |     return x._elemwise_op("asin")
1255 | 
1256 | 
1257 | def asinh(x: Tensor, /) -> Tensor:
1258 |     return x._elemwise_op("asinh")
1259 | 
1260 | 
1261 | def tan(x: Tensor, /) -> Tensor:
1262 |     return x._elemwise_op("tan")
1263 | 
1264 | 
1265 | def tanh(x: Tensor, /) -> Tensor:
1266 |     return x._elemwise_op("tanh")
1267 | 
1268 | 
1269 | def atan(x: Tensor, /) -> Tensor:
1270 |     return x._elemwise_op("atan")
1271 | 
1272 | 
1273 | def atanh(x: Tensor, /) -> Tensor:
1274 |     return x._elemwise_op("atanh")
1275 | 
1276 | 
1277 | def atan2(x1: Tensor, x2: Tensor, /) -> Tensor:
1278 |     return x1._elemwise_op("atan", x2)
1279 | 
1280 | 
1281 | def trunc(x: Tensor, /) -> Tensor:
1282 |     return x._elemwise_op("trunc")
1283 | 
1284 | 
1285 | def real(x: Tensor, /) -> Tensor:
1286 |     return x._elemwise_op("real")
1287 | 
1288 | 
1289 | def imag(x: Tensor, /) -> Tensor:
1290 |     return x._elemwise_op("imag")
1291 | 
1292 | 
1293 | def conj(x: Tensor, /) -> Tensor:
1294 |     return x._elemwise_op("conj")
1295 | 
1296 | 
1297 | def square(x: Tensor, /) -> Tensor:
1298 |     return x ** Tensor(2)
1299 | 
1300 | 
1301 | def logaddexp(x1: Tensor, x2: Tensor, /) -> Tensor:
1302 |     return log(exp(x1) + exp(x2))
1303 | 
1304 | 
1305 | def logical_and(x1: Tensor, x2: Tensor, /) -> Tensor:
1306 |     return x1._elemwise_op("Finch.and", x2)
1307 | 
1308 | 
1309 | def logical_or(x1: Tensor, x2: Tensor, /) -> Tensor:
1310 |     return x1._elemwise_op("Finch.or", x2)
1311 | 
1312 | 
1313 | def logical_xor(x1: Tensor, x2: Tensor, /) -> Tensor:
1314 |     return x1._elemwise_op("Finch.xor", x2)
1315 | 
1316 | 
1317 | def _is_scipy_sparse_obj(x):
1318 |     return hasattr(x, "__module__") and x.__module__.startswith("scipy.sparse")
1319 | 
1320 | 
1321 | def _slice_plus_one(s: slice, size: int) -> JuliaObj:
1322 |     step = s.step if s.step is not None else 1
1323 |     start_default = size if step < 0 else 1
1324 |     stop_default = 1 if step < 0 else size
1325 | 
1326 |     if s.start is not None:
1327 |         start = normalize_axis_index(s.start, size) + 1 if s.start < size else size
1328 |     else:
1329 |         start = start_default
1330 | 
1331 |     if s.stop is not None:
1332 |         stop_offset = 2 if step < 0 else 0
1333 |         stop = (
1334 |             normalize_axis_index(s.stop, size) + stop_offset if s.stop < size else size
1335 |         )
1336 |     else:
1337 | 
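        # no explicit stop given: fall back to the one-based default for
        # this step direction (computed above)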
stop = stop_default 1338 | 1339 | if (start, stop, step) == (1, size, 1): 1340 | return jl.Colon() 1341 | 1342 | return jl.range(start=start, step=step, stop=stop) 1343 | 1344 | 1345 | def _add_plus_one(key: tuple, shape: tuple[int, ...]) -> tuple: 1346 | new_key = [] 1347 | sizes = iter(shape) 1348 | for idx in key: 1349 | if idx is None: 1350 | new_key.append(jl.nothing) 1351 | continue 1352 | 1353 | size = next(sizes) 1354 | if isinstance(idx, int): 1355 | new_key.append(normalize_axis_index(idx, size) + 1) 1356 | elif isinstance(idx, slice): 1357 | new_key.append(_slice_plus_one(idx, size)) 1358 | elif isinstance(idx, (list, np.ndarray, tuple)): 1359 | idx = normalize_axis_tuple(idx, size) 1360 | new_key.append(jl.Vector([i + 1 for i in idx])) 1361 | else: 1362 | new_key.append(idx) 1363 | 1364 | return tuple(new_key) 1365 | 1366 | 1367 | def _expand_ellipsis(key: tuple, shape: tuple[int, ...]) -> tuple: 1368 | ellipsis_pos = None 1369 | key_without_ellipsis = [] 1370 | # first we need to find the ellipsis and confirm it's the only one 1371 | for pos, idx in enumerate(key): 1372 | if idx is Ellipsis: 1373 | if ellipsis_pos is None: 1374 | ellipsis_pos = pos 1375 | else: 1376 | raise IndexError("an index can only have a single ellipsis ('...')") 1377 | else: 1378 | key_without_ellipsis.append(idx) 1379 | key = key_without_ellipsis 1380 | 1381 | # then we expand ellipsis with a full range 1382 | if ellipsis_pos is not None: 1383 | n_missing_idxs = len(shape) - builtins.sum(1 for k in key if k is not None) 1384 | key = key[:ellipsis_pos] + [slice(None)] * n_missing_idxs + key[ellipsis_pos:] 1385 | 1386 | return tuple(key) 1387 | 1388 | 1389 | def _add_missing_dims(key: tuple, shape: tuple[int, ...]) -> tuple: 1390 | missing_dims = len(shape) - builtins.sum(1 for k in key if k is not None) 1391 | return key + (slice(None),) * missing_dims 1392 | 1393 | 1394 | def _process_lazy_indexing(key: tuple, ndim: int) -> tuple: 1395 | new_key = () 1396 | ellipsis_found = False 1397 | for idx in key: 1398 | if idx == slice(None): 1399 | new_key += (jl.Colon(),) 1400 | elif idx is None: 1401 | new_key += (jl.nothing,) 1402 | elif idx is Ellipsis: 1403 | num_of_colons = ndim - builtins.sum(1 for k in key if k is not None) + 1 1404 | new_key += (jl.Colon(),) * num_of_colons 1405 | if ellipsis_found: 1406 | raise IndexError("an index can only have a single ellipsis ('...')") 1407 | 1408 | ellipsis_found = True 1409 | else: 1410 | raise ValueError(f"Invalid lazy index member: {idx}") 1411 | return new_key 1412 | 1413 | 1414 | def _eq_scalars(x, y): 1415 | if x is None or y is None: 1416 | return x == y 1417 | if jl.isnan(x) or jl.isnan(y): 1418 | return jl.isnan(x) and jl.isnan(y) 1419 | else: 1420 | return x == y 1421 | 1422 | 1423 | def _validate_device(device: Device) -> None: 1424 | if device not in {"cpu", None}: 1425 | raise ValueError( 1426 | f'Device not understood. Only "cpu" is allowed, but received: {device}' 1427 | ) 1428 | -------------------------------------------------------------------------------- /src/finch/typing.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Literal 2 | 3 | import juliacall as jc 4 | import numpy as np 5 | 6 | 7 | OrderType = Literal["C", "F"] | tuple[int, ...] 
| None 8 | 9 | TupleOf3Arrays = tuple[np.ndarray, np.ndarray, np.ndarray] 10 | 11 | JuliaObj = jc.AnyValue 12 | 13 | DType = jc.AnyValue # represents jl.DataType 14 | 15 | spmatrix = Any 16 | 17 | Device = Literal["cpu"] | None 18 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/finch-tensor/finch-tensor-python/cdfe8f09e35c184acc1f8fc845d9a0a1f2beb492/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | 5 | @pytest.fixture 6 | def rng(): 7 | return np.random.default_rng(42) 8 | 9 | 10 | @pytest.fixture 11 | def arr1d(): 12 | return np.arange(100) 13 | 14 | 15 | @pytest.fixture 16 | def arr2d(): 17 | return np.array( 18 | [ 19 | [0, 0, 3, 2, 0], 20 | [1, 0, 0, 1, 0], 21 | [0, 5, 0, 0, 0], 22 | ] 23 | ) 24 | 25 | 26 | @pytest.fixture 27 | def arr3d(): 28 | return np.array( 29 | [ 30 | [[0, 1, 0, 0], [1, 0, 0, 3]], 31 | [[4, 0, -1, 0], [2, 2, 0, 0]], 32 | [[0, 0, 0, 0], [1, 5, 0, 3]], 33 | ] 34 | ) 35 | -------------------------------------------------------------------------------- /tests/data/matrix_1.ttx: -------------------------------------------------------------------------------- 1 | %%MatrixMarket matrix coordinate integer general 2 | 3 5 15 3 | 1 1 0 4 | 2 1 1 5 | 3 1 0 6 | 1 2 0 7 | 2 2 0 8 | 3 2 5 9 | 1 3 3 10 | 2 3 0 11 | 3 3 0 12 | 1 4 2 13 | 2 4 1 14 | 3 4 0 15 | 1 5 0 16 | 2 5 0 17 | 3 5 0 18 | -------------------------------------------------------------------------------- /tests/test_indexing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_equal 3 | import pytest 4 | import juliacall as jc 5 | 6 | import finch 7 | 8 | 9 | @pytest.mark.parametrize( 10 | "index", 11 | [ 12 | ..., 13 | 40, 14 | (32,), 15 | slice(None), 16 | slice(30, 60, 3), 17 | -10, 18 | slice(None, -10, -2), 19 | (None, slice(None)), 20 | ], 21 | ) 22 | @pytest.mark.parametrize("order", ["C", "F"]) 23 | def test_indexing_1d(arr1d, index, order): 24 | arr = np.array(arr1d, order=order) 25 | arr_finch = finch.Tensor(arr) 26 | 27 | actual = arr_finch[index] 28 | expected = arr[index] 29 | 30 | if isinstance(actual, finch.Tensor): 31 | actual = actual.todense() 32 | 33 | assert_equal(actual, expected) 34 | 35 | 36 | @pytest.mark.parametrize( 37 | "index", 38 | [ 39 | ..., 40 | 0, 41 | (2,), 42 | (2, 3), 43 | slice(None), 44 | (..., slice(0, 4, 2)), 45 | (-1, slice(-1, None, -1)), 46 | (None, slice(None), slice(None)), 47 | ], 48 | ) 49 | @pytest.mark.parametrize("order", ["C", "F"]) 50 | def test_indexing_2d(arr2d, index, order): 51 | arr = np.array(arr2d, order=order) 52 | arr_finch = finch.Tensor(arr) 53 | 54 | actual = arr_finch[index] 55 | expected = arr[index] 56 | 57 | if isinstance(actual, finch.Tensor): 58 | actual = actual.todense() 59 | 60 | assert_equal(actual, expected) 61 | 62 | 63 | @pytest.mark.parametrize( 64 | "index", 65 | [ 66 | (0, 1, 2), 67 | (1, 0, 0), 68 | (0, 1), 69 | 1, 70 | 2, 71 | (2, slice(None), 3), 72 | (slice(None), 0), 73 | slice(None), 74 | (0, slice(None), slice(1, 4, 2)), 75 | (0, 1, ...), 76 | (..., 1), 77 | (0, ..., 1), 78 | ..., 79 | (..., slice(1, 4, 2)), 80 | (slice(None, None, -1), slice(None, None, -1), slice(None, 
None, -1)), 81 | (slice(None, -1, 1), slice(-1, None, -1), slice(4, 1, -1)), 82 | (-1, 0, 0), 83 | (0, -1, -2), 84 | ([1, 2], 0, slice(3, None, -1)), 85 | (0, slice(1, 0, -1), 0), 86 | (slice(None), None, slice(None), slice(None)), 87 | (slice(None), slice(None), slice(None), None), 88 | ], 89 | ) 90 | @pytest.mark.parametrize( 91 | "levels_descr", 92 | [ 93 | finch.Dense(finch.Dense(finch.Dense(finch.Element(0)))), 94 | finch.Dense(finch.SparseList(finch.SparseList(finch.Element(0)))), 95 | ], 96 | ) 97 | @pytest.mark.parametrize("order", ["C", "F"]) 98 | def test_indexing_3d(arr3d, index, levels_descr, order): 99 | arr = np.array(arr3d, order=order) 100 | storage = finch.Storage(levels_descr, order=order) 101 | arr_finch = finch.Tensor(arr).to_storage(storage) 102 | 103 | actual = arr_finch[index] 104 | expected = arr[index] 105 | 106 | if isinstance(actual, finch.Tensor): 107 | actual = actual.todense() 108 | 109 | assert_equal(actual, expected) 110 | 111 | 112 | def test_lazy_none_ellipsis(arr3d): 113 | arr_finch = finch.lazy(finch.Tensor(arr3d)) 114 | assert_equal(finch.compute(arr_finch[..., None]).todense(), arr3d[..., None]) 115 | 116 | with pytest.raises( 117 | jc.JuliaError, 118 | match="Cannot index a lazy tensor with more or fewer `:` dims than it had original dims.", 119 | ): 120 | arr_finch[None, :] 121 | -------------------------------------------------------------------------------- /tests/test_io.py: -------------------------------------------------------------------------------- 1 | from numpy.testing import assert_equal 2 | 3 | import finch 4 | 5 | base_path = "tests/data" 6 | 7 | 8 | def test_read(arr2d): 9 | tns = finch.read(f"{base_path}/matrix_1.ttx") 10 | 11 | assert_equal(tns.todense(), arr2d) 12 | 13 | 14 | def test_write(tmp_path, arr2d): 15 | tns = finch.asarray(arr2d) 16 | finch.write(tmp_path / "tmp.ttx", tns) 17 | 18 | expected = open(f"{base_path}/matrix_1.ttx").read() 19 | actual = open(tmp_path / "tmp.ttx").read() 20 | 21 | assert actual == expected 22 | -------------------------------------------------------------------------------- /tests/test_linalg.py: -------------------------------------------------------------------------------- 1 | import finch 2 | import numpy as np 3 | from numpy.testing import assert_allclose 4 | import pytest 5 | 6 | 7 | arr1d = np.array([1, -1, 2, 3]) 8 | arr2d = np.array([[1, 2, 0, 4, 0], [0, -2, 1, 0, 1]]) 9 | 10 | 11 | @pytest.mark.parametrize("arr", [arr1d, arr2d]) 12 | @pytest.mark.parametrize("keepdims", [True, False]) 13 | @pytest.mark.parametrize( 14 | "ord", 15 | [ 16 | 0, 17 | 1, 18 | 10, 19 | finch.inf, 20 | -finch.inf, 21 | pytest.param( 22 | 2, 23 | marks=pytest.mark.skip( 24 | reason="https://github.com/finch-tensor/Finch.jl/pull/709" 25 | ), 26 | ), 27 | ], 28 | ) 29 | def test_vector_norm(arr, keepdims, ord): 30 | tns = finch.asarray(arr) 31 | 32 | actual = finch.linalg.vector_norm(tns, keepdims=keepdims, ord=ord) 33 | expected = np.linalg.vector_norm(arr, keepdims=keepdims, ord=ord) 34 | 35 | assert_allclose(actual.todense(), expected) 36 | -------------------------------------------------------------------------------- /tests/test_ops.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_equal, assert_allclose 3 | import pytest 4 | from functools import reduce 5 | import juliacall as jc 6 | 7 | import finch 8 | 9 | 10 | arr1d = np.array([1, 1, 2, 3]) 11 | arr2d = np.array([[1, 2, 0, 0], [0, 1, 0, 1]]) 12 | arr3d = 
np.array( 13 | [ 14 | [[0, 1, 0, 0], [1, 0, 0, 3]], 15 | [[4, 0, -1, 0], [2, 2, 0, 0]], 16 | [[0, 0, 0, 0], [1, 5, 0, 3]], 17 | ] 18 | ) 19 | 20 | 21 | @pytest.fixture( 22 | scope="module", 23 | params=[finch.DefaultScheduler(), finch.GalleyScheduler()], 24 | ids=["default", "galley"], 25 | ) 26 | def opt(request): 27 | finch.set_optimizer(request.param) 28 | yield request.param 29 | 30 | 31 | def test_eager(arr3d, opt): 32 | A_finch = finch.Tensor(arr3d) 33 | B_finch = finch.Tensor(arr2d) 34 | 35 | result = finch.multiply(A_finch, B_finch) 36 | 37 | assert_equal(result.todense(), np.multiply(arr3d, arr2d)) 38 | 39 | 40 | def test_lazy_mode(arr3d, opt): 41 | A_finch = finch.Tensor(arr3d) 42 | B_finch = finch.Tensor(arr2d) 43 | C_finch = finch.Tensor(arr1d) 44 | 45 | @finch.compiled(opt=opt) 46 | def my_custom_fun(arr1, arr2, arr3): 47 | temp = finch.multiply(arr1, arr2) 48 | temp = finch.divide(temp, arr3) 49 | reduced = finch.sum(temp, axis=(0, 1)) 50 | return finch.add(temp, reduced) 51 | 52 | result = my_custom_fun(A_finch, B_finch, C_finch) 53 | 54 | temp = np.divide(np.multiply(arr3d, arr2d), arr1d) 55 | expected = np.add(temp, np.sum(temp, axis=(0, 1))) 56 | assert_equal(result.todense(), expected) 57 | 58 | A_lazy = finch.lazy(A_finch) 59 | B_lazy = finch.lazy(B_finch) 60 | mul_lazy = finch.multiply(A_lazy, B_lazy) 61 | result = finch.compute(mul_lazy) 62 | 63 | assert_equal(result.todense(), np.multiply(arr3d, arr2d)) 64 | 65 | 66 | def test_lazy_mode_mult_output(opt): 67 | A_finch = finch.Tensor(arr1d) 68 | B_finch = finch.Tensor(arr2d) 69 | 70 | @finch.compiled(opt=opt) 71 | def mult_out_fun(arr1, arr2): 72 | out1 = finch.add(arr1, arr2) 73 | out2 = finch.multiply(arr1, arr2) 74 | out3 = arr2 ** finch.asarray(2) 75 | return out1, out2, out3 76 | 77 | res1, res2, res3 = mult_out_fun(A_finch, B_finch) 78 | 79 | assert_equal(res1.todense(), np.add(arr1d, arr2d)) 80 | assert_equal(res2.todense(), np.multiply(arr1d, arr2d)) 81 | assert_equal(res3.todense(), arr2d**2) 82 | 83 | 84 | def test_lazy_mode_heterogeneous_output(): 85 | A_finch = finch.Tensor(arr1d) 86 | B_finch = finch.Tensor(arr2d) 87 | 88 | @finch.compiled() 89 | def heterogeneous_fun(a: list[finch.Tensor], b: int): 90 | sum_a = reduce(lambda x1, x2: x1 + x2, a) 91 | b_squared = b**2 92 | return (a, sum_a, (b, "text"), {"key1": 12, "key2": b_squared}) 93 | 94 | ret = heterogeneous_fun([A_finch, B_finch], 3) 95 | 96 | assert type(ret) is tuple 97 | assert len(ret) == 4 98 | assert type(ret[0]) is list 99 | assert len(ret[0]) == 2 100 | assert_equal(ret[0][0].todense(), arr1d) 101 | assert_equal(ret[0][1].todense(), arr2d) 102 | assert_equal(ret[1].todense(), arr1d + arr2d) 103 | assert ret[2] == (3, "text") 104 | assert type(ret[3]) is dict 105 | assert ret[3] == {"key1": 12, "key2": 9} 106 | 107 | 108 | @pytest.mark.parametrize( 109 | "func_name", 110 | [ 111 | "log", 112 | "log10", 113 | "log1p", 114 | "log2", 115 | "sqrt", 116 | "sign", 117 | "round", 118 | "exp", 119 | "expm1", 120 | "floor", 121 | "ceil", 122 | "isnan", 123 | "isfinite", 124 | "isinf", 125 | "square", 126 | "trunc", 127 | ], 128 | ) 129 | def test_elemwise_ops_1_arg(arr3d, func_name, opt): 130 | arr = arr3d + 1.6 131 | A_finch = finch.Tensor(arr) 132 | 133 | actual = getattr(finch, func_name)(A_finch) 134 | expected = getattr(np, func_name)(arr) 135 | 136 | assert_allclose(actual.todense(), expected) 137 | 138 | 139 | @pytest.mark.parametrize("func_name", ["real", "imag", "conj"]) 140 | @pytest.mark.parametrize("dtype", [np.complex128, np.complex64, 
np.float64, np.int64]) 141 | def test_elemwise_complex_ops_1_arg(func_name, dtype, opt): 142 | arr = np.asarray([[1 + 1j, 2 + 2j], [3 + 3j, 4 - 4j], [-5 - 5j, -6 - 6j]]).astype( 143 | dtype 144 | ) 145 | arr_finch = finch.asarray(arr) 146 | 147 | actual = getattr(finch, func_name)(arr_finch) 148 | expected = getattr(np, func_name)(arr) 149 | 150 | assert_allclose(actual.todense(), expected) 151 | assert actual.todense().dtype == expected.dtype 152 | 153 | 154 | @pytest.mark.parametrize( 155 | "meth_name", 156 | ["__pos__", "__neg__", "__abs__", "__invert__"], 157 | ) 158 | def test_elemwise_tensor_ops_1_arg(arr3d, meth_name, opt): 159 | A_finch = finch.Tensor(arr3d) 160 | 161 | actual = getattr(A_finch, meth_name)() 162 | expected = getattr(arr3d, meth_name)() 163 | 164 | assert_equal(actual.todense(), expected) 165 | 166 | 167 | @pytest.mark.parametrize( 168 | "func_name", 169 | ["logaddexp", "logical_and", "logical_or", "logical_xor"], 170 | ) 171 | def test_elemwise_ops_2_args(arr3d, func_name, opt): 172 | arr2d = np.array([[0, 3, 2, 0], [0, 0, 3, 2]]) 173 | if func_name.startswith("logical"): 174 | arr3d = arr3d.astype(bool) 175 | arr2d = arr2d.astype(bool) 176 | A_finch = finch.Tensor(arr3d) 177 | B_finch = finch.Tensor(arr2d) 178 | 179 | actual = getattr(finch, func_name)(A_finch, B_finch) 180 | expected = getattr(np, func_name)(arr3d, arr2d) 181 | 182 | assert_allclose(actual.todense(), expected) 183 | 184 | 185 | @pytest.mark.parametrize( 186 | "meth_name", 187 | [ 188 | "__add__", 189 | "__mul__", 190 | "__sub__", 191 | "__truediv__", 192 | "__floordiv__", 193 | "__mod__", 194 | "__pow__", 195 | "__and__", 196 | "__or__", 197 | "__xor__", 198 | "__lshift__", 199 | "__rshift__", 200 | "__lt__", 201 | "__le__", 202 | "__gt__", 203 | "__ge__", 204 | "__eq__", 205 | "__ne__", 206 | ], 207 | ) 208 | def test_elemwise_tensor_ops_2_args(arr3d, meth_name, opt): 209 | arr2d = np.array([[2, 3, 2, 3], [3, 2, 3, 2]]) 210 | A_finch = finch.Tensor(arr3d) 211 | B_finch = finch.Tensor(arr2d) 212 | 213 | actual = getattr(A_finch, meth_name)(B_finch) 214 | expected = getattr(arr3d, meth_name)(arr2d) 215 | 216 | assert_equal(actual.todense(), expected) 217 | 218 | 219 | @pytest.mark.parametrize("func_name", ["sum", "prod", "max", "min", "any", "all", "mean", "std", "var"]) 220 | @pytest.mark.parametrize("axis", [None, -1, 1, (0, 1), (0, 1, 2)]) 221 | def test_reductions(arr3d, func_name, axis, opt): 222 | A_finch = finch.Tensor(arr3d) 223 | 224 | actual = getattr(finch, func_name)(A_finch, axis=axis) 225 | expected = getattr(np, func_name)(arr3d, axis=axis) 226 | 227 | assert_equal(actual.todense(), expected) 228 | 229 | @pytest.mark.parametrize("func_name", ["argmax", "argmin"]) 230 | @pytest.mark.parametrize("axis", [None, -1, 1, 2, (0, 1, 2)]) 231 | def test_arg_reductions(arr3d, func_name, axis, opt): 232 | A_finch = finch.Tensor(arr3d) 233 | 234 | actual = getattr(finch, func_name)(A_finch, axis=axis) 235 | expected = getattr(np, func_name)(arr3d, axis=axis) 236 | 237 | assert_equal(actual.todense(), expected) 238 | 239 | @pytest.mark.parametrize("axis", [-1, 1, (0, 1), (0, 1, 2)]) 240 | def test_expand_dims_squeeze(arr3d, axis, opt): 241 | A_finch = finch.Tensor(arr3d) 242 | 243 | actual = finch.expand_dims(A_finch, axis=axis) 244 | expected = np.expand_dims(arr3d, axis=axis) 245 | 246 | assert_equal(actual.todense(), expected) 247 | 248 | actual = finch.squeeze(actual, axis=axis) 249 | expected = np.squeeze(expected, axis=axis) 250 | 251 | assert_equal(actual.todense(), expected) 252 | 253 | 
@pytest.mark.parametrize("offset", [-1, 0, 1]) 254 | def test_diagonal_2d_array(offset, opt): 255 | arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) 256 | A_finch = finch.Tensor(arr2d) 257 | 258 | actual = finch.diagonal(A_finch, offset=offset) 259 | expected = np.diagonal(arr2d, offset=offset) 260 | 261 | assert_equal(actual.todense(), expected) 262 | 263 | 264 | @pytest.mark.parametrize("offset", [-1, 0, 1]) 265 | def test_diagonal_high_dimensional_array(offset, opt): 266 | arr_high_dim = np.random.rand(4, 3, 5, 6) 267 | A_finch = finch.Tensor(arr_high_dim) 268 | 269 | actual = finch.diagonal(A_finch, offset=offset) 270 | expected = np.diagonal(arr_high_dim, offset=offset) 271 | 272 | assert_equal(actual.todense(), expected) 273 | 274 | 275 | @pytest.mark.parametrize("func_name", ["sum", "prod"]) 276 | @pytest.mark.parametrize("axis", [None, 0, 1]) 277 | @pytest.mark.parametrize( 278 | "in_dtype, dtype, expected_dtype", 279 | [ 280 | (finch.int64, None, np.int64), 281 | (finch.int16, None, np.int64), 282 | (finch.uint8, None, np.uint64), 283 | (finch.int64, finch.float32, np.float32), 284 | (finch.float64, finch.complex128, np.complex128), 285 | ], 286 | ) 287 | def test_sum_prod_dtype_arg( 288 | arr3d, func_name, axis, in_dtype, dtype, expected_dtype, opt 289 | ): 290 | arr_finch = finch.asarray(np.abs(arr3d), dtype=in_dtype) 291 | 292 | actual = getattr(finch, func_name)(arr_finch, axis=axis, dtype=dtype).todense() 293 | 294 | assert actual.dtype == expected_dtype 295 | 296 | 297 | @pytest.mark.parametrize( 298 | "storage", 299 | [ 300 | None, 301 | ( 302 | finch.Storage(finch.SparseList(finch.Element(np.int64(0))), order="C"), 303 | finch.Storage( 304 | finch.Dense(finch.SparseList(finch.Element(np.int64(0)))), order="C" 305 | ), 306 | finch.Storage( 307 | finch.Dense( 308 | finch.SparseList(finch.SparseList(finch.Element(np.int64(0)))) 309 | ), 310 | order="C", 311 | ), 312 | ), 313 | ], 314 | ) 315 | def test_tensordot(arr3d, storage, opt): 316 | A_finch = finch.Tensor(arr1d) 317 | B_finch = finch.Tensor(arr2d) 318 | C_finch = finch.Tensor(arr3d) 319 | if storage is not None: 320 | A_finch = A_finch.to_storage(storage[0]) 321 | B_finch = B_finch.to_storage(storage[1]) 322 | C_finch = C_finch.to_storage(storage[2]) 323 | 324 | actual = finch.tensordot(B_finch, B_finch) 325 | expected = np.tensordot(arr2d, arr2d) 326 | assert_equal(actual.todense(), expected) 327 | 328 | actual = finch.tensordot(B_finch, B_finch, axes=(1, 1)) 329 | expected = np.tensordot(arr2d, arr2d, axes=(1, 1)) 330 | assert_equal(actual.todense(), expected) 331 | 332 | actual = finch.tensordot( 333 | C_finch, finch.permute_dims(C_finch, (2, 1, 0)), axes=((2, 0), (0, 2)) 334 | ) 335 | expected = np.tensordot(arr3d, arr3d.T, axes=((2, 0), (0, 2))) 336 | assert_equal(actual.todense(), expected) 337 | 338 | actual = finch.tensordot(C_finch, A_finch, axes=(2, 0)) 339 | expected = np.tensordot(arr3d, arr1d, axes=(2, 0)) 340 | assert_equal(actual.todense(), expected) 341 | 342 | 343 | @pytest.mark.parametrize( 344 | ("a", "b"), 345 | [ 346 | (arr2d, arr2d.mT), 347 | (arr2d, arr3d.mT), 348 | (arr2d.mT, arr3d), 349 | (arr3d, arr3d.mT), 350 | (arr1d, arr1d), 351 | (arr1d, arr2d.mT), 352 | (arr2d, arr1d), 353 | (arr1d, arr3d.mT), 354 | (arr3d, arr1d), 355 | ], 356 | ) 357 | def test_matmul(opt, a: np.ndarray, b: np.ndarray): 358 | A_finch = finch.Tensor(a) 359 | B_finch = finch.Tensor(b) 360 | 361 | expected = a @ b 362 | actual = A_finch @ B_finch 363 | 364 | assert_equal(actual.todense(), expected) 365 | 366 | if a.ndim 
>= 2 and b.ndim >= 2: 367 | At_finch = A_finch.mT 368 | Bt_finch = B_finch.mT 369 | 370 | assert_equal((Bt_finch @ At_finch).todense(), expected.mT) 371 | 372 | 373 | def test_matmul_dimension_mismatch(opt): 374 | A_finch = finch.Tensor(arr2d) 375 | B_finch = finch.Tensor(arr3d) 376 | 377 | with pytest.raises(jc.JuliaError, match="DimensionMismatch"): 378 | A_finch @ B_finch 379 | 380 | 381 | def test_negative__mod__(opt): 382 | arr = np.array([-1, 0, 0, -2, -3, 0]) 383 | arr_finch = finch.asarray(arr) 384 | 385 | actual = arr_finch % 5 386 | expected = arr % 5 387 | assert_equal(actual.todense(), expected) 388 | 389 | 390 | @pytest.mark.parametrize("force_materialization", [False, True]) 391 | def test_recursive_compiled( 392 | opt, force_materialization: bool, arr3d: np.ndarray 393 | ) -> None: 394 | decorator = finch.compiled(opt=opt, force_materialization=force_materialization) 395 | 396 | @decorator 397 | def my_custom_fun_inner( 398 | arr1: finch.Tensor, arr2: finch.Tensor, arr3: finch.Tensor 399 | ) -> finch.Tensor: 400 | temp = finch.multiply(arr1, arr2) 401 | temp = finch.divide(temp, arr3) 402 | reduced = finch.sum(temp, axis=(0, 1)) 403 | return finch.add(temp, reduced) 404 | 405 | @decorator 406 | def my_custom_fun_outer( 407 | arr1: finch.Tensor, arr2: finch.Tensor, arr3: finch.Tensor 408 | ) -> finch.Tensor: 409 | arr = my_custom_fun_inner(arr1, arr2, arr3) 410 | assert arr.is_computed() == force_materialization 411 | return arr 412 | 413 | A_finch = finch.Tensor(arr3d) 414 | B_finch = finch.Tensor(arr2d) 415 | C_finch = finch.Tensor(arr1d) 416 | 417 | result = my_custom_fun_outer(A_finch, B_finch, C_finch) 418 | assert result.is_computed() 419 | -------------------------------------------------------------------------------- /tests/test_scipy_constructors.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_equal 3 | import pytest 4 | import scipy.sparse as sp 5 | 6 | import finch 7 | from finch.tensor import _eq_scalars 8 | 9 | 10 | def test_scipy_coo(arr2d): 11 | sp_arr = sp.coo_matrix(arr2d, dtype=np.int64) 12 | finch_arr = finch.Tensor(sp_arr) 13 | lvl = finch_arr._obj.body.lvl 14 | 15 | assert np.shares_memory(sp_arr.row, lvl.tbl[1].data) 16 | assert np.shares_memory(sp_arr.col, lvl.tbl[0].data) 17 | assert np.shares_memory(sp_arr.data, lvl.lvl.val) 18 | 19 | assert_equal(finch_arr.todense(), sp_arr.todense()) 20 | new_arr = finch.permute_dims(finch_arr, (1, 0)) 21 | assert_equal(new_arr.todense(), sp_arr.todense().transpose()) 22 | 23 | 24 | @pytest.mark.parametrize("cls", [sp.csc_matrix, sp.csr_matrix]) 25 | def test_scipy_compressed2d(arr2d, cls): 26 | sp_arr = cls(arr2d, dtype=np.int64) 27 | finch_arr = finch.Tensor(sp_arr) 28 | lvl = finch_arr._obj.body.lvl.lvl 29 | 30 | assert np.shares_memory(sp_arr.indices, lvl.idx.data) 31 | assert np.shares_memory(sp_arr.indptr, lvl.ptr.data) 32 | assert np.shares_memory(sp_arr.data, lvl.lvl.val) 33 | 34 | assert_equal(finch_arr.todense(), sp_arr.todense()) 35 | new_arr = finch.permute_dims(finch_arr, (1, 0)) 36 | assert_equal(new_arr.todense(), sp_arr.todense().transpose()) 37 | 38 | 39 | @pytest.mark.parametrize( 40 | "format_with_cls_with_order", 41 | [ 42 | ("coo", sp.coo_matrix, "C"), 43 | ("coo", sp.coo_matrix, "F"), 44 | ("csc", sp.csc_matrix, "F"), 45 | ("csr", sp.csr_matrix, "C"), 46 | ], 47 | ) 48 | @pytest.mark.parametrize("fill_value_in", [0, finch.inf, finch.nan, 5, None]) 49 | @pytest.mark.parametrize("fill_value_out", 
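# Editorial note: a tensor's fill_value is the implicit background entry of
# its sparse storage; to_scipy_sparse(accept_fv=...) in the test below only
# performs the conversion when the tensor's fill value is among the accepted
# values, and raises ValueError otherwise.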
[0, finch.inf, finch.nan, 5, None]) 50 | def test_to_scipy_sparse(format_with_cls_with_order, fill_value_in, fill_value_out): 51 | format, sp_class, order = format_with_cls_with_order 52 | np_arr = np.random.default_rng(0).random((4, 5)) 53 | np_arr = np.array(np_arr, order=order) 54 | 55 | finch_arr = finch.asarray(np_arr, format=format, fill_value=fill_value_in) 56 | 57 | if not ( 58 | fill_value_in in {0, None} and fill_value_out in {0, None} 59 | ) and not _eq_scalars(fill_value_in, fill_value_out): 60 | match_fill_value_out = 0 if fill_value_out is None else fill_value_out 61 | with pytest.raises( 62 | ValueError, 63 | match=rf"Can only convert arrays with \[{match_fill_value_out}\] fill-values " 64 | "to a Scipy sparse matrix.", 65 | ): 66 | finch_arr.to_scipy_sparse(accept_fv=fill_value_out) 67 | return 68 | 69 | actual = finch_arr.to_scipy_sparse(accept_fv=fill_value_out) 70 | 71 | assert isinstance(actual, sp_class) 72 | assert_equal(actual.todense(), np_arr) 73 | 74 | 75 | def test_to_scipy_sparse_invalid_input(): 76 | finch_arr = finch.asarray(np.ones((3, 3, 3)), format="dense") 77 | 78 | with pytest.raises(ValueError, match="Can only convert a 2-dimensional array"): 79 | finch_arr.to_scipy_sparse() 80 | 81 | finch_arr = finch.asarray(np.ones((3, 4)), format="dense") 82 | 83 | with pytest.raises( 84 | ValueError, match="Tensor can't be converted to scipy.sparse object" 85 | ): 86 | finch_arr.to_scipy_sparse() 87 | 88 | 89 | @pytest.mark.parametrize( 90 | "format_with_pattern", 91 | [ 92 | ("coo", "SparseCOO"), 93 | ("csr", "SparseList"), 94 | ("csc", "SparseList"), 95 | ("bsr", "SparseCOO"), 96 | ("dok", "SparseCOO"), 97 | ], 98 | ) 99 | @pytest.mark.parametrize("fill_value", [0, finch.inf, finch.nan, 5, None]) 100 | def test_from_scipy_sparse(format_with_pattern, fill_value): 101 | format, pattern = format_with_pattern 102 | sp_arr = sp.random(10, 5, density=0.1, format=format) 103 | 104 | result = finch.Tensor.from_scipy_sparse(sp_arr, fill_value=fill_value) 105 | assert pattern in str(result) 106 | fill_value = 0 if fill_value is None else fill_value 107 | assert _eq_scalars(result.fill_value, fill_value) 108 | 109 | 110 | @pytest.mark.parametrize("format", ["coo", "bsr"]) 111 | def test_non_canonical_format(format): 112 | sp_arr = sp.random(3, 4, density=0.5, format=format) 113 | 114 | with pytest.raises( 115 | ValueError, match="Unable to avoid copy while creating an array" 116 | ): 117 | finch.asarray(sp_arr, copy=False) 118 | 119 | finch_arr = finch.asarray(sp_arr) 120 | assert_equal(finch_arr.todense(), sp_arr.toarray()) 121 | -------------------------------------------------------------------------------- /tests/test_sparse.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_equal 3 | import pytest 4 | import sparse 5 | 6 | import finch 7 | 8 | parametrize_optimizer = pytest.mark.parametrize( 9 | "opt", [finch.DefaultScheduler(), finch.GalleyScheduler()] 10 | ) 11 | 12 | 13 | @pytest.mark.parametrize( 14 | "dtype,jl_dtype", 15 | [ 16 | (np.int64, finch.int64), 17 | (np.float64, finch.float64), 18 | (np.complex128, finch.complex128), 19 | ], 20 | ) 21 | @pytest.mark.parametrize("order", ["C", "F", None]) 22 | def test_wrappers(dtype, jl_dtype, order): 23 | A = np.array([[0, 0, 4], [1, 0, 0], [2, 0, 5], [3, 0, 0]], dtype=dtype, order=order) 24 | B = np.array(np.stack([A, A], axis=2, dtype=dtype), order=order) 25 | 26 | B_finch = finch.Tensor(B) 27 | 28 | storage = finch.Storage( 
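# Editorial note: Storage pairs a nested level description with a dimension
# order; here a Dense outermost level over two SparseList levels (with a zero
# Element fill value) gives a CSF-style layout for the 3-D tensor B.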
29 | finch.Dense(finch.SparseList(finch.SparseList(finch.Element(dtype(0.0))))), 30 | order=order, 31 | ) 32 | B_finch = B_finch.to_storage(storage) 33 | 34 | assert B_finch.shape == B.shape 35 | assert B_finch.dtype == jl_dtype 36 | assert_equal(B_finch.todense(), B) 37 | 38 | storage = finch.Storage( 39 | finch.Dense(finch.Dense(finch.Element(dtype(1.0)))), order=order 40 | ) 41 | A_finch = finch.Tensor(A).to_storage(storage) 42 | 43 | assert A_finch.shape == A.shape 44 | assert A_finch.dtype == jl_dtype 45 | assert_equal(A_finch.todense(), A) 46 | assert A_finch.todense().dtype == A.dtype and B_finch.todense().dtype == B.dtype 47 | 48 | 49 | @pytest.mark.parametrize("dtype", [np.int64, np.float64, np.complex128]) 50 | @pytest.mark.parametrize("order", ["C", "F", None]) 51 | @pytest.mark.parametrize("copy", [True, False, None]) 52 | def test_copy_fully_dense(dtype, order, copy, arr3d): 53 | arr = np.array(arr3d, dtype=dtype, order=order) 54 | arr_finch = finch.Tensor(arr, copy=copy) 55 | arr_todense = arr_finch.todense() 56 | 57 | assert_equal(arr_todense, arr) 58 | if copy: 59 | assert not np.shares_memory(arr_todense, arr) 60 | else: 61 | assert np.shares_memory(arr_todense, arr) 62 | 63 | 64 | def test_coo(rng): 65 | coords = ( 66 | np.asarray([0, 1, 2, 3, 4], dtype=np.intp), 67 | np.asarray([0, 1, 2, 3, 4], dtype=np.intp), 68 | ) 69 | data = rng.random(5) 70 | 71 | arr_pydata = sparse.COO(np.vstack(coords), data, shape=(5, 5)) 72 | arr = arr_pydata.todense() 73 | arr_finch = finch.Tensor.construct_coo(coords, data, shape=(5, 5)) 74 | 75 | assert_equal(arr_finch.todense(), arr) 76 | assert arr_finch.todense().dtype == data.dtype 77 | 78 | 79 | @pytest.mark.parametrize( 80 | "classes", 81 | [ 82 | (sparse._compressed.CSC, finch.Tensor.construct_csc), 83 | (sparse._compressed.CSR, finch.Tensor.construct_csr), 84 | ], 85 | ) 86 | def test_compressed2d(rng, classes): 87 | sparse_class, finch_class = classes 88 | indices, indptr, data = np.arange(5), np.arange(6), rng.random(5) 89 | 90 | arr_pydata = sparse_class((data, indices, indptr), shape=(5, 5)) 91 | arr = arr_pydata.todense() 92 | arr_finch = finch_class((data, indices, indptr), shape=(5, 5)) 93 | 94 | assert_equal(arr_finch.todense(), arr) 95 | assert arr_finch.todense().dtype == data.dtype 96 | 97 | 98 | def test_csf(arr3d): 99 | arr = arr3d 100 | dtype = np.int64 101 | 102 | data = np.array([4, 1, 2, 1, 1, 2, 5, -1, 3, 3], dtype=dtype) 103 | indices_list = [ 104 | np.array([1, 0, 1, 2, 0, 1, 2, 1, 0, 2], dtype=dtype), 105 | np.array([0, 1, 0, 1, 0, 1], dtype=dtype), 106 | ] 107 | indptr_list = [ 108 | np.array([0, 1, 4, 5, 7, 8, 10], dtype=dtype), 109 | np.array([0, 2, 4, 5, 6], dtype=dtype), 110 | ] 111 | 112 | arr_finch = finch.Tensor.construct_csf( 113 | (data, indices_list, indptr_list), shape=(3, 2, 4) 114 | ) 115 | 116 | assert_equal(arr_finch.todense(), arr) 117 | assert arr_finch.todense().dtype == data.dtype 118 | 119 | 120 | @pytest.mark.parametrize( 121 | "permutation", 122 | [(0, 1, 2), (2, 1, 0), (0, 2, 1), (1, 2, 0), (2, 0, 1), (1, 0, 2)], 123 | ) 124 | @pytest.mark.parametrize( 125 | "format", 126 | [ 127 | finch.Dense(finch.SparseList(finch.SparseList(finch.Element(0)))), 128 | finch.Dense(finch.Dense(finch.Dense(finch.Element(0)))), 129 | ], 130 | ) 131 | @pytest.mark.parametrize("order", ["C", "F"]) 132 | @parametrize_optimizer 133 | def test_permute_dims(arr3d, permutation, format, order, opt): 134 | finch.set_optimizer(opt) 135 | arr = np.array(arr3d, order=order) 136 | storage = finch.Storage(format, 
order=order) 137 | 138 | arr_finch = finch.Tensor(arr).to_storage(storage) 139 | 140 | actual_eager_mode = finch.permute_dims(arr_finch, permutation) 141 | actual_lazy_mode = finch.compute( 142 | finch.permute_dims(finch.lazy(arr_finch), permutation) 143 | ) 144 | expected = np.transpose(arr, permutation) 145 | 146 | assert_equal(actual_eager_mode.todense(), expected) 147 | assert_equal(actual_lazy_mode.todense(), expected) 148 | 149 | actual_eager_mode = finch.permute_dims(actual_eager_mode, permutation) 150 | actual_lazy_mode = finch.compute( 151 | finch.permute_dims(finch.lazy(actual_lazy_mode), permutation) 152 | ) 153 | expected = np.transpose(expected, permutation) 154 | 155 | assert_equal(actual_eager_mode.todense(), expected) 156 | assert_equal(actual_lazy_mode.todense(), expected) 157 | 158 | # test `.mT` 159 | actual_eager_mode = arr_finch.mT 160 | actual_lazy_mode = finch.compute(finch.lazy(arr_finch).mT) 161 | expected = arr.mT 162 | 163 | assert_equal(actual_eager_mode.todense(), expected) 164 | assert_equal(actual_lazy_mode.todense(), expected) 165 | 166 | 167 | @pytest.mark.parametrize( 168 | "src_dest", [(0, 1), (1, 0), (-1, 2), (-2, -1), (1, 1)] 169 | ) 170 | @parametrize_optimizer 171 | def test_moveaxis(arr3d, src_dest, opt): 172 | finch.set_optimizer(opt) 173 | src, dest = src_dest 174 | arr_finch = finch.Tensor(arr3d) 175 | 176 | actual = finch.moveaxis(arr_finch, src, dest) 177 | expected = np.moveaxis(arr3d, src, dest) 178 | assert_equal(actual.todense(), expected) 179 | 180 | 181 | @pytest.mark.parametrize("order", ["C", "F"]) 182 | @parametrize_optimizer 183 | def test_astype(arr3d, order, opt): 184 | finch.set_optimizer(opt) 185 | arr = np.array(arr3d, order=order, dtype=np.int64) 186 | storage = finch.Storage( 187 | finch.Dense(finch.SparseList(finch.SparseList(finch.Element(np.int64(0))))), 188 | order=order, 189 | ) 190 | arr_finch = finch.Tensor(arr).to_storage(storage) 191 | 192 | result = finch.astype(arr_finch, finch.int64) 193 | assert result is not arr_finch 194 | result = result.todense() 195 | assert_equal(result, arr) 196 | assert result.dtype == arr.dtype 197 | 198 | result = finch.astype(arr_finch, finch.int64, copy=False) 199 | assert result is arr_finch 200 | result = result.todense() 201 | assert_equal(result, arr) 202 | assert result.dtype == arr.dtype 203 | 204 | result = finch.astype(arr_finch, finch.float32).todense() 205 | arr = arr.astype(np.float32) 206 | assert_equal(result, arr) 207 | assert result.dtype == arr.dtype 208 | 209 | with pytest.raises( 210 | ValueError, match="Unable to avoid a copy while casting in no-copy mode." 
211 | ): 212 | finch.astype(arr_finch, finch.float64, copy=False) 213 | 214 | 215 | @pytest.mark.parametrize("random_state", [42, np.random.default_rng(42)]) 216 | @parametrize_optimizer 217 | def test_random(random_state, opt): 218 | finch.set_optimizer(opt) 219 | result = finch.random((10, 20, 30), density=0.0, random_state=random_state) 220 | expected = sparse.random((10, 20, 30), density=0.0, random_state=random_state) 221 | 222 | assert_equal(result.todense(), expected.todense()) 223 | 224 | # test reproducible runs 225 | run1 = finch.random((20, 20), density=0.8, random_state=0) 226 | run2 = finch.random((20, 20), density=0.8, random_state=0) 227 | run3 = finch.random((20, 20), density=0.8, random_state=0) 228 | assert_equal(run1.todense(), run2.todense()) 229 | assert_equal(run1.todense(), run3.todense()) 230 | 231 | 232 | @pytest.mark.parametrize("order", ["C", "F"]) 233 | @pytest.mark.parametrize("format", ["coo", "csr", "csc", "csf", "dense", None]) 234 | @parametrize_optimizer 235 | def test_asarray(arr2d, arr3d, order, format, opt): 236 | finch.set_optimizer(opt) 237 | arr = arr3d if format == "csf" else arr2d 238 | arr = np.array(arr, order=order) 239 | arr_finch = finch.Tensor(arr) 240 | 241 | result = finch.asarray(arr_finch, format=format) 242 | assert_equal(result.todense(), arr) 243 | 244 | 245 | @pytest.mark.parametrize( 246 | "arr,new_shape", 247 | [ 248 | (np.arange(10), (2, 5)), 249 | (np.ones((10, 10)), (100,)), 250 | (np.ones((3, 4, 5)), (5, 2, 2, 3)), 251 | (np.arange(1), (1, 1, 1, 1)), 252 | (np.arange(1).reshape((1, 1, 1)), (1,)), 253 | (np.arange(1).reshape((1, 1)), ()), 254 | (np.zeros((10, 1, 2)), (1, 5, 4, 1)), 255 | (np.int64(0), ()), 256 | (np.int64(0), (1, 1)), 257 | ], 258 | ) 259 | @pytest.mark.parametrize("order", ["C", "F"]) 260 | @parametrize_optimizer 261 | def test_reshape(arr, new_shape, order, opt): 262 | finch.set_optimizer(opt) 263 | 264 | arr = np.array(arr, order=order) 265 | arr_finch = finch.Tensor(arr) 266 | 267 | res = finch.reshape(arr_finch, new_shape) 268 | assert_equal(res.todense(), arr.reshape(new_shape)) 269 | 270 | 271 | @pytest.mark.parametrize("shape", [10, (3, 3), (2, 1, 5)]) 272 | @pytest.mark.parametrize("dtype_name", [None, "int64", "float64"]) 273 | @pytest.mark.parametrize("format", ["coo", "dense"]) 274 | @parametrize_optimizer 275 | def test_full_ones_zeros_empty(shape, dtype_name, format, opt): 276 | finch.set_optimizer(opt) 277 | 278 | jl_dtype = getattr(finch, dtype_name) if dtype_name is not None else None 279 | np_dtype = getattr(np, dtype_name) if dtype_name is not None else None 280 | 281 | res = finch.full(shape, 2.0, dtype=jl_dtype, format=format) 282 | assert_equal(res.todense(), np.full(shape, 2.0, np_dtype)) 283 | res = finch.full_like(res, 3.0, dtype=jl_dtype, format=format) 284 | assert_equal(res.todense(), np.full(shape, 3.0, np_dtype)) 285 | 286 | res = finch.ones(shape, dtype=jl_dtype, format=format) 287 | assert_equal(res.todense(), np.ones(shape, np_dtype)) 288 | res = finch.ones_like(res, dtype=jl_dtype, format=format) 289 | assert_equal(res.todense(), np.ones(shape, np_dtype)) 290 | 291 | res = finch.zeros(shape, dtype=jl_dtype, format=format) 292 | assert_equal(res.todense(), np.zeros(shape, np_dtype)) 293 | res = finch.zeros_like(res, dtype=jl_dtype, format=format) 294 | assert_equal(res.todense(), np.zeros(shape, np_dtype)) 295 | 296 | res = finch.empty(shape, dtype=jl_dtype, format=format) 297 | assert_equal(res.todense(), np.empty(shape, np_dtype)) 298 | res = finch.empty_like(res, 
dtype=jl_dtype, format=format) 299 | assert_equal(res.todense(), np.empty(shape, np_dtype)) 300 | 301 | 302 | @pytest.mark.parametrize("func,arg", [(finch.asarray, np.zeros(3)), (finch.zeros, 3)]) 303 | def test_device_keyword(func, arg): 304 | func(arg, device="cpu") 305 | 306 | with pytest.raises( 307 | ValueError, 308 | match='Device not understood. Only "cpu" is allowed, but received: cuda', 309 | ): 310 | func(arg, device="cuda") 311 | 312 | 313 | @pytest.mark.parametrize( 314 | "order_and_format", 315 | [("C", None), ("F", None), ("C", "coo"), ("F", "coo"), ("F", "csc")], 316 | ) 317 | @parametrize_optimizer 318 | def test_where(order_and_format, opt): 319 | finch.set_optimizer(opt) 320 | 321 | order, format = order_and_format 322 | cond = np.array( 323 | [ 324 | [True, False, False, False], 325 | [False, True, True, False], 326 | [True, False, True, True], 327 | ], 328 | order=order, 329 | ) 330 | arr1 = np.array([[0, 0, 0, 1], [0, 2, 0, 3], [1, 0, 0, 5]], order=order) 331 | arr2 = np.array([10, 20, 30, 40], order=order) 332 | 333 | tns_cond = finch.asarray(cond, format=format) 334 | arr1_cond = finch.asarray(arr1, format=format) 335 | arr2_cond = finch.asarray(arr2) 336 | 337 | actual = finch.where(tns_cond, arr1_cond, arr2_cond) 338 | expected = np.where(cond, arr1, arr2) 339 | 340 | assert_equal(actual.todense(), expected) 341 | 342 | 343 | @pytest.mark.parametrize("order", ["C", "F"]) 344 | @pytest.mark.parametrize( 345 | "format_shape", 346 | [ 347 | ("coo", (80,)), 348 | ("coo", (10, 5, 8)), 349 | ("csf", (10, 5, 8)), 350 | ("csr", (5, 10)), 351 | ("csc", (5, 10)), 352 | ], 353 | ) 354 | @parametrize_optimizer 355 | def test_nonzero(order, format_shape, opt): 356 | finch.set_optimizer(opt) 357 | 358 | format, shape = format_shape 359 | rng = np.random.default_rng(0) 360 | arr = rng.random(shape) 361 | arr = np.array(arr, order=order) 362 | mask = arr < 0.8 363 | arr[mask] = 0.0 364 | 365 | tns = finch.asarray(arr, format=format) 366 | 367 | actual = finch.nonzero(tns) 368 | expected = np.nonzero(arr) 369 | for actual_i, expected_i in zip(actual, expected): 370 | assert_equal(actual_i.todense(), expected_i) 371 | 372 | 373 | @pytest.mark.parametrize("dtype_name", ["int64", "float64", "complex128"]) 374 | @pytest.mark.parametrize("k", [0, -1, 1, -2, 2]) 375 | @pytest.mark.parametrize("format", ["coo", "dense"]) 376 | @parametrize_optimizer 377 | def test_eye(dtype_name, k, format, opt): 378 | finch.set_optimizer(opt) 379 | 380 | result = finch.eye(3, 4, k=k, dtype=getattr(finch, dtype_name), format=format) 381 | expected = np.eye(3, 4, k=k, dtype=getattr(np, dtype_name)) 382 | 383 | assert_equal(result.todense(), expected) 384 | 385 | 386 | @parametrize_optimizer 387 | def test_to_scalar(opt): 388 | finch.set_optimizer(opt) 389 | 390 | for obj, meth_name in [ 391 | (True, "__bool__"), 392 | (1, "__int__"), 393 | (1.0, "__float__"), 394 | (1, "__index__"), 395 | (1 + 1j, "__complex__"), 396 | ]: 397 | tns = finch.asarray(np.asarray(obj)) 398 | assert getattr(tns, meth_name)() == obj 399 | 400 | tns = finch.asarray(np.ones((2, 2))) 401 | with pytest.raises( 402 | ValueError, match=" can be computed for one-element tensors only." 
403 | ): 404 | tns.__int__() 405 | 406 | 407 | @pytest.mark.parametrize("dtype_name", [None, "int16", "float64"]) 408 | @parametrize_optimizer 409 | def test_arange_linspace(dtype_name, opt): 410 | finch.set_optimizer(opt) 411 | 412 | if dtype_name is not None: 413 | finch_dtype = getattr(finch, dtype_name) 414 | np_dtype = getattr(np, dtype_name) 415 | else: 416 | finch_dtype = np_dtype = None 417 | 418 | result = finch.arange(10, 100, 5, dtype=finch_dtype) 419 | expected = np.arange(10, 100, 5, dtype=np_dtype) 420 | assert_equal(result.todense(), expected) 421 | 422 | result = finch.linspace(20, 80, 10, dtype=finch_dtype) 423 | expected = np.linspace(20, 80, 10, dtype=np_dtype) 424 | assert_equal(result.todense(), expected) 425 | --------------------------------------------------------------------------------
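Usage note: the test suites above exercise the package end to end. The short sketch below is an editorial summary of that workflow rather than repository code; it uses only names that appear in the files above (finch.Tensor, finch.Storage, finch.Dense, finch.SparseList, finch.Element, finch.asarray, finch.lazy, finch.compute, todense) and assumes finch-tensor and its Julia dependencies are installed.

import numpy as np

import finch

# Wrap a NumPy array and move it into a sparse layout: a Dense outer level
# over a SparseList of Elements with a fill value of 0.
arr = np.array([[0, 1, 0, 2], [3, 0, 0, 0]])
storage = finch.Storage(finch.Dense(finch.SparseList(finch.Element(0))), order="C")
tns = finch.Tensor(arr).to_storage(storage)

# Eager mode: operations execute immediately.
doubled = finch.multiply(tns, finch.asarray(2))
assert np.array_equal(doubled.todense(), arr * 2)

# Lazy mode: build an expression graph first, then compute it in one pass.
tns_lazy = finch.lazy(tns)
result = finch.compute(finch.sum(finch.multiply(tns_lazy, tns_lazy), axis=0))
assert np.array_equal(result.todense(), (arr * arr).sum(axis=0))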