├── .dockerignore ├── .flake8 ├── .github ├── dependabot.yml └── workflows │ ├── lint.yml │ ├── test.yml │ ├── update_graphblas.yml │ └── wheels.yml ├── .gitignore ├── .pre-commit-config.yaml ├── Dockerfile ├── GB_VERSION.txt ├── LICENSE ├── MANIFEST.in ├── README.md ├── build_graphblas_cffi.py ├── continuous_integration └── environment.yml ├── docker_build.sh ├── latest_suitesparse_graphblas_version.py ├── pyproject.toml ├── setup.py ├── suitesparse.sh └── suitesparse_graphblas ├── __init__.py ├── create_headers.py ├── exceptions.py ├── io ├── __init__.py ├── binary.py └── serialize.py ├── matrix.py ├── scalar.py ├── source.c ├── suitesparse_graphblas.h ├── suitesparse_graphblas_no_complex.h ├── tests ├── __init__.py ├── conftest.py ├── test_doctest.py ├── test_exceptions.py ├── test_initialize.py ├── test_io.py ├── test_jit.py ├── test_package.py └── test_scalar.py ├── utils.pxd ├── utils.pyx └── vector.py /.dockerignore: -------------------------------------------------------------------------------- 1 | Dockerfile 2 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 100 3 | inline-quotes = " 4 | extend-ignore = 5 | E203, 6 | # E203 whitespace before ':' (to be compatible with black) 7 | per-file-ignores = 8 | suitesparse_graphblas/io/binary.py:C408, 9 | suitesparse_graphblas/tests/test_io.py:E721, 10 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: 'github-actions' 4 | directory: '/' 5 | schedule: 6 | interval: 'weekly' 7 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | 
name: Lint via pre-commit 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches-ignore: 7 | - main 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | pre-commit: 14 | name: pre-commit-hooks 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | - uses: actions/setup-python@v5 19 | with: 20 | python-version: "3.10" 21 | - uses: pre-commit/action@v3.0.1 22 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | 8 | jobs: 9 | test: 10 | runs-on: ${{ matrix.os }} 11 | defaults: 12 | run: 13 | shell: bash -l {0} 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | os: ["ubuntu-latest", "macos-latest", "windows-latest"] 18 | source: ["conda-forge"] 19 | # os: ["ubuntu-latest"] 20 | # source: ["source"] 21 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] 22 | steps: 23 | - name: Checkout 24 | uses: actions/checkout@v4 25 | with: 26 | fetch-depth: 0 27 | - name: Conda 28 | uses: conda-incubator/setup-miniconda@v3 29 | with: 30 | auto-update-conda: true 31 | python-version: ${{ matrix.python-version }} 32 | environment-file: continuous_integration/environment.yml 33 | channels: conda-forge,nodefaults 34 | channel-priority: strict 35 | activate-environment: suitesparse-graphblas 36 | auto-activate-base: false 37 | - name: GraphBLAS (from conda-forge) 38 | if: (contains(matrix.source, 'conda-forge')) 39 | run: | 40 | conda install graphblas=$(cat GB_VERSION.txt) 41 | - name: GraphBLAS (from source) 42 | if: (contains(matrix.source, 'source')) 43 | run: | 44 | # From release (also works with beta versions) 45 | GRAPHBLAS_PREFIX=${CONDA_PREFIX} bash suitesparse.sh refs/tags/$(cat GB_VERSION.txt).0 46 | 47 | # From tag 48 | # curl -L https://github.com/DrTimothyAldenDavis/GraphBLAS/archive/refs/tags/v$(cat 
GB_VERSION.txt).tar.gz | tar xzf - 49 | # pushd GraphBLAS-$(cat GB_VERSION.txt)/build 50 | 51 | # From branch 52 | # curl -L https://github.com/DrTimothyAldenDavis/GraphBLAS/tarball/$(cat GB_VERSION.txt) | tar xzf - 53 | # pushd DrTim*/build 54 | 55 | # echo ${CONDA_PREFIX} 56 | # cmake -DJITINIT=2 -DCMAKE_INSTALL_PREFIX=${CONDA_PREFIX} -DCMAKE_INSTALL_LIBDIR=lib -DCMAKE_BUILD_TYPE=Release .. 57 | # cat Makefile 58 | # make all JOBS=16 59 | # make install 60 | # popd 61 | - name: Build 62 | run: | 63 | pip install -e . --no-deps 64 | - name: Test 65 | env: 66 | CYTHON_COVERAGE: true 67 | run: | 68 | pytest -s -k test_print_jit_config 69 | coverage run --branch -m pytest 70 | coverage run -a --branch suitesparse_graphblas/tests/test_initialize.py 71 | - name: create_headers.py check 72 | if: (! contains(matrix.os, 'windows')) 73 | run: | 74 | coverage run -a --branch suitesparse_graphblas/create_headers.py 75 | git diff --exit-code # error if anything changed 76 | -------------------------------------------------------------------------------- /.github/workflows/update_graphblas.yml: -------------------------------------------------------------------------------- 1 | # Checks for latest SuiteSparse:GraphBLAS version on GitHub and creates a PR to update the version used by this repo. 2 | name: Check for GraphBLAS updates 3 | 4 | # In addition to permissions below, must explicitly allow GitHub Actions to create pull requests. 5 | # This setting can be found in a repository's settings under Actions > General > Workflow permissions. 6 | # https://github.com/peter-evans/create-pull-request#workflow-permissions 7 | permissions: 8 | contents: write 9 | pull-requests: write 10 | 11 | on: 12 | # Note: Workflow must run at least once to appear in workflows list. 13 | # Push to the bot's branch to trigger workflow. 
14 | push: 15 | branches: [ auto_update_gb_version ] 16 | 17 | # Note: Workflow must exist in main branch for workflow dispatch option to appear 18 | workflow_dispatch: 19 | 20 | # Enable cron to check for updates once a day: 21 | # schedule: 22 | # - cron: '0 0 * * *' 23 | 24 | jobs: 25 | gb_version_check: 26 | name: Check for GraphBLAS update 27 | if: github.repository == 'GraphBLAS/python-suitesparse-graphblas' || github.repository == 'alugowski/python-suitesparse-graphblas' 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: actions/checkout@v4 31 | with: 32 | fetch-depth: 0 33 | 34 | - uses: actions/setup-python@v5 35 | with: 36 | python-version: "3.11" 37 | 38 | - name: Get latest GraphBLAS version 39 | run: | 40 | python latest_suitesparse_graphblas_version.py > GB_VERSION.txt 41 | echo "GB_VERSION=$(cat GB_VERSION.txt)" >> $GITHUB_ENV 42 | shell: bash 43 | 44 | - name: Create Pull Request 45 | uses: peter-evans/create-pull-request@v7 46 | with: 47 | # See documentation: https://github.com/peter-evans/create-pull-request 48 | # Action behavior: https://github.com/peter-evans/create-pull-request#action-behaviour 49 | # TL;DR: create a PR if there is a diff and keep it up to date with changes as they arrive. 50 | # 51 | # To trigger tests with this PR set up a Personal Access Token as in the docs above. 52 | # token: ${{ secrets.PAT }} 53 | add-paths: GB_VERSION.txt 54 | commit-message: Update to GraphBLAS ${{ env.GB_VERSION }} 55 | title: Update to SuiteSparse:GraphBLAS ${{ env.GB_VERSION }} 56 | body: | 57 | Auto-generated by `update_graphblas.yml` 58 | 59 | Close then reopen this PR to trigger tests. See [here](https://github.com/peter-evans/create-pull-request/blob/main/docs/concepts-guidelines.md#triggering-further-workflow-runs) for why automatic triggers do not work. 
60 | branch: auto_update_gb_version 61 | delete-branch: true 62 | -------------------------------------------------------------------------------- /.github/workflows/wheels.yml: -------------------------------------------------------------------------------- 1 | name: Python wheel package build and publish 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | # Enable Run Workflow button in GitHub UI 8 | workflow_dispatch: 9 | inputs: 10 | # Manual dispatch allows optional upload of wheels to PyPI 11 | upload_dest: 12 | type: choice 13 | description: Upload wheels to 14 | options: 15 | - No Upload 16 | - PyPI 17 | - Test PyPI 18 | 19 | push: 20 | branches: [ main ] 21 | 22 | pull_request: 23 | 24 | concurrency: 25 | group: ${{ github.workflow }}-${{ github.ref }} 26 | cancel-in-progress: true 27 | 28 | permissions: 29 | # For PyPI Trusted Publisher 30 | id-token: write 31 | 32 | jobs: 33 | build_sdist: 34 | name: Build SDist 35 | runs-on: ubuntu-latest 36 | steps: 37 | - uses: actions/checkout@v4 38 | with: 39 | fetch-depth: 0 40 | 41 | - name: Build SDist 42 | run: pipx run build --sdist 43 | 44 | - name: Check metadata 45 | run: pipx run twine check dist/* 46 | 47 | - uses: actions/upload-artifact@v4 48 | with: 49 | name: sdist 50 | path: dist/*.tar.gz 51 | 52 | 53 | build_wheels: 54 | name: Wheels - ${{ matrix.cibw_archs }} ${{ matrix.arch_note}} - ${{ matrix.os }} 55 | runs-on: ${{ matrix.os }} 56 | strategy: 57 | fail-fast: false 58 | matrix: 59 | include: 60 | - os: windows-latest 61 | cibw_archs: "auto64" 62 | 63 | # Linux x86 manylinux 64 | - os: ubuntu-latest 65 | cibw_archs: "x86_64" 66 | # Python 3.12 wheel requires libffi-devel to be installed. manylinux container uses yum 67 | cibw_before_build_linux: "yum install -y libffi-devel" 68 | # skip musllinux 69 | cibw_skip: "*musl*" 70 | 71 | # Linux x86 musllinux 72 | # Separate runner for a Musl build of graphblas. The glibc build is not guaranteed to be compatible. 
73 | - os: ubuntu-latest 74 | cibw_archs: "x86_64" 75 | arch_note: "musl" 76 | # skip manylinux (built elsewhere), PyPy (no musl numpy wheels), CPython 3.8 (no musl numpy wheels) 77 | cibw_skip: "*many* pp* cp38*" 78 | 79 | # Linux aarch64 80 | # Separate runner because this requires emulation (only x86 runners are available) and is very slow. 81 | - os: ubuntu-latest 82 | cibw_archs: "aarch64" 83 | # numpy wheels not available for aarch64 PyPy or musllinux 84 | cibw_skip: "pp* *musl*" 85 | 86 | # macOS x86 87 | # Note: keep as old as possible as due to libomp this will be the oldest supported macOS version. 88 | - os: macos-13 89 | cibw_archs: "x86_64" 90 | 91 | # macOS Apple Silicon 92 | # Note: keep as old as possible as due to libomp this will be the oldest supported macOS version. 93 | - os: macos-14 94 | cibw_archs: "arm64" 95 | 96 | steps: 97 | - uses: actions/checkout@v4 98 | with: 99 | fetch-depth: 0 100 | 101 | # aarch64 Linux builds are cross-compiled on x86 runners using emulation 102 | # see https://cibuildwheel.readthedocs.io/en/stable/faq/#emulation 103 | - name: Setup QEMU (for aarch64) 104 | if: matrix.cibw_archs == 'aarch64' 105 | uses: docker/setup-qemu-action@v3 106 | with: 107 | platforms: arm64 108 | 109 | - name: Setup env (for aarch64) 110 | if: matrix.cibw_archs == 'aarch64' 111 | # Ask suitesparse.sh to compile faster by optimizing fewer types. Otherwise, the build takes too long to finish 112 | # in 6 hour limit. 
113 | run: | 114 | echo "SUITESPARSE_FAST_BUILD=1" >> $GITHUB_ENV 115 | 116 | - name: Setup for testing 117 | if: github.event_name == 'push' || github.event_name == 'pull_request' 118 | # Ask suitesparse.sh to compile in the fastest way possible and provide a GB version to build 119 | run: | 120 | echo "SUITESPARSE_FASTEST_BUILD=1" >> $GITHUB_ENV 121 | shell: bash 122 | 123 | - name: Setup GraphBLAS version from GB_VERSION.txt 124 | # Use GraphBLAS version specified in GB_VERSION.txt unless specified in a git tag (next workflow step). 125 | # Git tag method required for uploads to PyPI. 126 | if: github.event_name != 'release' && github.event.inputs.upload_dest != 'PyPI' 127 | run: echo "GB_VERSION_REF=refs/tags/$(cat GB_VERSION.txt).0" >> $GITHUB_ENV 128 | shell: bash 129 | 130 | - name: Setup GraphBLAS version from git tag 131 | if: ${{ startsWith(github.ref, 'refs/tags/') }} 132 | # If this is a tagged ref, like a release, then use the tag for the graphblas version 133 | run: echo "GB_VERSION_REF=${{ github.ref }}" >> $GITHUB_ENV 134 | shell: bash 135 | 136 | - name: Install tools (macOS) 137 | if: contains(matrix.os, 'macos') 138 | # Install coreutils which includes `nproc` used by `make -j` in suitesparse.sh 139 | # 140 | # Explicitly install libomp to be clear about the dependency. 
141 | # 142 | # libomp determines the minimum macOS version that we can build for 143 | run: | 144 | brew fetch --retry coreutils && brew install coreutils 145 | brew fetch --retry libomp && brew reinstall libomp 146 | echo MACOSX_DEPLOYMENT_TARGET=$(otool -l $(brew --prefix libomp)/lib/libomp.dylib | grep minos | awk '{print $2}') >> $GITHUB_ENV 147 | 148 | - uses: pypa/cibuildwheel@v2.22 149 | with: 150 | output-dir: wheelhouse 151 | env: 152 | # very verbose 153 | CIBW_BUILD_VERBOSITY: 3 154 | 155 | # Build SuiteSparse 156 | CIBW_BEFORE_ALL: bash suitesparse.sh ${{ env.GB_VERSION_REF }} 157 | 158 | CIBW_BEFORE_BUILD_LINUX: ${{ matrix.cibw_before_build_linux }} 159 | 160 | CIBW_ENVIRONMENT_PASS_LINUX: SUITESPARSE_FAST_BUILD SUITESPARSE_FASTEST_BUILD 161 | 162 | # CMAKE_GNUtoMS=ON asks suitesparse.sh to build libraries in MSVC style on Windows. 163 | CIBW_ENVIRONMENT_WINDOWS: CMAKE_GNUtoMS=ON GRAPHBLAS_PREFIX="C:/GraphBLAS" 164 | 165 | # macOS libomp requires special configs. BREW_LIBOMP=1 asks suitesparse.sh to include them. 166 | # SUITESPARSE_MACOS_ARCH asks to build a particular architecture. Either x86 or arm64. 167 | CIBW_ENVIRONMENT_MACOS: BREW_LIBOMP="1" SUITESPARSE_MACOS_ARCH=${{ matrix.cibw_archs }} 168 | 169 | # Uncomment to only build CPython wheels 170 | # CIBW_BUILD: "cp*" 171 | 172 | # Architectures to build specified in matrix 173 | CIBW_ARCHS: ${{ matrix.cibw_archs }} 174 | 175 | # as of writing numpy does not support pypy 3.10 176 | CIBW_SKIP: "${{ matrix.cibw_skip }} pp310*" 177 | 178 | # Use delvewheel on Windows. 179 | # This copies graphblas.dll into the wheel. "repair" in cibuildwheel parlance includes copying any shared 180 | # libraries from the build host into the wheel to make the wheel self-contained. 181 | # Cibuildwheel includes tools for this for Linux and macOS, and they recommend delvewheel for Windows. 
182 | # Note: Currently using a workaround: --no-mangle instead of stripping graphblas.dll 183 | # see https://github.com/adang1345/delvewheel/issues/33 184 | CIBW_BEFORE_BUILD_WINDOWS: "pip install delvewheel" 185 | CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: "delvewheel repair --add-path \"C:\\GraphBLAS\\bin\" --no-mangle \"libgomp-1.dll;libgcc_s_seh-1.dll\" -w {dest_dir} {wheel}" 186 | 187 | # make cibuildwheel install test dependencies from pyproject.toml 188 | CIBW_TEST_EXTRAS: "test" 189 | 190 | # run tests 191 | CIBW_TEST_COMMAND: "pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config && pytest -v --pyargs suitesparse_graphblas" 192 | 193 | CIBW_TEST_SKIP: ${{ matrix.cibw_test_skip }} 194 | 195 | - uses: actions/upload-artifact@v4 196 | with: 197 | name: wheels-${{ matrix.os }}-${{ matrix.cibw_archs }}${{ matrix.arch_note}} 198 | path: wheelhouse/*.whl 199 | if-no-files-found: error 200 | 201 | 202 | upload_all: 203 | name: Upload to PyPI 204 | needs: [build_wheels, build_sdist] 205 | runs-on: ubuntu-latest 206 | # only upload releases to PyPI 207 | if: github.repository == 'GraphBLAS/python-suitesparse-graphblas' && ((github.event_name == 'release' && github.event.action == 'published') || (github.event_name == 'workflow_dispatch' && github.event.inputs.upload_dest != 'No Upload')) 208 | 209 | steps: 210 | - uses: actions/setup-python@v5 211 | with: 212 | python-version: "3.x" 213 | 214 | - uses: actions/download-artifact@v4 215 | with: 216 | path: dist 217 | merge-multiple: true 218 | 219 | # Upload to PyPI 220 | - uses: pypa/gh-action-pypi-publish@release/v1 221 | name: Upload to PyPI 222 | if: github.event_name == 'release' || (github.event_name == 'workflow_dispatch' && github.event.inputs.upload_dest == 'PyPI') 223 | with: 224 | # PyPI does not allow replacing a file. Without this flag the entire action fails if even a single duplicate exists. 
225 | skip-existing: true 226 | verbose: true 227 | password: ${{ secrets.PYPI_TOKEN }} 228 | 229 | # Upload to Test PyPI 230 | - uses: pypa/gh-action-pypi-publish@release/v1 231 | name: Upload to Test PyPI 232 | if: github.event_name == 'workflow_dispatch' && github.event.inputs.upload_dest == 'Test PyPI' 233 | with: 234 | # PyPI does not allow replacing a file. Without this flag the entire action fails if even a single duplicate exists. 235 | skip-existing: true 236 | verbose: true 237 | repository-url: https://test.pypi.org/legacy/ 238 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.c 8 | *.so 9 | *.dll 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | pip-wheel-metadata/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | wheelhouse 32 | 33 | # Wheel building stuff 34 | GraphBLAS-*/ 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .nox/ 50 | .coverage 51 | .coverage.* 52 | .cache 53 | nosetests.xml 54 | coverage.xml 55 | *.cover 56 | *.py,cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | db.sqlite3 68 | db.sqlite3-journal 69 | 70 | # Flask stuff: 71 | instance/ 72 | .webassets-cache 73 | 74 | # Scrapy stuff: 75 | .scrapy 76 | 77 | # Sphinx documentation 78 | docs/_build/ 79 | 80 | # PyBuilder 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | 90 | # pyenv 91 | .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 101 | __pypackages__/ 102 | 103 | # Celery stuff 104 | celerybeat-schedule 105 | celerybeat.pid 106 | 107 | # SageMath parsed files 108 | *.sage.py 109 | 110 | # Environments 111 | .env 112 | .venv 113 | env/ 114 | venv/ 115 | ENV/ 116 | env.bak/ 117 | venv.bak/ 118 | 119 | # Spyder project settings 120 | .spyderproject 121 | .spyproject 122 | 123 | # Rope project settings 124 | .ropeproject 125 | 126 | # mkdocs documentation 127 | /site 128 | 129 | # mypy 130 | .mypy_cache/ 131 | .dmypy.json 132 | dmypy.json 133 | 134 | # Pyre type checker 135 | .pyre/ 136 | 137 | # emacs 138 | *~ 139 | 140 | # Vim 141 | *.sw? 
142 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # https://pre-commit.com/ 2 | # 3 | # Before first use: `pre-commit install` 4 | # To run: `pre-commit run --all-files` 5 | # To update: `pre-commit autoupdate` 6 | # - &flake8_dependencies below needs updated manually 7 | fail_fast: true 8 | default_language_version: 9 | python: python3 10 | repos: 11 | - repo: https://github.com/pre-commit/pre-commit-hooks 12 | rev: v5.0.0 13 | hooks: 14 | - id: check-added-large-files 15 | - id: check-case-conflict 16 | - id: check-merge-conflict 17 | - id: check-ast 18 | - id: check-toml 19 | - id: check-yaml 20 | # - id: check-executables-have-shebangs 21 | - id: check-vcs-permalinks 22 | - id: destroyed-symlinks 23 | - id: detect-private-key 24 | - id: debug-statements 25 | - id: end-of-file-fixer 26 | - id: mixed-line-ending 27 | args: [--fix=lf] 28 | - id: trailing-whitespace 29 | - repo: https://github.com/abravalheri/validate-pyproject 30 | rev: v0.24 31 | hooks: 32 | - id: validate-pyproject 33 | name: Validate pyproject.toml 34 | - repo: https://github.com/PyCQA/autoflake 35 | rev: v2.3.1 36 | hooks: 37 | - id: autoflake 38 | args: [--in-place] 39 | - repo: https://github.com/pycqa/isort 40 | rev: 6.0.1 41 | hooks: 42 | - id: isort 43 | - repo: https://github.com/asottile/pyupgrade 44 | rev: v3.19.1 45 | hooks: 46 | - id: pyupgrade 47 | args: [--py39-plus] 48 | - repo: https://github.com/psf/black 49 | rev: 25.1.0 50 | hooks: 51 | - id: black 52 | - repo: https://github.com/PyCQA/flake8 53 | rev: 7.1.2 54 | hooks: 55 | - id: flake8 56 | args: ["--config=.flake8"] 57 | additional_dependencies: &flake8_dependencies 58 | # These versions need updated manually 59 | - flake8==7.1.2 60 | - flake8-comprehensions==3.16.0 61 | - flake8-bugbear==24.12.12 62 | # - flake8-simplify==0.21.0 63 | - repo: https://github.com/asottile/yesqa 64 
| rev: v1.5.0 65 | hooks: 66 | - id: yesqa 67 | additional_dependencies: *flake8_dependencies 68 | # `pyroma` may help keep our package standards up to date if best practices change. 69 | # This is a "low value" check though and too slow to run as part of pre-commit. 70 | # - repo: https://github.com/regebro/pyroma 71 | # rev: "4.2" 72 | # hooks: 73 | # - id: pyroma 74 | # args: [-n, "10", .] 75 | - repo: https://github.com/python-jsonschema/check-jsonschema 76 | rev: 0.31.3 77 | hooks: 78 | - id: check-dependabot 79 | - id: check-github-workflows 80 | - repo: meta 81 | hooks: 82 | - id: check-hooks-apply 83 | - id: check-useless-excludes 84 | - repo: https://github.com/pre-commit/pre-commit-hooks 85 | rev: v5.0.0 86 | hooks: 87 | - id: no-commit-to-branch # no commit directly to main 88 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_CONTAINER=python:3.9-slim-buster 2 | FROM ${BASE_CONTAINER} as suitesparse 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | 5 | RUN apt-get update && apt-get install -yq build-essential cmake git 6 | 7 | ARG SUITESPARSE 8 | ARG COMPACT 9 | 10 | WORKDIR /build 11 | RUN git clone https://github.com/eliben/pycparser.git --depth 1 12 | 13 | WORKDIR /build/GraphBLAS/build 14 | RUN git clone https://github.com/DrTimothyAldenDavis/GraphBLAS.git --depth 1 --branch ${SUITESPARSE} \ 15 | && cd GraphBLAS/build \ 16 | && cmake .. 
-DCMAKE_INSTALL_PREFIX=/usr -DGBCOMPACT=${COMPACT} \ 17 | && make -j$(nproc) \ 18 | && make install 19 | 20 | FROM ${BASE_CONTAINER} as psg 21 | ARG SUITESPARSE 22 | ARG VERSION 23 | ENV PYTHONUNBUFFERED 1 24 | 25 | COPY --from=suitesparse /usr/include/GraphBLAS.h /usr/local/include/ 26 | COPY --from=suitesparse /usr/lib/x86_64-linux-gnu/libgraphblas* /usr/lib/x86_64-linux-gnu/ 27 | COPY --from=suitesparse /build/pycparser/utils/fake_libc_include/* /usr/local/lib/python3.9/site-packages/pycparser/utils/fake_libc_include/ 28 | 29 | RUN apt-get update && apt-get install -yq build-essential git 30 | RUN pip3 install numpy cffi pytest cython 31 | 32 | RUN mkdir -p /psg 33 | ADD . /psg 34 | 35 | WORKDIR /psg 36 | RUN git tag ${VERSION} && \ 37 | python3 suitesparse_graphblas/create_headers.py && \ 38 | python3 setup.py install && \ 39 | ldconfig 40 | 41 | #RUN pytest --pyargs suitesparse_graphblas.tests 42 | RUN apt-get -y --purge remove git python3-pip && apt-get clean 43 | 44 | FROM ${BASE_CONTAINER} 45 | COPY --from=suitesparse /usr/lib/x86_64-linux-gnu/libgraphblas* /usr/lib/x86_64-linux-gnu/ 46 | COPY --from=suitesparse /usr/lib/x86_64-linux-gnu/libgomp* /usr/lib/x86_64-linux-gnu/ 47 | COPY --from=psg /usr/local/lib/python3.9/site-packages /usr/local/lib/python3.9/site-packages 48 | -------------------------------------------------------------------------------- /GB_VERSION.txt: -------------------------------------------------------------------------------- 1 | 10.0.1 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2021 Anaconda Inc., Graphegon, and contributors 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include setup.py 2 | include build_graphblas_cffi.py 3 | include README.md 4 | include LICENSE 5 | include suitesparse_graphblas/*.pxd 6 | include suitesparse_graphblas/*.pyx 7 | include suitesparse_graphblas/*.c 8 | include suitesparse_graphblas/*.h 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # python-suitesparse-graphblas 2 | 3 | [![Version](https://img.shields.io/pypi/v/suitesparse-graphblas.svg)](https://pypi.org/project/suitesparse-graphblas/) 4 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/GraphBLAS/python-suitesparse-graphblas/blob/main/LICENSE) 5 | [![Build Status](https://github.com/GraphBLAS/python-suitesparse-graphblas/workflows/Test/badge.svg)](https://github.com/GraphBLAS/python-suitesparse-graphblas/actions) 6 | [![Code 
style](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) 7 | 8 | Python CFFI Binding around 9 | [SuiteSparse:GraphBLAS](https://github.com/DrTimothyAldenDavis/GraphBLAS) 10 | 11 | This is a base package that exposes only the low level CFFI API 12 | bindings and symbols. This package is shared by the syntax bindings 13 | [pygraphblas](https://github.com/Graphegon/pygraphblas) and 14 | [python-graphblas](https://github.com/python-graphblas/python-graphblas). 15 | 16 | 17 | ## Installation from pre-built wheels 18 | Pre-built wheels for common platforms are available from PyPI and conda. These bundle a compiled copy of SuiteSparse:GraphBLAS. 19 | 20 | ```bash 21 | pip install suitesparse-graphblas 22 | ``` 23 | 24 | or 25 | 26 | ```bash 27 | conda install -c conda-forge python-suitesparse-graphblas 28 | ``` 29 | 30 | ## Installation from source 31 | If you wish to link against your own copy of SuiteSparse:GraphBLAS you may build from source. 32 | 33 | Specify the location of your SuiteSparse:GraphBLAS installation in the `GraphBLAS_ROOT` environment variable then use the standard pip build from source mechanism. This location must contain `include/GraphBLAS.h` and `lib/`. 34 | 35 | ```bash 36 | export GraphBLAS_ROOT="/path/to/graphblas" 37 | pip install suitesparse-graphblas-*.tar.gz 38 | ``` 39 | You may also have to appropriately set `LD_LIBRARY_PATH` to find `libgraphblas` at runtime. 
40 | 41 | For example, to use Homebrew's SuiteSparse:GraphBLAS on macOS, with the sdist from PyPI, and with all dependencies using wheels: 42 | ```bash 43 | GraphBLAS_ROOT="$(brew --prefix suitesparse)" pip install --no-binary suitesparse-graphblas suitesparse-graphblas 44 | ``` 45 | -------------------------------------------------------------------------------- /build_graphblas_cffi.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from pathlib import Path 4 | 5 | from cffi import FFI 6 | from setuptools import Extension 7 | 8 | is_win = sys.platform.startswith("win") 9 | ss_g = Path(__file__).parent / "suitesparse_graphblas" 10 | 11 | ffibuilder = FFI() 12 | 13 | # GraphBLAS_ROOT env var can point to the root directory of GraphBLAS to link against. 14 | # Expected subdirectories: include/ (contains GraphBLAS.h), lib/, and bin/ (on Windows only) 15 | # Otherwise fallback to default system folders. 16 | graphblas_root = os.environ.get("GraphBLAS_ROOT", None) 17 | 18 | if not graphblas_root: 19 | # Windows wheels.yml configures suitesparse.sh to install GraphBLAS to "C:\\GraphBLAS". 
20 | if is_win: 21 | graphblas_root = "C:\\GraphBLAS" 22 | elif Path("/usr/local/include/suitesparse").exists(): 23 | # SuiteSparse:GraphBLAS 9.1+ built by suitesparse.sh 24 | graphblas_root = "/usr/local" 25 | else: 26 | # Conda install 27 | graphblas_root = sys.prefix 28 | 29 | include_dirs = [ 30 | os.path.join(graphblas_root, "include"), 31 | os.path.join(graphblas_root, "include", "suitesparse"), 32 | ] 33 | library_dirs = [os.path.join(graphblas_root, "lib"), os.path.join(graphblas_root, "lib64")] 34 | if is_win: 35 | include_dirs.append(os.path.join(sys.prefix, "Library", "include")) 36 | include_dirs.append(os.path.join(sys.prefix, "Library", "include", "suitesparse")) 37 | library_dirs.append(os.path.join(sys.prefix, "Library", "lib")) 38 | 39 | include_dirs.append(os.path.join(graphblas_root, "include")) 40 | include_dirs.append(os.path.join(graphblas_root, "include", "suitesparse")) 41 | library_dirs.append(os.path.join(graphblas_root, "lib")) 42 | library_dirs.append(os.path.join(graphblas_root, "bin")) 43 | 44 | ffibuilder.set_source( 45 | "suitesparse_graphblas._graphblas", 46 | (ss_g / "source.c").read_text(), 47 | libraries=["graphblas"], 48 | include_dirs=include_dirs, 49 | library_dirs=library_dirs, 50 | ) 51 | 52 | ffibuilder.cdef((ss_g / "suitesparse_graphblas.h").read_text()) 53 | 54 | 55 | def get_extension(apply_msvc_patch: bool = None, extra_compile_args=()): 56 | """ 57 | Get a setuptools.Extension version of this CFFI builder. 58 | 59 | In other words, enables `setup(ext_modules=[get_extension()])` 60 | instead of `setup(cffi_modules=["build_graphblas_cffi.py:ffibuilder"])`. 61 | 62 | The main reason for this is to allow a patch for complex values when compiling on MSVC. 63 | MSVC famously lacks support for standard C complex types like `double _Complex` and 64 | `float _Complex`. Instead, MSVC has its own `_Dcomplex` and `_Fcomplex` types. 
65 | Cffi's machinery cannot be made to work with these types, so we instead 66 | emit the regular standard C code and patch it manually. 67 | 68 | :param apply_msvc_patch: whether to apply the MSVC patch. 69 | If None then auto-detect based on platform. 70 | :param extra_compile_args: forwarded to Extension constructor. 71 | """ 72 | code_path = ss_g / "_graphblas.c" 73 | ffibuilder.emit_c_code(str(code_path)) 74 | 75 | if apply_msvc_patch is None: 76 | apply_msvc_patch = is_win 77 | 78 | if apply_msvc_patch: 79 | msvc_code = code_path.read_text() 80 | msvc_code = msvc_code.replace("float _Complex", "_Fcomplex") 81 | msvc_code = msvc_code.replace("double _Complex", "_Dcomplex") 82 | code_path.write_text(msvc_code) 83 | 84 | # Hack: tell GraphBLAS.h that we need MSVC-style complex values 85 | extra_compile_args = list(extra_compile_args) + ["-DGxB_HAVE_COMPLEX_MSVC"] 86 | 87 | return Extension( 88 | "suitesparse_graphblas._graphblas", 89 | [os.path.join("suitesparse_graphblas", "_graphblas.c")], 90 | libraries=["graphblas"], 91 | include_dirs=include_dirs, 92 | library_dirs=library_dirs, 93 | extra_compile_args=extra_compile_args, 94 | ) 95 | 96 | 97 | if __name__ == "__main__": 98 | ffibuilder.compile(verbose=True) 99 | -------------------------------------------------------------------------------- /continuous_integration/environment.yml: -------------------------------------------------------------------------------- 1 | name: suitesparse-graphblas 2 | channels: 3 | - conda-forge 4 | - nodefaults # Only install packages from conda-forge for faster solving 5 | dependencies: 6 | # - graphblas=6.0.2 7 | - cffi 8 | - cython 9 | - numpy 10 | - pytest 11 | - pytest-randomly 12 | - coverage 13 | - pycparser 14 | -------------------------------------------------------------------------------- /docker_build.sh: -------------------------------------------------------------------------------- 1 | if [ $# -eq 0 ] 2 | then 3 | echo "Usage: ./docker_build.sh 
SUITESPARSE_BRANCH VERSION [BRANCH LOCATION PUSH] 4 | 5 | Example: ./docker_build.sh v5.1.3 5.1.3.1 main clone push 6 | 7 | If location is clone then a fresh git clone will be used. 8 | If push is provided then the script will attempt to push to dockerhub." 9 | exit 1 10 | fi 11 | 12 | IMAGE=graphblas/python-suitesparse-graphblas 13 | SUITESPARSE=$1 14 | VERSION=$2 15 | BRANCH=$3 16 | LOCATION=$4 17 | PUSH=$5 18 | 19 | COMPACT=${COMPACT:-0} 20 | 21 | if [ "$LOCATION" = "clone" ] 22 | then 23 | TMPDIR=$(mktemp -d) 24 | if [ ! -e $TMPDIR ]; then 25 | >&2 echo "Failed to create temp directory" 26 | exit 1 27 | fi 28 | trap "exit 1" HUP INT PIPE QUIT TERM 29 | trap 'rm -rf "$TMPDIR"' EXIT 30 | 31 | cd $TMPDIR 32 | git clone --branch $BRANCH https://github.com/GraphBLAS/python-suitesparse-graphblas.git 33 | cd python-suitesparse-graphblas 34 | fi 35 | 36 | docker build \ 37 | --build-arg SUITESPARSE=${SUITESPARSE} \ 38 | --build-arg VERSION=${VERSION} \ 39 | --build-arg COMPACT=${COMPACT} \ 40 | -t $IMAGE:$VERSION \ 41 | . 
42 | 43 | docker tag $IMAGE:$VERSION $IMAGE:latest 44 | 45 | if [ "$PUSH" = "push" ] 46 | then 47 | docker push $IMAGE:$VERSION 48 | docker push $IMAGE:latest 49 | fi 50 | -------------------------------------------------------------------------------- /latest_suitesparse_graphblas_version.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Find and print the latest version of SuiteSparse:GraphBLAS as published in this repo: 5 | https://github.com/DrTimothyAldenDavis/GraphBLAS 6 | """ 7 | 8 | import json 9 | from urllib.error import URLError 10 | from urllib.request import urlopen 11 | 12 | # fetch release data from GraphBLAS repo 13 | for _ in range(5): 14 | try: 15 | with urlopen( 16 | "https://api.github.com/repos/DrTimothyAldenDavis/GraphBLAS/releases/latest" 17 | ) as url: 18 | latest_release = json.load(url) 19 | break 20 | except URLError: 21 | # sleep before trying again 22 | from time import sleep 23 | 24 | sleep(1) 25 | 26 | version = latest_release["tag_name"].lstrip("v") 27 | print(version) 28 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | build-backend = "setuptools.build_meta" 3 | requires = [ 4 | # setuptools<74 until PyPy vendors cffi 1.15.1 5 | "setuptools >=64, <74", 6 | "setuptools-git-versioning", 7 | "wheel", 8 | "cffi>=1.11", 9 | "cython", 10 | "numpy>=2.0", 11 | ] 12 | 13 | [project] 14 | name = "suitesparse-graphblas" 15 | dynamic = ["version"] 16 | description = "SuiteSparse:GraphBLAS Python bindings." 
17 | readme = "README.md" 18 | requires-python = ">=3.9" 19 | license = {file = "LICENSE"} 20 | authors = [ 21 | {name = "Erik Welch", email = "erik.n.welch@gmail.com"}, 22 | {name = "Jim Kitchen"}, 23 | {name = "Michel Pelletier"}, 24 | {name = "suitesparse-graphblas contributors"}, 25 | ] 26 | maintainers = [ 27 | {name = "Erik Welch", email = "erik.n.welch@gmail.com"}, 28 | {name = "Jim Kitchen", email = "jim22k@gmail.com"}, 29 | {name = "Michel Pelletier", email = "michel@graphegon.com"}, 30 | {name = "Adam Lugowski", email = "alugowski@gmail.com"}, 31 | ] 32 | keywords = [ 33 | "graphblas", 34 | "graph", 35 | "sparse", 36 | "matrix", 37 | "lagraph", 38 | "suitesparse", 39 | "Networks", 40 | "Graph Theory", 41 | "Mathematics", 42 | "network", 43 | "discrete mathematics", 44 | "math", 45 | ] 46 | classifiers = [ 47 | "Development Status :: 5 - Production/Stable", 48 | "License :: OSI Approved :: Apache Software License", 49 | "Operating System :: MacOS :: MacOS X", 50 | "Operating System :: POSIX :: Linux", 51 | "Operating System :: Microsoft :: Windows", 52 | "Programming Language :: Python", 53 | "Programming Language :: Python :: 3", 54 | "Programming Language :: Python :: 3.9", 55 | "Programming Language :: Python :: 3.10", 56 | "Programming Language :: Python :: 3.11", 57 | "Programming Language :: Python :: 3.12", 58 | "Programming Language :: Python :: 3.13", 59 | "Programming Language :: Python :: 3 :: Only", 60 | "Intended Audience :: Developers", 61 | "Intended Audience :: Other Audience", 62 | "Intended Audience :: Science/Research", 63 | "Topic :: Scientific/Engineering", 64 | "Topic :: Scientific/Engineering :: Information Analysis", 65 | "Topic :: Scientific/Engineering :: Mathematics", 66 | "Topic :: Software Development :: Libraries :: Python Modules", 67 | ] 68 | dependencies = [ 69 | "cffi>=1.15", 70 | "numpy>=1.23", 71 | ] 72 | [project.urls] 73 | homepage = "https://github.com/GraphBLAS/python-suitesparse-graphblas" 74 | repository = 
"https://github.com/GraphBLAS/python-suitesparse-graphblas" 75 | changelog = "https://github.com/GraphBLAS/python-suitesparse-graphblas/releases" 76 | 77 | [project.optional-dependencies] 78 | test = [ 79 | "pytest", 80 | ] 81 | 82 | [tool.setuptools] 83 | packages = [ 84 | 'suitesparse_graphblas', 85 | 'suitesparse_graphblas.tests', 86 | 'suitesparse_graphblas.io', 87 | ] 88 | 89 | [tool.setuptools-git-versioning] 90 | enabled = true 91 | dev_template = "{tag}+{ccount}.g{sha}" 92 | dirty_template = "{tag}+{ccount}.g{sha}.dirty" 93 | 94 | [tool.black] 95 | line-length = 100 96 | target-version = ["py39", "py310", "py311", "py312", "py313"] 97 | 98 | [tool.isort] 99 | sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] 100 | profile = "black" 101 | skip_gitignore = true 102 | float_to_top = true 103 | default_section = "THIRDPARTY" 104 | known_first_party = "suitesparse_graphblas" 105 | line_length = 100 106 | skip_glob = ["*.pxd", "*.pyx"] 107 | 108 | [tool.coverage.run] 109 | branch = true 110 | source = ["suitesparse_graphblas"] 111 | omit = [] 112 | plugins = ["Cython.Coverage"] 113 | 114 | [tool.coverage.report] 115 | ignore_errors = false 116 | precision = 1 117 | fail_under = 0 118 | skip_covered = true 119 | skip_empty = true 120 | exclude_lines = [ 121 | "pragma: no cover", 122 | "raise AssertionError", 123 | "raise NotImplementedError", 124 | ] 125 | 126 | [tool.pytest] 127 | testpaths = ["suitesparse_graphblas/tests"] 128 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from glob import glob 4 | 5 | import numpy as np 6 | from setuptools import Extension, setup 7 | 8 | # Add current directory to the Python path because it's not present when running `pip install .` 9 | sys.path.append(os.path.dirname(__file__)) 10 | import build_graphblas_cffi # noqa: E402 # isort:skip 11 | 12 
| try: 13 | from Cython.Build import cythonize 14 | from Cython.Compiler.Options import get_directive_defaults 15 | 16 | use_cython = True 17 | except ImportError: 18 | use_cython = False 19 | 20 | define_macros = [("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")] 21 | 22 | # /d2FH4- flag needed only for early Python 3.8 builds on Windows. 23 | # See https://cibuildwheel.readthedocs.io/en/stable/faq/ 24 | # (Search for flag on page. Full link is long and causes the linter to fail the tests.) 25 | # 26 | # The /std:c11 flag is because the MSVC default is C89. 27 | extra_compile_args = ["/d2FH4-", "/std:c11"] if sys.platform == "win32" else [] 28 | 29 | if use_cython: 30 | suffix = ".pyx" 31 | directive_defaults = get_directive_defaults() 32 | directive_defaults["binding"] = True 33 | directive_defaults["language_level"] = 3 34 | if os.environ.get("CYTHON_COVERAGE"): 35 | directive_defaults["linetrace"] = True 36 | define_macros.append(("CYTHON_TRACE_NOGIL", "1")) 37 | else: 38 | suffix = ".c" 39 | # Make sure all required .c files are here 40 | pyx_files = glob("suitesparse_graphblas/**.pyx", recursive=True) 41 | c_files = glob("suitesparse_graphblas/**.c", recursive=True) 42 | missing = {x[:-4] for x in pyx_files} - {x[:-2] for x in c_files} 43 | if missing: 44 | missing_c = sorted(x + ".c" for x in missing) 45 | raise RuntimeError("Cython required when missing C files: " + ", ".join(missing_c)) 46 | 47 | include_dirs = [np.get_include(), os.path.join(sys.prefix, "include")] 48 | ext_modules = [ 49 | Extension( 50 | name[: -len(suffix)].replace("/", ".").replace("\\", "."), 51 | [name], 52 | include_dirs=include_dirs, 53 | define_macros=define_macros, 54 | extra_compile_args=extra_compile_args, 55 | ) 56 | for name in glob(f"suitesparse_graphblas/**/*{suffix}", recursive=True) 57 | ] 58 | if use_cython: 59 | ext_modules = cythonize(ext_modules, include_path=include_dirs) 60 | 61 | if build_graphblas_cffi.is_win: 62 | 
ext_modules.append(build_graphblas_cffi.get_extension(extra_compile_args=extra_compile_args)) 63 | 64 | setup( 65 | ext_modules=ext_modules, 66 | cffi_modules=None if build_graphblas_cffi.is_win else ["build_graphblas_cffi.py:ffibuilder"], 67 | ) 68 | -------------------------------------------------------------------------------- /suitesparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x # echo on 4 | 5 | # parse SuiteSparse version from first argument, a git tag that ends in the version (no leading v) 6 | if [[ $1 =~ refs/tags/([0-9]*\.[0-9]*\.[0-9]*\.beta[0-9]*).*$ ]]; then 7 | echo "Beta version detected" 8 | VERSION=${BASH_REMATCH[1]} 9 | elif [[ $1 =~ refs/tags/([0-9]*\.[0-9]*\.[0-9]*)\..*$ ]]; then 10 | VERSION=${BASH_REMATCH[1]} 11 | else 12 | echo "Specify a SuiteSparse version, such as: $0 refs/tags/7.4.3.0 (got: $1)" 13 | exit -1 14 | fi 15 | echo VERSION: $VERSION 16 | 17 | NPROC="$(nproc)" 18 | if [ -z "${NPROC}" ]; then 19 | # Default for platforms that don't have nproc. Mostly Windows. 20 | NPROC="2" 21 | fi 22 | 23 | cmake_params=() 24 | if [ -n "${BREW_LIBOMP}" ]; then 25 | # macOS OpenMP flags. 26 | # FindOpenMP doesn't find brew's libomp, so set the necessary configs manually. 
27 | cmake_params+=(-DOpenMP_C_FLAGS="-Xclang -fopenmp -I$(brew --prefix libomp)/include") 28 | cmake_params+=(-DOpenMP_C_LIB_NAMES="libomp") 29 | cmake_params+=(-DOpenMP_libomp_LIBRARY="omp") 30 | export LDFLAGS="-L$(brew --prefix libomp)/lib" 31 | 32 | if [ -n "${SUITESPARSE_MACOS_ARCH}" ]; then 33 | export CFLAGS="-arch ${SUITESPARSE_MACOS_ARCH}" 34 | else 35 | # build both x86 and ARM 36 | export CFLAGS="-arch x86_64 -arch arm64" 37 | fi 38 | fi 39 | 40 | if [ -n "${CMAKE_GNUtoMS}" ]; then 41 | # Windows needs .lib libraries, not .a 42 | cmake_params+=(-DCMAKE_GNUtoMS=ON) 43 | # Windows expects 'graphblas.lib', not 'libgraphblas.lib' 44 | cmake_params+=(-DCMAKE_SHARED_LIBRARY_PREFIX=) 45 | cmake_params+=(-DCMAKE_STATIC_LIBRARY_PREFIX=) 46 | fi 47 | 48 | if [ -n "${GRAPHBLAS_PREFIX}" ]; then 49 | echo "GRAPHBLAS_PREFIX=${GRAPHBLAS_PREFIX}" 50 | cmake_params+=(-DCMAKE_INSTALL_PREFIX="${GRAPHBLAS_PREFIX}") 51 | fi 52 | 53 | curl -L https://github.com/DrTimothyAldenDavis/GraphBLAS/archive/refs/tags/v${VERSION}.tar.gz | tar xzf - 54 | cd GraphBLAS-${VERSION}/build 55 | 56 | # Disable optimizing some rarely-used types for significantly faster builds and significantly smaller wheel size. 57 | # Also the build with all types enabled sometimes stalls on GitHub Actions. Probably due to exceeded resource limits. 58 | # These can still be used, they'll just have reduced performance (AFAIK similar to UDTs). 
59 | # echo "#define GxB_NO_BOOL 1" >> ../Source/GB_control.h # 60 | # echo "#define GxB_NO_FP32 1" >> ../Source/GB_control.h # 61 | # echo "#define GxB_NO_FP64 1" >> ../Source/GB_control.h # 62 | echo "#define GxB_NO_FC32 1" >> ../Source/GB_control.h 63 | echo "#define GxB_NO_FC64 1" >> ../Source/GB_control.h 64 | # echo "#define GxB_NO_INT16 1" >> ../Source/GB_control.h # 65 | # echo "#define GxB_NO_INT32 1" >> ../Source/GB_control.h # 66 | # echo "#define GxB_NO_INT64 1" >> ../Source/GB_control.h # 67 | # echo "#define GxB_NO_INT8 1" >> ../Source/GB_control.h # 68 | echo "#define GxB_NO_UINT16 1" >> ../Source/GB_control.h 69 | echo "#define GxB_NO_UINT32 1" >> ../Source/GB_control.h 70 | # echo "#define GxB_NO_UINT64 1" >> ../Source/GB_control.h # 71 | echo "#define GxB_NO_UINT8 1" >> ../Source/GB_control.h 72 | 73 | if [ -n "${SUITESPARSE_FAST_BUILD}" ]; then 74 | echo "suitesparse.sh: Fast build requested." 75 | # Disable optimizing even more types. This is for builds that don't finish in runner resource limits, 76 | # such as emulated aarm64. 77 | 78 | # echo "#define GxB_NO_BOOL 1" >> ../Source/GB_control.h 79 | # echo "#define GxB_NO_FP32 1" >> ../Source/GB_control.h 80 | # echo "#define GxB_NO_FP64 1" >> ../Source/GB_control.h 81 | echo "#define GxB_NO_FC32 1" >> ../Source/GB_control.h 82 | echo "#define GxB_NO_FC64 1" >> ../Source/GB_control.h 83 | echo "#define GxB_NO_INT16 1" >> ../Source/GB_control.h 84 | echo "#define GxB_NO_INT32 1" >> ../Source/GB_control.h 85 | # echo "#define GxB_NO_INT64 1" >> ../Source/GB_control.h 86 | echo "#define GxB_NO_INT8 1" >> ../Source/GB_control.h 87 | echo "#define GxB_NO_UINT16 1" >> ../Source/GB_control.h 88 | echo "#define GxB_NO_UINT32 1" >> ../Source/GB_control.h 89 | echo "#define GxB_NO_UINT64 1" >> ../Source/GB_control.h 90 | echo "#define GxB_NO_UINT8 1" >> ../Source/GB_control.h 91 | fi 92 | 93 | if [ -n "${SUITESPARSE_FASTEST_BUILD}" ]; then 94 | echo "suitesparse.sh: Fastest build requested." 
95 | # Fastest build possible. For use in development and automated tests that do not depend on performance. 96 | 97 | echo "#define GxB_NO_BOOL 1" >> ../Source/GB_control.h 98 | echo "#define GxB_NO_FP32 1" >> ../Source/GB_control.h 99 | echo "#define GxB_NO_FP64 1" >> ../Source/GB_control.h 100 | echo "#define GxB_NO_FC32 1" >> ../Source/GB_control.h 101 | echo "#define GxB_NO_FC64 1" >> ../Source/GB_control.h 102 | echo "#define GxB_NO_INT16 1" >> ../Source/GB_control.h 103 | echo "#define GxB_NO_INT32 1" >> ../Source/GB_control.h 104 | echo "#define GxB_NO_INT64 1" >> ../Source/GB_control.h 105 | echo "#define GxB_NO_INT8 1" >> ../Source/GB_control.h 106 | echo "#define GxB_NO_UINT16 1" >> ../Source/GB_control.h 107 | echo "#define GxB_NO_UINT32 1" >> ../Source/GB_control.h 108 | echo "#define GxB_NO_UINT64 1" >> ../Source/GB_control.h 109 | echo "#define GxB_NO_UINT8 1" >> ../Source/GB_control.h 110 | 111 | # Setting COMPACT probably makes setting config in GB_control.h above unnecessary 112 | cmake_params+=(-DCOMPACT=1) 113 | # Also no JIT for the fastest possible build 114 | cmake_params+=(-DNJIT=1) 115 | # Disable all Source/Generated2 kernels. For workflow development only. 116 | cmake_params+=(-DCMAKE_CUDA_DEV=1) 117 | fi 118 | 119 | if [ -n "${CMAKE_GNUtoMS}" ]; then 120 | # Windows options 121 | echo "Skipping JIT on Windows for now because it fails to build." 122 | cmake_params+=(-DGRAPHBLAS_USE_JIT=OFF) 123 | else 124 | # Use `-DJITINIT=2` so that the JIT functionality is available, but disabled by default. 125 | # Level 2, "run", means that pre-JIT kernels may be used, which does not require a compiler at runtime. 
126 | cmake_params+=(-DJITINIT=2) 127 | 128 | # Disable JIT here too to not segfault in tests 129 | cmake_params+=(-DGRAPHBLAS_USE_JIT=OFF) 130 | fi 131 | 132 | # some platforms require sudo for installation, some don't have sudo at all 133 | if [ "$(uname)" == "Darwin" ]; then 134 | SUDO=sudo 135 | else 136 | SUDO="" 137 | fi 138 | 139 | cmake .. -DCMAKE_BUILD_TYPE=Release -G 'Unix Makefiles' "${cmake_params[@]}" 140 | make -j$NPROC 141 | $SUDO make install 142 | 143 | if [ -n "${CMAKE_GNUtoMS}" ]; then 144 | if [ -z "${GRAPHBLAS_PREFIX}" ]; then 145 | # Windows default 146 | GRAPHBLAS_PREFIX="C:/Program Files (x86)" 147 | fi 148 | 149 | # Windows: 150 | # CMAKE_STATIC_LIBRARY_PREFIX is sometimes ignored, possibly when the MinGW toolchain is selected. 151 | # Drop the 'lib' prefix manually. 152 | echo "manually removing lib prefix" 153 | mv "${GRAPHBLAS_PREFIX}/lib/libgraphblas.lib" "${GRAPHBLAS_PREFIX}/lib/graphblas.lib" 154 | mv "${GRAPHBLAS_PREFIX}/lib/libgraphblas.dll.a" "${GRAPHBLAS_PREFIX}/lib/graphblas.dll.a" 155 | # cp instead of mv because the GNU tools expect libgraphblas.dll and the MS tools expect graphblas.dll. 156 | cp "${GRAPHBLAS_PREFIX}/bin/libgraphblas.dll" "${GRAPHBLAS_PREFIX}/bin/graphblas.dll" 157 | fi 158 | -------------------------------------------------------------------------------- /suitesparse_graphblas/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib.metadata 2 | import platform 3 | import struct as _struct 4 | 5 | from . import exceptions as ex 6 | from . 
import utils 7 | from ._graphblas import ffi, lib 8 | 9 | _is_osx_arm64 = platform.machine() == "arm64" 10 | _is_ppc64le = platform.machine() == "ppc64le" 11 | _c_float = ffi.typeof("float") 12 | _c_double = ffi.typeof("double") 13 | 14 | try: 15 | __version__ = importlib.metadata.version("suitesparse-graphblas") 16 | except Exception as exc: # pragma: no cover (safety) 17 | raise AttributeError( 18 | "`suitesparse_graphblas.__version__` not available. This may mean " 19 | "suitesparse-graphblas was incorrectly installed or not installed at all. " 20 | "For local development, you may want to do an editable install via " 21 | "`python -m pip install -e path/to/suitesparse-graphblas`" 22 | ) from exc 23 | del importlib, platform 24 | 25 | # It is strongly recommended to use the non-variadic version of functions to be 26 | # compatible with the most number of architectures. For example, you should use 27 | # GxB_Matrix_Option_get_INT32 instead of GxB_Matrix_Option_get. 28 | if _is_osx_arm64 or _is_ppc64le: 29 | 30 | def vararg(val): 31 | # Interpret float as int32 and double as int64 32 | # https://devblogs.microsoft.com/oldnewthing/20220823-00/?p=107041 33 | tov = ffi.typeof(val) 34 | if tov == _c_float: 35 | val = _struct.unpack("l", _struct.pack("f", val))[0] 36 | val = ffi.cast("int64_t", val) 37 | elif tov == _c_double: 38 | val = _struct.unpack("q", _struct.pack("d", val))[0] 39 | val = ffi.cast("int64_t", val) 40 | # Cast variadic argument as char * to force it onto the stack where ARM64 expects it 41 | # https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms 42 | # 43 | # The same fix *may* work for ppc64le 44 | return ffi.cast("char *", val) 45 | 46 | else: 47 | 48 | def vararg(val): 49 | return val 50 | 51 | 52 | def is_initialized(): 53 | """Is GraphBLAS initialized via GrB_init or GxB_init?""" 54 | mode = ffi.new("int32_t*") 55 | return lib.GxB_Global_Option_get_INT32(lib.GxB_MODE, mode) != lib.GrB_PANIC 56 | 57 | 58 | def 
supports_complex(): 59 | """Does this package support complex numbers?""" 60 | return hasattr(lib, "GrB_FC64") or hasattr(lib, "GxB_FC64") 61 | 62 | 63 | def initialize(*, blocking=False, memory_manager="numpy"): 64 | """Initialize GraphBLAS via GrB_init or GxB_init. 65 | 66 | This must be called before any other GraphBLAS functions are called. 67 | A RuntimeError will be raised if called more than once. 68 | 69 | Parameters 70 | ---------- 71 | blocking : bool, optional 72 | Whether to call init with GrB_BLOCKING or GrB_NONBLOCKING. 73 | Default is False. 74 | memory_manager : {'numpy', 'c'}, optional 75 | Choose which malloc/free functions to use. 'numpy' uses numpy's 76 | allocators, which makes it safe to perform zero-copy to and from numpy, 77 | and allows Python to track memory usage via tracemalloc (if enabled). 78 | 'c' uses the default allocators. Default is 'numpy'. 79 | 80 | The global variable `suitesparse_graphblas.is_initialized` indicates whether 81 | GraphBLAS has been initialized. 82 | """ 83 | if is_initialized(): 84 | raise RuntimeError("GraphBLAS is already initialized! 
Unable to initialize again.") 85 | blocking = lib.GrB_BLOCKING if blocking else lib.GrB_NONBLOCKING 86 | memory_manager = memory_manager.lower() 87 | if memory_manager == "numpy": 88 | utils.call_gxb_init(ffi, lib, blocking) 89 | elif memory_manager == "c": 90 | lib.GrB_init(blocking) 91 | else: 92 | raise ValueError(f'memory_manager argument must be "numpy" or "c"; got: {memory_manager!r}') 93 | # See: https://github.com/GraphBLAS/python-suitesparse-graphblas/issues/40 94 | for attr in dir(lib): 95 | getattr(lib, attr) 96 | 97 | 98 | def libget(name): 99 | """Helper to get items from GraphBLAS which might be GrB or GxB""" 100 | try: 101 | return getattr(lib, name) 102 | except AttributeError: 103 | ext_name = f"GxB_{name[4:]}" 104 | try: 105 | return getattr(lib, ext_name) 106 | except AttributeError: 107 | pass 108 | raise 109 | 110 | 111 | bool_types = frozenset((lib.GrB_BOOL,)) 112 | 113 | signed_integer_types = frozenset( 114 | ( 115 | lib.GrB_INT8, 116 | lib.GrB_INT16, 117 | lib.GrB_INT32, 118 | lib.GrB_INT64, 119 | ) 120 | ) 121 | 122 | unsigned_integer_types = frozenset( 123 | ( 124 | lib.GrB_UINT8, 125 | lib.GrB_UINT16, 126 | lib.GrB_UINT32, 127 | lib.GrB_UINT64, 128 | ) 129 | ) 130 | 131 | integer_types = signed_integer_types | unsigned_integer_types 132 | 133 | real_types = frozenset( 134 | ( 135 | lib.GrB_FP32, 136 | lib.GrB_FP64, 137 | ) 138 | ) 139 | 140 | if supports_complex(): 141 | complex_types = frozenset( 142 | ( 143 | lib.GxB_FC32, 144 | lib.GxB_FC64, 145 | ) 146 | ) 147 | else: 148 | complex_types = frozenset() 149 | 150 | 151 | grb_types = bool_types | integer_types | real_types | complex_types 152 | 153 | 154 | _error_code_lookup = { 155 | # Warning 156 | lib.GrB_NO_VALUE: ex.NoValue, 157 | # API Errors 158 | lib.GrB_UNINITIALIZED_OBJECT: ex.UninitializedObject, 159 | lib.GrB_INVALID_OBJECT: ex.InvalidObject, 160 | lib.GrB_NULL_POINTER: ex.NullPointer, 161 | lib.GrB_INVALID_VALUE: ex.InvalidValue, 162 | lib.GrB_INVALID_INDEX: 
ex.InvalidIndex, 163 | lib.GrB_DOMAIN_MISMATCH: ex.DomainMismatch, 164 | lib.GrB_DIMENSION_MISMATCH: ex.DimensionMismatch, 165 | lib.GrB_OUTPUT_NOT_EMPTY: ex.OutputNotEmpty, 166 | lib.GrB_EMPTY_OBJECT: ex.EmptyObject, 167 | # Execution Errors 168 | lib.GrB_OUT_OF_MEMORY: ex.OutOfMemory, 169 | lib.GrB_INSUFFICIENT_SPACE: ex.InsufficientSpace, 170 | lib.GrB_INDEX_OUT_OF_BOUNDS: ex.IndexOutOfBound, 171 | lib.GrB_PANIC: ex.Panic, 172 | lib.GrB_NOT_IMPLEMENTED: ex.NotImplementedException, 173 | # GxB Errors 174 | lib.GxB_EXHAUSTED: StopIteration, 175 | lib.GxB_JIT_ERROR: ex.JitError, 176 | } 177 | GrB_SUCCESS = lib.GrB_SUCCESS 178 | GrB_NO_VALUE = lib.GrB_NO_VALUE 179 | 180 | 181 | _error_func_lookup = { 182 | "struct GB_Type_opaque *": lib.GrB_Type_error, 183 | "struct GB_UnaryOp_opaque *": lib.GrB_UnaryOp_error, 184 | "struct GB_BinaryOp_opaque *": lib.GrB_BinaryOp_error, 185 | "struct GB_Monoid_opaque *": lib.GrB_Monoid_error, 186 | "struct GB_Semiring_opaque *": lib.GrB_Semiring_error, 187 | "struct GB_Scalar_opaque *": lib.GxB_Scalar_error, 188 | "struct GB_Matrix_opaque *": lib.GrB_Matrix_error, 189 | "struct GB_Vector_opaque *": lib.GrB_Vector_error, 190 | "struct GB_Descriptor_opaque *": lib.GrB_Descriptor_error, 191 | } 192 | 193 | 194 | def check_status(obj, response_code): 195 | """Check the return code of the GraphBLAS function. 196 | 197 | If the operation was successful, return None. 198 | 199 | If the operation returned no value return `exceptions.NoValue`. 200 | 201 | Otherwise it is an error, lookup the exception and the error 202 | description, and throw the exception. 
203 | 204 | """ 205 | if response_code == GrB_SUCCESS: 206 | return 207 | if response_code == GrB_NO_VALUE: 208 | return ex.NoValue 209 | 210 | if ffi.typeof(obj).item.kind == "pointer": 211 | obj = obj[0] 212 | 213 | cname = ffi.typeof(obj).cname 214 | error_func = _error_func_lookup.get(cname) 215 | if error_func is None: 216 | raise TypeError(f"Unknown cname {cname} looking up error string.") 217 | 218 | string = ffi.new("char**") 219 | error_func(string, obj) 220 | text = ffi.string(string[0]).decode() 221 | raise _error_code_lookup[response_code](text) 222 | 223 | 224 | class burble: 225 | """Control diagnostic output, and may be used as a context manager. 226 | 227 | Set up and simple usage: 228 | 229 | >>> from suitesparse_graphblas import burble, lib, matrix 230 | >>> 231 | >>> A = matrix.new(lib.GrB_BOOL, 3, 3) 232 | >>> burble.is_enabled 233 | False 234 | >>> burble.enable() 235 | >>> burble.is_enabled 236 | True 237 | >>> burble.disable() 238 | 239 | Example with explicit enable and disable: 240 | 241 | >>> burble.enable() 242 | >>> n = matrix.nvals(A) 243 | [ GrB_Matrix_nvals 244 | 1.91e-06 sec ] 245 | >>> burble.disable() 246 | 247 | Example as a context manager: 248 | 249 | >>> with burble(): 250 | >>> n = matrix.nvals(A) 251 | [ GrB_Matrix_nvals 252 | 1.91e-06 sec ] 253 | 254 | """ 255 | 256 | def __init__(self): 257 | self._states = [] 258 | 259 | @property 260 | def is_enabled(self): 261 | """Is burble enabled?""" 262 | val_ptr = ffi.new("int32_t*") 263 | info = lib.GxB_Global_Option_get_INT32(lib.GxB_BURBLE, val_ptr) 264 | if info != lib.GrB_SUCCESS: 265 | raise _error_code_lookup[info]( 266 | "Failed to get burble status (has GraphBLAS been initialized?" 
267 | ) 268 | return val_ptr[0] 269 | 270 | def enable(self): 271 | """Enable diagnostic output""" 272 | info = lib.GxB_Global_Option_set_INT32(lib.GxB_BURBLE, ffi.cast("int32_t", 1)) 273 | if info != lib.GrB_SUCCESS: 274 | raise _error_code_lookup[info]( 275 | "Failed to enable burble (has GraphBLAS been initialized?" 276 | ) 277 | 278 | def disable(self): 279 | """Disable diagnostic output""" 280 | info = lib.GxB_Global_Option_set_INT32(lib.GxB_BURBLE, ffi.cast("int32_t", 0)) 281 | if info != lib.GrB_SUCCESS: 282 | raise _error_code_lookup[info]( 283 | "Failed to disable burble (has GraphBLAS been initialized?" 284 | ) 285 | 286 | def __enter__(self): 287 | is_enabled = self.is_enabled 288 | if not is_enabled: 289 | self.enable() 290 | self._states.append(is_enabled) 291 | return self 292 | 293 | def __exit__(self, type_, value, traceback): 294 | is_enabled = self._states.pop() 295 | if not is_enabled: 296 | self.disable() 297 | 298 | def __reduce__(self): 299 | return "burble" 300 | 301 | def __repr__(self): 302 | return f"" 303 | 304 | 305 | burble = burble() 306 | -------------------------------------------------------------------------------- /suitesparse_graphblas/create_headers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script to generate suitesparse_graphblas.h, suitesparse_graphblas_no_complex.h, and source.c files. 3 | 4 | - Copy the SuiteSparse header file GraphBLAS.h to the local directory. 5 | - Run the C preprocessor (cleans it up, but also loses #define values). 6 | - Parse the processed header file using pycparser. 7 | - Create the final files with and without complex types. 8 | - Check #define values for sanity. 9 | 10 | The generated files are then used by cffi to bind to SuiteSparse:GraphBLAS. 
11 | 12 | When running against new versions of SuiteSparse:GraphBLAS, the most likely 13 | things that may need to change are: 14 | 15 | - Update DEFINES, the integer #define constants defined by SuiteSparse. 16 | - Update CHAR_DEFINES, the char* #defines. 17 | - Update IGNORE_DEFINES, #defines that the script may mistakingly identity, 18 | but that we can safely ignore. 19 | - Update DEPRECATED: deprecated names (including enum fields) to exclude. 20 | 21 | Run `python create_headers.py --help` to see more help. 22 | 23 | """ 24 | 25 | import argparse 26 | import os 27 | import re 28 | import shutil 29 | import subprocess 30 | import sys 31 | 32 | import pycparser 33 | from pycparser import c_ast, c_generator, parse_file 34 | 35 | 36 | def sort_key(string): 37 | """e.g., sort 'INT8' before 'INT16'""" 38 | return string.replace("8", "08") 39 | 40 | 41 | def has_complex(string): 42 | return "FC32" in string or "FC64" in string 43 | 44 | 45 | def groupby(index, seq): 46 | rv = {} 47 | for item in seq: 48 | key = item[index] 49 | if key in rv: 50 | rv[key].append(item) 51 | else: 52 | rv[key] = [item] 53 | return rv 54 | 55 | 56 | AUTO = "/* This file is automatically generated */" 57 | 58 | DEPRECATED = { 59 | # Strongly deprecated in SuiteSparse:GraphBLAS 10; will be removed in 11 60 | "GrB_Field", 61 | # enums 62 | "GxB_IS_HYPER", 63 | "GrB_SCMP", 64 | # functions 65 | "GxB_kron", 66 | "GxB_Matrix_resize", 67 | "GxB_Vector_resize", 68 | # UnaryOp 69 | "GxB_ABS_BOOL", 70 | "GxB_ABS_INT8", 71 | "GxB_ABS_INT16", 72 | "GxB_ABS_INT32", 73 | "GxB_ABS_INT64", 74 | "GxB_ABS_UINT8", 75 | "GxB_ABS_UINT16", 76 | "GxB_ABS_UINT32", 77 | "GxB_ABS_UINT64", 78 | "GxB_ABS_FP32", 79 | "GxB_ABS_FP64", 80 | # Monoids 81 | "GxB_MIN_INT8_MONOID", 82 | "GxB_MIN_INT16_MONOID", 83 | "GxB_MIN_INT32_MONOID", 84 | "GxB_MIN_INT64_MONOID", 85 | "GxB_MIN_UINT8_MONOID", 86 | "GxB_MIN_UINT16_MONOID", 87 | "GxB_MIN_UINT32_MONOID", 88 | "GxB_MIN_UINT64_MONOID", 89 | "GxB_MIN_FP32_MONOID", 90 | 
"GxB_MIN_FP64_MONOID", 91 | "GxB_MAX_INT8_MONOID", 92 | "GxB_MAX_INT16_MONOID", 93 | "GxB_MAX_INT32_MONOID", 94 | "GxB_MAX_INT64_MONOID", 95 | "GxB_MAX_UINT8_MONOID", 96 | "GxB_MAX_UINT16_MONOID", 97 | "GxB_MAX_UINT32_MONOID", 98 | "GxB_MAX_UINT64_MONOID", 99 | "GxB_MAX_FP32_MONOID", 100 | "GxB_MAX_FP64_MONOID", 101 | "GxB_PLUS_INT8_MONOID", 102 | "GxB_PLUS_INT16_MONOID", 103 | "GxB_PLUS_INT32_MONOID", 104 | "GxB_PLUS_INT64_MONOID", 105 | "GxB_PLUS_UINT8_MONOID", 106 | "GxB_PLUS_UINT16_MONOID", 107 | "GxB_PLUS_UINT32_MONOID", 108 | "GxB_PLUS_UINT64_MONOID", 109 | "GxB_PLUS_FP32_MONOID", 110 | "GxB_PLUS_FP64_MONOID", 111 | "GxB_TIMES_INT8_MONOID", 112 | "GxB_TIMES_INT16_MONOID", 113 | "GxB_TIMES_INT32_MONOID", 114 | "GxB_TIMES_INT64_MONOID", 115 | "GxB_TIMES_UINT8_MONOID", 116 | "GxB_TIMES_UINT16_MONOID", 117 | "GxB_TIMES_UINT32_MONOID", 118 | "GxB_TIMES_UINT64_MONOID", 119 | "GxB_TIMES_FP32_MONOID", 120 | "GxB_TIMES_FP64_MONOID", 121 | "GxB_LOR_BOOL_MONOID", 122 | "GxB_LAND_BOOL_MONOID", 123 | "GxB_LXOR_BOOL_MONOID", 124 | "GxB_LXNOR_BOOL_MONOID", 125 | # "GxB_EQ_BOOL_MONOID", # XXX: I prefer this name to GrB_LXNOR_MONOID_BOOL 126 | # Semirings 127 | "GxB_PLUS_TIMES_INT8", 128 | "GxB_PLUS_TIMES_INT16", 129 | "GxB_PLUS_TIMES_INT32", 130 | "GxB_PLUS_TIMES_INT64", 131 | "GxB_PLUS_TIMES_UINT8", 132 | "GxB_PLUS_TIMES_UINT16", 133 | "GxB_PLUS_TIMES_UINT32", 134 | "GxB_PLUS_TIMES_UINT64", 135 | "GxB_PLUS_TIMES_FP32", 136 | "GxB_PLUS_TIMES_FP64", 137 | "GxB_PLUS_MIN_INT8", 138 | "GxB_PLUS_MIN_INT16", 139 | "GxB_PLUS_MIN_INT32", 140 | "GxB_PLUS_MIN_INT64", 141 | "GxB_PLUS_MIN_UINT8", 142 | "GxB_PLUS_MIN_UINT16", 143 | "GxB_PLUS_MIN_UINT32", 144 | "GxB_PLUS_MIN_UINT64", 145 | "GxB_PLUS_MIN_FP32", 146 | "GxB_PLUS_MIN_FP64", 147 | "GxB_MIN_PLUS_INT8", 148 | "GxB_MIN_PLUS_INT16", 149 | "GxB_MIN_PLUS_INT32", 150 | "GxB_MIN_PLUS_INT64", 151 | "GxB_MIN_PLUS_UINT8", 152 | "GxB_MIN_PLUS_UINT16", 153 | "GxB_MIN_PLUS_UINT32", 154 | "GxB_MIN_PLUS_UINT64", 155 | "GxB_MIN_PLUS_FP32", 156 
| "GxB_MIN_PLUS_FP64", 157 | "GxB_MIN_TIMES_INT8", 158 | "GxB_MIN_TIMES_INT16", 159 | "GxB_MIN_TIMES_INT32", 160 | "GxB_MIN_TIMES_INT64", 161 | "GxB_MIN_TIMES_UINT8", 162 | "GxB_MIN_TIMES_UINT16", 163 | "GxB_MIN_TIMES_UINT32", 164 | "GxB_MIN_TIMES_UINT64", 165 | "GxB_MIN_TIMES_FP32", 166 | "GxB_MIN_TIMES_FP64", 167 | "GxB_MIN_FIRST_INT8", 168 | "GxB_MIN_FIRST_INT16", 169 | "GxB_MIN_FIRST_INT32", 170 | "GxB_MIN_FIRST_INT64", 171 | "GxB_MIN_FIRST_UINT8", 172 | "GxB_MIN_FIRST_UINT16", 173 | "GxB_MIN_FIRST_UINT32", 174 | "GxB_MIN_FIRST_UINT64", 175 | "GxB_MIN_FIRST_FP32", 176 | "GxB_MIN_FIRST_FP64", 177 | "GxB_MIN_SECOND_INT8", 178 | "GxB_MIN_SECOND_INT16", 179 | "GxB_MIN_SECOND_INT32", 180 | "GxB_MIN_SECOND_INT64", 181 | "GxB_MIN_SECOND_UINT8", 182 | "GxB_MIN_SECOND_UINT16", 183 | "GxB_MIN_SECOND_UINT32", 184 | "GxB_MIN_SECOND_UINT64", 185 | "GxB_MIN_SECOND_FP32", 186 | "GxB_MIN_SECOND_FP64", 187 | "GxB_MIN_MAX_INT8", 188 | "GxB_MIN_MAX_INT16", 189 | "GxB_MIN_MAX_INT32", 190 | "GxB_MIN_MAX_INT64", 191 | "GxB_MIN_MAX_UINT8", 192 | "GxB_MIN_MAX_UINT16", 193 | "GxB_MIN_MAX_UINT32", 194 | "GxB_MIN_MAX_UINT64", 195 | "GxB_MIN_MAX_FP32", 196 | "GxB_MIN_MAX_FP64", 197 | "GxB_MAX_PLUS_INT8", 198 | "GxB_MAX_PLUS_INT16", 199 | "GxB_MAX_PLUS_INT32", 200 | "GxB_MAX_PLUS_INT64", 201 | "GxB_MAX_PLUS_UINT8", 202 | "GxB_MAX_PLUS_UINT16", 203 | "GxB_MAX_PLUS_UINT32", 204 | "GxB_MAX_PLUS_UINT64", 205 | "GxB_MAX_PLUS_FP32", 206 | "GxB_MAX_PLUS_FP64", 207 | "GxB_MAX_TIMES_INT8", 208 | "GxB_MAX_TIMES_INT16", 209 | "GxB_MAX_TIMES_INT32", 210 | "GxB_MAX_TIMES_INT64", 211 | "GxB_MAX_TIMES_UINT8", 212 | "GxB_MAX_TIMES_UINT16", 213 | "GxB_MAX_TIMES_UINT32", 214 | "GxB_MAX_TIMES_UINT64", 215 | "GxB_MAX_TIMES_FP32", 216 | "GxB_MAX_TIMES_FP64", 217 | "GxB_MAX_FIRST_INT8", 218 | "GxB_MAX_FIRST_INT16", 219 | "GxB_MAX_FIRST_INT32", 220 | "GxB_MAX_FIRST_INT64", 221 | "GxB_MAX_FIRST_UINT8", 222 | "GxB_MAX_FIRST_UINT16", 223 | "GxB_MAX_FIRST_UINT32", 224 | "GxB_MAX_FIRST_UINT64", 225 | 
"GxB_MAX_FIRST_FP32", 226 | "GxB_MAX_FIRST_FP64", 227 | "GxB_MAX_SECOND_INT8", 228 | "GxB_MAX_SECOND_INT16", 229 | "GxB_MAX_SECOND_INT32", 230 | "GxB_MAX_SECOND_INT64", 231 | "GxB_MAX_SECOND_UINT8", 232 | "GxB_MAX_SECOND_UINT16", 233 | "GxB_MAX_SECOND_UINT32", 234 | "GxB_MAX_SECOND_UINT64", 235 | "GxB_MAX_SECOND_FP32", 236 | "GxB_MAX_SECOND_FP64", 237 | "GxB_MAX_MIN_INT8", 238 | "GxB_MAX_MIN_INT16", 239 | "GxB_MAX_MIN_INT32", 240 | "GxB_MAX_MIN_INT64", 241 | "GxB_MAX_MIN_UINT8", 242 | "GxB_MAX_MIN_UINT16", 243 | "GxB_MAX_MIN_UINT32", 244 | "GxB_MAX_MIN_UINT64", 245 | "GxB_MAX_MIN_FP32", 246 | "GxB_MAX_MIN_FP64", 247 | "GxB_LOR_LAND_BOOL", 248 | "GxB_LAND_LOR_BOOL", 249 | "GxB_LXOR_LAND_BOOL", 250 | # "GxB_EQ_LOR_BOOL", # XXX: I prefer this name to GrB_LXNOR_LOR_SEMIRING_BOOL 251 | # Old deprecated (probably already removed) 252 | "GrB_eWiseMult_Vector_Semiring", 253 | "GrB_eWiseMult_Vector_Monoid", 254 | "GrB_eWiseMult_Vector_BinaryOp", 255 | "GrB_eWiseMult_Matrix_Semiring", 256 | "GrB_eWiseMult_Matrix_Monoid", 257 | "GrB_eWiseMult_Matrix_BinaryOp", 258 | "GrB_eWiseAdd_Vector_Semiring", 259 | "GrB_eWiseAdd_Vector_Monoid", 260 | "GrB_eWiseAdd_Vector_BinaryOp", 261 | "GrB_eWiseAdd_Matrix_Semiring", 262 | "GrB_eWiseAdd_Matrix_Monoid", 263 | "GrB_eWiseAdd_Matrix_BinaryOp", 264 | } 265 | 266 | DEFINES = { 267 | "GrB_INDEX_MAX", 268 | "GxB_STDC_VERSION", 269 | "GxB_IMPLEMENTATION_MAJOR", 270 | "GxB_IMPLEMENTATION_MINOR", 271 | "GxB_IMPLEMENTATION_SUB", 272 | "GxB_SPEC_MAJOR", 273 | "GxB_SPEC_MINOR", 274 | "GxB_SPEC_SUB", 275 | "GxB_IMPLEMENTATION", 276 | "GxB_SPEC_VERSION", 277 | "GxB_INDEX_MAX", 278 | "GRB_VERSION", 279 | "GRB_SUBVERSION", 280 | "GxB_NTHREADS", 281 | "GxB_CHUNK", 282 | "GxB_GPU_ID", 283 | "GxB_HYPERSPARSE", 284 | "GxB_SPARSE", 285 | "GxB_BITMAP", 286 | "GxB_FULL", 287 | "GxB_NBITMAP_SWITCH", 288 | "GxB_ANY_SPARSITY", 289 | "GxB_AUTO_SPARSITY", 290 | "GxB_RANGE", 291 | "GxB_STRIDE", 292 | "GxB_BACKWARDS", 293 | "GxB_BEGIN", 294 | "GxB_END", 295 | 
"GxB_INC", 296 | "GxB_FAST_IMPORT", 297 | "GxB_MAX_NAME_LEN", 298 | "GxB_COMPRESSION_DEFAULT", 299 | "GxB_COMPRESSION_LZ4", 300 | "GxB_COMPRESSION_LZ4HC", 301 | "GxB_COMPRESSION_ZSTD", 302 | "GxB_COMPRESSION_NONE", 303 | "GxB_USE_VALUES", 304 | } 305 | 306 | CHAR_DEFINES = { 307 | "GxB_IMPLEMENTATION_NAME", 308 | "GxB_IMPLEMENTATION_DATE", 309 | "GxB_SPEC_DATE", 310 | "GxB_IMPLEMENTATION_ABOUT", 311 | "GxB_IMPLEMENTATION_LICENSE", 312 | "GxB_SPEC_ABOUT", 313 | } 314 | 315 | IGNORE_DEFINES = { 316 | "GrB", 317 | "GxB", 318 | "CMPLX", 319 | "CMPLXF", 320 | "GB_GLOBAL", 321 | "GB_HAS_CMPLX_MACROS", 322 | "GB_PUBLIC", 323 | "GB_restrict", 324 | "GRAPHBLAS_H", 325 | "GrB_INVALID_HANDLE", 326 | "GrB_NULL", 327 | "GxB_SUITESPARSE_GRAPHBLAS", 328 | "NMACRO", 329 | "RMM_WRAP_H", 330 | "GXB_COMPLEX_H", 331 | "GxB_STATIC_INLINE_VOID", 332 | "GxB_HAVE_COMPLEX_C99", 333 | # deprecated 334 | "GxB_HYPER", 335 | } 336 | 337 | IGNORE_LINES = { 338 | "GxB_cuda_calloc", 339 | "GxB_cuda_malloc", 340 | "GxB_cuda_free", 341 | } 342 | IGNORE_ENUMS = { 343 | "memory_order", 344 | "RMM_MODE", 345 | } 346 | 347 | 348 | class VisitEnumTypedef(c_generator.CGenerator): 349 | def __init__(self, *args, **kwargs): 350 | super().__init__(*args, **kwargs) 351 | self.results = [] 352 | 353 | def visit_Typedef(self, node): 354 | rv = super().visit_Typedef(node) 355 | if isinstance(node.type.type, c_ast.Enum): 356 | self.results.append(rv + ";") 357 | return rv 358 | 359 | 360 | class VisitStruct(c_generator.CGenerator): 361 | def __init__(self, *args, **kwargs): 362 | super().__init__(*args, **kwargs) 363 | self.results = [] 364 | 365 | def visit_Struct(self, node): 366 | rv = super().visit_Struct(node) 367 | if ( 368 | ("GxB_" in node.name or "GrB_" in node.name) 369 | and "_struct" in node.name 370 | and node.decls is not None 371 | ): 372 | self.results.append(rv + ";") 373 | return rv 374 | 375 | 376 | def get_ast(filename): 377 | fake_include = os.path.dirname(pycparser.__file__) + 
"utils/fake_libc_include" 378 | ast = parse_file(filename, cpp_args=f"-I{fake_include}") 379 | return ast 380 | 381 | 382 | def get_groups(ast): 383 | generator = c_generator.CGenerator() 384 | lines = generator.visit(ast).splitlines() 385 | 386 | seen = set() 387 | groups = {} 388 | vals = {x for x in lines if "GrB_Info GxB" in x} - seen 389 | vals |= {x for x in lines if "GxB_Iterator" in x and "GB" not in x} - seen 390 | seen.update(vals) 391 | groups["GxB methods"] = sorted(vals, key=sort_key) 392 | 393 | vals = {x for x in lines if "GrB_Info GrB" in x} - seen 394 | seen.update(vals) 395 | groups["GrB methods"] = sorted(vals, key=sort_key) 396 | 397 | vals = {x for x in lines if "GrB_Info GB" in x} - seen 398 | vals |= {x for x in lines if "GxB_Iterator" in x and "GB" in x and "typedef" not in x} - seen 399 | seen.update(vals) 400 | groups["GB methods"] = sorted(vals, key=sort_key) 401 | 402 | missing_methods = {x for x in lines if "GrB_Info " in x} - seen 403 | assert not missing_methods, ", ".join(sorted(missing_methods)) 404 | 405 | vals = {x for x in lines if "extern GrB" in x} - seen 406 | seen.update(vals) 407 | groups["GrB objects"] = sorted(vals, key=sort_key) 408 | 409 | vals = {x for x in lines if "extern GxB" in x} - seen 410 | seen.update(vals) 411 | groups["GxB objects"] = sorted(vals, key=sort_key) 412 | 413 | vals = {x for x in lines if "extern const" in x and "GxB" in x} - seen 414 | seen.update(vals) 415 | groups["GxB const"] = sorted(vals, key=sort_key) 416 | 417 | vals = {x for x in lines if "extern const" in x and "GrB" in x} - seen 418 | seen.update(vals) 419 | groups["GrB const"] = sorted(vals, key=sort_key) 420 | 421 | missing_const = {x for x in lines if "extern const" in x} - seen 422 | assert not missing_const, ", ".join(sorted(missing_const)) 423 | 424 | vals = {x for x in lines if "typedef" in x and "GxB" in x and "(" not in x} - seen 425 | seen.update(vals) 426 | groups["GxB typedef"] = sorted(vals, key=sort_key) 427 | 428 | vals = 
{x for x in lines if "typedef" in x and "GrB" in x and "(" not in x} - seen 429 | seen.update(vals) 430 | groups["GrB typedef"] = sorted(vals, key=sort_key) 431 | 432 | missing_typedefs = {x for x in lines if "typedef" in x and "GB" in x and "(" not in x} - seen 433 | assert not missing_typedefs, ", ".join(sorted(missing_typedefs)) 434 | assert all(x.endswith(";") for x in seen) # sanity check 435 | 436 | g = VisitEnumTypedef() 437 | _ = g.visit(ast) 438 | enums = g.results 439 | 440 | vals = {x for x in enums if "} GrB" in x} 441 | for val in vals: 442 | seen.update(val.splitlines()) 443 | groups["GrB typedef enums"] = sorted(vals, key=lambda x: sort_key(x.rsplit("}", 1)[-1])) 444 | 445 | vals = {x for x in enums if "} GxB" in x} 446 | for val in vals: 447 | seen.update(val.splitlines()) 448 | groups["GxB typedef enums"] = sorted(vals, key=lambda x: sort_key(x.rsplit("}", 1)[-1])) 449 | 450 | g = VisitStruct() 451 | _ = g.visit(ast) 452 | structs = g.results 453 | 454 | # No non-opaque GrB structs yet 455 | # vals = {x for x in structs if "struct GrB" in x} 456 | # for val in vals: 457 | # seen.update(val.splitlines()) 458 | # groups["GrB struct"] = sorted(vals) 459 | 460 | vals = {x for x in structs if "struct GxB" in x} 461 | for val in vals: 462 | seen.update(val.splitlines()) 463 | groups["GxB struct"] = sorted(vals) 464 | 465 | missing_enums = set(enums) - set(groups["GrB typedef enums"]) - set(groups["GxB typedef enums"]) 466 | missing_enums = {x for x in missing_enums if not any(y in x for y in IGNORE_ENUMS)} 467 | assert not missing_enums, ", ".join(sorted(missing_enums)) 468 | 469 | vals = {x for x in lines if "typedef" in x and "GxB" in x} - seen 470 | seen.update(vals) 471 | groups["GxB typedef funcs"] = sorted(vals, key=sort_key) 472 | 473 | vals = {x for x in lines if "typedef" in x and "GrB" in x} - seen 474 | assert not vals, ", ".join(sorted(vals)) 475 | groups["not seen"] = sorted(set(lines) - seen, key=sort_key) 476 | for group in groups["not 
seen"]: 477 | assert "extern" not in group, group 478 | 479 | unhandled = set() 480 | for line in groups["not seen"]: 481 | if "GrB" in line or "GxB" in line: 482 | for item in IGNORE_LINES: 483 | if item in line: 484 | break 485 | else: 486 | unhandled.add(line) 487 | if unhandled: 488 | raise ValueError( 489 | "\n===================================\n" 490 | "Unhandled functions with GrB or GxB\n" 491 | "-----------------------------------\n " 492 | + "\n ".join(sorted(unhandled)) 493 | + "\n===================================" 494 | ) 495 | return groups 496 | 497 | 498 | def get_group_info(groups, ast, *, skip_complex=False): 499 | rv = {} 500 | 501 | def handle_constants(group): 502 | for line in group: 503 | extern, const, ctype, name = line.split(" ") 504 | assert name.endswith(";") 505 | name = name[:-1].replace("(void)", "()") 506 | assert extern == "extern" 507 | assert const == "const" 508 | if name in DEPRECATED: 509 | continue 510 | if skip_complex and has_complex(line): 511 | continue 512 | info = { 513 | "text": line, 514 | } 515 | yield info 516 | 517 | rv["GrB const"] = list(handle_constants(groups["GrB const"])) 518 | rv["GxB const"] = list(handle_constants(groups["GxB const"])) 519 | 520 | def handle_objects(group): 521 | for line in group: 522 | extern, ctype, name = line.split(" ") 523 | assert name.endswith(";") 524 | name = name[:-1] 525 | assert extern == "extern" 526 | if name in DEPRECATED: 527 | continue 528 | if skip_complex and has_complex(line): 529 | continue 530 | info = { 531 | "text": line, 532 | } 533 | yield info 534 | 535 | rv["GrB objects"] = list(handle_objects(groups["GrB objects"])) 536 | rv["GxB objects"] = list(handle_objects(groups["GxB objects"])) 537 | 538 | def handle_enums(group): 539 | for text in group: 540 | text = text.replace("enum \n", "enum\n") 541 | typedef, bracket, *fields, name = text.splitlines() 542 | assert typedef.strip() == "typedef enum" 543 | assert bracket == "{" 544 | assert name.startswith("}") 545 
| assert name.endswith(";") 546 | name = name[1:-1].strip() 547 | if name in DEPRECATED: 548 | continue 549 | if skip_complex and has_complex(name): 550 | continue 551 | 552 | # Break this open so we can remove unwanted deprecated fields. 553 | # Instead of traversing the AST, munging string is good enough. 554 | typedef, bracket, *fields, cname = text.splitlines() 555 | typedef = typedef.strip() 556 | assert typedef.strip() == "typedef enum" 557 | assert bracket == "{" 558 | assert cname.startswith("}") 559 | assert cname.endswith(";") 560 | new_fields = [] 561 | for field in fields: 562 | if field.endswith(","): 563 | field = field[:-1] 564 | field = field.strip() 565 | cfieldname, eq, val = field.split(" ") 566 | assert eq == "=" 567 | if cfieldname in DEPRECATED: 568 | continue 569 | if skip_complex and has_complex(cfieldname): 570 | continue 571 | new_fields.append(field) 572 | if not new_fields: 573 | continue 574 | lines = [typedef, bracket] 575 | for field in new_fields: 576 | lines.append(f" {field},") 577 | lines[-1] = lines[-1][:-1] # remove last comma 578 | lines.append(cname) 579 | info = { 580 | "orig_text": text, 581 | "text": "\n".join(lines), 582 | } 583 | yield info 584 | 585 | rv["GrB typedef enums"] = list(handle_enums(groups["GrB typedef enums"])) 586 | rv["GxB typedef enums"] = list(handle_enums(groups["GxB typedef enums"])) 587 | 588 | def handle_typedefs(group): 589 | for line in group: 590 | typedef, *ctypes, name = line.split(" ") 591 | assert typedef == "typedef" 592 | assert name.endswith(";") 593 | name = name[:-1] 594 | if name in DEPRECATED: 595 | continue 596 | if skip_complex and has_complex(line): 597 | continue 598 | info = { 599 | "text": line, 600 | } 601 | yield info 602 | 603 | rv["GrB typedef"] = list(handle_typedefs(groups["GrB typedef"])) 604 | rv["GxB typedef"] = list(handle_typedefs(groups["GxB typedef"])) 605 | 606 | def handle_typedef_funcs(group): 607 | for line in group: 608 | assert line.endswith(";") and 
line.startswith("typedef") 609 | if skip_complex and has_complex(line): 610 | continue 611 | info = { 612 | "text": line, 613 | } 614 | yield info 615 | 616 | rv["GxB typedef funcs"] = list(handle_typedef_funcs(groups["GxB typedef funcs"])) 617 | 618 | def handle_structs(group): 619 | for text in group: 620 | yield {"text": text} 621 | 622 | rv["GxB struct"] = list(handle_structs(groups["GxB struct"])) 623 | 624 | class FuncDeclVisitor(c_ast.NodeVisitor): 625 | def __init__(self): 626 | self.functions = [] 627 | 628 | def visit_Decl(self, node): 629 | if isinstance(node.type, c_ast.FuncDecl) and node.storage == []: 630 | self.functions.append(node) 631 | 632 | def handle_function_node(node): 633 | if generator.visit(node.type.type) != "GrB_Info" and "GxB_Iterator" not in generator.visit( 634 | node 635 | ): 636 | raise ValueError(generator.visit(node)) 637 | if node.name in DEPRECATED: 638 | return 639 | text = generator.visit(node) 640 | text += ";" 641 | if skip_complex and has_complex(text): 642 | return 643 | if "GrB_Matrix" in text: 644 | group = "matrix" 645 | elif "GrB_Vector" in text: 646 | group = "vector" 647 | elif "GxB_Scalar" in text or "GrB_Scalar" in text: 648 | group = "scalar" 649 | elif "GxB_Iterator" in text: 650 | group = "iterator" 651 | else: 652 | group = node.name.split("_", 2)[1] 653 | group = { 654 | # Apply our naming scheme 655 | "GrB_Matrix": "matrix", 656 | "Matrix": "matrix", 657 | "GrB_Vector": "vector", 658 | "GxB_Scalar": "scalar", 659 | "SelectOp": "selectop", 660 | "BinaryOp": "binary", 661 | "Desc": "descriptor", 662 | "Descriptor": "descriptor", 663 | "Monoid": "monoid", 664 | "Semiring": "semiring", 665 | "Type": "type", 666 | "UnaryOp": "unary", 667 | "IndexUnaryOp": "indexunary", 668 | "IndexBinaryOp": "indexbinary", 669 | "Iterator": "iterator", 670 | "Context": "context", 671 | "Container": "container", 672 | # "everything else" is "core" 673 | "getVersion": "core", 674 | "Global": "core", 675 | "cuda": "core", 676 | 
"finalize": "core", 677 | "init": "core", 678 | "wait": "core", 679 | "deserialize": "core", 680 | "Serialized": "core", # Added in version 9 681 | }[group] 682 | return { 683 | "name": node.name, 684 | "group": group, 685 | "node": node, 686 | "text": text, 687 | } 688 | 689 | generator = c_generator.CGenerator() 690 | visitor = FuncDeclVisitor() 691 | visitor.visit(ast) 692 | grb_nodes = [node for node in visitor.functions if node.name.startswith("GrB_")] 693 | gxb_nodes = [node for node in visitor.functions if node.name.startswith("GxB_")] 694 | gb_nodes = [node for node in visitor.functions if node.name.startswith("GB_")] 695 | assert len(grb_nodes) == len(groups["GrB methods"]), ( 696 | len(grb_nodes), 697 | len(groups["GrB methods"]), 698 | ) 699 | 700 | # Temporary hack for v10.0.1, which duplicates `GxB_Serialized_get_Scalar` 701 | temp_seen = set() 702 | gxb_nodes = [ 703 | temp_seen.add(node.name) or node for node in gxb_nodes if node.name not in temp_seen 704 | ] 705 | 706 | assert len(gxb_nodes) == len(groups["GxB methods"]), ( 707 | len(gxb_nodes), 708 | len(groups["GxB methods"]), 709 | ) 710 | assert len(gb_nodes) == len(groups["GB methods"]), (len(gb_nodes), len(groups["GB methods"])) 711 | 712 | grb_funcs = (handle_function_node(node) for node in grb_nodes) 713 | gxb_funcs = (handle_function_node(node) for node in gxb_nodes) 714 | gb_funcs = (handle_function_node(node) for node in gb_nodes) 715 | grb_funcs = [x for x in grb_funcs if x is not None] 716 | gxb_funcs = [x for x in gxb_funcs if x is not None] 717 | gb_funcs = [x for x in gb_funcs if x is not None] 718 | 719 | rv["GrB methods"] = sorted(grb_funcs, key=lambda x: sort_key(x["text"])) 720 | rv["GxB methods"] = sorted(gxb_funcs, key=lambda x: sort_key(x["text"])) 721 | rv["GB methods"] = sorted(gb_funcs, key=lambda x: sort_key(x["text"])) 722 | for key in groups.keys() - rv.keys(): 723 | rv[key] = groups[key] 724 | return rv 725 | 726 | 727 | def parse_header(filename, *, 
skip_complex=False): 728 | ast = get_ast(filename) 729 | groups = get_groups(ast) 730 | return get_group_info(groups, ast, skip_complex=skip_complex) 731 | 732 | 733 | def create_header_text(groups, *, char_defines=None, defines=None): 734 | if char_defines is None: 735 | char_defines = CHAR_DEFINES 736 | if defines is None: 737 | defines = DEFINES 738 | 739 | text = [AUTO] 740 | text.append("/* GrB typedefs */") 741 | for group in groups["GrB typedef"]: 742 | text.append(group["text"]) 743 | text.append("") 744 | text.append("/* GxB typedefs */") 745 | for group in groups["GxB typedef"]: 746 | text.append(group["text"]) 747 | text.append("") 748 | text.append("/* GxB typedefs (functions) */") 749 | for group in groups["GxB typedef funcs"]: 750 | text.append(group["text"]) 751 | text.append("") 752 | text.append("/* GxB structs */") 753 | for group in groups["GxB struct"]: 754 | text.append(group["text"]) 755 | text.append("") 756 | text.append("/* GrB enums */") 757 | for group in groups["GrB typedef enums"]: 758 | text.append(group["text"]) 759 | text.append("") 760 | text.append("/* GxB enums */") 761 | for group in groups["GxB typedef enums"]: 762 | text.append(group["text"]) 763 | text.append("") 764 | text.append("/* GrB consts */") 765 | for group in groups["GrB const"]: 766 | text.append(group["text"]) 767 | text.append("") 768 | text.append("/* GxB consts */") 769 | for group in groups["GxB const"]: 770 | text.append(group["text"]) 771 | text.append("") 772 | text.append("/* GrB objects */") 773 | for group in groups["GrB objects"]: 774 | if "GxB" not in group["text"]: 775 | text.append(group["text"]) 776 | text.append("") 777 | text.append("/* GrB objects (extended) */") 778 | for group in groups["GrB objects"]: 779 | if "GxB" in group["text"]: 780 | text.append(group["text"]) 781 | text.append("") 782 | text.append("/* GxB objects */") 783 | for group in groups["GxB objects"]: 784 | text.append(group["text"]) 785 | 786 | def handle_funcs(group): 787 | 
groups = groupby("group", group) 788 | for name in sorted(groups, key=sort_key): 789 | yield "" 790 | yield f"/* {name} */" 791 | for info in groups[name]: 792 | yield info["text"] 793 | 794 | text.append("") 795 | text.append("/****************") 796 | text.append("* GrB functions *") 797 | text.append("****************/") 798 | text.extend(handle_funcs(groups["GrB methods"])) 799 | 800 | text.append("") 801 | text.append("/***************") 802 | text.append("* GB functions *") 803 | text.append("***************/") 804 | text.extend(handle_funcs(groups["GB methods"])) 805 | 806 | text.append("") 807 | text.append("/****************") 808 | text.append("* GxB functions *") 809 | text.append("****************/") 810 | text.extend(handle_funcs(groups["GxB methods"])) 811 | 812 | text.append("") 813 | text.append("/* int DEFINES */") 814 | for item in sorted(defines, key=sort_key): 815 | text.append(f"#define {item} ...") 816 | 817 | text.append("") 818 | text.append("/* char* DEFINES */") 819 | for item in sorted(char_defines, key=sort_key): 820 | text.append(f"extern char *{item}_STR;") 821 | return text 822 | 823 | 824 | def create_source_text(groups, *, char_defines=None): 825 | if char_defines is None: 826 | char_defines = CHAR_DEFINES 827 | text = [ 828 | AUTO, 829 | '#include "GraphBLAS.h"', 830 | ] 831 | for item in sorted(char_defines, key=sort_key): 832 | text.append(f"char *{item}_STR = {item};") 833 | return text 834 | 835 | 836 | def main(): 837 | parser = argparse.ArgumentParser() 838 | parser.add_argument( 839 | "--graphblas", 840 | help="Path to GraphBLAS.h of SuiteSparse. 
Default will look in Python prefix path.",
        default=os.path.join(sys.prefix, "include", "suitesparse", "GraphBLAS.h"),
    )
    parser.add_argument(
        "--show-skipped",
        action="store_true",
        help="If specified, then print the lines that were skipped when parsing the header file.",
    )
    args = parser.parse_args()

    thisdir = os.path.dirname(__file__)
    # copy the original to this file
    graphblas_h = os.path.join(thisdir, "GraphBLAS-orig.h")
    # after the preprocessor
    processed_h = os.path.join(thisdir, "GraphBLAS-processed.h")

    # final files used by cffi (with and without complex numbers)
    final_h = os.path.join(thisdir, "suitesparse_graphblas.h")
    # final_arm64_h = os.path.join(thisdir, "suitesparse_graphblas_arm64.h")
    final_no_complex_h = os.path.join(thisdir, "suitesparse_graphblas_no_complex.h")
    source_c = os.path.join(thisdir, "source.c")

    # Copy original file
    print(f"Step 1: copy {args.graphblas} to {graphblas_h}")
    if not os.path.exists(args.graphblas):
        raise FileNotFoundError(f"File not found: {args.graphblas}")
    shutil.copyfile(args.graphblas, graphblas_h)

    # Run it through the preprocessor
    # NOTE(review): the command string is built from local paths and run with
    # shell=True; acceptable for a developer-only code-generation script.
    print(f"Step 2: run preprocessor to create {processed_h}")
    include = os.path.join(os.path.dirname(pycparser.__file__), "utils", "fake_libc_include")
    command = (
        f"gcc -nostdinc -E -I{include} {graphblas_h} "
        f"| sed 's/ complex / _Complex /g' > {processed_h}"
    )
    res = subprocess.run(command, shell=True)
    if res.returncode != 0:
        raise RuntimeError("Subprocess command failed", res)

    # Create final header file
    print(f"Step 3: parse header file to create {final_h}")
    groups = parse_header(processed_h, skip_complex=False)
    text = create_header_text(groups)
    with open(final_h, "w") as f:
        f.write("\n".join(text) + "\n")

    # NOTE:suitesparse_graphblas.h and suitesparse_graphblas_arm64.h are the same now
    # # Create final header file (arm64)
    # # Replace all variadic arguments (...) with "char *"
    # print(f"Step 4: parse header file to create {final_arm64_h}")
    # orig_text = text
    # patt = re.compile(r"^(extern GrB_Info .*\(.*)(\.\.\.)(\);)$")
    # text = [patt.sub(r"\1char *\3", line) for line in orig_text]
    # with open(final_arm64_h, "w") as f:
    #     f.write("\n".join(text) + "\n")

    # Create final header file (no complex)
    print(f"Step 4: parse header file to create {final_no_complex_h}")
    groups_no_complex = parse_header(processed_h, skip_complex=True)
    text = create_header_text(groups_no_complex)
    with open(final_no_complex_h, "w") as f:
        f.write("\n".join(text) + "\n")

    # Create source
    print(f"Step 5: create {source_c}")
    text = create_source_text(groups)
    with open(source_c, "w") as f:
        f.write("\n".join(text) + "\n")

    # Check defines
    print("Step 6: check #define definitions")
    with open(graphblas_h) as f:
        text = f.read()
    define_lines = re.compile(r".*?#define\s+\w+\s+")
    define_pattern = re.compile(r"#define\s+\w+\s+")
    defines = set()
    for line in define_lines.findall(text):
        # Strip trailing // and /* comments before extracting the define name.
        line = line.split("//")[0].split("/*")[0]
        defines.update(x[len("#define") :].strip() for x in define_pattern.findall(line))
    extra_defines = (DEFINES | CHAR_DEFINES) - defines
    if extra_defines:
        # Should this raise? If it's a problem, it will raise when compiling.
        print(
            f"WARNING: the following #define values weren't found in {graphblas_h}: "
            + ", ".join(sorted(extra_defines))
        )
    unknown_defines = defines - DEFINES - CHAR_DEFINES - IGNORE_DEFINES
    if unknown_defines:
        raise ValueError(
            f"Unknown #define values found in {graphblas_h}: " + ", ".join(sorted(unknown_defines))
        )
    print("Success!", "\N{ROCKET}")
    if args.show_skipped:
        print()
        print(f"Showing lines from {processed_h} that were skipped when creating {final_h}:")
        print("-" * 80)
        for line in sorted(groups["not seen"], key=sort_key):
            print(line)


if __name__ == "__main__":
    main()


# --- suitesparse_graphblas/exceptions.py ---
# One exception subclass per GraphBLAS error condition.
# NOTE(review): these presumably map 1:1 onto GrB_Info status codes —
# verify against the status-to-exception mapping in the package __init__.
class GraphBLASException(Exception):
    """Base class for all errors raised by suitesparse-graphblas."""

    pass


class NoValue(GraphBLASException):
    """GraphBLAS "no value" condition."""

    pass


class UninitializedObject(GraphBLASException):
    """An object was used before being initialized."""

    pass


class InvalidObject(GraphBLASException):
    """An object is in an invalid state."""

    pass


class NullPointer(GraphBLASException):
    """A NULL pointer was passed where an object was required."""

    pass


class InvalidValue(GraphBLASException):
    """An invalid value was passed."""

    pass


class InvalidIndex(GraphBLASException):
    """An invalid index was passed."""

    pass


class DomainMismatch(GraphBLASException):
    """Operand domains (types) are incompatible."""

    pass


class DimensionMismatch(GraphBLASException):
    """Operand dimensions are incompatible."""

    pass


class OutputNotEmpty(GraphBLASException):
    """The output object is not empty."""

    pass


class EmptyObject(GraphBLASException):
    """The object is empty."""

    pass


class OutOfMemory(GraphBLASException):
    """GraphBLAS ran out of memory."""

    pass


class InsufficientSpace(GraphBLASException):
    """The provided space is insufficient."""

    pass


class IndexOutOfBound(GraphBLASException):
    """An index is out of bounds."""

    pass


class Panic(GraphBLASException):
    """Unrecoverable internal GraphBLAS error."""

    pass


class NotImplementedException(GraphBLASException):
    """The requested feature is not implemented."""

    pass


class JitError(GraphBLASException):
    """Error from the GraphBLAS JIT."""

pass 67 | -------------------------------------------------------------------------------- /suitesparse_graphblas/io/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GraphBLAS/python-suitesparse-graphblas/74c45cc0e9ca2f454cb518a3df09b573b151bca1/suitesparse_graphblas/io/__init__.py -------------------------------------------------------------------------------- /suitesparse_graphblas/io/binary.py: -------------------------------------------------------------------------------- 1 | from ctypes.util import find_library 2 | from pathlib import Path 3 | 4 | from cffi import FFI 5 | 6 | from suitesparse_graphblas import __version__, check_status, ffi, lib, matrix 7 | 8 | stdffi = FFI() 9 | stdffi.cdef( 10 | """ 11 | void *malloc(size_t size); 12 | """ 13 | ) 14 | stdlib = stdffi.dlopen(find_library("c")) 15 | 16 | # When "packing" a matrix the owner of the memory buffer is transfered 17 | # to SuiteSparse, which then becomes responsible for freeing it. cffi 18 | # wisely does not allow you to do this without declaring and calling 19 | # malloc directly. When SuiteSparse moves over to a more formal 20 | # memory manager with the cuda work, this will likely change and have 21 | # to be replaceable with a allocator common to numpy, cuda, and here. 22 | # Maybe PyDataMem_NEW? 
23 | 24 | 25 | def readinto_new_buffer(f, typ, size, allocator=stdlib.malloc): 26 | buff = ffi.cast(typ, allocator(size)) 27 | f.readinto(ffi.buffer(buff, size)) 28 | return buff 29 | 30 | 31 | GRB_HEADER_LEN = 512 32 | NULL = ffi.NULL 33 | 34 | header_template = """\ 35 | SuiteSparse:GraphBLAS matrix 36 | {suitesparse_version} ({user_agent}) 37 | nrows: {nrows} 38 | ncols: {ncols} 39 | nvec: {nvec} 40 | nvals: {nvals} 41 | format: {format} 42 | size: {size} 43 | type: {type} 44 | iso: {iso} 45 | {comments} 46 | """ 47 | 48 | sizeof = ffi.sizeof 49 | ffinew = ffi.new 50 | buff = ffi.buffer 51 | frombuff = ffi.from_buffer 52 | Isize = ffi.sizeof("GrB_Index") 53 | 54 | _ss_typecodes = { 55 | lib.GrB_BOOL: 0, 56 | lib.GrB_INT8: 1, 57 | lib.GrB_INT16: 2, 58 | lib.GrB_INT32: 3, 59 | lib.GrB_INT64: 4, 60 | lib.GrB_UINT8: 5, 61 | lib.GrB_UINT16: 6, 62 | lib.GrB_UINT32: 7, 63 | lib.GrB_UINT64: 8, 64 | lib.GrB_FP32: 9, 65 | lib.GrB_FP64: 10, 66 | lib.GxB_FC32: 11, 67 | lib.GxB_FC64: 12, 68 | } 69 | 70 | _ss_typenames = { 71 | lib.GrB_BOOL: "GrB_BOOL", 72 | lib.GrB_INT8: "GrB_INT8", 73 | lib.GrB_INT16: "GrB_INT16", 74 | lib.GrB_INT32: "GrB_INT32", 75 | lib.GrB_INT64: "GrB_INT64", 76 | lib.GrB_UINT8: "GrB_UINT8", 77 | lib.GrB_UINT16: "GrB_UINT16", 78 | lib.GrB_UINT32: "GrB_UINT32", 79 | lib.GrB_UINT64: "GrB_UINT64", 80 | lib.GrB_FP32: "GrB_FP32", 81 | lib.GrB_FP64: "GrB_FP64", 82 | lib.GxB_FC32: "GxB_FC32", 83 | lib.GxB_FC64: "GxB_FC64", 84 | } 85 | 86 | _ss_codetypes = {v: k for k, v in _ss_typecodes.items()} 87 | 88 | 89 | def binwrite(A, filename, comments=None, opener=Path.open): 90 | if isinstance(filename, str): 91 | filename = Path(filename) 92 | 93 | check_status(A, lib.GrB_Matrix_wait(A[0], lib.GrB_MATERIALIZE)) 94 | 95 | ffinew = ffi.new 96 | 97 | Ap = ffinew("GrB_Index**") 98 | Ai = ffinew("GrB_Index**") 99 | Ah = ffinew("GrB_Index**") 100 | Ax = ffinew("void**") 101 | Ab = ffinew("int8_t**") 102 | 103 | Ap_size = ffinew("GrB_Index*") 104 | Ai_size = 
ffinew("GrB_Index*") 105 | Ah_size = ffinew("GrB_Index*") 106 | Ax_size = ffinew("GrB_Index*") 107 | Ab_size = ffinew("GrB_Index*") 108 | 109 | nvec = ffinew("GrB_Index*") 110 | nrows = ffinew("GrB_Index*") 111 | ncols = ffinew("GrB_Index*") 112 | nvals = ffinew("GrB_Index*") 113 | 114 | typesize = ffi.new("size_t*") 115 | is_iso = ffinew("bool*") 116 | is_jumbled = ffinew("bool*") 117 | 118 | impl = ffi.new("uint64_t*", lib.GxB_IMPLEMENTATION) 119 | format = ffinew("GxB_Format_Value*") 120 | hyper_switch = ffinew("double*") 121 | bitmap_switch = ffinew("double*") 122 | sparsity_control = ffinew("int32_t*") 123 | sparsity_status = ffinew("int32_t*") 124 | 125 | typecode = ffinew("int32_t*") 126 | matrix_type = ffi.new("GrB_Type*") 127 | 128 | nrows[0] = matrix.nrows(A) 129 | ncols[0] = matrix.ncols(A) 130 | nvals[0] = matrix.nvals(A) 131 | matrix_type[0] = matrix.type(A) 132 | 133 | check_status(A, lib.GxB_Type_size(typesize, matrix_type[0])) 134 | typecode[0] = _ss_typecodes[matrix_type[0]] 135 | 136 | format[0] = matrix.format(A) 137 | hyper_switch[0] = matrix.hyper_switch(A) 138 | bitmap_switch[0] = matrix.bitmap_switch(A) 139 | sparsity_status[0] = matrix.sparsity_status(A) 140 | sparsity_control[0] = matrix.sparsity_control(A) 141 | 142 | by_row = format[0] == lib.GxB_BY_ROW 143 | by_col = format[0] == lib.GxB_BY_COL 144 | 145 | is_hyper = sparsity_status[0] == lib.GxB_HYPERSPARSE 146 | is_sparse = sparsity_status[0] == lib.GxB_SPARSE 147 | is_bitmap = sparsity_status[0] == lib.GxB_BITMAP 148 | is_full = sparsity_status[0] == lib.GxB_FULL 149 | 150 | if by_col and is_hyper: 151 | check_status( 152 | A, 153 | lib.GxB_Matrix_unpack_HyperCSC( 154 | A[0], 155 | Ap, 156 | Ah, 157 | Ai, 158 | Ax, 159 | Ap_size, 160 | Ah_size, 161 | Ai_size, 162 | Ax_size, 163 | is_iso, 164 | nvec, 165 | is_jumbled, 166 | NULL, 167 | ), 168 | ) 169 | fmt_string = "HCSC" 170 | 171 | elif by_row and is_hyper: 172 | check_status( 173 | A, 174 | lib.GxB_Matrix_unpack_HyperCSR( 175 | 
A[0], 176 | Ap, 177 | Ah, 178 | Ai, 179 | Ax, 180 | Ap_size, 181 | Ah_size, 182 | Ai_size, 183 | Ax_size, 184 | is_iso, 185 | nvec, 186 | is_jumbled, 187 | NULL, 188 | ), 189 | ) 190 | fmt_string = "HCSR" 191 | 192 | elif by_col and is_sparse: 193 | check_status( 194 | A, 195 | lib.GxB_Matrix_unpack_CSC( 196 | A[0], Ap, Ai, Ax, Ap_size, Ai_size, Ax_size, is_iso, is_jumbled, NULL 197 | ), 198 | ) 199 | nvec[0] = ncols[0] 200 | fmt_string = "CSC" 201 | 202 | elif by_row and is_sparse: 203 | check_status( 204 | A, 205 | lib.GxB_Matrix_unpack_CSR( 206 | A[0], Ap, Ai, Ax, Ap_size, Ai_size, Ax_size, is_iso, is_jumbled, NULL 207 | ), 208 | ) 209 | nvec[0] = nrows[0] 210 | fmt_string = "CSR" 211 | 212 | elif by_col and is_bitmap: 213 | check_status( 214 | A, lib.GxB_Matrix_unpack_BitmapC(A[0], Ab, Ax, Ab_size, Ax_size, is_iso, nvals, NULL) 215 | ) 216 | nvec[0] = ncols[0] 217 | fmt_string = "BITMAPC" 218 | 219 | elif by_row and is_bitmap: 220 | check_status( 221 | A, lib.GxB_Matrix_unpack_BitmapR(A[0], Ab, Ax, Ab_size, Ax_size, is_iso, nvals, NULL) 222 | ) 223 | nvec[0] = nrows[0] 224 | fmt_string = "BITMAPR" 225 | 226 | elif by_col and is_full: 227 | check_status(A, lib.GxB_Matrix_unpack_FullC(A[0], Ax, Ax_size, is_iso, NULL)) 228 | nvec[0] = ncols[0] 229 | fmt_string = "FULLC" 230 | 231 | elif by_row and is_full: 232 | check_status(A, lib.GxB_Matrix_unpack_FullR(A[0], Ax, Ax_size, is_iso, NULL)) 233 | nvec[0] = nrows[0] 234 | fmt_string = "FULLR" 235 | 236 | else: # pragma nocover 237 | raise TypeError(f"Unknown Matrix format {format[0]}") 238 | 239 | suitesparse_version = ( 240 | f"v{lib.GxB_IMPLEMENTATION_MAJOR}." 241 | f"{lib.GxB_IMPLEMENTATION_MINOR}." 
242 | f"{lib.GxB_IMPLEMENTATION_SUB}" 243 | ) 244 | 245 | vars = dict( # noqa: C408 246 | suitesparse_version=suitesparse_version, 247 | user_agent="pygraphblas-" + __version__, 248 | nrows=nrows[0], 249 | ncols=ncols[0], 250 | nvals=nvals[0], 251 | nvec=nvec[0], 252 | format=fmt_string, 253 | size=typesize[0], 254 | type=_ss_typenames[matrix_type[0]], 255 | iso=int(is_iso[0]), 256 | comments=comments, 257 | ) 258 | header_content = header_template.format(**vars) 259 | header = f"{header_content: <{GRB_HEADER_LEN}}".encode("ascii") 260 | 261 | with opener(filename, "wb") as f: 262 | fwrite = f.write 263 | fwrite(header) 264 | fwrite(buff(impl, sizeof("uint64_t"))) 265 | fwrite(buff(format, sizeof("GxB_Format_Value"))) 266 | fwrite(buff(sparsity_status, sizeof("int32_t"))) 267 | fwrite(buff(sparsity_control, sizeof("int32_t"))) 268 | fwrite(buff(hyper_switch, sizeof("double"))) 269 | fwrite(buff(bitmap_switch, sizeof("double"))) 270 | fwrite(buff(nrows, Isize)) 271 | fwrite(buff(ncols, Isize)) 272 | fwrite(buff(nvec, Isize)) 273 | fwrite(buff(nvals, Isize)) 274 | fwrite(buff(typecode, sizeof("int32_t"))) 275 | fwrite(buff(typesize, sizeof("size_t"))) 276 | fwrite(buff(is_iso, sizeof("bool"))) 277 | 278 | Tsize = typesize[0] 279 | iso = is_iso[0] 280 | 281 | if is_hyper: 282 | fwrite(buff(Ap[0], (nvec[0] + 1) * Isize)) 283 | fwrite(buff(Ah[0], nvec[0] * Isize)) 284 | fwrite(buff(Ai[0], nvals[0] * Isize)) 285 | Axsize = Tsize if iso else nvals[0] * Tsize 286 | elif is_sparse: 287 | fwrite(buff(Ap[0], (nvec[0] + 1) * Isize)) 288 | fwrite(buff(Ai[0], nvals[0] * Isize)) 289 | Axsize = Tsize if iso else nvals[0] * Tsize 290 | elif is_bitmap: 291 | fwrite(buff(Ab[0], nrows[0] * ncols[0] * ffi.sizeof("int8_t"))) 292 | Axsize = Tsize if iso else nrows[0] * ncols[0] * Tsize 293 | else: 294 | Axsize = Tsize if iso else nrows[0] * ncols[0] * Tsize 295 | 296 | fwrite(buff(Ax[0], Axsize)) 297 | 298 | if by_col and is_hyper: 299 | check_status( 300 | A, 301 | 
lib.GxB_Matrix_pack_HyperCSC( 302 | A[0], 303 | Ap, 304 | Ah, 305 | Ai, 306 | Ax, 307 | Ap_size[0], 308 | Ah_size[0], 309 | Ai_size[0], 310 | Ax_size[0], 311 | is_iso[0], 312 | nvec[0], 313 | is_jumbled[0], 314 | NULL, 315 | ), 316 | ) 317 | 318 | elif by_row and is_hyper: 319 | check_status( 320 | A, 321 | lib.GxB_Matrix_pack_HyperCSR( 322 | A[0], 323 | Ap, 324 | Ah, 325 | Ai, 326 | Ax, 327 | Ap_size[0], 328 | Ah_size[0], 329 | Ai_size[0], 330 | Ax_size[0], 331 | is_iso[0], 332 | nvec[0], 333 | is_jumbled[0], 334 | NULL, 335 | ), 336 | ) 337 | 338 | elif by_col and is_sparse: 339 | check_status( 340 | A, 341 | lib.GxB_Matrix_pack_CSC( 342 | A[0], Ap, Ai, Ax, Ap_size[0], Ai_size[0], Ax_size[0], is_iso[0], is_jumbled[0], NULL 343 | ), 344 | ) 345 | 346 | elif by_row and is_sparse: 347 | check_status( 348 | A, 349 | lib.GxB_Matrix_pack_CSR( 350 | A[0], Ap, Ai, Ax, Ap_size[0], Ai_size[0], Ax_size[0], is_iso[0], is_jumbled[0], NULL 351 | ), 352 | ) 353 | 354 | elif by_col and is_bitmap: 355 | check_status( 356 | A, 357 | lib.GxB_Matrix_pack_BitmapC( 358 | A[0], Ab, Ax, Ab_size[0], Ax_size[0], is_iso[0], nvals[0], NULL 359 | ), 360 | ) 361 | 362 | elif by_row and is_bitmap: 363 | check_status( 364 | A, 365 | lib.GxB_Matrix_pack_BitmapR( 366 | A[0], Ab, Ax, Ab_size[0], Ax_size[0], is_iso[0], nvals[0], NULL 367 | ), 368 | ) 369 | 370 | elif by_col and is_full: 371 | check_status(A, lib.GxB_Matrix_pack_FullC(A[0], Ax, Ax_size[0], is_iso[0], NULL)) 372 | 373 | elif by_row and is_full: 374 | check_status(A, lib.GxB_Matrix_pack_FullR(A[0], Ax, Ax_size[0], is_iso[0], NULL)) 375 | else: 376 | raise TypeError("This should hever happen") 377 | 378 | 379 | def binread(filename, opener=Path.open): 380 | if isinstance(filename, str): 381 | filename = Path(filename) 382 | 383 | with opener(filename, "rb") as f: 384 | fread = f.read 385 | 386 | fread(GRB_HEADER_LEN) 387 | impl = frombuff("uint64_t*", fread(sizeof("uint64_t"))) 388 | 389 | assert impl[0] == lib.GxB_IMPLEMENTATION 390 | 
391 | format = frombuff("GxB_Format_Value*", fread(sizeof("GxB_Format_Value"))) 392 | sparsity_status = frombuff("int32_t*", fread(sizeof("int32_t"))) 393 | sparsity_control = frombuff("int32_t*", fread(sizeof("int32_t"))) 394 | hyper_switch = frombuff("double*", fread(sizeof("double"))) 395 | bitmap_switch = frombuff("double*", fread(sizeof("double"))) 396 | nrows = frombuff("GrB_Index*", fread(Isize)) 397 | ncols = frombuff("GrB_Index*", fread(Isize)) 398 | nvec = frombuff("GrB_Index*", fread(Isize)) 399 | nvals = frombuff("GrB_Index*", fread(Isize)) 400 | typecode = frombuff("int32_t*", fread(sizeof("int32_t"))) 401 | typesize = frombuff("size_t*", fread(sizeof("size_t"))) 402 | is_iso = frombuff("bool*", fread(sizeof("bool"))) 403 | is_jumbled = ffi.new("bool*", 0) 404 | 405 | by_row = format[0] == lib.GxB_BY_ROW 406 | by_col = format[0] == lib.GxB_BY_COL 407 | 408 | is_hyper = sparsity_status[0] == lib.GxB_HYPERSPARSE 409 | is_sparse = sparsity_status[0] == lib.GxB_SPARSE 410 | is_bitmap = sparsity_status[0] == lib.GxB_BITMAP 411 | is_full = sparsity_status[0] == lib.GxB_FULL 412 | 413 | atype = _ss_codetypes[typecode[0]] 414 | 415 | Ap = ffinew("GrB_Index**") 416 | Ai = ffinew("GrB_Index**") 417 | Ah = ffinew("GrB_Index**") 418 | Ax = ffinew("void**") 419 | Ab = ffinew("int8_t**") 420 | 421 | Ap_size = ffinew("GrB_Index*") 422 | Ai_size = ffinew("GrB_Index*") 423 | Ah_size = ffinew("GrB_Index*") 424 | Ax_size = ffinew("GrB_Index*") 425 | Ab_size = ffinew("GrB_Index*") 426 | 427 | if is_hyper: 428 | Ap_size[0] = (nvec[0] + 1) * Isize 429 | Ah_size[0] = nvec[0] * Isize 430 | Ai_size[0] = nvals[0] * Isize 431 | Ax_size[0] = nvals[0] * typesize[0] 432 | 433 | Ap[0] = readinto_new_buffer(f, "GrB_Index*", Ap_size[0]) 434 | Ah[0] = readinto_new_buffer(f, "GrB_Index*", Ah_size[0]) 435 | Ai[0] = readinto_new_buffer(f, "GrB_Index*", Ai_size[0]) 436 | elif is_sparse: 437 | Ap_size[0] = (nvec[0] + 1) * Isize 438 | Ai_size[0] = nvals[0] * Isize 439 | Ax_size[0] = nvals[0] 
* typesize[0] 440 | Ap[0] = readinto_new_buffer(f, "GrB_Index*", Ap_size[0]) 441 | Ai[0] = readinto_new_buffer(f, "GrB_Index*", Ai_size[0]) 442 | elif is_bitmap: 443 | Ab_size[0] = nrows[0] * ncols[0] * ffi.sizeof("int8_t") 444 | Ax_size[0] = nrows[0] * ncols[0] * typesize[0] 445 | Ab[0] = readinto_new_buffer(f, "int8_t*", Ab_size[0]) 446 | elif is_full: 447 | Ax_size[0] = nrows[0] * ncols[0] * typesize[0] 448 | 449 | Ax[0] = readinto_new_buffer(f, "uint8_t*", typesize[0] if is_iso[0] else Ax_size[0]) 450 | 451 | A = matrix.new(atype, nrows[0], ncols[0]) 452 | 453 | if by_col and is_hyper: 454 | check_status( 455 | A, 456 | lib.GxB_Matrix_pack_HyperCSC( 457 | A[0], 458 | Ap, 459 | Ah, 460 | Ai, 461 | Ax, 462 | Ap_size[0], 463 | Ah_size[0], 464 | Ai_size[0], 465 | Ax_size[0], 466 | is_iso[0], 467 | nvec[0], 468 | is_jumbled[0], 469 | NULL, 470 | ), 471 | ) 472 | 473 | elif by_row and is_hyper: 474 | check_status( 475 | A, 476 | lib.GxB_Matrix_pack_HyperCSR( 477 | A[0], 478 | Ap, 479 | Ah, 480 | Ai, 481 | Ax, 482 | Ap_size[0], 483 | Ah_size[0], 484 | Ai_size[0], 485 | Ax_size[0], 486 | is_iso[0], 487 | nvec[0], 488 | is_jumbled[0], 489 | NULL, 490 | ), 491 | ) 492 | 493 | elif by_col and is_sparse: 494 | check_status( 495 | A, 496 | lib.GxB_Matrix_pack_CSC( 497 | A[0], 498 | Ap, 499 | Ai, 500 | Ax, 501 | Ap_size[0], 502 | Ai_size[0], 503 | Ax_size[0], 504 | is_iso[0], 505 | is_jumbled[0], 506 | NULL, 507 | ), 508 | ) 509 | 510 | elif by_row and is_sparse: 511 | check_status( 512 | A, 513 | lib.GxB_Matrix_pack_CSR( 514 | A[0], 515 | Ap, 516 | Ai, 517 | Ax, 518 | Ap_size[0], 519 | Ai_size[0], 520 | Ax_size[0], 521 | is_iso[0], 522 | is_jumbled[0], 523 | NULL, 524 | ), 525 | ) 526 | 527 | elif by_col and is_bitmap: 528 | check_status( 529 | A, 530 | lib.GxB_Matrix_pack_BitmapC( 531 | A[0], Ab, Ax, Ab_size[0], Ax_size[0], is_iso[0], nvals[0], NULL 532 | ), 533 | ) 534 | 535 | elif by_row and is_bitmap: 536 | check_status( 537 | A, 538 | lib.GxB_Matrix_pack_BitmapR( 539 | 
A[0], Ab, Ax, Ab_size[0], Ax_size[0], is_iso[0], nvals[0], NULL 540 | ), 541 | ) 542 | 543 | elif by_col and is_full: 544 | check_status(A, lib.GxB_Matrix_pack_FullC(A[0], Ax, Ax_size[0], is_iso[0], NULL)) 545 | 546 | elif by_row and is_full: 547 | check_status(A, lib.GxB_Matrix_pack_FullR(A[0], Ax, Ax_size[0], is_iso[0], NULL)) 548 | else: 549 | raise TypeError("Unknown format {format[0]}") 550 | 551 | matrix.set_sparsity_control(A, sparsity_control[0]) 552 | matrix.set_hyper_switch(A, hyper_switch[0]) 553 | matrix.set_bitmap_switch(A, bitmap_switch[0]) 554 | return A 555 | -------------------------------------------------------------------------------- /suitesparse_graphblas/io/serialize.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from suitesparse_graphblas import check_status, ffi, lib 4 | from suitesparse_graphblas.utils import claim_buffer 5 | 6 | 7 | def free_desc(desc): 8 | """Free a descriptor.""" 9 | check_status(desc, lib.GrB_Descriptor_free(desc)) 10 | 11 | 12 | def get_serialize_desc(compression=lib.GxB_COMPRESSION_DEFAULT, level=None, nthreads=None): 13 | """Create a descriptor for serializing or deserializing. 14 | 15 | This returns None (for NULL descriptor) or a pointer to a GrB_Descriptor. 
16 | """ 17 | if nthreads is None and (compression is None or compression == lib.GxB_COMPRESSION_DEFAULT): 18 | return None 19 | desc = ffi.new("GrB_Descriptor*") 20 | check_status(desc, lib.GrB_Descriptor_new(desc)) 21 | desc = ffi.gc(desc, free_desc) 22 | if nthreads is not None: 23 | check_status( 24 | desc, 25 | lib.GxB_Desc_set_INT32(desc[0], lib.GxB_NTHREADS, ffi.cast("int32_t", nthreads)), 26 | ) 27 | if compression is not None: 28 | if level is not None and compression in { 29 | lib.GxB_COMPRESSION_LZ4HC, 30 | lib.GxB_COMPRESSION_ZSTD, 31 | }: 32 | compression += level 33 | check_status( 34 | desc, 35 | lib.GxB_Desc_set_INT32(desc[0], lib.GxB_COMPRESSION, ffi.cast("int32_t", compression)), 36 | ) 37 | return desc 38 | 39 | 40 | def serialize_matrix(A, compression=lib.GxB_COMPRESSION_DEFAULT, level=None, *, nthreads=None): 41 | """Serialize a Matrix into an array of bytes. 42 | 43 | Parameters 44 | ---------- 45 | compression : int, optional 46 | One of None, GxB_COMPRESSION_NONE, GxB_COMPRESSION_DEFAULT, 47 | GxB_COMPRESSION_LZ4, GxB_COMPRESSION_LZ4HC, or GxB_COMPRESSION_ZSTD 48 | level : int, optional 49 | For GxB_COMPRESSION_LZ4HC, should be between 1 and 9, where 9 is most compressed. 50 | For GxB_COMPRESSION_ZSTD, should be between 1 and 19, where 19 is most compressed. 51 | 52 | nthreads : int, optional 53 | The maximum number of OpenMP threads to use. 54 | """ 55 | desc = get_serialize_desc(compression, level, nthreads) 56 | data_ptr = ffi.new("void**") 57 | size_ptr = ffi.new("GrB_Index*") 58 | check_status( 59 | A, lib.GxB_Matrix_serialize(data_ptr, size_ptr, A[0], ffi.NULL if desc is None else desc[0]) 60 | ) 61 | return claim_buffer(ffi, data_ptr[0], size_ptr[0], np.dtype(np.uint8)) 62 | 63 | 64 | def serialize_vector(v, compression=lib.GxB_COMPRESSION_DEFAULT, level=None, *, nthreads=None): 65 | """Serialize a Vector into an array of bytes. 
66 | 67 | Parameters 68 | ---------- 69 | compression : int, optional 70 | One of None, GxB_COMPRESSION_NONE, GxB_COMPRESSION_DEFAULT, 71 | GxB_COMPRESSION_LZ4, GxB_COMPRESSION_LZ4HC, or GxB_COMPRESSION_ZSTD 72 | level : int, optional 73 | For GxB_COMPRESSION_LZ4HC, should be between 1 and 9, where 9 is most compressed. 74 | For GxB_COMPRESSION_ZSTD, should be between 1 and 19, where 19 is most compressed. 75 | nthreads : int, optional 76 | The maximum number of OpenMP threads to use. 77 | """ 78 | desc = get_serialize_desc(compression, level, nthreads) 79 | data_ptr = ffi.new("void**") 80 | size_ptr = ffi.new("GrB_Index*") 81 | check_status( 82 | v, lib.GxB_Vector_serialize(data_ptr, size_ptr, v[0], ffi.NULL if desc is None else desc[0]) 83 | ) 84 | return claim_buffer(ffi, data_ptr[0], size_ptr[0], np.dtype(np.uint8)) 85 | 86 | 87 | def deserialize_matrix(data, *, free=True, nthreads=None): 88 | """Deserialize a Matrix from bytes. 89 | 90 | The `free` argument is called when the object is garbage 91 | collected, the default is `matrix.free()`. If `free` is None then 92 | there is no automatic garbage collection and it is up to the user 93 | to free the matrix. 94 | """ 95 | data = np.frombuffer(data, np.uint8) 96 | desc = get_serialize_desc(None, nthreads) 97 | A = ffi.new("GrB_Matrix*") 98 | check_status( 99 | A, 100 | lib.GxB_Matrix_deserialize( 101 | A, 102 | ffi.NULL, # dtype; we don't check for now 103 | ffi.from_buffer("void*", data), 104 | data.nbytes, 105 | ffi.NULL if desc is None else desc[0], 106 | ), 107 | ) 108 | if free: 109 | if callable(free): 110 | return ffi.gc(A, free) 111 | return ffi.gc(A, matrix.free) 112 | return A 113 | 114 | 115 | def deserialize_vector(data, *, free=True, nthreads=None): 116 | """Deserialize a Vector from bytes. 117 | 118 | The `free` argument is called when the object is garbage 119 | collected, the default is `vector.free()`. 
If `free` is None then 120 | there is no automatic garbage collection and it is up to the user 121 | to free the vector. 122 | """ 123 | data = np.frombuffer(data, np.uint8) 124 | desc = get_serialize_desc(None, nthreads) 125 | v = ffi.new("GrB_Vector*") 126 | check_status( 127 | v, 128 | lib.GxB_Vector_deserialize( 129 | v, 130 | ffi.NULL, # dtype; we don't check for now 131 | ffi.from_buffer("void*", data), 132 | data.nbytes, 133 | ffi.NULL if desc is None else desc[0], 134 | ), 135 | ) 136 | if free: 137 | if callable(free): 138 | return ffi.gc(v, free) 139 | return ffi.gc(v, vector.free) 140 | return v 141 | 142 | 143 | from suitesparse_graphblas import matrix, vector # noqa: E402 isort:skip 144 | -------------------------------------------------------------------------------- /suitesparse_graphblas/matrix.py: -------------------------------------------------------------------------------- 1 | from suitesparse_graphblas import check_status, ffi, lib 2 | 3 | from .io.serialize import deserialize_matrix as deserialize # noqa: F401 4 | from .io.serialize import serialize_matrix as serialize # noqa: F401 5 | 6 | 7 | def free(A): 8 | """Free a matrix.""" 9 | check_status(A, lib.GrB_Matrix_free(A)) 10 | 11 | 12 | def new(T, nrows=lib.GxB_INDEX_MAX, ncols=lib.GxB_INDEX_MAX, *, free=free): 13 | """Create a new `GrB_Matrix` of type `T` and initialize it. The 14 | following example creates an eight bit unsigned 2x2 matrix: 15 | 16 | >>> A = new(lib.GrB_UINT8, 2, 2) 17 | >>> shape(A) 18 | (2, 2) 19 | 20 | The default value for `nrows` and `ncols` is `lib.GxB_INDEX_MAX` 21 | which creates a Matrix with maximal bounds: 22 | 23 | >>> A = new(lib.GrB_UINT8) 24 | >>> shape(A) == (lib.GxB_INDEX_MAX, lib.GxB_INDEX_MAX) 25 | True 26 | 27 | The `free` argument is called when the object is garbage 28 | collected, the default is `matrix.free()`. If `free` is None then 29 | there is no automatic garbage collection and it is up to the user 30 | to free the matrix. 
31 | 32 | """ 33 | A = ffi.new("GrB_Matrix*") 34 | check_status(A, lib.GrB_Matrix_new(A, T, nrows, ncols)) 35 | if free: 36 | return ffi.gc(A, free) 37 | return A 38 | 39 | 40 | def type(A): 41 | """Return the GraphBLAS type of the vector. 42 | 43 | >>> A = new(lib.GrB_UINT8) 44 | >>> type(A) == lib.GrB_UINT8 45 | True 46 | 47 | """ 48 | T = ffi.new("GrB_Type*") 49 | check_status(A, lib.GxB_Matrix_type(T, A[0])) 50 | return T[0] 51 | 52 | 53 | def nrows(A): 54 | """Return the number of rows in the matrix. 55 | 56 | >>> A = new(lib.GrB_UINT8, 2, 3) 57 | >>> nrows(A) 58 | 2 59 | 60 | """ 61 | n = ffi.new("GrB_Index*") 62 | check_status(A, lib.GrB_Matrix_nrows(n, A[0])) 63 | return n[0] 64 | 65 | 66 | def ncols(A): 67 | """Return the number of columns in the matrix. 68 | 69 | >>> A = new(lib.GrB_UINT8, 2, 3) 70 | >>> ncols(A) 71 | 3 72 | 73 | """ 74 | n = ffi.new("GrB_Index*") 75 | check_status(A, lib.GrB_Matrix_ncols(n, A[0])) 76 | return n[0] 77 | 78 | 79 | def nvals(A): 80 | """Return the number of stored elements in the matrix. 81 | 82 | >>> A = new(lib.GrB_UINT8, 2, 3) 83 | >>> nvals(A) 84 | 0 85 | 86 | """ 87 | n = ffi.new("GrB_Index*") 88 | check_status(A, lib.GrB_Matrix_nvals(n, A[0])) 89 | return n[0] 90 | 91 | 92 | def shape(A): 93 | """Return the shape of the matrix as a two tuple `(nrows, ncols)` 94 | 95 | >>> A = new(lib.GrB_UINT8, 2, 2) 96 | >>> shape(A) 97 | (2, 2) 98 | 99 | """ 100 | return (nrows(A), ncols(A)) 101 | 102 | 103 | def format(A): 104 | """Return the format of the matrix. 105 | 106 | >>> A = new(lib.GrB_UINT8, 2, 2) 107 | >>> format(A) == lib.GxB_BY_ROW 108 | True 109 | 110 | """ 111 | format = ffi.new("int32_t*") 112 | check_status(A, lib.GxB_Matrix_Option_get_INT32(A[0], lib.GxB_FORMAT, format)) 113 | return format[0] 114 | 115 | 116 | def set_format(A, format): 117 | """Set the format of the matrix. 
118 | 119 | >>> A = new(lib.GrB_UINT8, 2, 2) 120 | >>> set_format(A, lib.GxB_BY_COL) 121 | >>> format(A) == lib.GxB_BY_COL 122 | True 123 | 124 | """ 125 | format_val = ffi.cast("int32_t", format) 126 | check_status(A, lib.GxB_Matrix_Option_set_INT32(A[0], lib.GxB_FORMAT, format_val)) 127 | 128 | 129 | def sparsity_status(A): 130 | """Get the sparsity status of the matrix.""" 131 | sparsity_status = ffi.new("int32_t*") 132 | check_status(A, lib.GxB_Matrix_Option_get_INT32(A[0], lib.GxB_SPARSITY_STATUS, sparsity_status)) 133 | return sparsity_status[0] 134 | 135 | 136 | def sparsity_control(A): 137 | """Get the sparsity control of the matrix.""" 138 | sparsity_control = ffi.new("int32_t*") 139 | check_status( 140 | A, lib.GxB_Matrix_Option_get_INT32(A[0], lib.GxB_SPARSITY_CONTROL, sparsity_control) 141 | ) 142 | return sparsity_control[0] 143 | 144 | 145 | def set_sparsity_control(A, sparsity): 146 | """Set the sparsity control of the matrix.""" 147 | sparsity_control = ffi.cast("int32_t", sparsity) 148 | check_status( 149 | A, lib.GxB_Matrix_Option_set_INT32(A[0], lib.GxB_SPARSITY_CONTROL, sparsity_control) 150 | ) 151 | 152 | 153 | def hyper_switch(A): 154 | """Get the hyper switch of the matrix.""" 155 | hyper_switch = ffi.new("double*") 156 | check_status(A, lib.GxB_Matrix_Option_get_FP64(A[0], lib.GxB_HYPER_SWITCH, hyper_switch)) 157 | return hyper_switch[0] 158 | 159 | 160 | def set_hyper_switch(A, hyper_switch): 161 | """Set the hyper switch of the matrix.""" 162 | hyper_switch = ffi.cast("double", hyper_switch) 163 | check_status(A, lib.GxB_Matrix_Option_set_FP64(A[0], lib.GxB_HYPER_SWITCH, hyper_switch)) 164 | 165 | 166 | def bitmap_switch(A): 167 | """Get the bitmap switch of the matrix.""" 168 | bitmap_switch = ffi.new("double*") 169 | check_status(A, lib.GxB_Matrix_Option_get_FP64(A[0], lib.GxB_BITMAP_SWITCH, bitmap_switch)) 170 | return bitmap_switch[0] 171 | 172 | 173 | def set_bitmap_switch(A, bitmap_switch): 174 | """Set the bitmap switch of the 
matrix.""" 175 | bitmap_switch = ffi.cast("double", bitmap_switch) 176 | check_status(A, lib.GxB_Matrix_Option_set_FP64(A[0], lib.GxB_BITMAP_SWITCH, bitmap_switch)) 177 | 178 | 179 | def set_bool(A, value, i, j): 180 | """Set a boolean value to the matrix at row `i` column `j`. 181 | 182 | >>> A = new(lib.GrB_BOOL, 3, 3) 183 | >>> set_bool(A, True, 2, 2) 184 | >>> bool(A, 2, 2) == True 185 | True 186 | 187 | """ 188 | check_status(A, lib.GrB_Matrix_setElement_BOOL(A[0], value, i, j)) 189 | 190 | 191 | def bool(A, i, j): 192 | """Get a boolean value from the matrix at row `i` column `j`. 193 | 194 | >>> A = new(lib.GrB_BOOL, 3, 3) 195 | >>> set_bool(A, True, 2, 2) 196 | >>> bool(A, 2, 2) == True 197 | True 198 | 199 | """ 200 | value = ffi.new("bool*") 201 | check_status(A, lib.GrB_Matrix_extractElement_BOOL(value, A[0], i, j)) 202 | return value[0] 203 | -------------------------------------------------------------------------------- /suitesparse_graphblas/scalar.py: -------------------------------------------------------------------------------- 1 | from suitesparse_graphblas import check_status, exceptions, ffi, lib 2 | 3 | 4 | def free(v): 5 | """Free a scalar.""" 6 | check_status(v, lib.GxB_Scalar_free(v)) 7 | 8 | 9 | def new(T, *, free=free): 10 | """Create a new `GxB_Scalar` of type `T` and initialize it. 11 | 12 | The `free` argument is called when the object is garbage 13 | collected, the default is `scalar.free()`. If `free` is None then 14 | there is no automatic garbage collection and it is up to the user 15 | to free the scalar. 16 | 17 | >>> S = new(lib.GrB_UINT8) 18 | 19 | """ 20 | s = ffi.new("GxB_Scalar*") 21 | check_status(s, lib.GxB_Scalar_new(s, T)) 22 | if free: 23 | return ffi.gc(s, free) 24 | return s 25 | 26 | 27 | def type(s): 28 | """Return the GraphBLAS type of the scalar. 
29 | 30 | >>> S = new(lib.GrB_UINT8) 31 | >>> type(S) == lib.GrB_UINT8 32 | True 33 | 34 | """ 35 | T = ffi.new("GrB_Type*") 36 | check_status(s, lib.GxB_Scalar_type(T, s[0])) 37 | return T[0] 38 | 39 | 40 | def set_bool(s, value): 41 | """Set a boolean value to the scalar. 42 | 43 | >>> s = new(lib.GrB_BOOL) 44 | >>> set_bool(s, True) 45 | >>> bool(s) == True 46 | True 47 | 48 | """ 49 | check_status(s, lib.GxB_Scalar_setElement_BOOL(s[0], value)) 50 | 51 | 52 | def bool(s): 53 | """Get a boolean value from the scalar. 54 | 55 | >>> s = new(lib.GrB_BOOL) 56 | >>> set_bool(s, True) 57 | >>> bool(s) == True 58 | True 59 | 60 | """ 61 | value = ffi.new("bool*") 62 | res = check_status(s, lib.GxB_Scalar_extractElement_BOOL(value, s[0])) 63 | if res == exceptions.NoValue: 64 | return None 65 | return value[0] 66 | -------------------------------------------------------------------------------- /suitesparse_graphblas/source.c: -------------------------------------------------------------------------------- 1 | /* This file is automatically generated */ 2 | #include "GraphBLAS.h" 3 | char *GxB_IMPLEMENTATION_ABOUT_STR = GxB_IMPLEMENTATION_ABOUT; 4 | char *GxB_IMPLEMENTATION_DATE_STR = GxB_IMPLEMENTATION_DATE; 5 | char *GxB_IMPLEMENTATION_LICENSE_STR = GxB_IMPLEMENTATION_LICENSE; 6 | char *GxB_IMPLEMENTATION_NAME_STR = GxB_IMPLEMENTATION_NAME; 7 | char *GxB_SPEC_ABOUT_STR = GxB_SPEC_ABOUT; 8 | char *GxB_SPEC_DATE_STR = GxB_SPEC_DATE; 9 | -------------------------------------------------------------------------------- /suitesparse_graphblas/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GraphBLAS/python-suitesparse-graphblas/74c45cc0e9ca2f454cb518a3df09b573b151bca1/suitesparse_graphblas/tests/__init__.py -------------------------------------------------------------------------------- /suitesparse_graphblas/tests/conftest.py: 
-------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from suitesparse_graphblas import initialize 4 | 5 | 6 | @pytest.fixture(scope="session", autouse=True) 7 | def intialize_suitesparse_graphblas(): 8 | initialize() 9 | -------------------------------------------------------------------------------- /suitesparse_graphblas/tests/test_doctest.py: -------------------------------------------------------------------------------- 1 | def test_run_doctests(): 2 | import doctest 3 | 4 | from suitesparse_graphblas import matrix, scalar, vector 5 | 6 | for mod in ( 7 | matrix, 8 | vector, 9 | scalar, 10 | ): 11 | doctest.testmod(mod, optionflags=doctest.ELLIPSIS, raise_on_error=True) 12 | -------------------------------------------------------------------------------- /suitesparse_graphblas/tests/test_exceptions.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from suitesparse_graphblas import check_status, exceptions, ffi, lib 4 | 5 | 6 | def test_check_status(): 7 | A = ffi.new("GrB_Matrix*") 8 | check_status(A, lib.GrB_Matrix_new(A, lib.GrB_BOOL, 2, 2)) 9 | with pytest.raises(exceptions.Panic): 10 | check_status(A, lib.GrB_PANIC) 11 | with pytest.raises(exceptions.Panic): 12 | check_status(A[0], lib.GrB_PANIC) 13 | -------------------------------------------------------------------------------- /suitesparse_graphblas/tests/test_initialize.py: -------------------------------------------------------------------------------- 1 | if __name__ == "__main__": 2 | import pytest 3 | 4 | import suitesparse_graphblas as ssgb 5 | 6 | assert ssgb.is_initialized() is False 7 | ssgb.initialize() 8 | assert ssgb.is_initialized() is True 9 | with pytest.raises(RuntimeError, match="GraphBLAS is already initialized"): 10 | ssgb.initialize() 11 | -------------------------------------------------------------------------------- /suitesparse_graphblas/tests/test_io.py: 
-------------------------------------------------------------------------------- 1 | import bz2 2 | import gzip 3 | import lzma 4 | import platform 5 | from pathlib import Path 6 | 7 | import pytest 8 | 9 | from suitesparse_graphblas import ( 10 | bool_types, 11 | check_status, 12 | complex_types, 13 | ffi, 14 | grb_types, 15 | lib, 16 | matrix, 17 | real_types, 18 | signed_integer_types, 19 | supports_complex, 20 | unsigned_integer_types, 21 | vector, 22 | ) 23 | 24 | if platform.system() == "Windows": 25 | pytest.skip("skipping windows-only tests", allow_module_level=True) 26 | 27 | from suitesparse_graphblas.io import binary # isort:skip 28 | 29 | NULL = ffi.NULL 30 | 31 | 32 | def _test_elements(T): 33 | if T in bool_types: 34 | return [True, False], [0, 0], [1, 1] 35 | elif T in signed_integer_types: 36 | return [1, -42], [0, 0], [1, 1] 37 | elif T in unsigned_integer_types: 38 | return [1, 42], [0, 0], [1, 1] 39 | elif T in real_types: 40 | return [1.0, -42.42], [0, 0], [1, 1] 41 | elif T in complex_types: 42 | return [complex(1.0, 1.0), complex(42.0, -42.0)], [0, 0], [1, 1] 43 | 44 | 45 | _element_setters = { 46 | lib.GrB_BOOL: lib.GrB_Matrix_setElement_BOOL, 47 | lib.GrB_INT8: lib.GrB_Matrix_setElement_INT8, 48 | lib.GrB_INT16: lib.GrB_Matrix_setElement_INT16, 49 | lib.GrB_INT32: lib.GrB_Matrix_setElement_INT32, 50 | lib.GrB_INT64: lib.GrB_Matrix_setElement_INT64, 51 | lib.GrB_UINT8: lib.GrB_Matrix_setElement_UINT8, 52 | lib.GrB_UINT16: lib.GrB_Matrix_setElement_UINT16, 53 | lib.GrB_UINT32: lib.GrB_Matrix_setElement_UINT32, 54 | lib.GrB_UINT64: lib.GrB_Matrix_setElement_UINT64, 55 | lib.GrB_FP32: lib.GrB_Matrix_setElement_FP32, 56 | lib.GrB_FP64: lib.GrB_Matrix_setElement_FP64, 57 | } 58 | 59 | if supports_complex(): 60 | _element_setters.update( 61 | { 62 | lib.GxB_FC32: lib.GxB_Matrix_setElement_FC32, 63 | lib.GxB_FC64: lib.GxB_Matrix_setElement_FC64, 64 | } 65 | ) 66 | 67 | 68 | _eq_ops = { 69 | lib.GrB_BOOL: lib.GrB_EQ_BOOL, 70 | lib.GrB_INT8: 
lib.GrB_EQ_INT8, 71 | lib.GrB_INT16: lib.GrB_EQ_INT16, 72 | lib.GrB_INT32: lib.GrB_EQ_INT32, 73 | lib.GrB_INT64: lib.GrB_EQ_INT64, 74 | lib.GrB_UINT8: lib.GrB_EQ_UINT8, 75 | lib.GrB_UINT16: lib.GrB_EQ_UINT16, 76 | lib.GrB_UINT32: lib.GrB_EQ_UINT32, 77 | lib.GrB_UINT64: lib.GrB_EQ_UINT64, 78 | lib.GrB_FP32: lib.GrB_EQ_FP32, 79 | lib.GrB_FP64: lib.GrB_EQ_FP64, 80 | } 81 | 82 | if supports_complex(): 83 | _eq_ops.update( 84 | { 85 | lib.GxB_FC32: lib.GxB_EQ_FC32, 86 | lib.GxB_FC64: lib.GxB_EQ_FC64, 87 | } 88 | ) 89 | 90 | 91 | def test_serialize_matrix(): 92 | T = lib.GrB_INT64 93 | A = matrix.new(T, 2, 2) 94 | for args in zip(*_test_elements(T)): 95 | f = _element_setters[T] 96 | check_status(A, f(A[0], *args)) 97 | data = matrix.serialize(A) 98 | B = matrix.deserialize(data) 99 | 100 | # Test equal 101 | C = matrix.new(lib.GrB_BOOL, 2, 2) 102 | check_status( 103 | C, 104 | lib.GrB_Matrix_eWiseAdd_BinaryOp(C[0], NULL, NULL, _eq_ops[T], A[0], B[0], NULL), 105 | ) 106 | assert matrix.nvals(A) == matrix.nvals(B) == matrix.nvals(C) 107 | is_eq = ffi.new("bool*") 108 | check_status( 109 | C, 110 | lib.GrB_Matrix_reduce_BOOL(is_eq, NULL, lib.GrB_LAND_MONOID_BOOL, C[0], NULL), 111 | ) 112 | assert is_eq[0] 113 | 114 | 115 | def test_serialize_vector(): 116 | T = lib.GrB_INT64 117 | v = vector.new(T, 3) 118 | check_status(v, lib.GrB_Vector_setElement_INT64(v[0], 2, 0)) 119 | check_status(v, lib.GrB_Vector_setElement_INT64(v[0], 10, 1)) 120 | data = vector.serialize(v, lib.GxB_COMPRESSION_LZ4HC, level=7) 121 | w = vector.deserialize(data) 122 | 123 | # Test equal 124 | x = vector.new(lib.GrB_BOOL, 3) 125 | check_status( 126 | x, 127 | lib.GrB_Vector_eWiseAdd_BinaryOp(x[0], NULL, NULL, _eq_ops[T], v[0], w[0], NULL), 128 | ) 129 | assert vector.nvals(v) == vector.nvals(w) == vector.nvals(x) 130 | is_eq = ffi.new("bool*") 131 | check_status( 132 | x, 133 | lib.GrB_Vector_reduce_BOOL(is_eq, NULL, lib.GrB_LAND_MONOID_BOOL, x[0], NULL), 134 | ) 135 | assert is_eq[0] 136 | 137 | 138 
| def test_matrix_binfile_read_write(tmp_path): 139 | for opener in (Path.open, gzip.open, bz2.open, lzma.open): 140 | for format in (lib.GxB_BY_ROW, lib.GxB_BY_COL): 141 | for T in grb_types: 142 | for sparsity in (lib.GxB_HYPERSPARSE, lib.GxB_SPARSE, lib.GxB_BITMAP, lib.GxB_FULL): 143 | A = matrix.new(T, 2, 2) 144 | 145 | if T is not lib.GxB_FULL: 146 | for args in zip(*_test_elements(T)): 147 | f = _element_setters[T] 148 | check_status(A, f(A[0], *args)) 149 | else: 150 | Tone = _test_elements(T)[0][0] 151 | check_status( 152 | A[0], 153 | lib.GrB_assign( 154 | A, 155 | NULL, 156 | NULL, 157 | Tone, 158 | lib.GrB_ALL, 159 | 0, 160 | lib.GrB_ALL, 161 | 0, 162 | NULL, 163 | ), 164 | ) 165 | matrix.set_sparsity_control(A, sparsity) 166 | matrix.set_format(A, format) 167 | 168 | binfilef = tmp_path / "binfilewrite_test.binfile" 169 | binary.binwrite(A, binfilef, opener=opener) 170 | B = binary.binread(binfilef, opener=opener) 171 | 172 | assert matrix.type(A) == matrix.type(B) 173 | assert matrix.nrows(A) == matrix.nrows(B) 174 | assert matrix.ncols(A) == matrix.ncols(B) 175 | assert matrix.hyper_switch(A) == matrix.hyper_switch(B) 176 | assert matrix.bitmap_switch(A) == matrix.bitmap_switch(B) 177 | # assert matrix.sparsity_control(A) == matrix.sparsity_control(B) 178 | 179 | C = matrix.new(lib.GrB_BOOL, 2, 2) 180 | 181 | check_status( 182 | C, 183 | lib.GrB_Matrix_eWiseAdd_BinaryOp( 184 | C[0], NULL, NULL, _eq_ops[T], A[0], B[0], NULL 185 | ), 186 | ) 187 | 188 | assert matrix.nvals(A) == matrix.nvals(B) == matrix.nvals(C) 189 | 190 | is_eq = ffi.new("bool*") 191 | check_status( 192 | C, 193 | lib.GrB_Matrix_reduce_BOOL( 194 | is_eq, NULL, lib.GrB_LAND_MONOID_BOOL, C[0], NULL 195 | ), 196 | ) 197 | 198 | assert is_eq[0] 199 | -------------------------------------------------------------------------------- /suitesparse_graphblas/tests/test_jit.py: -------------------------------------------------------------------------------- 1 | from suitesparse_graphblas import 
ffi, lib 2 | 3 | 4 | def test_print_jit_config(): 5 | print() 6 | print("===================================") 7 | print("Printing default JIT configurations") 8 | print("-----------------------------------") 9 | jit_c_control = { 10 | lib.GxB_JIT_OFF: "off", 11 | lib.GxB_JIT_PAUSE: "pause", 12 | lib.GxB_JIT_RUN: "run", 13 | lib.GxB_JIT_LOAD: "load", 14 | lib.GxB_JIT_ON: "on", 15 | } 16 | val_ptr = ffi.new("int32_t*") 17 | assert lib.GxB_Global_Option_get_INT32(lib.GxB_JIT_C_CONTROL, val_ptr) == lib.GrB_SUCCESS 18 | print("JIT_C_CONTROL", jit_c_control[val_ptr[0]]) 19 | 20 | assert lib.GxB_Global_Option_get_INT32(lib.GxB_JIT_USE_CMAKE, val_ptr) == lib.GrB_SUCCESS 21 | print("JIT_USE_CMAKE", bool(val_ptr[0])) 22 | 23 | func = lib.GxB_Global_Option_get_CHAR 24 | names = [ 25 | "JIT_C_COMPILER_NAME", 26 | "JIT_C_COMPILER_FLAGS", 27 | "JIT_C_LINKER_FLAGS", 28 | "JIT_C_LIBRARIES", 29 | "JIT_C_CMAKE_LIBS", 30 | "JIT_C_PREFACE", 31 | "JIT_ERROR_LOG", 32 | "JIT_CACHE_PATH", 33 | ] 34 | val_ptr = ffi.new("char**") 35 | for name in names: 36 | obj = getattr(lib, f"GxB_{name}") 37 | assert func(obj, val_ptr) == lib.GrB_SUCCESS 38 | print(name, ffi.string(val_ptr[0]).decode()) 39 | print("===================================") 40 | -------------------------------------------------------------------------------- /suitesparse_graphblas/tests/test_package.py: -------------------------------------------------------------------------------- 1 | import suitesparse_graphblas 2 | from suitesparse_graphblas import ffi, lib # noqa: F401 3 | 4 | 5 | def test_matrix_existence(): 6 | assert hasattr(lib, "GrB_Matrix_new") 7 | 8 | 9 | def test_version(): 10 | # Example dev version: 9.4.5.0+2.g5590dba8.dirty 11 | # Example reslease version: 9.4.5.0 12 | version = suitesparse_graphblas.__version__ 13 | version = [int(x) for x in version.split("+")[0].split(".")] 14 | assert version > [9, 4, 4, 0] 15 | -------------------------------------------------------------------------------- 
/suitesparse_graphblas/tests/test_scalar.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from suitesparse_graphblas import ffi, lib, supports_complex # noqa: F401 4 | 5 | 6 | @pytest.mark.skipif("not supports_complex()") 7 | def test_complex(): 8 | s = ffi.new("GrB_Scalar*") 9 | success = lib.GrB_SUCCESS 10 | assert lib.GrB_Scalar_new(s, lib.GxB_FC64) == success 11 | assert lib.GxB_Scalar_setElement_FC64(s[0], 1j) == success 12 | assert lib.GrB_Scalar_free(s) == success 13 | -------------------------------------------------------------------------------- /suitesparse_graphblas/utils.pxd: -------------------------------------------------------------------------------- 1 | from libc.stdint cimport uint64_t 2 | from numpy cimport dtype as dtype_t 3 | from numpy cimport ndarray, npy_intp 4 | 5 | 6 | cdef extern from "numpy/arrayobject.h" nogil: 7 | # These aren't public (i.e., "extern"), but other projects use them too 8 | void *PyDataMem_NEW(size_t size) 9 | void *PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) 10 | void *PyDataMem_RENEW(void *ptr, size_t size) 11 | void PyDataMem_FREE(void *ptr) 12 | # These are available in newer Cython versions 13 | void PyArray_ENABLEFLAGS(ndarray array, int flags) 14 | void PyArray_CLEARFLAGS(ndarray array, int flags) 15 | # Not exposed by Cython (b/c it steals a reference from dtype) 16 | ndarray PyArray_NewFromDescr( 17 | type subtype, dtype_t dtype, int nd, npy_intp *dims, npy_intp *strides, void *data, int flags, object obj 18 | ) 19 | 20 | ctypedef enum GrB_Mode: 21 | GrB_NONBLOCKING 22 | GrB_BLOCKING 23 | 24 | ctypedef uint64_t (*GxB_init)( 25 | GrB_Mode, 26 | void *(*user_malloc_function)(size_t), 27 | void *(*user_calloc_function)(size_t, size_t), 28 | void *(*user_realloc_function)(void *, size_t), 29 | void (*user_free_function)(void *), 30 | ) 31 | 32 | cpdef int call_gxb_init(object ffi, object lib, int mode) 33 | 34 | cpdef ndarray claim_buffer(object 
ffi, object cdata, size_t size, dtype_t dtype) 35 | 36 | cpdef ndarray claim_buffer_2d( 37 | object ffi, object cdata, size_t cdata_size, size_t nrows, size_t ncols, dtype_t dtype, bint is_c_order 38 | ) 39 | 40 | cpdef unclaim_buffer(ndarray array) 41 | -------------------------------------------------------------------------------- /suitesparse_graphblas/utils.pyx: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from cpython.ref cimport Py_INCREF 3 | from libc.stdint cimport uintptr_t 4 | from numpy cimport NPY_ARRAY_F_CONTIGUOUS, NPY_ARRAY_OWNDATA, NPY_ARRAY_WRITEABLE 5 | from numpy cimport dtype as dtype_t 6 | from numpy cimport import_array, ndarray, npy_intp 7 | 8 | import_array() 9 | 10 | cpdef int call_gxb_init(object ffi, object lib, int mode): 11 | # We need to call `GxB_init`, but we didn't compile Cython against GraphBLAS. So, we get it from cffi. 12 | # Step 1: ffi.addressof(lib, "GxB_init") 13 | # Return type: cffi.cdata object of a function pointer. Can't cast to int. 14 | # Step 2: ffi.cast("uintptr_t", ...) 15 | # Return type: cffi.cdata object of a uintptr_t type, an unsigned pointer. Can cast to int. 16 | # Step 3: int(...) 17 | # Return type: int. The physical address of the function. 18 | # Step 4: (...) 19 | # Return type: uintptr_t in Cython. Cast Python int to Cython integer for pointers. 20 | # Step 5: (...) 21 | # Return: function pointer in Cython! 
22 | 23 | cdef GxB_init func = int(ffi.cast("uintptr_t", ffi.addressof(lib, "GxB_init"))) 24 | return func(mode, PyDataMem_NEW, PyDataMem_NEW_ZEROED, PyDataMem_RENEW, PyDataMem_FREE) 25 | 26 | 27 | cpdef ndarray claim_buffer(object ffi, object cdata, size_t size, dtype_t dtype): 28 | cdef: 29 | npy_intp dims = size 30 | uintptr_t ptr = int(ffi.cast("uintptr_t", cdata)) 31 | ndarray array 32 | Py_INCREF(dtype) 33 | array = PyArray_NewFromDescr( 34 | ndarray, dtype, 1, &dims, NULL, ptr, NPY_ARRAY_WRITEABLE, NULL 35 | ) 36 | PyArray_ENABLEFLAGS(array, NPY_ARRAY_OWNDATA) 37 | return array 38 | 39 | 40 | cpdef ndarray claim_buffer_2d( 41 | object ffi, object cdata, size_t cdata_size, size_t nrows, size_t ncols, dtype_t dtype, bint is_c_order 42 | ): 43 | cdef: 44 | size_t size = nrows * ncols 45 | ndarray array 46 | uintptr_t ptr 47 | npy_intp dims[2] 48 | int flags = NPY_ARRAY_WRITEABLE 49 | if cdata_size == size: 50 | ptr = int(ffi.cast("uintptr_t", cdata)) 51 | dims[0] = nrows 52 | dims[1] = ncols 53 | if not is_c_order: 54 | flags |= NPY_ARRAY_F_CONTIGUOUS 55 | Py_INCREF(dtype) 56 | array = PyArray_NewFromDescr( 57 | ndarray, dtype, 2, dims, NULL, ptr, flags, NULL 58 | ) 59 | PyArray_ENABLEFLAGS(array, NPY_ARRAY_OWNDATA) 60 | elif cdata_size > size: # pragma: no cover 61 | array = claim_buffer(ffi, cdata, cdata_size, dtype) 62 | if is_c_order: 63 | array = array[:size].reshape((nrows, ncols)) 64 | else: 65 | array = array[:size].reshape((ncols, nrows)).T 66 | else: # pragma: no cover 67 | raise ValueError( 68 | f"Buffer size too small: {cdata_size}. 
" 69 | f"Unable to create matrix of size {nrows}x{ncols} = {size}" 70 | ) 71 | return array 72 | 73 | 74 | cpdef unclaim_buffer(ndarray array): 75 | PyArray_CLEARFLAGS(array, NPY_ARRAY_OWNDATA | NPY_ARRAY_WRITEABLE) 76 | -------------------------------------------------------------------------------- /suitesparse_graphblas/vector.py: -------------------------------------------------------------------------------- 1 | from suitesparse_graphblas import check_status, ffi, lib 2 | 3 | from .io.serialize import deserialize_vector as deserialize # noqa: F401 4 | from .io.serialize import serialize_vector as serialize # noqa: F401 5 | 6 | 7 | def free(v): 8 | """Free a vector.""" 9 | check_status(v, lib.GrB_Vector_free(v)) 10 | 11 | 12 | def new(T, size=lib.GxB_INDEX_MAX, *, free=free): 13 | """Create a new `GrB_Vector` of type `T` and initialize it. 14 | 15 | >>> A = new(lib.GrB_UINT8, 2) 16 | >>> size(A) 17 | 2 18 | 19 | The default `size` is `lib.GxB_INDEX_MAX`. 20 | 21 | >>> A = new(lib.GrB_UINT8) 22 | >>> size(A) == lib.GxB_INDEX_MAX 23 | True 24 | 25 | The `free` argument is called when the object is garbage 26 | collected, the default is `vector.free()`. If `free` is None then 27 | there is no automatic garbage collection and it is up to the user 28 | to free the vector. 29 | """ 30 | v = ffi.new("GrB_Vector*") 31 | check_status(v, lib.GrB_Vector_new(v, T, size)) 32 | if free: 33 | return ffi.gc(v, free) 34 | return v 35 | 36 | 37 | def type(v): 38 | """Return the GraphBLAS type of the vector. 39 | 40 | >>> v = new(lib.GrB_UINT8, 2) 41 | >>> type(v) == lib.GrB_UINT8 42 | True 43 | 44 | 45 | """ 46 | T = ffi.new("GrB_Type*") 47 | check_status(v, lib.GxB_Vector_type(T, v[0])) 48 | return T[0] 49 | 50 | 51 | def size(v): 52 | """Return the size of the vector. 
53 | 54 | >>> v = new(lib.GrB_UINT8, 2) 55 | >>> size(v) == 2 56 | True 57 | 58 | """ 59 | n = ffi.new("GrB_Index*") 60 | check_status(v, lib.GrB_Vector_size(n, v[0])) 61 | return n[0] 62 | 63 | 64 | def nvals(v): 65 | """Return the number of stored elements in the vector. 66 | 67 | >>> v = new(lib.GrB_BOOL, 2) 68 | >>> nvals(v) 69 | 0 70 | >>> set_bool(v, True, 1) 71 | >>> nvals(v) 72 | 1 73 | 74 | """ 75 | n = ffi.new("GrB_Index*") 76 | check_status(v, lib.GrB_Vector_nvals(n, v[0])) 77 | return n[0] 78 | 79 | 80 | def set_bool(v, value, i): 81 | """Set a boolean value to the vector at position `i`. 82 | 83 | >>> v = new(lib.GrB_BOOL, 3) 84 | >>> set_bool(v, True, 2) 85 | >>> bool(v, 2) == True 86 | True 87 | 88 | """ 89 | check_status(v, lib.GrB_Vector_setElement_BOOL(v[0], value, i)) 90 | 91 | 92 | def bool(v, i): 93 | """Get a boolean value from the vector at position `i`. 94 | 95 | >>> v = new(lib.GrB_BOOL, 3) 96 | >>> set_bool(v, True, 2) 97 | >>> bool(v, 2) == True 98 | True 99 | 100 | """ 101 | value = ffi.new("bool*") 102 | check_status(v, lib.GrB_Vector_extractElement_BOOL(value, v[0], i)) 103 | return value[0] 104 | --------------------------------------------------------------------------------