├── .clang-format
├── .clang-format-ignore
├── .github
│   └── workflows
│       ├── debug.yml
│       ├── deploy_and_release.yml
│       └── test_and_deploy.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── LICENSE
├── LICENSE.thirdparty
├── MANIFEST.in
├── README.md
├── conftest.py
├── continuous_integration
│   ├── conda
│   │   ├── build.sh
│   │   └── meta.yaml
│   └── environment.yml
├── dev-environment.yml
├── docs
│   ├── Makefile
│   ├── _static
│   │   ├── css
│   │   │   └── custom.css
│   │   └── js
│   │       └── custom.js
│   ├── conf.py
│   ├── custom_sphinx_extensions
│   │   └── ops_reference_sphinx_extension.py
│   ├── dialect
│   │   ├── graphblas_dialect_tutorials
│   │   │   ├── graphblas_lower
│   │   │   │   ├── debugging_ops.ipynb
│   │   │   │   ├── index.rst
│   │   │   │   ├── matrix_ops.ipynb
│   │   │   │   ├── python_utilities.ipynb
│   │   │   │   ├── sparse_layouts.ipynb
│   │   │   │   └── vector_ops.ipynb
│   │   │   ├── graphblas_optimize
│   │   │   │   ├── fuse_multiply_apply.ipynb
│   │   │   │   ├── fuse_multiply_reduce.ipynb
│   │   │   │   └── index.rst
│   │   │   ├── graphblas_structuralize
│   │   │   │   ├── index.rst
│   │   │   │   ├── lower_apply_rewrite.ipynb
│   │   │   │   ├── lower_matrix_multiply_rewrite.ipynb
│   │   │   │   └── lower_reduce_to_scalar_rewrite.ipynb
│   │   │   └── index.rst
│   │   ├── index.rst
│   │   ├── ops_reference.rst
│   │   ├── ops_table.rst
│   │   └── passes_reference.rst
│   ├── index.rst
│   ├── installation
│   │   └── index.rst
│   ├── make.bat
│   └── tools
│       ├── cli
│       │   ├── apply_passes_to_string_or_file.ipynb
│       │   ├── index.rst
│       │   └── using_debugresult.ipynb
│       ├── engine
│       │   ├── index.rst
│       │   ├── matrix_plus_broadcasted_vector.ipynb
│       │   ├── scalar_plus_scalar.ipynb
│       │   ├── scalar_times_tensor.ipynb
│       │   ├── sparse_tensor_sum.ipynb
│       │   ├── sparse_vector_times_sparse_vector.ipynb
│       │   ├── spmv.ipynb
│       │   ├── tensor_plus_tensor.ipynb
│       │   └── tensor_sum.ipynb
│       ├── explorer.rst
│       ├── explorer_edit.png
│       ├── explorer_sequential.png
│       ├── explorer_topbar.png
│       └── index.rst
├── mlir_graphblas
│   ├── RandomUtils.cpp
│   ├── SparseTensorUtils.cpp
│   ├── __init__.py
│   ├── _version.py
│   ├── algo_utils.py
│   ├── algorithms.py
│   ├── cli.py
│   ├── engine.py
│   ├── explorer.py
│   ├── mlir_builder.py
│   ├── ops.py
│   ├── random_utils.pyx
│   ├── sparse_utils.pyx
│   ├── src
│   │   ├── CMakeLists.txt
│   │   ├── README.md
│   │   ├── build.py
│   │   ├── graphblas-opt
│   │   │   ├── CMakeLists.txt
│   │   │   └── graphblas-opt.cpp
│   │   ├── include
│   │   │   ├── CMakeLists.txt
│   │   │   └── GraphBLAS
│   │   │       ├── CMakeLists.txt
│   │   │       ├── GraphBLASArrayUtils.h
│   │   │       ├── GraphBLASCommonPasses.h
│   │   │       ├── GraphBLASDialect.h
│   │   │       ├── GraphBLASDialect.td
│   │   │       ├── GraphBLASOps.h
│   │   │       ├── GraphBLASOps.td
│   │   │       ├── GraphBLASPasses.h
│   │   │       ├── GraphBLASPasses.td
│   │   │       └── GraphBLASUtils.h
│   │   ├── lib
│   │   │   ├── CMakeLists.txt
│   │   │   └── GraphBLAS
│   │   │       ├── CMakeLists.txt
│   │   │       ├── GraphBLASArrayUtils.cpp
│   │   │       ├── GraphBLASDialect.cpp
│   │   │       ├── GraphBLASLinalgLowerPass.cpp
│   │   │       ├── GraphBLASLowerPass.cpp
│   │   │       ├── GraphBLASOps.cpp
│   │   │       ├── GraphBLASOptimizePass.cpp
│   │   │       ├── GraphBLASStructuralizePass.cpp
│   │   │       └── GraphBLASUtils.cpp
│   │   ├── test
│   │   │   ├── CMakeLists.txt
│   │   │   ├── GraphBLAS
│   │   │   │   ├── check_ops.mlir
│   │   │   │   ├── graphblas-opt.mlir
│   │   │   │   ├── invalid.mlir
│   │   │   │   ├── invalid_apply.mlir
│   │   │   │   ├── invalid_cast.mlir
│   │   │   │   ├── invalid_diag.mlir
│   │   │   │   ├── invalid_equal.mlir
│   │   │   │   ├── invalid_matrix_convert_layout.mlir
│   │   │   │   ├── invalid_matrix_multiply.mlir
│   │   │   │   ├── invalid_matrix_transpose.mlir
│   │   │   │   ├── invalid_matrix_vector_multiply.mlir
│   │   │   │   ├── invalid_print.mlir
│   │   │   │   ├── invalid_reduce_to_scalar.mlir
│   │   │   │   ├── invalid_reduce_to_vector.mlir
│   │   │   │   ├── invalid_select.mlir
│   │   │   │   ├── invalid_vector_argminmax.mlir
│   │   │   │   ├── invalid_vector_dot_product.mlir
│   │   │   │   ├── lower_comment.mlir
│   │   │   │   ├── lower_convert_layout.mlir
│   │   │   │   ├── lower_random_select.mlir
│   │   │   │   ├── opt_matrix_multiply_reduce.mlir
│   │   │   │   ├── opt_multiply_apply.mlir
│   │   │   │   ├── structuralize_apply.mlir
│   │   │   │   ├── structuralize_matrix_multiply.mlir
│   │   │   │   ├── structuralize_reduce_to_scalar.mlir
│   │   │   │   ├── structuralize_semirings.mlir
│   │   │   │   ├── structuralize_transpose.mlir
│   │   │   │   ├── test_apply.mlir
│   │   │   │   ├── test_apply_inplace.mlir
│   │   │   │   ├── test_cast.mlir
│   │   │   │   ├── test_comment.mlir
│   │   │   │   ├── test_convert_layout.mlir
│   │   │   │   ├── test_diag_mat.mlir
│   │   │   │   ├── test_diag_vec.mlir
│   │   │   │   ├── test_dup.mlir
│   │   │   │   ├── test_equal.mlir
│   │   │   │   ├── test_from_coo.mlir
│   │   │   │   ├── test_intersect_generic_mat.mlir
│   │   │   │   ├── test_intersect_generic_vec.mlir
│   │   │   │   ├── test_intersect_mat.mlir
│   │   │   │   ├── test_intersect_vec.mlir
│   │   │   │   ├── test_matrix_multiply_generic.mlir
│   │   │   │   ├── test_matrix_multiply_generic_extra.mlir
│   │   │   │   ├── test_matrix_multiply_mxm.mlir
│   │   │   │   ├── test_matrix_multiply_mxv.mlir
│   │   │   │   ├── test_matrix_multiply_mxv_extra.mlir
│   │   │   │   ├── test_matrix_multiply_reduce_to_scalar_generic.mlir
│   │   │   │   ├── test_matrix_multiply_vxm.mlir
│   │   │   │   ├── test_matrix_multiply_vxm_extra.mlir
│   │   │   │   ├── test_matrix_multiply_vxv.mlir
│   │   │   │   ├── test_reduce_to_scalar.mlir
│   │   │   │   ├── test_reduce_to_scalar_extra.mlir
│   │   │   │   ├── test_reduce_to_scalar_generic.mlir
│   │   │   │   ├── test_reduce_to_vector.mlir
│   │   │   │   ├── test_reduce_to_vector_generic.mlir
│   │   │   │   ├── test_select_generic.mlir
│   │   │   │   ├── test_select_index.mlir
│   │   │   │   ├── test_select_mask.mlir
│   │   │   │   ├── test_select_mask_extra.mlir
│   │   │   │   ├── test_select_probability.mlir
│   │   │   │   ├── test_select_value.mlir
│   │   │   │   ├── test_size_num.mlir
│   │   │   │   ├── test_to_coo_vals.mlir
│   │   │   │   ├── test_transpose.mlir
│   │   │   │   ├── test_uniform_complement.mlir
│   │   │   │   ├── test_union.mlir
│   │   │   │   ├── test_union_generic.mlir
│   │   │   │   ├── test_update.mlir
│   │   │   │   ├── test_vector_update_accumulate.mlir
│   │   │   │   └── test_vector_update_generic.mlir
│   │   │   ├── lit.cfg.py
│   │   │   └── lit.site.cfg.py.in
│   │   └── triangle_count.mlir
│   ├── tests
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── data
│   │   │   └── application_classification.npz
│   │   ├── jit_engine_test_utils.py
│   │   ├── test_algo_utils.py
│   │   ├── test_algorithms.py
│   │   ├── test_cli.py
│   │   ├── test_io.py
│   │   ├── test_jit_engine.py
│   │   ├── test_jit_engine_bad_inputs.py
│   │   ├── test_mlir_builder.py
│   │   ├── test_mlir_builder_bad_inputs.py
│   │   ├── test_profile.py
│   │   ├── test_tools.py
│   │   └── test_verify.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── tersify_mlir.py
│   │   └── utils.py
│   └── types.py
├── pyproject.toml
├── run-clang-format.py
├── run_tests.sh
├── setup.cfg
├── setup.py
└── versioneer.py
/.clang-format:
--------------------------------------------------------------------------------
1 | BasedOnStyle: LLVM
2 | AlwaysBreakTemplateDeclarations: Yes
3 |
--------------------------------------------------------------------------------
/.clang-format-ignore:
--------------------------------------------------------------------------------
1 | # ignore generated code
2 | mlir_graphblas/src/build/*
3 | # ignore file vendored from MLIR
4 | mlir_graphblas/SparseTensorUtils.cpp
5 | # ignore Cython
6 | mlir_graphblas/sparse_utils.cpp
7 |
--------------------------------------------------------------------------------
/.github/workflows/debug.yml:
--------------------------------------------------------------------------------
1 | name: Debug CI Jobs
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | debug_enabled:
7 | description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)'
8 | required: false
9 | default: false
10 |
11 | jobs:
12 | build:
13 | strategy:
14 | matrix:
15 | os:
16 | - ubuntu-latest
17 | # - macos-latest
18 | pyver:
19 | - 3.8
20 | runs-on: ${{ matrix.os }}
21 | env:
22 | ARTIFACT_NAME: built_package_py${{matrix.pyver}}_${{matrix.os}}.tar.bz2
23 | defaults:
24 | run:
25 | shell: bash -l {0}
26 | steps:
27 | # Enable tmate debugging of manually-triggered workflows if the input option was provided
28 | - name: Checkout
29 | uses: actions/checkout@v2
30 | with:
31 | fetch-depth: 0
32 | if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled }}
33 | - name: Create env
34 | uses: conda-incubator/setup-miniconda@v2
35 | with:
36 | auto-update-conda: true
37 | activate-environment: mg
38 | environment-file: continuous_integration/environment.yml
39 | if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled }}
40 | - name: Update env
41 | run: |
42 | conda install -q conda-build
43 | pip install -e .
44 | conda list
45 | if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled }}
46 | - name: Setup tmate session
47 | uses: mxschmitt/action-tmate@v3
48 | if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled }}
49 |
--------------------------------------------------------------------------------
/.github/workflows/deploy_and_release.yml:
--------------------------------------------------------------------------------
1 | name: Deploy Release
2 |
3 | on:
4 | push:
5 | tags:
6 | - '*'
7 |
8 | jobs:
9 | deploy_pypi:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/checkout@v2
14 | - uses: conda-incubator/setup-miniconda@v2
15 | with:
16 | auto-update-conda: true
17 | activate-environment: mg
18 | environment-file: continuous_integration/environment.yml
19 | - name: Build
20 | shell: bash -l {0}
21 | run: |
22 | pip install -e .
23 | python setup.py sdist bdist_wheel
24 | - name: Publish to PyPI
25 | uses: pypa/gh-action-pypi-publish@master
26 | with:
27 | user: __token__
28 | password: ${{ secrets.PYPI_SECRET }}
29 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Cython
2 | mlir_graphblas/sparse_utils.cpp
3 | mlir_graphblas/sparse_utils.html
4 | mlir_graphblas/random_utils.cpp
5 | mlir_graphblas/random_utils.html
6 |
7 | # Byte-compiled / optimized / DLL files
8 | __pycache__/
9 | *.py[cod]
10 | *$py.class
11 |
12 | # C extensions
13 | *.so
14 |
15 | # Distribution / packaging
16 | .Python
17 | build/
18 | develop-eggs/
19 | dist/
20 | downloads/
21 | eggs/
22 | .eggs/
23 | lib/
24 | !mlir_graphblas/src/lib
25 | lib64/
26 | parts/
27 | sdist/
28 | var/
29 | wheels/
30 | *.egg-info/
31 | .installed.cfg
32 | *.egg
33 | MANIFEST
34 |
35 | # PyInstaller
36 | # Usually these files are written by a python script from a template
37 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
38 | *.manifest
39 | *.spec
40 |
41 | # Installer logs
42 | pip-log.txt
43 | pip-delete-this-directory.txt
44 |
45 | # Unit test / coverage reports
46 | htmlcov/
47 | .tox/
48 | .coverage
49 | .coverage.*
50 | .cache
51 | nosetests.xml
52 | coverage.xml
53 | *.cover
54 | .hypothesis/
55 | .pytest_cache/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 | db.sqlite3
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # pyenv
83 | .python-version
84 |
85 | # celery beat schedule file
86 | celerybeat-schedule
87 |
88 | # SageMath parsed files
89 | *.sage.py
90 |
91 | # Environments
92 | .env
93 | .venv
94 | env/
95 | venv/
96 | ENV/
97 | env.bak/
98 | venv.bak/
99 |
100 | # Spyder project settings
101 | .spyderproject
102 | .spyproject
103 |
104 | # Rope project settings
105 | .ropeproject
106 |
107 | # mkdocs documentation
108 | /site
109 |
110 | # mypy
111 | .mypy_cache/
112 |
113 | # PyCharm
114 | .idea
115 |
116 | # Vi
117 | *.sw?
118 |
119 | # Mac
120 | .DS_Store
121 |
122 | # VSCode
123 | .vscode
124 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/psf/black
3 | rev: stable
4 | hooks:
5 | - id: black
6 | language_version: python3.7
7 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
2 |
3 | version: 2
4 |
5 | sphinx:
6 | configuration: docs/conf.py
7 |
8 | formats:
9 | - pdf
10 |
11 | conda:
12 | environment: dev-environment.yml
13 |
14 |
--------------------------------------------------------------------------------
/LICENSE.thirdparty:
--------------------------------------------------------------------------------
1 | ---
2 | run-clang-format.py
3 | original repository: https://github.com/Sarcasm/run-clang-format
4 |
5 | MIT License
6 |
7 | Copyright (c) 2017 Guillaume Papin
8 |
9 | Permission is hereby granted, free of charge, to any person obtaining a copy
10 | of this software and associated documentation files (the "Software"), to deal
11 | in the Software without restriction, including without limitation the rights
12 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 | copies of the Software, and to permit persons to whom the Software is
14 | furnished to do so, subject to the following conditions:
15 |
16 | The above copyright notice and this permission notice shall be included in all
17 | copies or substantial portions of the Software.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 | SOFTWARE.
26 | ---
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include MANIFEST.in
2 | include README.md setup.py versioneer.py
3 | include versioneer.py
4 | include mlir_graphblas/_version.py
5 | include mlir_graphblas/tests/data/*
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # MLIR dialect for GraphBLAS + Python tools
3 |
4 | [![Conda Version](https://img.shields.io/conda/v/metagraph/mlir-graphblas.svg)](https://anaconda.org/metagraph/mlir-graphblas)
5 | [![Build Status](https://github.com/metagraph-dev/mlir-graphblas/actions/workflows/test_and_deploy.yml/badge.svg?branch=main)](https://github.com/metagraph-dev/mlir-graphblas/actions/workflows/test_and_deploy.yml?query=branch%3Amain)
6 |
7 | *Note that this code currently requires [llvm-project@bd0cae6](https://github.com/llvm/llvm-project/commit/bd0cae6).*
8 |
9 | ## graphblas-opt
10 |
11 | In order to build `graphblas-opt`, run `python3 build.py` from `mlir_graphblas/src/`. This will build `graphblas-opt` and run the tests for `graphblas-opt`. The built files will be stored in `mlir_graphblas/src/build`.
12 |
13 | `build.py` does not rebuild from scratch by default. To perform a clean build, run `python3 build.py -build-clean`.
14 |
15 | ## Linting with clang-format
16 |
17 | Ensure that `clang-format` is available (install the `clang-tools` conda package) and run:
18 |
19 | ```
20 | ./run-clang-format.py -r mlir_graphblas/src/
21 | ```
22 |
23 | If changes are required, they can be made in place with
24 | ```
25 | ./run-clang-format.py -i -r mlir_graphblas/src/
26 | ```
27 |
28 | ## Note about Transition
29 |
30 | mlir-graphblas is transitioning away from lowering code targeting the SCF dialect and towards lowering code targeting
31 | `linalg.generic`. This process is happening in tandem with changes to the sparse-tensor dialect's lowering of
32 | `linalg.generic` with dynamically-shaped output. As a result, mlir-graphblas is temporarily pointing at the `mlir-ac`
33 | conda package which is built from a branch off the LLVM project code. Once these changes are merged into the main branch
34 | on LLVM, mlir-graphblas will be updated to target that. Until then, we will be out of sync with the LLVM project.
--------------------------------------------------------------------------------
/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import distutils.core
3 | import subprocess
4 |
5 |
6 | def pytest_configure(config):
7 | distutils.core.run_setup(
8 | "./setup.py", script_args=["build_ext", "--inplace"], stop_after="run"
9 | )
10 |
11 | # Ensure graphblas-opt is built
12 | subprocess.run(["python", os.path.join("mlir_graphblas", "src", "build.py")])
13 |
14 | return
15 |
--------------------------------------------------------------------------------
/continuous_integration/conda/build.sh:
--------------------------------------------------------------------------------
1 |
2 | python setup.py build_ext
3 | python setup.py install --single-version-externally-managed --record=record.txt
4 |
5 | # Build graphblas-opt
6 |
7 | python3 ./mlir_graphblas/src/build.py -build-clean
8 | GRAPHBLAS_OPT_BUILD_DIR=./mlir_graphblas/src/build
9 | cp $GRAPHBLAS_OPT_BUILD_DIR/bin/graphblas-opt $PREFIX/bin
10 |
--------------------------------------------------------------------------------
/continuous_integration/conda/meta.yaml:
--------------------------------------------------------------------------------
1 | # conda build -c metagraph/label/dev -c conda-forge .
2 |
3 | package:
4 | name: mlir-graphblas
5 | version: {{ environ.get('GIT_DESCRIBE_TAG', 'unknown') }}
6 |
7 | source:
8 | path: ../..
9 |
10 | build:
11 | number: {{ environ.get('GIT_DESCRIBE_NUMBER', 0)|int }}
12 | string: py{{PY_VER}}_{{GIT_DESCRIBE_HASH}}_{{ environ.get('GIT_DESCRIBE_NUMBER', 0) }}
13 |
14 | requirements:
15 | host:
16 | - python
17 | - numpy >=1.19.2
18 | - cython >=0.29.23
19 | - mlir-ac ==14.0=g4e57a19_0
20 | - cmake >=3.13.4
21 | - ninja
22 | - lit
23 | - donfig
24 | - pymlir >=0.3.1
25 | - llvmlite <0.37
26 | - jinja2
27 |
28 | run:
29 | - python
30 | - mlir-ac ==14.0=g4e57a19_0
31 | - pymlir >=0.3.1
32 | - llvmlite <0.37
33 | - pygments
34 | - donfig
35 | - panel
36 | - numpy
37 | - jinja2
38 | - bokeh<2.3 # temp restriction until panel/bokeh bugs are worked out
39 |
40 | test:
41 | requires:
42 | - pytest
43 | - pytest-cov
44 | - coverage
45 | - grblas >=1.3.13
46 | - scipy
47 |
48 | commands:
49 | - python -c "import mlir_graphblas"
50 | - pytest --pyargs mlir_graphblas.tests
51 |
52 | about:
53 | home: https://github.com/metagraph-dev/mlir-graphblas
54 | license: Apache 2.0
55 | license_family: Apache
56 | license_file: LICENSE
57 | summary: 'MLIR dialect for GraphBLAS'
58 | description: |
59 | MLIR dialect for GraphBLAS + Python tools
60 | dev_url: https://github.com/metagraph-dev/mlir-graphblas
61 |
--------------------------------------------------------------------------------
/continuous_integration/environment.yml:
--------------------------------------------------------------------------------
1 | name: mg
2 |
3 | channels:
4 | - conda-forge
5 | - metagraph
6 |
7 | dependencies:
8 | # dev environment
9 | - python=3.8
10 | - coverage
11 | - pytest
12 | - pytest-cov
13 | - pytest-forked
14 | - black
15 | - clang-tools
16 | - grblas>=1.3.13
17 |
18 |
19 | # dependencies (so setup.py develop doesn't pip install them)
20 | - metagraph/label/dev::mlir-ac=14.0=*_0 # temp restriction to use metagraph dev label
21 | - scipy
22 | - numpy
23 | - pymlir
24 | - llvmlite <0.37
25 | - donfig
26 | - pygments
27 | - cython
28 | - jinja2
29 | # temp restrictions until graphblas-opt is built in setup.py
30 | - cmake>=3.13.4
31 | - ninja
32 | - lit
33 |
--------------------------------------------------------------------------------
/dev-environment.yml:
--------------------------------------------------------------------------------
1 | name: mg
2 |
3 | channels:
4 | - conda-forge
5 | - metagraph
6 |
7 | dependencies:
8 | # dev environment
9 | - python=3.8
10 | - coverage
11 | - pytest
12 | - pytest-cov
13 | - black
14 | - clang-tools
15 |
16 | # documentation
17 | - sphinx=3.0.4
18 | - jinja2<3.1
19 | - nbsphinx
20 | - notebook
21 | - conda-forge::pydata-sphinx-theme=0.5.2
22 | - pypandoc
23 | - pip
24 | - pip:
25 | - rst2pdf
26 |
27 | # dependencies (so setup.py develop doesn't pip install them)
28 | - metagraph/label/dev::mlir-ac=14.0=*_0 # temp restriction to use metagraph dev label
29 | - metagraph::pymlir
30 | - scipy
31 | - conda-forge::grblas
32 | - llvmlite
33 | - donfig
34 | - pygments
35 | - cython
36 | - panel
37 | - bokeh<2.3 # temp restriction until panel/bokeh bugs are worked out
38 | - cmake>=3.13.4
39 | - ninja
40 | - lit
41 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/_static/css/custom.css:
--------------------------------------------------------------------------------
1 | .with-border {
2 | border-width: 1.5px;
3 | border-style: solid;
4 | border-color: #cecbcb;
5 | }
--------------------------------------------------------------------------------
/docs/_static/js/custom.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metagraph-dev/mlir-graphblas/8552a4b794c2da411b1b83fc3e380b50ece3493e/docs/_static/js/custom.js
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 |
13 | import os
14 | import sys
15 |
16 | sys.path.append(os.path.abspath("./custom_sphinx_extensions"))
17 |
18 |
19 | # -- Project information -----------------------------------------------------
20 |
21 | project = "mlir-graphblas"
22 | copyright = "2021, Anaconda, Inc"
23 | author = "Anaconda, Inc"
24 |
25 |
26 | # -- General configuration ---------------------------------------------------
27 |
28 | # Add any Sphinx extension module names here, as strings. They can be
29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
30 | # ones.
31 | extensions = [
32 | "sphinx.ext.autodoc",
33 | "rst2pdf.pdfbuilder",
34 | "nbsphinx",
35 | "ops_reference_sphinx_extension",
36 | ]
37 | html_css_files = ["css/custom.css"]
38 | html_js_files = ["js/custom.js"]
39 |
40 | # Add any paths that contain templates here, relative to this directory.
41 | templates_path = ["_templates"]
42 |
43 | # List of patterns, relative to source directory, that match files and
44 | # directories to ignore when looking for source files.
45 | # This pattern also affects html_static_path and html_extra_path.
46 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
47 |
48 |
49 | # -- Options for HTML output -------------------------------------------------
50 |
51 | # The theme to use for HTML and HTML Help pages. See the documentation for
52 | # a list of builtin themes.
53 | #
54 | html_theme = "pydata_sphinx_theme"
55 | # html_logo = "_static/mlir_graphblas_small.png"
56 |
57 | # Add any paths that contain custom static files (such as style sheets) here,
58 | # relative to this directory. They are copied after the builtin static files,
59 | # so a file named "default.css" will overwrite the builtin "default.css".
60 | html_static_path = ["_static"]
61 |
62 | html_theme_options = {"github_url": "https://github.com/metagraph-dev/mlir-graphblas"}
63 |
64 | # -- Options for notebook output -------------------------------------------------
65 |
66 | ### nbsphinx config
67 | nbsphinx_input_prompt = "%.0s" # suppress prompt
68 | nbsphinx_output_prompt = "%.0s" # suppress prompt
69 |
70 | # from
71 | nbsphinx_prolog = r"""
72 | {% set nbname = env.doc2path(env.docname, base=False) %}
73 |
74 | .. raw:: html
75 |
76 |     <div class="admonition note">
77 |       This page was generated from
78 |       <code>{{ nbname|e }}</code>.
79 |     </div>
80 |
81 |
82 |
83 | .. raw:: latex
84 |
85 | \nbsphinxstartnotebook{\scriptsize\noindent\strut
86 | \textcolor{gray}{The following section was generated from
87 | \sphinxcode{\sphinxupquote{\strut {{ nbname | escape_latex }}}} \dotfill}}
88 | """
89 |
--------------------------------------------------------------------------------
/docs/custom_sphinx_extensions/ops_reference_sphinx_extension.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | import subprocess
3 | import sys
4 | import os
5 | import re
6 | import pypandoc
7 | import docutils
8 |
9 |
10 | class OpsReference(docutils.parsers.rst.Directive):
11 |
12 | _op_header_regex_pattern = re.compile(
13 | r"^### `graphblas\..*` \(::mlir::graphblas::.*\)$"
14 | )
15 |
16 | def run(self):
17 | # build the command
18 | current_file_dir = os.path.dirname(__file__)
19 | src_dir = os.path.join(current_file_dir, "..", "..", "mlir_graphblas", "src")
20 | src_dir = os.path.abspath(src_dir)
21 | assert os.path.isdir(src_dir)
22 | includes = [
23 | os.path.join(src_dir, rel_dir)
24 | for rel_dir in (
25 | "include",
26 | "include/GraphBLAS",
27 | "build/include",
28 | )
29 | ]
30 | includes.append(os.path.join(sys.exec_prefix, "include"))
31 | includes = [f"-I{directory}" for directory in includes]
32 | command = (
33 | [shutil.which("mlir-tblgen"), "--gen-dialect-doc"]
34 | + includes
35 | + [os.path.join(src_dir, "include/GraphBLAS/GraphBLASOps.td")]
36 | )
37 |
38 | # run the command
39 | process = subprocess.run(
40 | command,
41 | stdout=subprocess.PIPE,
42 | stderr=subprocess.PIPE,
43 | cwd=os.path.dirname(__file__),
44 | )
45 | assert process.returncode == 0
46 | assert len(process.stderr) == 0
47 |
48 | # process the markdown into restructured text
49 | markdown = process.stdout.decode()
50 | markdown = markdown.replace(
51 | "## Operation definition", "## Operation Definitions", 1
52 | )
53 | markdown = markdown.replace("# 'graphblas' Dialect", "", 1)
54 | markdown = markdown.replace("[TOC]", "", 1)
55 | lines = markdown.splitlines()
56 | lines = map(str.rstrip, lines)
57 | lines = (
58 | " ".join(l.split()[:-1]) if self._op_header_regex_pattern.match(l) else l
59 | for l in lines
60 | )
61 | markdown = "\n".join(lines)
62 | rst_text = pypandoc.convert_text(markdown, "rst", format="md")
63 |
64 | # generate nodes
65 | default_settings = docutils.frontend.OptionParser(
66 | components=(docutils.parsers.rst.Parser,)
67 | ).get_default_values()
68 | document = docutils.utils.new_document("dummy_name", default_settings)
69 | parser = docutils.parsers.rst.Parser()
70 | parser.parse(rst_text, document)
71 |
72 | return document.children
73 |
74 |
75 | def setup(app):
76 | app.add_directive("ops_reference", OpsReference)
77 |
78 | return {
79 | "version": "0.1",
80 | "parallel_read_safe": True,
81 | "parallel_write_safe": True,
82 | }
83 |
--------------------------------------------------------------------------------
/docs/dialect/graphblas_dialect_tutorials/graphblas_lower/index.rst:
--------------------------------------------------------------------------------
1 | .. _graphblas_lowering_pass:
2 |
3 | GraphBLAS Lowering Pass
4 | =======================
5 |
6 | These tutorials and examples will cover how to use the GraphBLAS dialect's ``--graphblas-lower`` pass by using the :ref:`engine` to lower several ops from the GraphBLAS dialect.
7 |
8 | The main purpose of the ``--graphblas-lower`` pass is to lower from the GraphBLAS dialect into a lower-level dialect, e.g. the `SCF dialect <https://mlir.llvm.org/docs/Dialects/SCFDialect/>`_ or the `Sparse Tensor dialect <https://mlir.llvm.org/docs/Dialects/SparseTensorOps/>`_. Rather than simply showing the code transformations, we'll use the :ref:`engine` to take some example MLIR code using the GraphBLAS dialect and create executable Python code from it. Since we can't go over all of the ops in the GraphBLAS dialect, and since all of the ops are documented with examples in the :ref:`graphblas_ops_reference`, the examples in this section mostly cover commonly used ops.
9 |
10 | The tutorials are somewhat sequentially dependent, as later tutorials assume completion of previous ones. They also assume familiarity with the content of the :ref:`graphblas_ops_reference`.
11 |
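In outline, each tutorial follows the same shape (a sketch rather than a runnable example: ``mlir_code`` stands for GraphBLAS dialect code from the tutorial at hand, the pass list is abbreviated, and the function name is hypothetical):

.. code-block:: python

    from mlir_graphblas import MlirJitEngine

    engine = MlirJitEngine()
    # --graphblas-lower rewrites the GraphBLAS ops; each tutorial adds the
    # further standard passes needed to finish lowering to LLVM.
    engine.add(mlir_code, ["--graphblas-lower"])
    my_func = engine["my_func"]  # hypothetical function defined in mlir_code
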
12 | .. toctree::
13 | :maxdepth: 1
14 |
15 | python_utilities
16 | sparse_layouts
17 | vector_ops
18 | matrix_ops
19 | debugging_ops
20 |
--------------------------------------------------------------------------------
/docs/dialect/graphblas_dialect_tutorials/graphblas_optimize/index.rst:
--------------------------------------------------------------------------------
1 | .. _graphblas_optimizing_pass:
2 |
3 | GraphBLAS Optimizing Pass
4 | =========================
5 |
6 | These tutorials and examples will cover how to use the GraphBLAS dialect's ``--graphblas-structuralize`` and ``--graphblas-optimize`` passes to optimize several ops from the GraphBLAS dialect.
7 |
8 | These tutorials assume the completion of the :ref:`graphblas_lowering_pass` and :ref:`graphblas_structuralizing_pass` tutorials.
9 |
10 | The tutorials are somewhat sequentially dependent, as later tutorials assume completion of previous ones.
11 |
12 | Rather than using the JIT engine to lower the MLIR code examples down to something executable, these tutorials will use the ``--graphblas-structuralize`` and ``--graphblas-optimize`` passes directly to show the code transformations and demonstrate the expected behavior.
13 |
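As a quick sketch of the mechanics (using the :ref:`cli` wrapper, and assuming a ``graphblas-opt`` binary that registers the GraphBLAS passes is on your ``PATH``; the empty module is a stand-in for the tutorials' GraphBLAS dialect code):

.. code-block:: python

    from mlir_graphblas import MlirOptCli

    # Assumption: graphblas-opt is on PATH; plain mlir-opt lacks these passes.
    cli = MlirOptCli(executable="graphblas-opt")
    mlir_input = b"module {}"  # stand-in input
    # Structuralize first so that --graphblas-optimize can fuse the generic ops.
    result = cli.apply_passes(
        mlir_input, ["--graphblas-structuralize", "--graphblas-optimize"]
    )
    print(result)
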
14 | .. toctree::
15 | :maxdepth: 1
16 |
17 | fuse_multiply_reduce
18 | fuse_multiply_apply
19 |
--------------------------------------------------------------------------------
/docs/dialect/graphblas_dialect_tutorials/graphblas_structuralize/index.rst:
--------------------------------------------------------------------------------
1 | .. _graphblas_structuralizing_pass:
2 |
3 | GraphBLAS Structuralizing Pass
4 | ==============================
5 |
6 | These tutorials and examples will cover how to use the GraphBLAS dialect's ``--graphblas-structuralize`` pass to lower several ops from the GraphBLAS dialect into a form that the ``--graphblas-optimize`` pass can more easily optimize. This mostly happens by lowering certain ops into their more generic equivalents, e.g. lowering a ``graphblas.apply`` op into a ``graphblas.apply_generic`` op, which can then be optimized via op fusion and similar optimizations.
7 |
8 | These tutorials assume the completion of the :ref:`graphblas_lowering_pass` tutorials and knowledge of the content in the :ref:`graphblas_ops_reference` (in particular, the ``graphblas.*_generic`` ops).
9 |
10 | The tutorials are somewhat sequentially dependent, as later tutorials assume completion of previous ones.
11 |
12 | Rather than using the JIT engine to lower the MLIR code examples down to something executable, these tutorials will use only the ``--graphblas-structuralize`` pass to show the code transformations and demonstrate the expected behavior. Since the transformations are fairly simple, these tutorials will not do a deep dive but act more as example demonstrations.
13 |
14 | .. toctree::
15 | :maxdepth: 1
16 |
17 | lower_matrix_multiply_rewrite
18 | lower_apply_rewrite
19 | lower_reduce_to_scalar_rewrite
20 |
--------------------------------------------------------------------------------
/docs/dialect/graphblas_dialect_tutorials/index.rst:
--------------------------------------------------------------------------------
1 | .. _graphblas_dialect_tutorials:
2 |
3 | GraphBLAS Dialect Tutorials
4 | ===========================
5 |
6 | These tutorials will cover all the available ops and passes in the GraphBLAS dialect.
7 | They will focus on using the GraphBLAS dialect with the :ref:`engine` and assume completion of
8 | the :ref:`engine` tutorials.
9 |
10 | The tutorials are somewhat sequentially dependent, as later tutorials assume completion of previous ones.
11 |
12 | .. toctree::
13 | :maxdepth: 1
14 |
15 | graphblas_lower/index
16 | graphblas_structuralize/index
17 | graphblas_optimize/index
18 |
--------------------------------------------------------------------------------
/docs/dialect/index.rst:
--------------------------------------------------------------------------------
1 | .. _dialect:
2 |
3 | GraphBLAS Dialect
4 | =================
5 |
6 | The ``graphblas`` dialect is designed to make it possible to express
7 | `GraphBLAS`_ algorithms in `MLIR`_ in a compact way. The dialect does not
8 | define any new types, but rather operates on `MLIR sparse tensors`_.
9 |
10 | .. _GraphBLAS: https://graphblas.github.io/
11 | .. _MLIR: https://mlir.llvm.org/
12 | .. _MLIR sparse tensors: https://mlir.llvm.org/docs/Dialects/SparseTensorOps/
13 |
14 | .. toctree::
15 | :maxdepth: 2
16 | :caption: Contents:
17 |
18 | ops_reference
19 | ops_table
20 | passes_reference
21 | graphblas_dialect_tutorials/index
22 |
--------------------------------------------------------------------------------
/docs/dialect/ops_reference.rst:
--------------------------------------------------------------------------------
1 |
2 | .. include::
3 |
4 | .. _graphblas_ops_reference:
5 |
6 | GraphBLAS Dialect Op Reference
7 | ==============================
8 |
9 | .. ops_reference::
10 |
--------------------------------------------------------------------------------
/docs/dialect/ops_table.rst:
--------------------------------------------------------------------------------
1 | Supported GraphBLAS Spec Operations
2 | ===================================
3 |
4 | .. csv-table:: Supported GraphBLAS Spec operations
5 | :header: Operation, Matrix, Vector, accum, mask, compl. mask, mlir name, comment
6 | :widths: 20, 10, 10, 10, 10, 10, 20, 20
7 |
8 | mxm , Y , , Y , Y , Y , matrix_multiply,
9 | vxm , Y , Y , Y , Y , Y , matrix_multiply,
10 | mxv , Y , Y , Y , Y , Y , matrix_multiply,
11 | eWiseMult , Y , Y , Y , Y , Y , intersect,
12 | eWiseAdd , Y , Y , Y , Y , Y , union,
13 | apply , Y , Y , Y , N , N , apply,
14 | apply_Binop1st , Y , Y , Y , N , N , apply,
15 | apply_Binop2nd , Y , Y , Y , N , N , apply,
16 | select (no val) , Y , Y , Y , N , N , select,
17 | select (w/ val) , Y , Y , Y , N , N , select,
18 | select mask , Y , Y , Y , Y , Y , select_mask,
19 | reduce_to_scalar, Y , Y , N , , , reduce_to_scalar,
20 | reduce_to_vector, Y , , Y , Y , Y , reduce_to_vector,
21 | transpose , Y , , Y , N , N , transpose,
22 | kronecker , N , , N , N , N ,,
23 | diag , Y , Y , , , , diag,
24 | assign , N , N , N , N , N ,,
25 | col/row assign , N , , N , N , N ,,
26 | subassign , N , N , N , N , N ,, GxB
27 | assign scalar many, Y , Y , Y , Y , Y ,apply/uniform_complement, custom
28 | extract , N , N , N , N , N ,,
29 | col extract , N , , N , N , N ,,
30 | set element , N , N , , , ,,
31 | extract element , N , N , , , ,,
32 | remove element , N , N , , , ,,
33 | build , Y , Y , , , ,from_coo,
34 | clear , N , N , , , ,,
35 | dup , Y , Y , , , , dup,
36 | size/nrows/ncols, Y , Y , , , , size/num_rows/num_cols,
37 | nvals , Y , Y , , , , num_vals,
38 | resize , N , N , , , ,,
39 | extractTuples , Y , Y , , , ,to_coo,
40 | concat , N , , , , ,, GxB
41 | split , N , , , , ,, GxB
42 | isequal , Y , Y , , , , equal, custom
43 | vxv/inner , , Y , N , , , matrix_multiply, custom
44 | select_rowwise , Y , , Y , N , N , matrix_select_random, custom
45 |
46 | .. csv-table:: Supported GraphBLAS operations for updating
47 | :header: accumulation, mask, replace, is supported
48 | :widths: 10, 10, 10, 10
49 |
50 | True , True , True , Y
51 | True , True , False, Y
52 | True , False, , Y
53 | False, True , True , Y
54 | False, True , False, Y
55 | False, False, , Y
56 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 |
2 | mlir-graphblas Documentation
3 | ============================
4 |
5 | mlir-graphblas is an implementation of `GraphBLAS <https://graphblas.github.io/>`_ using `MLIR <https://mlir.llvm.org/>`_.
6 | The goal is to use MLIR's compiler optimizations to perform delayed fusion and achieve good performance with
7 | a relatively simple and understandable implementation of the required API functions.
8 |
9 | mlir-graphblas also includes several tools to assist in writing MLIR code.
10 |
11 | mlir-graphblas is licensed under the `Apache 2.0 license <https://www.apache.org/licenses/LICENSE-2.0>`_.
12 |
13 | .. toctree::
14 | :maxdepth: 2
15 |
16 | installation/index
17 | dialect/index
18 | tools/index
19 |
--------------------------------------------------------------------------------
/docs/installation/index.rst:
--------------------------------------------------------------------------------
1 | .. _installation:
2 |
3 | Installation
4 | ============
5 |
6 | mlir-graphblas is implemented via Python and Cython.
7 |
8 | Building from source or installing from ``pip`` is possible, but the recommended method to install is using ``conda``.
9 |
10 | Python version support
11 | ----------------------
12 |
13 | Python 3.8 and above is supported.
14 |
15 | Installing using conda
16 | ----------------------
17 |
18 | ::
19 |
20 | conda install -c conda-forge -c metagraph mlir-graphblas
21 |
22 | Installing from source
23 | ----------------------
24 |
25 | Instructions for building from source are currently a work in progress.
26 |
27 | Installing using venv
28 | ---------------------
29 |
30 | Instructions for installing via venv are currently a work in progress.
31 |
32 | Required Dependencies
33 | ---------------------
34 |
35 | These should be automatically installed when ``mlir-graphblas`` is installed:
36 |
37 | - `NumPy <https://numpy.org>`__
38 | - `SciPy <https://scipy.org>`__
39 | - `PyMLIR <https://github.com/spcl/pymlir>`__
40 | - `llvmlite <https://llvmlite.readthedocs.io>`__
41 | - `pygments <https://pygments.org>`__
42 | - `donfig <https://donfig.readthedocs.io>`__
43 | - `panel <https://panel.holoviz.org>`__
44 | - `bokeh <https://bokeh.org>`__
45 | - `MLIR <https://mlir.llvm.org>`__
46 | - `Cython <https://cython.org>`__
47 | - `CMake <https://cmake.org>`__
48 | - `Ninja <https://ninja-build.org>`__
49 | - `lit <https://llvm.org/docs/CommandGuide/lit.html>`__
50 | - `Jinja <https://jinja.palletsprojects.com>`__
51 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/tools/cli/index.rst:
--------------------------------------------------------------------------------
1 | .. _cli:
2 |
3 | mlir-opt CLI Wrapper
4 | ====================
5 |
6 | ``mlir-opt`` is the standard command line tool for applying passes to MLIR and lowering from one dialect
7 | to another.
8 |
9 | ``mlir_graphblas.MlirOptCli`` is a wrapper around the ``mlir-opt`` command line executable.
10 |
11 | .. code-block:: python
12 |
13 | cli = MlirOptCli(executable=None, options=None)
14 |
15 |
16 | The executable defaults to ``mlir-opt``, but can be specified to include the full path or a differently
17 | named executable if needed.
18 |
19 | ``options``, if provided, must be a list of strings to pass to ``mlir-opt`` with every call. These options
20 | are in addition to the passes that will be applied.
21 |
22 | Applying Passes
23 | ---------------
24 |
25 | The first way to apply passes is by calling
26 |
27 | .. code-block:: python
28 |
29 | result = cli.apply_passes(input_mlir, list_of_passes)
30 |
31 | This will return a string containing the final result of applying the list of passes to the input.
32 |
33 | If any errors occur, ``MlirOptError`` will be raised. This error contains a ``.debug_result`` attribute,
34 | which is explained below.
35 |
36 | The second way to apply passes is by calling
37 |
38 | .. code-block:: python
39 |
40 | result = cli.debug_passes(input_mlir, list_of_passes)
41 |
42 | This always returns a ``DebugResult`` object.
43 |
44 | DebugResult
45 | -----------
46 |
47 | A ``DebugResult`` object contains a list of ``.passes`` applied (or attempted to apply) and a list of
48 | ``.stages`` which resulted, including the original. As a result, there is always one more stage than
49 | pass.
50 |
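For example, a minimal sketch (the passes here are illustrative, and the tiny module is a stand-in for real input):

.. code-block:: python

    from mlir_graphblas import MlirOptCli, MlirOptError

    cli = MlirOptCli()
    mlir_input = b"module {}"  # stand-in for real MLIR code
    passes = ["--canonicalize", "--cse"]  # illustrative pass list
    try:
        final_ir = cli.apply_passes(mlir_input, passes)
    except MlirOptError as err:
        debug = err.debug_result  # inspect where the pipeline failed
    else:
        debug = cli.debug_passes(mlir_input, passes)

    print(debug.passes)       # the two passes above
    print(len(debug.stages))  # 3: the original plus one stage per pass
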
51 | These stages and passes can be inspected manually, but the easiest way to interact with them is through
52 | the :ref:`explorer`. To open the explorer, call
53 |
54 | .. code-block:: python
55 |
56 | result.explore()
57 |
58 | A new browser tab will appear showing the explorer.
59 |
60 | Examples
61 | --------
62 |
63 | Here are some examples of our CLI tool:
64 |
65 | .. toctree::
66 | :maxdepth: 1
67 |
68 | apply_passes_to_string_or_file
69 | using_debugresult
70 |
--------------------------------------------------------------------------------
/docs/tools/engine/index.rst:
--------------------------------------------------------------------------------
1 | .. _engine:
2 |
3 | JIT Engine
4 | ==========
5 |
6 | ``mlir_graphblas.MlirJitEngine`` provides a way to go from MLIR code and a set of passes to runnable Python code
7 | using a Just-in-Time compiler strategy.
8 |
9 | .. code-block:: python
10 |
11 | engine = MlirJitEngine(llvmlite_engine=None)
12 |
13 | An optional ``llvmlite`` engine can be passed in. Otherwise a new one will be created.
14 |
15 | The workflow for the JIT engine is:
16 |
17 | - mlir-opt lowers the MLIR code to the LLVM dialect through a series of passes
18 | - mlir-translate converts the LLVM dialect into LLVM IR
19 | - llvmlite compiles the LLVM IR into machine code
20 | - pymlir is used to inspect the original MLIR code signatures for type information
21 | - Python functions are created which accept numeric or numpy types
22 |
23 | The mechanism to trigger this workflow is
24 |
25 | .. code-block:: python
26 |
27 | engine.add(mlir_code, passes)
28 |
29 | If an error is not raised, the functions defined in ``mlir_code`` will be available by indexing into the engine.
30 |
31 | .. code-block:: python
32 |
33 | some_func = engine["some_func"]
34 |
35 |
36 | Example
37 | -------
38 |
39 | .. code-block:: python
40 |
41 | >>> mlir_code = b"""
42 | #trait_1d_scalar = {
43 | indexing_maps = [
44 | affine_map<(i) -> (i)>, // A
45 | affine_map<(i) -> (i)> // X (out)
46 | ],
47 | iterator_types = ["parallel"],
48 | doc = "X(i) = A(i) OP Scalar"
49 | }
50 | func @scale_array(%input: tensor<?xf64>, %scale: f64) -> tensor<?xf64> {
51 |     %0 = linalg.generic #trait_1d_scalar
52 |        ins(%input: tensor<?xf64>)
53 |       outs(%input: tensor<?xf64>) {
54 |       ^bb(%a: f64, %s: f64):
55 |         %0 = mulf %a, %scale : f64
56 |         linalg.yield %0 : f64
57 |     } -> tensor<?xf64>
58 |     return %0 : tensor<?xf64>
59 | }
60 | """
61 | >>> passes = [
62 | '--linalg-bufferize',
63 | '--func-bufferize',
64 | '--finalizing-bufferize',
65 | '--convert-linalg-to-affine-loops',
66 | '--lower-affine',
67 | '--convert-scf-to-std',
68 | '--convert-std-to-llvm',
69 | ]
70 | >>> from mlir_graphblas import MlirJitEngine
71 | >>> engine = MlirJitEngine()
72 | >>> engine.add(mlir_code, passes)
73 | ['scale_array']
74 | >>> import numpy as np
75 | >>> x = np.array([1.1, 2.2, 3.3, 4.4, 5.5])
76 | >>> engine['scale_array'](x, 20.0)
77 | array([ 22., 44., 66., 88., 110.])
78 |
79 | More Examples
80 | -------------
81 |
82 | Here is a series of tutorials and examples for the JIT engine.
83 |
84 | They assume knowledge of MLIR's `linalg dialect <https://mlir.llvm.org/docs/Dialects/Linalg/>`_ and go over how to compile and use MLIR code via the JIT engine.
85 |
86 | The tutorials are somewhat sequentially dependent, as later tutorials assume completion of previous ones.
87 |
88 | Much of the complexity when using the JIT engine in practice comes from writing the MLIR code itself. While some of these tutorials go over features specific to the JIT engine, many of them are simply example uses of the JIT engine plus some MLIR code that can be useful as a template to learn from.
89 |
90 | .. toctree::
91 | :maxdepth: 1
92 |
93 | scalar_plus_scalar
94 | tensor_plus_tensor
95 | matrix_plus_broadcasted_vector
96 | scalar_times_tensor
97 | tensor_sum
98 | spmv
99 | sparse_vector_times_sparse_vector
100 | sparse_tensor_sum
101 |
--------------------------------------------------------------------------------
/docs/tools/explorer.rst:
--------------------------------------------------------------------------------
1 | .. _explorer:
2 |
3 | MLIR Explorer
4 | =============
5 |
6 | The MLIR Explorer is the preferred way to view ``DebugResults``.
7 |
8 | The usual way to open the explorer is by calling
9 |
10 | .. code-block:: python
11 |
12 | result.explore()
13 |
14 | This will open a new browser tab showing the Explorer linked to ``result``.
15 |
16 | Another option, when running from a Jupyter notebook, is to use embedded mode.
17 |
18 | .. code-block:: python
19 |
20 | result.explore(embed=True)
21 |
22 | This will open the explorer directly in the notebook.
23 |
24 | Note that for this to work in JupyterLab,
25 | the pyviz lab extension (``@pyviz/jupyterlab_pyviz``) must be installed.
26 |
27 |
28 | Explorer Interface
29 | ------------------
30 |
31 | The primary view of the Explorer is on the **Sequential** tab. This shows the result of each pass,
32 | with the input on the left panel and the result on the right panel.
33 |
34 | .. image:: explorer_sequential.png
35 | :class: with-border
36 |
37 | Walking through each pass can be accomplished by clicking the left and right arrows or by using the "Passes"
38 | dropdown widget.
39 |
40 | Code lines which are too wide will be wrapped to fit in the available width.
41 |
42 | The **Single** tab contains the same "Passes" dropdown widget and provides more screen width for viewing
43 | a single code panel.
44 |
45 | The **Double** tab provides two dropdown selectors, allowing a comparison of arbitrary passes next to each other.
46 |
47 | Styling
48 | ~~~~~~~
49 |
50 | .. image:: explorer_topbar.png
51 |
52 | The top bar of the Explorer contains styling options for the displayed code.
53 |
54 | Line numbers can be toggled on or off. Having line numbers generally makes the code easier to compare or
55 | discuss with another person. However, selecting and copying the code will include the line numbers. When
56 | copying is required, toggle the line numbers off.
57 |
58 | Syntax highlighting helps differentiate things like keywords, punctuation, variable names, and comments.
59 | The actual highlighting style based on these tokens can be changed. Several light-mode and dark-mode
60 | options are available in the dropdown.
61 |
62 | The chosen styles are not saved when exiting the Explorer.
63 |
64 |
65 | Live Editing
66 | ------------
67 |
68 | When developing code, an iterative workflow is common. To avoid the need to close the Explorer tab,
69 | edit input code, re-run the CLI tool, and re-explore every time, an **Edit** tab is provided.
70 |
71 | .. image:: explorer_edit.png
72 | :class: with-border
73 |
74 | The primary purpose of the edit tab is to change the input MLIR code. After editing, click anywhere outside the
75 | code text editor and the **Apply Changes** button will become active. Clicking this button will regenerate the
76 | content of the other tabs based on this new input.
77 |
78 | Another edit that can be performed is to disable passes. Reordering or adding new passes is not available
79 | currently. You must **Apply Changes** after deselecting or reselecting passes to regenerate the
80 | content of the other tabs.
81 |
--------------------------------------------------------------------------------
/docs/tools/explorer_edit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metagraph-dev/mlir-graphblas/8552a4b794c2da411b1b83fc3e380b50ece3493e/docs/tools/explorer_edit.png
--------------------------------------------------------------------------------
/docs/tools/explorer_sequential.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metagraph-dev/mlir-graphblas/8552a4b794c2da411b1b83fc3e380b50ece3493e/docs/tools/explorer_sequential.png
--------------------------------------------------------------------------------
/docs/tools/explorer_topbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metagraph-dev/mlir-graphblas/8552a4b794c2da411b1b83fc3e380b50ece3493e/docs/tools/explorer_topbar.png
--------------------------------------------------------------------------------
/docs/tools/index.rst:
--------------------------------------------------------------------------------
1 | .. _tools:
2 |
3 | Tools
4 | =====
5 |
6 | Given the fast-changing pace of MLIR, its built-in tools are not as mature as desired.
7 | We created several Python tools to assist with writing MLIR code, validating passes, and debugging.
8 |
9 |
10 | .. toctree::
11 | :maxdepth: 2
12 | :caption: Contents:
13 |
14 | cli/index
15 | explorer
16 | engine/index
17 |
18 |
--------------------------------------------------------------------------------
/mlir_graphblas/RandomUtils.cpp:
--------------------------------------------------------------------------------
1 | #include <cassert>
2 | #include <cstdint>
3 | #include <iostream>
4 | #include <random>
5 | #include <set>
6 | #include <vector>
7 |
8 | using namespace std;
9 |
10 | extern "C" {
11 |
12 | // This is a simple, fast, and "wrong" implementation that "randomly" chooses
13 | // the first n indices every time
14 | void choose_first(int64_t rngContext, int64_t n, int64_t maxIndex,
15 | int64_t *outAlloc, int64_t *outBase, int64_t outOffset,
16 | int64_t outSize, int64_t outStride, double *valAlloc,
17 | double *valBase, int64_t valOffset, int64_t valSize,
18 | int64_t valStride) {
19 | assert(rngContext == 0x0B00); // only checked when built in debug mode
20 | cerr << "calling choose_first()" << endl;
21 | cerr << "NOTE: choose_first sampler is only for testing!" << endl;
22 | for (int i = 0; i < n; i++) {
23 | outBase[outOffset + outStride * i] = i;
24 | }
25 | }
26 |
27 | // A uniform sampler using a temporary set
28 | void *create_choose_uniform_context(uint64_t seed) {
29 | auto generator = new std::mt19937_64(seed);
30 | return (void *)generator;
31 | }
32 |
33 | void choose_uniform(void *rngContext, int64_t n, int64_t maxIndex,
34 | int64_t *outAlloc, int64_t *outBase, int64_t outOffset,
35 | int64_t outSize, int64_t outStride, double *valAlloc,
36 | double *valBase, int64_t valOffset, int64_t valSize,
37 | int64_t valStride) {
38 |
39 |   std::set<int64_t> selected;
40 |   std::uniform_int_distribution<int64_t> choose_int(0, maxIndex - 1);
41 |
42 | auto generator = (std::mt19937_64 *)rngContext;
43 |
44 | while (selected.size() < (size_t)n) {
45 | int64_t choice = choose_int(*generator);
46 | if (selected.count(choice) == 0)
47 | selected.insert(choice);
48 | }
49 |
50 | // sets are stored in sorted order
51 | int i = 0;
52 | for (int64_t element : selected) {
53 | outBase[outOffset + outStride * i] = element;
54 | i++;
55 | }
56 | }
57 |
58 | void destroy_choose_uniform_context(void *rngContext) {
59 | auto generator = (std::mt19937_64 *)rngContext;
60 | delete generator;
61 | }
62 |
63 | // A weighted sampler using a temporary set
64 | void *create_choose_weighted_context(uint64_t seed) {
65 | auto generator = new std::mt19937_64(seed);
66 | return (void *)generator;
67 | }
68 |
69 | void choose_weighted(void *rngContext, int64_t n, int64_t maxIndex,
70 | int64_t *outAlloc, int64_t *outBase, int64_t outOffset,
71 | int64_t outSize, int64_t outStride, double *valAlloc,
72 | double *valBase, int64_t valOffset, int64_t valSize,
73 | int64_t valStride) {
74 |
75 |   std::set<int64_t> selected;
76 |
77 | auto generator = (std::mt19937_64 *)rngContext;
78 |
79 | // compute cumulative distribution
80 |   std::vector<double> cumulative(maxIndex);
81 | double acc = 0.0;
82 | for (int64_t i = 0; i < maxIndex; i++) {
83 | acc += valBase[valOffset + i * valStride];
84 | cumulative[i] = acc;
85 | }
86 |
87 |   std::uniform_real_distribution<double> choose_double(
88 |       0, cumulative[maxIndex - 1]);
89 |
90 | while (selected.size() < (size_t)n) {
91 | double r = choose_double(*generator);
92 |
93 | // find smallest element in cumulative distribution greater than r
94 | int64_t choice = std::distance(
95 | cumulative.begin(),
96 | std::upper_bound(cumulative.begin(), cumulative.end(), r));
97 |
98 | if (selected.count(choice) == 0)
99 | selected.insert(choice);
100 | }
101 |
102 | // sets are stored in sorted order
103 | int i = 0;
104 | for (int64_t element : selected) {
105 | outBase[outOffset + outStride * i] = element;
106 | i++;
107 | }
108 | }
109 |
110 | void destroy_choose_weighted_context(void *rngContext) {
111 | auto generator = (std::mt19937_64 *)rngContext;
112 | delete generator;
113 | }
114 |
115 | std::uniform_real_distribution<> doubleDistribution(0.0, 1.0);
116 |
117 | double random_double(void *rngContext) {
118 | auto generator = (std::mt19937_64 *)rngContext;
119 | return doubleDistribution(*generator);
120 | }
121 |
122 | } // extern "C"
123 |
--------------------------------------------------------------------------------
/mlir_graphblas/__init__.py:
--------------------------------------------------------------------------------
1 | import donfig
2 | from ._version import get_versions
3 | from .cli import MlirOptCli, MlirOptError
4 | from .engine import MlirJitEngine
5 | from . import tools
6 |
7 | __version__ = get_versions()["version"]
8 | del get_versions
9 |
10 |
11 | config = donfig.Config("mlir-graphblas")
12 |
--------------------------------------------------------------------------------
/mlir_graphblas/random_utils.pyx:
--------------------------------------------------------------------------------
1 | """ This wraps RandomUtils.cpp """
2 | cimport cython
3 | import random
4 | from libc.stdint cimport int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, uintptr_t
5 |
6 |
7 | cdef extern from "RandomUtils.cpp" nogil:
8 | void *create_choose_uniform_context(uint64_t seed)
9 | void choose_uniform(void *rngContext, int64_t n, int64_t maxIndex,
10 | int64_t *outAlloc, int64_t *outBase, int64_t outOffset,
11 | int64_t outSize, int64_t outStride, double *valAlloc,
12 | double *valBase, int64_t valOffset, int64_t valSize,
13 | int64_t valStride)
14 | void destroy_choose_uniform_context(void *rngContext)
15 |
16 | void *create_choose_weighted_context(uint64_t seed)
17 | void choose_weighted(void *rngContext, int64_t n, int64_t maxIndex,
18 | int64_t *outAlloc, int64_t *outBase, int64_t outOffset,
19 | int64_t outSize, int64_t outStride, double *valAlloc,
20 | double *valBase, int64_t valOffset, int64_t valSize,
21 | int64_t valStride)
22 | void destroy_choose_weighted_context(void *rngContext)
23 |
24 | double random_double(void *rngContext);
25 |
26 |
27 | cdef class ChooseUniformContext:
28 | cdef void *_data
29 |
30 | def __init__(self, seed=None):
31 | if seed is None:
32 | seed = random.getrandbits(64)
33 | self._data = create_choose_uniform_context(seed)
34 |
35 | def __dealloc__(self):
36 | destroy_choose_uniform_context(self._data)
37 |
38 | @property
39 | def __mlir_void_ptr__(self):
40 |         return <uintptr_t>self._data
41 |
42 |
43 | cdef class ChooseWeightedContext:
44 | cdef void *_data
45 |
46 | def __init__(self, seed=None):
47 | if seed is None:
48 | seed = random.getrandbits(64)
49 | self._data = create_choose_weighted_context(seed)
50 |
51 | def __dealloc__(self):
52 | destroy_choose_weighted_context(self._data)
53 |
54 | @property
55 | def __mlir_void_ptr__(self):
56 | return <uintptr_t>self._data
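
Both wrapper classes own an `std::mt19937_64` allocated on the C++ side and expose its address through `__mlir_void_ptr__` so it can be handed to JIT-compiled code as an opaque RNG-context pointer. A usage sketch (exactly how the pointer is consumed downstream is an assumption here):

```python
from mlir_graphblas.random_utils import ChooseUniformContext

# Seeding makes the underlying mt19937_64 stream reproducible;
# omitting the seed draws 64 random bits via random.getrandbits.
ctx = ChooseUniformContext(seed=42)

# Integer address of the C++ RNG context, suitable for passing to
# JIT-compiled functions that expect an opaque context pointer.
ptr = ctx.__mlir_void_ptr__
```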
--------------------------------------------------------------------------------
/mlir_graphblas/src/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.13.4)
2 | project(graphblas-dialect LANGUAGES CXX C)
3 |
4 | set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
5 |
6 | set(CMAKE_CXX_STANDARD 14 CACHE STRING "C++ standard to conform to")
7 |
8 | find_package(MLIR REQUIRED CONFIG)
9 |
10 | message(STATUS "Using MLIRConfig.cmake in: ${MLIR_DIR}")
11 | message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")
12 |
13 | set(LLVM_RUNTIME_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/bin)
14 | set(LLVM_LIBRARY_OUTPUT_INTDIR ${CMAKE_BINARY_DIR}/lib)
15 | set(MLIR_BINARY_DIR ${CMAKE_BINARY_DIR})
16 |
17 | list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}")
18 | list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
19 | include(TableGen)
20 | include(AddLLVM)
21 | include(AddMLIR)
22 | include(HandleLLVMOptions)
23 |
24 | include_directories(${LLVM_INCLUDE_DIRS})
25 | include_directories(${MLIR_INCLUDE_DIRS})
26 | include_directories(${PROJECT_SOURCE_DIR}/include)
27 | include_directories(${PROJECT_BINARY_DIR}/include)
28 | link_directories(${LLVM_BUILD_LIBRARY_DIR})
29 | add_definitions(${LLVM_DEFINITIONS})
30 |
31 | add_subdirectory(include)
32 | add_subdirectory(lib)
33 | add_subdirectory(test)
34 | add_subdirectory(graphblas-opt)
35 |
36 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/README.md:
--------------------------------------------------------------------------------
1 | # C++ Development Cheat Sheet
2 |
3 | This document assumes you are only worried about building the C++
4 | applications, not the Python library.
5 |
6 | ## Creating a new environment
7 |
8 | ```
9 | conda create -n mgcpp -c metagraph/label/dev python=3.8 cmake ninja mlir
10 | ```
11 |
12 | You will also likely want to have a copy of LLVM sitting around somewhere you can browse and search:
13 | ```
14 | git clone https://github.com/llvm/llvm-project/
15 | ```
16 |
17 | ## Setting up VSCode
18 |
19 | VSCode's IntelliSense features are basically mandatory for making progress, especially given how incomplete MLIR's documentation is. At the top level of the repository, create a file `.vscode/c_cpp_properties.json` that contains:
20 |
21 | ```
22 | {
23 | "configurations": [
24 | {
25 | "name": "macOS",
26 | "includePath": [
27 | "~/miniconda3/envs/mgcpp/include"
28 | ],
29 | "macFrameworkPath": [
30 | "/System/Library/Frameworks",
31 | "/Library/Frameworks"
32 | ],
33 | "intelliSenseMode": "macos-clang-x64",
34 | "compilerPath": "/usr/bin/g++",
35 | "cStandard": "c11",
36 | "cppStandard": "c++14"
37 | }
38 | ],
39 | "version": 4
40 | }
41 | ```
42 |
43 | Adjust `includePath` to match your environment. The `name` attribute doesn't appear to have any special meaning.
44 |
45 | ## Setting up CMake build
46 |
47 | From the top level of the repository:
48 |
49 | ```
50 | conda activate mgcpp
51 | cd mlir_graphblas/src/
52 | mkdir build
53 | cd build
54 | cmake -G Ninja .. -DMLIR_DIR=$CONDA_PREFIX/lib/cmake/mlir -DLLVM_EXTERNAL_LIT=$BUILD_DIR/bin/llvm-lit -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc -DCMAKE_BUILD_TYPE=Debug
55 | ```
56 | This command line overrides CMake's compiler detection, forcing it to use the system g++ (which on macOS is actually Clang) rather than any clang that might be in your conda environment. Here `$BUILD_DIR` should point to the prefix containing `bin/llvm-lit` (typically your conda environment's prefix).
57 |
58 | ## Build and Test Loop
59 |
60 | Next, you'll likely run the following command over and over when testing the GraphBLAS passes:
61 | ```
62 | cmake --build . --target check-graphblas
63 | ```
64 |
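Once that passes, you can also run the freshly built `graphblas-opt` by hand to inspect what a pass does to one of the checked-in test files (paths relative to the `build` directory):
```
./bin/graphblas-opt --graphblas-lower ../test/GraphBLAS/lower_comment.mlir
```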
--------------------------------------------------------------------------------
/mlir_graphblas/src/build.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import site
4 | import subprocess
5 | import shutil
6 | import argparse
7 |
8 | from typing import Tuple
9 |
10 | _SCRIPT_DIR = os.path.dirname(__file__)
11 | _BUILD_DIR = os.path.join(_SCRIPT_DIR, "build")
12 | GRAPHBLAS_OPT_LOCATION = os.path.join(_BUILD_DIR, "bin", "graphblas-opt")
13 |
14 |
15 | def run_shell_commands(
16 | directory: str, *commands: str, **environment_variables
17 | ) -> Tuple[str, str, str]:
18 | command = " && ".join(
19 | [f"pushd {directory}"]
20 | + [f"export {name}={value}" for name, value in environment_variables.items()]
21 | + list(commands)
22 | + ["popd"]
23 | )
24 |
25 | process = subprocess.Popen(
26 | ["/bin/bash"],
27 | stdin=subprocess.PIPE,
28 | stdout=subprocess.PIPE,
29 | stderr=subprocess.PIPE,
30 | )
31 | stdout_string, stderr_string = map(
32 | bytes.decode, process.communicate(command.encode())
33 | )
34 |
35 | if process.returncode != 0:
36 | error_string = (
37 | "\n\n"
38 | + "STDERR Messages:"
39 | + "\n\n"
40 | + stderr_string
41 | + "\n\n"
42 | + "STDOUT Messages:"
43 | + "\n\n"
44 | + stdout_string
45 | + "\n\n"
46 | + f"Command Failed with exit code {process.returncode}:"
47 | + "\n\n"
48 | + command
49 | + "\n\n"
50 | )
51 | raise RuntimeError(error_string)
52 |
53 | return command, stdout_string, stderr_string
54 |
55 |
56 | def build_graphblas_opt(build_clean: bool) -> None:
57 | if build_clean and os.path.isdir(_BUILD_DIR):
58 | shutil.rmtree(_BUILD_DIR)
59 |
60 | if not os.path.isdir(_BUILD_DIR):
61 | os.makedirs(_BUILD_DIR)
62 |
63 | env_lib_path = os.path.join(sys.exec_prefix, "lib")
64 | LD_LIBRARY_PATH = os.environ.get("LD_LIBRARY_PATH", env_lib_path)
65 | if env_lib_path not in LD_LIBRARY_PATH.split(":"):
66 | LD_LIBRARY_PATH = LD_LIBRARY_PATH + ":" + env_lib_path
67 |
68 | PYTHONPATH = ":".join(site.getsitepackages())
69 |
70 | command, stdout_string, stderr_string = run_shell_commands(
71 | _BUILD_DIR,
72 | f"cmake -G Ninja .. -DMLIR_DIR=$PREFIX/lib/cmake/mlir -DCMAKE_BUILD_TYPE=Debug -DLLVM_EXTERNAL_LIT=$BUILD_DIR/bin/llvm-lit -DCMAKE_PREFIX_PATH={sys.exec_prefix}", # creates the directory ./build/graphblas-opt/
73 | "cmake --build . --target check-graphblas --verbose", # creates the executable ./build/bin/graphblas-opt and runs tests
74 | PREFIX=sys.exec_prefix,
75 | BUILD_DIR=sys.exec_prefix,
76 | LD_LIBRARY_PATH=LD_LIBRARY_PATH,
77 | PYTHONPATH=PYTHONPATH,
78 | )
79 | assert os.path.isfile(GRAPHBLAS_OPT_LOCATION)
80 |
81 | print(
82 | f"""
83 | STDERR:
84 |
85 | {stderr_string}
86 |
87 | STDOUT:
88 |
89 | {stdout_string}
90 |
91 | Status: Success
92 |
93 | Command:
94 |
95 | {command}
96 | """
97 | )
98 |
99 | return
100 |
101 |
102 | if __name__ == "__main__":
103 | parser = argparse.ArgumentParser(
104 | prog="tool",
105 | formatter_class=lambda prog: argparse.HelpFormatter(
106 | prog, max_help_position=9999
107 | ),
108 | )
109 | parser.add_argument(
110 | "-build-clean", action="store_true", help="Rebuild from scratch."
111 | )
112 | args = parser.parse_args()
113 |
114 | build_graphblas_opt(args.build_clean)
115 |
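
For reference, `run_shell_commands` chains its arguments into a single `bash` session (`pushd`, the `export`s, the commands, `popd`, all joined with `&&`), so later commands see the working directory and environment set up by earlier ones. A small illustrative call (run from `mlir_graphblas/src/` so `build` is importable; the directory and command are made up):

```python
from build import run_shell_commands

# Echoes the exported variable from inside /tmp; raises RuntimeError
# with captured stdout/stderr if any command in the chain fails.
command, out, err = run_shell_commands("/tmp", "echo $GREETING", GREETING="hello")
print(out)  # -> hello
```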
--------------------------------------------------------------------------------
/mlir_graphblas/src/graphblas-opt/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
2 | get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS)
3 | set(LIBS
4 | ${dialect_libs}
5 | ${conversion_libs}
6 | MLIRIR
7 | MLIROptLib
8 | MLIRGraphBLAS
9 | )
10 | add_llvm_executable(graphblas-opt graphblas-opt.cpp)
11 |
12 | llvm_update_compile_flags(graphblas-opt)
13 | target_link_libraries(graphblas-opt PRIVATE ${LIBS})
14 |
15 | mlir_check_all_link_libraries(graphblas-opt)
16 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/graphblas-opt/graphblas-opt.cpp:
--------------------------------------------------------------------------------
1 | //===- graphblas-opt.cpp ---------------------------------------*- C++ -*-===//
2 | //
3 | // TODO add documentation
4 | //
5 | //===---------------------------------------------------------------------===//
6 |
7 | #include "mlir/IR/Dialect.h"
8 | #include "mlir/IR/MLIRContext.h"
9 | #include "mlir/InitAllDialects.h"
10 | #include "mlir/InitAllPasses.h"
11 | #include "mlir/Pass/Pass.h"
12 | #include "mlir/Pass/PassManager.h"
13 | #include "mlir/Support/FileUtilities.h"
14 | #include "mlir/Support/MlirOptMain.h"
15 | #include "llvm/Support/CommandLine.h"
16 | #include "llvm/Support/InitLLVM.h"
17 | #include "llvm/Support/SourceMgr.h"
18 | #include "llvm/Support/ToolOutputFile.h"
19 |
20 | #include "GraphBLAS/GraphBLASDialect.h"
21 | #include "GraphBLAS/GraphBLASPasses.h"
22 |
23 | int main(int argc, char **argv) {
24 | mlir::registerAllPasses();
25 | registerGraphBLASPasses();
26 |
27 | mlir::DialectRegistry registry;
28 | registry.insert<mlir::graphblas::GraphBLASDialect>();
29 |
30 | // Add the following to include *all* MLIR Core dialects, or selectively
31 | // include what you need like above. You only need to register dialects that
32 | // will be *parsed* by the tool, not the ones it generates.
33 | registerAllDialects(registry);
34 |
35 | return failed(
36 | mlir::MlirOptMain(argc, argv, "GraphBLAS optimizer driver\n", registry));
37 | }
38 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/include/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | add_subdirectory(GraphBLAS)
2 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/include/GraphBLAS/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | set(LLVM_TARGET_DEFINITIONS GraphBLASPasses.td)
3 | mlir_tablegen(GraphBLASPasses.h.inc -gen-pass-decls -name GraphBLAS)
4 | add_public_tablegen_target(MLIRGraphBLASIncGen)
5 | add_mlir_doc(GraphBLASPasses GraphBLASPasses ./ -gen-pass-doc)
6 |
7 | set(LLVM_TARGET_DEFINITIONS GraphBLASOps.td)
8 | mlir_tablegen(GraphBLASOpsEnums.h.inc -gen-enum-decls)
9 | mlir_tablegen(GraphBLASOpsEnums.cpp.inc -gen-enum-defs)
10 |
11 | add_mlir_dialect(GraphBLASOps graphblas)
12 |
13 | add_mlir_doc(GraphBLASDialect -gen-dialect-doc GraphBLASDialect GraphBLAS/)
14 | add_mlir_doc(GraphBLASOps -gen-op-doc GraphBLASOps GraphBLAS/)
15 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/include/GraphBLAS/GraphBLASArrayUtils.h:
--------------------------------------------------------------------------------
1 | #ifndef GRAPHBLAS_GRAPHBLASARRAYUTILS_H
2 | #define GRAPHBLAS_GRAPHBLASARRAYUTILS_H
3 |
4 | #include "GraphBLAS/GraphBLASUtils.h"
5 | #include "mlir/Dialect/StandardOps/IR/Ops.h"
6 | #include "mlir/Dialect/Tensor/IR/Tensor.h"
7 | #include "mlir/IR/BuiltinOps.h"
8 | #include "llvm/ADT/APInt.h"
9 | #include
10 | #include
11 |
12 | using namespace mlir;
13 |
14 | enum EwiseBehavior {
15 | UNION,
16 | INTERSECT,
17 | MASK,
18 | MASK_COMPLEMENT,
19 | };
20 |
21 | ValueRange buildMaskComplement(PatternRewriter &rewriter, Location loc,
22 | Value fullSize, Value maskIndices,
23 | Value maskStart, Value maskEnd);
24 |
25 | ValueRange sparsifyDensePointers(PatternRewriter &rewriter, Location loc,
26 | Value size, Value pointers);
27 |
28 | ValueRange buildIndexOverlap(PatternRewriter &rewriter, Location loc,
29 | Value aSize, Value a, Value bSize, Value b);
30 |
31 | Value computeNumOverlaps(PatternRewriter &rewriter, Location loc, Value nk,
32 | Value fixedIndices, Value fixedIndexStart,
33 | Value fixedIndexEnd, Value iterPointers,
34 | Value iterIndices, Value maskIndices, Value maskStart,
35 | Value maskEnd, Type valueType);
36 |
37 | void computeInnerProduct(PatternRewriter &rewriter, Location loc, Value nk,
38 | Value fixedRowIndex, Value fixedIndices,
39 | Value fixedValues, Value fixedIndexStart,
40 | Value fixedIndexEnd, Value iterPointers,
41 | Value iterIndices, Value iterValues, Value maskIndices,
42 | Value maskStart, Value maskEnd, Type valueType,
43 | ExtensionBlocks extBlocks, Value outputIndices,
44 | Value outputValues, Value indexOffset,
45 | bool swapMultOps);
46 |
47 | Value computeIndexOverlapSize(PatternRewriter &rewriter, Location loc,
48 | bool intersect, Value aPosStart, Value aPosEnd,
49 | Value Ai, Value bPosStart, Value bPosEnd,
50 | Value Bi);
51 |
52 | Value computeUnionAggregation(PatternRewriter &rewriter, Location loc,
53 | bool intersect, Block *binaryBlock,
54 | Type valueType, Value aPosStart, Value aPosEnd,
55 | Value Ai, Value Ax, Value bPosStart,
56 | Value bPosEnd, Value Bi, Value Bx,
57 | Value oPosStart, Value Oi, Value Ox);
58 |
59 | void computeVectorElementWise(PatternRewriter &rewriter, Location loc,
60 | ModuleOp module, Value lhs, Value rhs,
61 | Value output, Block *binaryBlock,
62 | EwiseBehavior behavior);
63 | void computeMatrixElementWise(PatternRewriter &rewriter, Location loc,
64 | ModuleOp module, Value lhs, Value rhs,
65 | Value output, Block *binaryBlock,
66 | EwiseBehavior behavior);
67 |
68 | #endif // GRAPHBLAS_GRAPHBLASARRAYUTILS_H
69 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/include/GraphBLAS/GraphBLASCommonPasses.h:
--------------------------------------------------------------------------------
1 | #ifndef GRAPHBLAS_GRAPHBLASCOMMONPASSES_H
2 | #define GRAPHBLAS_GRAPHBLASCOMMONPASSES_H
3 |
4 | #include "GraphBLAS/GraphBLASDialect.h"
5 | #include "GraphBLAS/GraphBLASUtils.h"
6 |
7 | class LowerCommentRewrite
8 | : public mlir::OpRewritePattern<mlir::graphblas::CommentOp> {
9 | public:
10 | using mlir::OpRewritePattern<mlir::graphblas::CommentOp>::OpRewritePattern;
11 | mlir::LogicalResult
12 | matchAndRewrite(mlir::graphblas::CommentOp op,
13 | mlir::PatternRewriter &rewriter) const override;
14 | };
15 |
16 | class LowerPrintRewrite
17 | : public mlir::OpRewritePattern<mlir::graphblas::PrintOp> {
18 | public:
19 | using mlir::OpRewritePattern<mlir::graphblas::PrintOp>::OpRewritePattern;
20 | mlir::LogicalResult
21 | matchAndRewrite(mlir::graphblas::PrintOp op,
22 | mlir::PatternRewriter &rewriter) const override;
23 | };
24 |
25 | class LowerPrintTensorRewrite
26 | : public mlir::OpRewritePattern<mlir::graphblas::PrintTensorOp> {
27 | public:
28 | using mlir::OpRewritePattern<
29 | mlir::graphblas::PrintTensorOp>::OpRewritePattern;
30 | mlir::LogicalResult
31 | matchAndRewrite(mlir::graphblas::PrintTensorOp op,
32 | mlir::PatternRewriter &rewriter) const override;
33 | };
34 |
35 | class LowerSizeRewrite
36 | : public mlir::OpRewritePattern<mlir::graphblas::SizeOp> {
37 | public:
38 | using OpRewritePattern::OpRewritePattern;
39 | mlir::LogicalResult
40 | matchAndRewrite(mlir::graphblas::SizeOp op,
41 | mlir::PatternRewriter &rewriter) const override;
42 | };
43 |
44 | class LowerNumRowsRewrite
45 | : public mlir::OpRewritePattern<mlir::graphblas::NumRowsOp> {
46 | public:
47 | using OpRewritePattern::OpRewritePattern;
48 | mlir::LogicalResult
49 | matchAndRewrite(mlir::graphblas::NumRowsOp op,
50 | mlir::PatternRewriter &rewriter) const override;
51 | };
52 |
53 | class LowerNumColsRewrite
54 | : public mlir::OpRewritePattern<mlir::graphblas::NumColsOp> {
55 | public:
56 | using OpRewritePattern::OpRewritePattern;
57 | mlir::LogicalResult
58 | matchAndRewrite(mlir::graphblas::NumColsOp op,
59 | mlir::PatternRewriter &rewriter) const override;
60 | };
61 |
62 | class LowerNumValsRewrite
63 | : public mlir::OpRewritePattern<mlir::graphblas::NumValsOp> {
64 | public:
65 | using OpRewritePattern::OpRewritePattern;
66 | mlir::LogicalResult
67 | matchAndRewrite(mlir::graphblas::NumValsOp op,
68 | mlir::PatternRewriter &rewriter) const override;
69 | };
70 |
71 | #endif // GRAPHBLAS_GRAPHBLASCOMMONPASSES_H
72 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/include/GraphBLAS/GraphBLASDialect.h:
--------------------------------------------------------------------------------
1 | //===- GraphBLASDialect.h - GraphBLAS dialect -----------------*- C++ -*-===//
2 | //
3 | // TODO add documentation
4 | //
5 | //===--------------------------------------------------------------------===//
6 |
7 | #ifndef GRAPHBLAS_GRAPHBLASDIALECT_H
8 | #define GRAPHBLAS_GRAPHBLASDIALECT_H
9 |
10 | #include "mlir/IR/Dialect.h"
11 |
12 | #include "GraphBLAS/GraphBLASOpsDialect.h.inc"
13 |
14 | #endif // GRAPHBLAS_GRAPHBLASDIALECT_H
15 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/include/GraphBLAS/GraphBLASDialect.td:
--------------------------------------------------------------------------------
1 | //===- GraphBLASDialect.td - GraphBLAS dialect -----------*- tablegen -*-===//
2 | //
3 | // TODO add documentation
4 | //
5 | //===--------------------------------------------------------------------===//
6 |
7 | #ifndef GRAPHBLAS_DIALECT
8 | #define GRAPHBLAS_DIALECT
9 |
10 | include "mlir/IR/OpBase.td"
11 |
12 | //===--------------------------------------------------------------------===//
13 | // GraphBLAS dialect definition.
14 | //===--------------------------------------------------------------------===//
15 |
16 | def GraphBLAS_Dialect : Dialect {
17 | let name = "graphblas";
18 | let summary = "The `graphblas` dialect describes standard sparse tensor operations that are found in the [GraphBLAS spec](http://people.eecs.berkeley.edu/~aydin/GraphBLAS_API_C_v13.pdf).";
19 | let description = [{
20 | In order to fit into MLIR's SSA requirements, the ops are not one-to-one equivalents of GraphBLAS function calls.
21 |
22 | This document is not intended to be a tutorial and acts more as a reference manual for the ops in the GraphBLAS dialect. Tutorials can be found in later sections of our documentation.
23 |
24 | ## Assumptions
25 |
26 | Although the [sparse tensor encoding](https://mlir.llvm.org/docs/Dialects/SparseTensorOps/#sparsetensorencodingattr)
27 | in MLIR is extremely flexible, the ``graphblas`` dialect and associated
28 | lowering pass currently support only three encodings.
29 |
30 | The *CSR64* encoding is usually defined with the alias:
31 |
32 | ```mlir
33 | #CSR64 = #sparse_tensor.encoding<{
34 | dimLevelType = [ "dense", "compressed" ],
35 | dimOrdering = affine_map<(i,j) -> (i,j)>,
36 | pointerBitWidth = 64,
37 | indexBitWidth = 64
38 | }>
39 | ```
40 |
41 | The *CSC64* encoding can be defined with the alias:
42 |
43 | ```mlir
44 | #CSC64 = #sparse_tensor.encoding<{
45 | dimLevelType = [ "dense", "compressed" ],
46 | dimOrdering = affine_map<(i,j) -> (j,i)>,
47 | pointerBitWidth = 64,
48 | indexBitWidth = 64
49 | }>
50 | ```
51 |
52 | In terms of data structure contents, CSR and CSC are identical (each has
53 | index, pointer, and value arrays); only the dimension ordering is reversed
54 | for CSC. The sparse tensor is then declared in the same way as a regular
55 | MLIR tensor, but with this additional encoding attribute:
56 |
57 | ```mlir
58 | tensor<?x?xf64, #CSR64>
59 | ```
60 |
61 | The *CV64* encoding (for sparse vectors) is usually defined with the alias:
62 |
63 | ```mlir
64 | #CV64 = #sparse_tensor.encoding<{
65 | dimLevelType = [ "compressed" ],
66 | pointerBitWidth = 64,
67 | indexBitWidth = 64
68 | }>
69 | ```
70 |
71 | Note that the `--graphblas-lower` pass only supports tensors with unknown
72 | dimensions (indicated by the ``?``).
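
For example, a function taking a CSR matrix (using the `#CSR64` alias from above) is declared against a fully dynamic shape:

```mlir
func @num_rows_example(%m: tensor<?x?xf64, #CSR64>) -> index {
  %nrows = graphblas.num_rows %m : tensor<?x?xf64, #CSR64>
  return %nrows : index
}
```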
73 | }];
74 | let cppNamespace = "::mlir::graphblas";
75 | }
76 |
77 | #endif // GRAPHBLAS_DIALECT
78 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/include/GraphBLAS/GraphBLASOps.h:
--------------------------------------------------------------------------------
1 | //===- GraphBLASOps.h - GraphBLAS dialect ops -----------------*- C++ -*-===//
2 | //
3 | // TODO add documentation
4 | //
5 | //===--------------------------------------------------------------------===//
6 |
7 | #ifndef GRAPHBLAS_GRAPHBLASOPS_H
8 | #define GRAPHBLAS_GRAPHBLASOPS_H
9 |
10 | #include "mlir/Dialect/StandardOps/IR/Ops.h"
11 | #include "mlir/IR/BuiltinTypes.h"
12 | #include "mlir/IR/Dialect.h"
13 | #include "mlir/IR/OpDefinition.h"
14 | #include "mlir/Interfaces/SideEffectInterfaces.h"
15 |
16 | #include "GraphBLAS/GraphBLASOpsEnums.h.inc"
17 |
18 | #define GET_OP_CLASSES
19 | #include "GraphBLAS/GraphBLASOps.h.inc"
20 |
21 | #endif // GRAPHBLAS_GRAPHBLASOPS_H
22 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/include/GraphBLAS/GraphBLASPasses.h:
--------------------------------------------------------------------------------
1 |
2 | //===- GraphBLASPasses.h - GraphBLAS dialect passes -----------*- C++ -*-===//
3 | //
4 | //
5 | // TODO add documentation
6 | //
7 | //===--------------------------------------------------------------------------===//
8 |
9 | #ifndef GRAPHBLAS_GRAPHBLASPASSES_H
10 | #define GRAPHBLAS_GRAPHBLASPASSES_H
11 |
12 | #include "mlir/Pass/Pass.h"
13 |
14 | namespace mlir {
15 | std::unique_ptr<OperationPass<ModuleOp>> createGraphBLASLoweringPass();
16 | std::unique_ptr<OperationPass<ModuleOp>> createGraphBLASLinalgLoweringPass();
17 | std::unique_ptr<OperationPass<ModuleOp>> createGraphBLASOptimizePass();
18 | std::unique_ptr<OperationPass<ModuleOp>> createGraphBLASStructuralizePass();
19 | std::unique_ptr<OperationPass<ModuleOp>> createGraphBLASDWIMPass();
20 | } // namespace mlir
21 |
22 | //===----------------------------------------------------------------------===//
23 | // Registration.
24 | //===----------------------------------------------------------------------===//
25 |
26 | // Generate the code for registering passes.
27 | #define GEN_PASS_REGISTRATION
28 | #include "GraphBLAS/GraphBLASPasses.h.inc"
29 |
30 | #endif // GRAPHBLAS_GRAPHBLASPASSES_H
31 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/include/GraphBLAS/GraphBLASPasses.td:
--------------------------------------------------------------------------------
1 | //===-- GraphBLASPasses.td - Conversion pass definition file --------*- tablegen -*-===//
2 | //
3 | // TODO add documentation
4 | //
5 | //===-------------------------------------------------------------------------------===//
6 |
7 | #ifndef MLIR_CONVERSION_PASSES
8 | #define MLIR_CONVERSION_PASSES
9 |
10 | include "mlir/Pass/PassBase.td"
11 |
12 | //===----------------------------------------------------------------------===//
13 | // GraphBLASLowering
14 | //===----------------------------------------------------------------------===//
15 |
16 | def GraphBLASLowering : Pass<"graphblas-lower", "ModuleOp"> {
17 | let summary = "TODO add documentation";
18 | let constructor = "mlir::createGraphBLASLoweringPass()";
19 | let dependentDialects = [
20 | "LLVM::LLVMDialect",
21 | "linalg::LinalgDialect",
22 | "AffineDialect",
23 | "memref::MemRefDialect",
24 | "tensor::TensorDialect",
25 | "math::MathDialect",
26 | "arith::ArithmeticDialect",
27 | "scf::SCFDialect"
28 | ];
29 | }
30 |
31 | def GraphBLASLinalgLowering : Pass<"graphblas-linalg-lower", "ModuleOp"> {
32 | let summary = "TODO add documentation";
33 | let constructor = "mlir::createGraphBLASLinalgLoweringPass()";
34 | let dependentDialects = [
35 | "LLVM::LLVMDialect",
36 | "linalg::LinalgDialect",
37 | "math::MathDialect",
38 | "arith::ArithmeticDialect"
39 | ];
40 | }
41 |
42 | //===----------------------------------------------------------------------===//
43 | // GraphBLASOptimize
44 | //===----------------------------------------------------------------------===//
45 |
46 | def GraphBLASOptimize : Pass<"graphblas-optimize", "ModuleOp"> {
47 | let summary = "TODO add documentation";
48 | let constructor = "mlir::createGraphBLASOptimizePass()";
49 | let dependentDialects = [
50 | ];
51 | }
52 |
53 | //===----------------------------------------------------------------------===//
54 | // GraphBLASStructuralize
55 | //===----------------------------------------------------------------------===//
56 |
57 | def GraphBLASStructuralize : Pass<"graphblas-structuralize", "ModuleOp"> {
58 | let summary = "TODO add documentation";
59 | let constructor = "mlir::createGraphBLASStructuralizePass()";
60 | let dependentDialects = [
61 | "memref::MemRefDialect",
62 | "math::MathDialect",
63 | "arith::ArithmeticDialect",
64 | "scf::SCFDialect"
65 | ];
66 | }
67 |
68 | def GraphBLASDWIM : Pass<"graphblas-dwim", "ModuleOp"> {
69 | let summary = "TODO add documentation";
70 | let constructor = "mlir::createGraphBLASDWIMPass()";
71 | let dependentDialects = [
72 | ];
73 | }
74 |
75 | #endif // MLIR_CONVERSION_PASSES
76 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/lib/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | add_subdirectory(GraphBLAS)
2 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/lib/GraphBLAS/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | add_mlir_dialect_library(MLIRGraphBLAS
2 | GraphBLASDialect.cpp
3 | GraphBLASOps.cpp
4 | GraphBLASLowerPass.cpp
5 | GraphBLASLinalgLowerPass.cpp
6 | GraphBLASStructuralizePass.cpp
7 | GraphBLASOptimizePass.cpp
8 | GraphBLASUtils.cpp
9 | GraphBLASArrayUtils.cpp
10 |
11 | ADDITIONAL_HEADER_DIRS
12 | ${PROJECT_SOURCE_DIR}/include/GraphBLAS
13 |
14 | DEPENDS
15 | MLIRGraphBLASOpsIncGen
16 |
17 | LINK_LIBS PUBLIC
18 | MLIRIR
19 | )
20 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/lib/GraphBLAS/GraphBLASDialect.cpp:
--------------------------------------------------------------------------------
1 | //===- GraphBLASDialect.cpp - GraphBLAS dialect ---------------*- C++ -*-===//
2 | //
3 | // TODO add documentation
4 | //
5 | //===--------------------------------------------------------------------===//
6 |
7 | #include "GraphBLAS/GraphBLASDialect.h"
8 | #include "GraphBLAS/GraphBLASOps.h"
9 |
10 | using namespace mlir;
11 | using namespace mlir::graphblas;
12 |
13 | //===--------------------------------------------------------------------===//
14 | // GraphBLAS dialect.
15 | //===--------------------------------------------------------------------===//
16 |
17 | void GraphBLASDialect::initialize() {
18 | addOperations<
19 | #define GET_OP_LIST
20 | #include "GraphBLAS/GraphBLASOps.cpp.inc"
21 | >();
22 | }
23 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | configure_lit_site_cfg(
2 | ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in
3 | ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py
4 | MAIN_CONFIG
5 | ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py
6 | )
7 |
8 | set(GRAPHBLAS_TEST_DEPENDS
9 | FileCheck count not
10 | graphblas-opt
11 |
12 | )
13 |
14 | add_lit_testsuite(check-graphblas "Running the graphblas regression tests"
15 | ${CMAKE_CURRENT_BINARY_DIR}
16 | DEPENDS ${GRAPHBLAS_TEST_DEPENDS}
17 | )
18 | set_target_properties(check-graphblas PROPERTIES FOLDER "Tests")
19 |
20 | add_lit_testsuites(GRAPHBLAS ${CMAKE_CURRENT_SOURCE_DIR} DEPENDS ${GRAPHBLAS_TEST_DEPENDS})
21 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/graphblas-opt.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt --show-dialects | FileCheck %s
2 | // CHECK: Available Dialects:
3 | // CHECK: graphblas
4 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/invalid.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s -split-input-file -verify-diagnostics
2 |
3 | func @matrix_multiply_wrapper(%argA: tensor<1x2x3xi64>, %argB: tensor<3x2xi64>) -> tensor<2x2xi64> {
4 | %answer = graphblas.matrix_multiply_generic %argA, %argB {mask_complement=false} : (tensor<1x2x3xi64>, tensor<3x2xi64>) to tensor<2x2xi64> // expected-error {{op operand #0 must be 1D/2D tensor of any type values, but got 'tensor<1x2x3xi64>'}}
5 | return %answer : tensor<2x2xi64>
6 | }
7 |
8 | // -----
9 |
10 | func @size_wrapper(%argA: tensor<1x1xi64>) -> index {
11 | %answer = graphblas.size %argA : tensor<1x1xi64> // expected-error {{op operand #0 must be 1D tensor of any type values, but got 'tensor<1x1xi64>'}}
12 | return %answer : index
13 | }
14 |
15 | // -----
16 |
17 | func @num_rows_wrapper(%argA: tensor<1xi64>) -> index {
18 | %answer = graphblas.num_rows %argA : tensor<1xi64> // expected-error {{op operand #0 must be 2D tensor of any type values, but got 'tensor<1xi64>'}}
19 | return %answer : index
20 | }
21 |
22 | // -----
23 |
24 | func @num_cols_wrapper(%argA: tensor<1xi64>) -> index {
25 | %answer = graphblas.num_cols %argA : tensor<1xi64> // expected-error {{op operand #0 must be 2D tensor of any type values, but got 'tensor<1xi64>'}}
26 | return %answer : index
27 | }
28 |
29 | // -----
30 |
31 | func @dup_wrapper(%argA: tensor<1x1xi64>) -> tensor<1x1xi64> {
32 | %answer = graphblas.dup %argA : tensor<1x1xi64> // expected-error {{operand must be a sparse tensor}}
33 | return %answer : tensor<1x1xi64>
34 | }
35 |
36 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/invalid_cast.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s -split-input-file -verify-diagnostics
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | module {
18 | func @cast(%m: tensor<?x?xf64, #CSR64>) -> tensor<?x?xf64, #CSC64> {
19 | %answer = graphblas.cast %m : tensor<?x?xf64, #CSR64> to tensor<?x?xf64, #CSC64> // expected-error {{result must have CSR compression.}}
20 | return %answer : tensor<?x?xf64, #CSC64>
21 | }
22 | }
23 |
24 | // -----
25 |
26 | #CV64 = #sparse_tensor.encoding<{
27 | dimLevelType = [ "compressed" ],
28 | pointerBitWidth = 64,
29 | indexBitWidth = 64
30 | }>
31 |
32 | #CV32 = #sparse_tensor.encoding<{
33 | dimLevelType = [ "compressed" ],
34 | pointerBitWidth = 32,
35 | indexBitWidth = 32
36 | }>
37 |
38 | module {
39 | func @cast(%v: tensor<?xf64, #CV64>) -> tensor<?xf64, #CV32> {
40 | %answer = graphblas.cast %v : tensor<?xf64, #CV64> to tensor<?xf64, #CV32> // expected-error {{Changing bit width is not yet supported. Input and output pointer bit widths do not match: 64!=32}}
41 | return %answer : tensor<?xf64, #CV32>
42 | }
43 | }
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/invalid_equal.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s -split-input-file -verify-diagnostics
2 |
3 | #CV64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "compressed" ],
5 | pointerBitWidth = 64,
6 | indexBitWidth = 64
7 | }>
8 |
9 | module {
10 |
11 | func @vector_equals_wrapper(%argA: tensor<3xi64>, %argB: tensor<3xi64>) -> i1 {
12 | %answer = graphblas.equal %argA, %argB : tensor<3xi64>, tensor<3xi64> // expected-error {{"a" must be a sparse tensor.}}
13 | return %answer : i1
14 | }
15 |
16 | }
17 |
18 | // -----
19 |
20 | #CV64 = #sparse_tensor.encoding<{
21 | dimLevelType = [ "compressed" ],
22 | pointerBitWidth = 64,
23 | indexBitWidth = 64
24 | }>
25 |
26 | module {
27 |
28 | func @vector_equals_wrapper(%argA: tensor<3xi64, #CV64>, %argB: tensor<3xf64, #CV64>) -> i1 {
29 | %answer = graphblas.equal %argA, %argB : tensor<3xi64, #CV64>, tensor<3xf64, #CV64> // expected-error {{"a" and "b" must have identical types.}}
30 | return %answer : i1
31 | }
32 |
33 | }
34 |
35 | // -----
36 |
37 | #CV64 = #sparse_tensor.encoding<{
38 | dimLevelType = [ "compressed" ],
39 | pointerBitWidth = 64,
40 | indexBitWidth = 64
41 | }>
42 |
43 | module {
44 |
45 | func @vector_equals_wrapper(%argA: tensor<3xi64, #CV64>, %argB: tensor<99xi64, #CV64>) -> i1 {
46 | %answer = graphblas.equal %argA, %argB : tensor<3xi64, #CV64>, tensor<99xi64, #CV64> // expected-error {{"a" and "b" must have identical shapes.}}
47 | return %answer : i1
48 | }
49 |
50 | }
51 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/invalid_matrix_convert_layout.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s -split-input-file -verify-diagnostics
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | module {
11 | func @convert_layout_wrapper(%sparse_tensor: tensor<2x3xi16, #CSR64>) -> tensor<3x2xi16, #CSR64> {
12 | %answer = graphblas.convert_layout %sparse_tensor : tensor<2x3xi16, #CSR64> to tensor<3x2xi16, #CSR64> // expected-error {{Input and output shapes are expected to be the same.}}
13 | return %answer : tensor<3x2xi16, #CSR64>
14 | }
15 | }
16 |
17 | // -----
18 |
19 | #CSR64 = #sparse_tensor.encoding<{
20 | dimLevelType = [ "dense", "compressed" ],
21 | dimOrdering = affine_map<(i,j) -> (i,j)>,
22 | pointerBitWidth = 64,
23 | indexBitWidth = 64
24 | }>
25 |
26 | #CSR_BOGUS = #sparse_tensor.encoding<{
27 | dimLevelType = [ "dense", "compressed" ],
28 | dimOrdering = affine_map<(i,j) -> (j,i)>,
29 | pointerBitWidth = 32,
30 | indexBitWidth = 64
31 | }>
32 |
33 | module {
34 | func @convert_layout_wrapper(%sparse_tensor: tensor<2x3xi16, #CSR64>) -> tensor<2x3xi16, #CSR_BOGUS> {
35 | %answer = graphblas.convert_layout %sparse_tensor : tensor<2x3xi16, #CSR64> to tensor<2x3xi16, #CSR_BOGUS> // expected-error {{Input and output pointer bit widths do not match: 64!=32}}
36 | return %answer : tensor<2x3xi16, #CSR_BOGUS>
37 | }
38 | }
39 |
40 | // -----
41 |
42 | #CSR64 = #sparse_tensor.encoding<{
43 | dimLevelType = [ "dense", "compressed" ],
44 | dimOrdering = affine_map<(i,j) -> (i,j)>,
45 | pointerBitWidth = 64,
46 | indexBitWidth = 64
47 | }>
48 |
49 | #CSR_BOGUS = #sparse_tensor.encoding<{
50 | dimLevelType = [ "dense", "compressed" ],
51 | dimOrdering = affine_map<(i,j) -> (j,i)>,
52 | pointerBitWidth = 64,
53 | indexBitWidth = 32
54 | }>
55 |
56 | module {
57 | func @convert_layout_wrapper(%sparse_tensor: tensor<2x3xi16, #CSR64>) -> tensor<2x3xi16, #CSR_BOGUS> {
58 | %answer = graphblas.convert_layout %sparse_tensor : tensor<2x3xi16, #CSR64> to tensor<2x3xi16, #CSR_BOGUS> // expected-error {{Input and output index bit widths do not match: 64!=32}}
59 | return %answer : tensor<2x3xi16, #CSR_BOGUS>
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/invalid_matrix_transpose.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s -split-input-file -verify-diagnostics
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | module {
11 | func @transpose_wrapper(%sparse_tensor: tensor<2x3xi16, #CSR64>) -> tensor<2x3xi16, #CSR64> {
12 | %answer = graphblas.transpose %sparse_tensor : tensor<2x3xi16, #CSR64> to tensor<2x3xi16, #CSR64> // expected-error {{Input and output shapes are expected to be swapped.}}
13 | return %answer : tensor<2x3xi16, #CSR64>
14 | }
15 | }
16 |
17 | // -----
18 |
19 | #CSR64 = #sparse_tensor.encoding<{
20 | dimLevelType = [ "dense", "compressed" ],
21 | dimOrdering = affine_map<(i,j) -> (i,j)>,
22 | pointerBitWidth = 64,
23 | indexBitWidth = 64
24 | }>
25 |
26 | module {
27 | func @transpose_wrapper(%sparse_tensor: tensor<2x3xi16, #CSR64>) -> tensor<99x2xi16, #CSR64> {
28 | %answer = graphblas.transpose %sparse_tensor : tensor<2x3xi16, #CSR64> to tensor<99x2xi16, #CSR64> // expected-error {{Input and output shapes are expected to be swapped.}}
29 | return %answer : tensor<99x2xi16, #CSR64>
30 | }
31 | }
32 |
33 | // -----
34 |
35 | #CSR64 = #sparse_tensor.encoding<{
36 | dimLevelType = [ "dense", "compressed" ],
37 | dimOrdering = affine_map<(i,j) -> (i,j)>,
38 | pointerBitWidth = 64,
39 | indexBitWidth = 64
40 | }>
41 |
42 | #CSR_BOGUS = #sparse_tensor.encoding<{
43 | dimLevelType = [ "dense", "compressed" ],
44 | dimOrdering = affine_map<(i,j) -> (j,i)>,
45 | pointerBitWidth = 32,
46 | indexBitWidth = 64
47 | }>
48 |
49 | module {
50 | func @transpose_wrapper(%sparse_tensor: tensor<2x3xi16, #CSR64>) -> tensor<3x2xi16, #CSR_BOGUS> {
51 | %answer = graphblas.transpose %sparse_tensor : tensor<2x3xi16, #CSR64> to tensor<3x2xi16, #CSR_BOGUS> // expected-error {{Input and output pointer bit widths do not match: 64!=32}}
52 | return %answer : tensor<3x2xi16, #CSR_BOGUS>
53 | }
54 | }
55 |
56 | // -----
57 |
58 | #CSR64 = #sparse_tensor.encoding<{
59 | dimLevelType = [ "dense", "compressed" ],
60 | dimOrdering = affine_map<(i,j) -> (i,j)>,
61 | pointerBitWidth = 64,
62 | indexBitWidth = 64
63 | }>
64 |
65 | #CSR_BOGUS = #sparse_tensor.encoding<{
66 | dimLevelType = [ "dense", "compressed" ],
67 | dimOrdering = affine_map<(i,j) -> (j,i)>,
68 | pointerBitWidth = 64,
69 | indexBitWidth = 32
70 | }>
71 |
72 | module {
73 | func @transpose_wrapper(%sparse_tensor: tensor<2x3xi16, #CSR64>) -> tensor<3x2xi16, #CSR_BOGUS> {
74 | %answer = graphblas.transpose %sparse_tensor : tensor<2x3xi16, #CSR64> to tensor<3x2xi16, #CSR_BOGUS> // expected-error {{Input and output index bit widths do not match: 64!=32}}
75 | return %answer : tensor<3x2xi16, #CSR_BOGUS>
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/invalid_print.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s -split-input-file -verify-diagnostics
2 |
3 | #BadSparseEncoding = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense" ],
5 | pointerBitWidth = 64,
6 | indexBitWidth = 64
7 | }>
8 |
9 | module {
10 | func @printer_func(%tensor: tensor<?xf64, #BadSparseEncoding>) {
11 | graphblas.print %tensor { strings = ["printed : "] } : tensor<?xf64, #BadSparseEncoding> // expected-error {{Vectors must be dense or sparse.}}
12 | return
13 | }
14 | }
15 |
16 | // -----
17 |
18 | #BadSparseEncoding = #sparse_tensor.encoding<{
19 | dimLevelType = [ "singleton" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | module {
25 | func @printer_func(%tensor: tensor<?xf64, #BadSparseEncoding>) {
26 | graphblas.print %tensor { strings = ["printed : "] } : tensor<?xf64, #BadSparseEncoding> // expected-error {{Vectors must be dense or sparse.}}
27 | return
28 | }
29 | }
30 |
31 | // -----
32 |
33 | #SparseEncoding = #sparse_tensor.encoding<{
34 | dimLevelType = [ "compressed", "compressed" ],
35 | dimOrdering = affine_map<(i,j) -> (i,j)>,
36 | pointerBitWidth = 64,
37 | indexBitWidth = 64
38 | }>
39 |
40 | module {
41 | func @printer_func(%tensor: tensor<?x?xf64, #SparseEncoding>) {
42 | graphblas.print %tensor { strings = ["printed : "] } : tensor<?x?xf64, #SparseEncoding> // expected-error {{must have CSR or CSC compression, i.e. must have dimLevelType = [ "dense", "compressed" ] in the sparse encoding.}}
43 | return
44 | }
45 | }
46 |
47 | // -----
48 |
49 | #SparseEncoding = #sparse_tensor.encoding<{
50 | dimLevelType = [ "dense", "compressed", "compressed" ],
51 | dimOrdering = affine_map<(i,j,k) -> (i,j,k)>,
52 | pointerBitWidth = 64,
53 | indexBitWidth = 64
54 | }>
55 |
56 | module {
57 | func @printer_func(%tensor: tensor<?x?x?xf64, #SparseEncoding>) {
58 | graphblas.print %tensor { strings = ["printed : "] } : tensor<?x?x?xf64, #SparseEncoding> // expected-error {{Can only print sparse tensors with rank 1 or 2.}}
59 | return
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/invalid_select.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s -split-input-file -verify-diagnostics
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | module {
11 | func @matrix_select_wrapper(%sparse_tensor: tensor<2x3xbf16, #CSR64>) -> tensor<2x3xbf16> {
12 | %answer = graphblas.select %sparse_tensor { selector = "triu" } : tensor<2x3xbf16, #CSR64> to tensor<2x3xbf16> // expected-error {{result must be a sparse tensor.}}
13 | return %answer : tensor<2x3xbf16>
14 | }
15 | }
16 |
17 | // -----
18 |
19 | #CSR64 = #sparse_tensor.encoding<{
20 | dimLevelType = [ "dense", "compressed" ],
21 | dimOrdering = affine_map<(i,j) -> (i,j)>,
22 | pointerBitWidth = 64,
23 | indexBitWidth = 64
24 | }>
25 |
26 | module {
27 | func @matrix_select_wrapper(%sparse_tensor: tensor<2x3xi8, #CSR64>) -> tensor<2x3xi8, #CSR64> {
28 | %answer = graphblas.select %sparse_tensor { selector = "BADSELECTOR" } : tensor<2x3xi8, #CSR64> to tensor<2x3xi8, #CSR64> // expected-error {{"BADSELECTOR" is not a supported selector.}}
29 | return %answer : tensor<2x3xi8, #CSR64>
30 | }
31 | }
32 |
33 | // -----
34 |
35 | #CSR64 = #sparse_tensor.encoding<{
36 | dimLevelType = [ "dense", "compressed" ],
37 | dimOrdering = affine_map<(i,j) -> (i,j)>,
38 | pointerBitWidth = 64,
39 | indexBitWidth = 64
40 | }>
41 |
42 | module {
43 | func @matrix_select_wrapper(%sparse_tensor: tensor<2x3xbf16, #CSR64>) -> tensor<2x3xbf16, #CSR64> {
44 | %thunk = arith.constant 0.0 : f64
45 | %answer = graphblas.select %sparse_tensor, %thunk { selector = "gt" } : tensor<2x3xbf16, #CSR64>, f64 to tensor<2x3xbf16, #CSR64> // expected-error {{Thunk type must match operand type.}}
46 | return %answer : tensor<2x3xbf16, #CSR64>
47 | }
48 | }
49 |
50 | // -----
51 |
52 | #CSR64 = #sparse_tensor.encoding<{
53 | dimLevelType = [ "dense", "compressed" ],
54 | dimOrdering = affine_map<(i,j) -> (i,j)>,
55 | pointerBitWidth = 64,
56 | indexBitWidth = 64
57 | }>
58 |
59 | module {
60 | func @matrix_select_wrapper(%sparse_tensor: tensor<2x3xbf16, #CSR64>) -> tensor<2x3xbf16, #CSR64> {
61 | %answer = graphblas.select %sparse_tensor { selector = "gt" } : tensor<2x3xbf16, #CSR64> to tensor<2x3xbf16, #CSR64> // expected-error {{Selector 'gt' requires a thunk.}}
62 | return %answer : tensor<2x3xbf16, #CSR64>
63 | }
64 | }
65 |
66 | // -----
67 |
68 | #CSR64 = #sparse_tensor.encoding<{
69 | dimLevelType = [ "dense", "compressed" ],
70 | dimOrdering = affine_map<(i,j) -> (i,j)>,
71 | pointerBitWidth = 64,
72 | indexBitWidth = 64
73 | }>
74 |
75 | module {
76 | func @matrix_select_wrapper(%sparse_tensor: tensor<2x3xbf16, #CSR64>) -> tensor<2x3xbf16, #CSR64> {
77 | %thunk = arith.constant 0.0 : bf16
78 | %answer = graphblas.select %sparse_tensor, %thunk { selector = "triu" } : tensor<2x3xbf16, #CSR64>, bf16 to tensor<2x3xbf16, #CSR64> // expected-error {{Selector 'triu' cannot take a thunk.}}
79 | return %answer : tensor<2x3xbf16, #CSR64>
80 | }
81 | }
82 |
83 | // -----
84 |
85 | #CV64 = #sparse_tensor.encoding<{
86 | dimLevelType = [ "compressed" ],
87 | pointerBitWidth = 64,
88 | indexBitWidth = 64
89 | }>
90 |
91 | module {
92 | func @vector_select_wrapper(%sparse_tensor: tensor<2xf64, #CV64>) -> tensor<2xf64, #CV64> {
93 | %answer_0 = graphblas.select %sparse_tensor { selector = "triu" } : tensor<2xf64, #CV64> to tensor<2xf64, #CV64> // expected-error {{Selector 'triu' not allowed for vectors}}
94 | return %answer_0 : tensor<2xf64, #CV64>
95 | }
96 | }
97 |
98 | // -----
99 |
100 | #CV64 = #sparse_tensor.encoding<{
101 | dimLevelType = [ "compressed" ],
102 | pointerBitWidth = 64,
103 | indexBitWidth = 64
104 | }>
105 |
106 | module {
107 | func @vector_select_wrapper(%sparse_tensor: tensor<2xf64, #CV64>, %rng_context: !llvm.ptr<i8>) -> tensor<2xf64, #CV64> {
108 | %thunk = arith.constant 0.65 : f64
109 | %thunk2 = arith.constant 0.75 : f64
110 | %answer_0 = graphblas.select %sparse_tensor, %thunk, %rng_context, %thunk2 { selector = "probability" } : tensor<2xf64, #CV64>, f64, !llvm.ptr<i8>, f64 to tensor<2xf64, #CV64> // expected-error {{Too many thunk values provided.}}
111 | return %answer_0 : tensor<2xf64, #CV64>
112 | }
113 | }
114 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/invalid_vector_argminmax.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s -split-input-file -verify-diagnostics
2 |
3 | #CV64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "compressed" ],
5 | pointerBitWidth = 64,
6 | indexBitWidth = 64
7 | }>
8 |
9 | module {
10 |
11 | func @vector_argmin_wrapper(%argA: tensor<3xi64, #CV64>) -> i64 {
12 | %answer = graphblas.reduce_to_scalar %argA { aggregator = "bogus" } : tensor<3xi64, #CV64> to i64 // expected-error {{"bogus" is not a supported aggregator.}}
13 | return %answer : i64
14 | }
15 |
16 | }
17 |
18 | // -----
19 |
20 | module {
21 |
22 | func @vector_argmax_wrapper(%argA: tensor<3xi64>) -> i64 {
23 | %answer = graphblas.reduce_to_scalar %argA { aggregator = "argmax" } : tensor<3xi64> to i64 // expected-error {{operand must be a sparse tensor.}}
24 | return %answer : i64
25 | }
26 |
27 | }
28 |
29 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/invalid_vector_dot_product.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s -split-input-file -verify-diagnostics
2 |
3 | #CV64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "compressed" ],
5 | pointerBitWidth = 64,
6 | indexBitWidth = 64
7 | }>
8 |
9 | module {
10 |
11 | func @vector_dot_product_wrapper(%argA: tensor<?xi64, #CV64>, %argB: tensor<?xi64, #CV64>) -> i64 {
12 | %answer = graphblas.matrix_multiply %argA, %argB { semiring = "BAD" } : (tensor<?xi64, #CV64>, tensor<?xi64, #CV64>) to i64 // expected-error {{"BAD" is not a supported monoid.}}
13 | return %answer : i64
14 | }
15 |
16 | }
17 |
18 | // -----
19 |
20 | #CV64 = #sparse_tensor.encoding<{
21 | dimLevelType = [ "compressed" ],
22 | pointerBitWidth = 64,
23 | indexBitWidth = 64
24 | }>
25 |
26 | module {
27 |
28 | func @vector_dot_product_wrapper(%argA: tensor<3xi64>, %argB: tensor<?xi64, #CV64>) -> i64 {
29 | %answer = graphblas.matrix_multiply %argA, %argB { semiring = "plus_times" } : (tensor<3xi64>, tensor<?xi64, #CV64>) to i64 // expected-error {{1st operand must be a sparse tensor.}}
30 | return %answer : i64
31 | }
32 |
33 | }
34 |
35 | // -----
36 |
37 | #CV64 = #sparse_tensor.encoding<{
38 | dimLevelType = [ "compressed" ],
39 | pointerBitWidth = 64,
40 | indexBitWidth = 64
41 | }>
42 |
43 | module {
44 |
45 | func @vector_dot_product_wrapper(%argA: tensor<3xi64, #CV64>, %argB: tensor<3xf64, #CV64>) -> i64 {
46 | %answer = graphblas.matrix_multiply %argA, %argB { semiring = "plus_times" } : (tensor<3xi64, #CV64>, tensor<3xf64, #CV64>) to i64 // expected-error {{Operand element types must be identical.}}
47 | return %answer : i64
48 | }
49 |
50 | }
51 |
52 | // -----
53 |
54 | #CV64 = #sparse_tensor.encoding<{
55 | dimLevelType = [ "compressed" ],
56 | pointerBitWidth = 64,
57 | indexBitWidth = 64
58 | }>
59 |
60 | module {
61 |
62 | func @vector_dot_product_wrapper(%argA: tensor<3xi64, #CV64>, %argB: tensor<3xi64, #CV64>) -> i8 {
63 | %answer = graphblas.matrix_multiply %argA, %argB { semiring = "plus_times" } : (tensor<3xi64, #CV64>, tensor<3xi64, #CV64>) to i8 // expected-error {{Result element type differs from the input element types.}}
64 | return %answer : i8
65 | }
66 |
67 | }
68 |
69 | // -----
70 |
71 | #CV64 = #sparse_tensor.encoding<{
72 | dimLevelType = [ "compressed" ],
73 | pointerBitWidth = 64,
74 | indexBitWidth = 64
75 | }>
76 |
77 | module {
78 |
79 | func @vector_dot_product_wrapper(%argA: tensor<3xi64, #CV64>, %argB: tensor<9xi64, #CV64>) -> i64 {
80 | %answer = graphblas.matrix_multiply %argA, %argB { semiring = "plus_times" } : (tensor<3xi64, #CV64>, tensor<9xi64, #CV64>) to i64 // expected-error {{Operand shapes are incompatible.}}
81 | return %answer : i64
82 | }
83 |
84 | }
85 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/lower_comment.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-opt --graphblas-lower | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | // CHECK-LABEL: func @create_and_destroy(
11 | // CHECK-SAME: %[[VAL_0:.*]]: index,
12 | // CHECK-SAME: %[[VAL_1:.*]]: index) -> index {
13 | // CHECK: %[[VAL_2:.*]] = arith.addi %[[VAL_0]], %[[VAL_1]] : index
14 | // CHECK: %[[VAL_3:.*]] = sparse_tensor.init{{\[}}%[[VAL_0]], %[[VAL_1]]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
15 | // CHECK: sparse_tensor.release %[[VAL_3]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
16 | // CHECK: return %[[VAL_2]] : index
17 | // CHECK: }
18 |
19 | func @create_and_destroy(%nrows: index, %ncols: index) -> index {
20 | graphblas.comment { comment = "comment number 1" }
21 | %sum = arith.addi %nrows, %ncols : index
22 | graphblas.comment { comment = "comment number 2" }
23 | %new_tensor = sparse_tensor.init [%nrows, %ncols] : tensor<?x?xf64, #CSR64>
24 | graphblas.comment { comment = "comment number 3" }
25 | sparse_tensor.release %new_tensor : tensor<?x?xf64, #CSR64>
26 | return %sum : index
27 | }
28 |
29 | // CHECK-LABEL: func @do_nothing_func() {
30 | // CHECK-NEXT: return
31 | func @do_nothing_func() -> () {
32 | graphblas.comment { comment = "comment number 1" }
33 | graphblas.comment { comment = "comment number 2" }
34 | graphblas.comment { comment = "comment number 3" }
35 | return
36 | }
37 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/lower_convert_layout.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-opt --graphblas-lower | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | // CHECK-LABEL: func @convert_layout_csr_noop(
18 | // CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>) -> tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>> {
19 | // CHECK: return %[[VAL_0]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
20 | func @convert_layout_csr_noop(%sparse_tensor: tensor<?x?xf64, #CSR64>) -> tensor<?x?xf64, #CSR64> {
21 | %answer = graphblas.convert_layout %sparse_tensor : tensor<?x?xf64, #CSR64> to tensor<?x?xf64, #CSR64>
22 | return %answer : tensor<?x?xf64, #CSR64>
23 | }
24 |
25 | // CHECK-LABEL: func @convert_layout_csc_noop(
26 | // CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>>) -> tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>> {
27 | // CHECK: return %[[VAL_0]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
28 | func @convert_layout_csc_noop(%sparse_tensor: tensor<?x?xf64, #CSC64>) -> tensor<?x?xf64, #CSC64> {
29 | %answer = graphblas.convert_layout %sparse_tensor : tensor<?x?xf64, #CSC64> to tensor<?x?xf64, #CSC64>
30 | return %answer : tensor<?x?xf64, #CSC64>
31 | }
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/structuralize_matrix_multiply.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-opt --graphblas-structuralize | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | // CHECK-LABEL: func @matrix_multiply_plus_times(
18 | // CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>,
19 | // CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>>) -> tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>> {
20 | // CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f64
21 | // CHECK: %[[VAL_3:.*]] = graphblas.matrix_multiply_generic %[[VAL_0]], %[[VAL_1]] {mask_complement = false} : (tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>, tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>>) to tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>> {
22 | // CHECK: graphblas.yield add_identity %[[VAL_2]] : f64
23 | // CHECK: }, {
24 | // CHECK: ^bb0(%[[VAL_4:.*]]: f64, %[[VAL_5:.*]]: f64):
25 | // CHECK: %[[VAL_6:.*]] = arith.addf %[[VAL_4]], %[[VAL_5]] : f64
26 | // CHECK: graphblas.yield add %[[VAL_6]] : f64
27 | // CHECK: }, {
28 | // CHECK: ^bb0(%[[VAL_7:.*]]: f64, %[[VAL_8:.*]]: f64):
29 | // CHECK: %[[VAL_11:.*]] = arith.mulf %[[VAL_7]], %[[VAL_8]] : f64
30 | // CHECK: graphblas.yield mult %[[VAL_11]] : f64
31 | // CHECK: }
32 | // CHECK: return %[[VAL_12:.*]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
33 | // CHECK: }
34 | func @matrix_multiply_plus_times(%a: tensor<?x?xf64, #CSR64>, %b: tensor<?x?xf64, #CSC64>) -> tensor<?x?xf64, #CSR64> {
35 | %answer = graphblas.matrix_multiply %a, %b { semiring = "plus_times" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64>
36 | return %answer : tensor<?x?xf64, #CSR64>
37 | }
38 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/structuralize_reduce_to_scalar.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-opt --graphblas-structuralize | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | module {
11 |
12 | // CHECK-LABEL: func @matrix_reduce_to_scalar_f64(
13 | // CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>) -> f64 {
14 | // CHECK: %[[VAL_1:.*]] = arith.constant 0.000000e+00 : f64
15 | // CHECK: %[[VAL_2:.*]] = graphblas.reduce_to_scalar_generic %[[VAL_0]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>> to f64 {
16 | // CHECK: graphblas.yield agg_identity %[[VAL_1]] : f64
17 | // CHECK: }, {
18 | // CHECK: ^bb0(%[[VAL_3:.*]]: f64, %[[VAL_4:.*]]: f64):
19 | // CHECK: %[[VAL_5:.*]] = arith.addf %[[VAL_3]], %[[VAL_4]] : f64
20 | // CHECK: graphblas.yield agg %[[VAL_5]] : f64
21 | // CHECK: }
22 | // CHECK: return %[[VAL_2]] : f64
23 | // CHECK: }
24 | func @matrix_reduce_to_scalar_f64(%sparse_tensor: tensor<?x?xf64, #CSR64>) -> f64 {
25 | %answer = graphblas.reduce_to_scalar %sparse_tensor { aggregator = "plus" } : tensor<?x?xf64, #CSR64> to f64
26 | return %answer : f64
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/structuralize_transpose.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-opt --graphblas-dwim --graphblas-structuralize | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | module {
18 | // CHECK: func @transpose_different_compression(%[[VAL_0:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>) -> tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>> {
19 | // CHECK: %[[VAL_1:.*]] = graphblas.transpose %[[VAL_0]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>> to tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
20 | // CHECK: return %[[VAL_1]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
21 | // CHECK: }
22 | func @transpose_different_compression(%sparse_tensor: tensor<?x?xf64, #CSR64>) -> tensor<?x?xf64, #CSC64> {
23 | %answer = graphblas.transpose %sparse_tensor : tensor<?x?xf64, #CSR64> to tensor<?x?xf64, #CSC64>
24 | return %answer : tensor<?x?xf64, #CSC64>
25 | }
26 |
27 |
28 | // CHECK: func @transpose_same_compression(%[[VAL_0:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>) -> tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>> {
29 | // CHECK: %[[VAL_1:.*]] = graphblas.convert_layout %[[VAL_0]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>> to tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
30 | // CHECK: %[[VAL_2:.*]] = graphblas.transpose %[[VAL_1]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 64, indexBitWidth = 64 }>> to tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
31 | // CHECK: return %[[VAL_2]] : tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>
32 | // CHECK: }
33 | func @transpose_same_compression(%sparse_tensor: tensor) -> tensor {
34 | %answer = graphblas.transpose %sparse_tensor : tensor to tensor
35 | return %answer : tensor
36 | }
37 |
38 | }
39 |
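Note the asymmetry these two cases encode: a CSR matrix and the CSC form of its transpose share identical pointer/index/value arrays, so transposing into the opposite compression is representable as a single graphblas.transpose. Transposing into the same compression cannot reuse the arrays, which is presumably why the pass first emits a graphblas.convert_layout to the opposite ordering and only then the structural transpose.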
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_apply_inplace.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CV = #sparse_tensor.encoding<{
4 | dimLevelType = [ "compressed" ],
5 | pointerBitWidth = 64,
6 | indexBitWidth = 64
7 | }>
8 |
9 | module {
10 | func @entry() {
11 | ///////////////
12 | // Test Vector
13 | ///////////////
14 |
15 | %v = arith.constant sparse<[
16 | [1], [2], [4], [7]
17 | ], [1, 2, 3, 4]> : tensor<9xi32>
18 | %v_cv = sparse_tensor.convert %v : tensor<9xi32> to tensor<?xi32, #CV>
19 |
20 | // CV apply in place
21 | //
22 | // CHECK: [_, 0.479426, 0.863209, _, 0.916166, -0.57844, _, _]
23 | //
24 | %130 = arith.constant sparse<[
25 | [1], [2], [4], [5]
26 | ], [0.5, 2.1, -4.3, -6.9]> : tensor<8xf32>
27 | %131 = sparse_tensor.convert %130 : tensor<8xf32> to tensor<?xf32, #CV>
28 | graphblas.apply %131 { apply_operator="sin", in_place=true } : (tensor<?xf32, #CV>) to tensor<?xf32, #CV>
29 | graphblas.print_tensor %131 { level=0 } : tensor<?xf32, #CV>
30 |
31 | return
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_cast.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | module {
24 | func @entry() {
25 | ///////////////
26 | // Test Float
27 | ///////////////
28 |
29 | %m = arith.constant sparse<[
30 | [0, 1], [0, 2],
31 | [1, 0], [1, 3], [1, 4],
32 | [3, 2]
33 | ], [1., 2.25, 3., 4., 5., 6.]> : tensor<4x5xf64>
34 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSR>
35 |
36 | // CSR f64 cast to f32
37 | //
38 | // CHECK: pointers=(0, 2, 5, 5, 6)
39 | // CHECK-NEXT: indices=(1, 2, 0, 3, 4, 2)
40 | // CHECK-NEXT: values=(1, 2.25, 3, 4, 5, 6)
41 | //
42 | %0 = graphblas.cast %m_csr : tensor<?x?xf64, #CSR> to tensor<?x?xf32, #CSR>
43 | graphblas.print_tensor %0 { level=3 } : tensor<?x?xf32, #CSR>
44 |
45 | // CSR f64 cast to i64
46 | //
47 | // CHECK: pointers=(0, 2, 5, 5, 6)
48 | // CHECK-NEXT: indices=(1, 2, 0, 3, 4, 2)
49 | // CHECK-NEXT: values=(1, 2, 3, 4, 5, 6)
50 | //
51 | %10 = graphblas.cast %m_csr : tensor<?x?xf64, #CSR> to tensor<?x?xi64, #CSR>
52 | graphblas.print_tensor %10 { level=3 } : tensor<?x?xi64, #CSR>
53 |
54 |
55 | ///////////////
56 | // Test Integer
57 | ///////////////
58 |
59 | %v = arith.constant sparse<[
60 | [1], [2], [4], [7]
61 | ], [1, 2, 3, 4]> : tensor<9xi32>
62 | %v_cv = sparse_tensor.convert %v : tensor<9xi32> to tensor<?xi32, #CV>
63 |
64 | // CV i32 cast to i8
65 | //
66 | // CHECK: pointers=(0, 4)
67 | // CHECK-NEXT: indices=(1, 2, 4, 7)
68 | // CHECK-NEXT: values=(1, 2, 3, 4)
69 | //
70 | %20 = graphblas.cast %v_cv : tensor<?xi32, #CV> to tensor<?xi8, #CV>
71 | graphblas.print_tensor %20 { level=3 } : tensor<?xi8, #CV>
72 |
73 | // CV i32 cast to i64
74 | //
75 | // CHECK: pointers=(0, 4)
76 | // CHECK-NEXT: indices=(1, 2, 4, 7)
77 | // CHECK-NEXT: values=(1, 2, 3, 4)
78 | //
79 | %30 = graphblas.cast %v_cv : tensor<?xi32, #CV> to tensor<?xi64, #CV>
80 | graphblas.print_tensor %30 { level=3 } : tensor<?xi64, #CV>
81 |
82 | // CV i32 cast to f32
83 | //
84 | // CHECK: pointers=(0, 4)
85 | // CHECK-NEXT: indices=(1, 2, 4, 7)
86 | // CHECK-NEXT: values=(1, 2, 3, 4)
87 | //
88 | %40 = graphblas.cast %v_cv : tensor<?xi32, #CV> to tensor<?xf32, #CV>
89 | graphblas.print_tensor %40 { level=3 } : tensor<?xf32, #CV>
90 |
91 | return
92 | }
93 | }
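A worked note on the first two cases: the f64 source values are (1, 2.25, 3, 4, 5, 6). Casting to f32 preserves 2.25, while casting to i64 yields 2, consistent with the fractional part being truncated toward zero (though rounding would give the same result for .25); the widening casts i32 -> i64 and i32 -> f32 leave all values unchanged.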
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_comment.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 |
3 | #CSR = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | func @main() -> () {
24 | graphblas.comment { comment = "comment number 0" }
25 | %m = arith.constant sparse<[
26 | [0, 1], [0, 2],
27 | [1, 0], [1, 3], [1, 4],
28 | [3, 2]
29 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
30 | graphblas.comment { comment = "comment number 1" }
31 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSR>
32 | graphblas.comment { comment = "comment number 2" }
33 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSC>
34 | graphblas.comment { comment = "comment number 3" }
35 |
36 | // CSR apply generic
37 | //
38 | // CHECK: pointers=(0, 2, 5, 5, 6)
39 | // CHECK-NEXT: indices=(1, 2, 0, 3, 4, 2)
40 | // CHECK-NEXT: values=(1, 2, 3, 4, 4.5, 4.5)
41 | //
42 | graphblas.comment { comment = "comment number 4" }
43 | %thunk_f64 = arith.constant 4.5 : f64
44 | graphblas.comment { comment = "comment number 5" }
45 | %0 = graphblas.apply_generic %m_csr : tensor<?x?xf64, #CSR> to tensor<?x?xf64, #CSR> {
46 | ^bb0(%val: f64):
47 | graphblas.comment { comment = "comment number 6" }
48 | %pick = arith.cmpf olt, %val, %thunk_f64 : f64
49 | graphblas.comment { comment = "comment number 7" }
50 | %result = arith.select %pick, %val, %thunk_f64 : f64
51 | graphblas.comment { comment = "comment number 8" }
52 | graphblas.yield transform_out %result : f64
53 | }
54 | graphblas.comment { comment = "comment number 9" }
55 | graphblas.print_tensor %0 { level=3 } : tensor<?x?xf64, #CSR>
56 | graphblas.comment { comment = "comment number 10" }
57 |
58 | return
59 | }
60 |
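Beyond exercising graphblas.comment, which should have no effect on the computed result, the apply_generic block above implements min(val, 4.5): values below the 4.5 thunk pass through unchanged and 5 and 6 are clamped, which is exactly the expected values=(1, 2, 3, 4, 4.5, 4.5).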
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_convert_layout.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | module {
24 | func @entry() {
25 | ///////////////
26 | // Test Matrix
27 | ///////////////
28 |
29 | %m = arith.constant sparse<[
30 | [0, 1], [0, 2],
31 | [1, 0], [1, 3], [1, 4],
32 | [3, 2]
33 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
34 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSR>
35 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSC>
36 |
37 | // CSR -> CSC
38 | //
39 | // CHECK: rev=(1, 0)
40 | // CHECK-NEXT: shape=(4, 5)
41 | // CHECK-NEXT: pointers=(0, 1, 2, 4, 5, 6)
42 | // CHECK-NEXT: indices=(1, 0, 0, 3, 1, 1)
43 | // CHECK-NEXT: values=(3, 1, 2, 6, 4, 5)
44 | //
45 | %0 = graphblas.convert_layout %m_csr : tensor<?x?xf64, #CSR> to tensor<?x?xf64, #CSC>
46 | graphblas.print_tensor %0 { level=5 } : tensor<?x?xf64, #CSC>
47 |
48 | // CSC -> CSR
49 | //
50 | // CHECK: rev=(0, 1)
51 | // CHECK-NEXT: shape=(4, 5)
52 | // CHECK-NEXT: pointers=(0, 2, 5, 5, 6)
53 | // CHECK-NEXT: indices=(1, 2, 0, 3, 4, 2)
54 | // CHECK-NEXT: values=(1, 2, 3, 4, 5, 6)
55 | //
56 | %10 = graphblas.convert_layout %m_csc : tensor<?x?xf64, #CSC> to tensor<?x?xf64, #CSR>
57 | graphblas.print_tensor %10 { level=5 } : tensor<?x?xf64, #CSR>
58 |
59 | // CSC -> CSC (should be unchanged)
60 | //
61 | // CHECK: rev=(1, 0)
62 | // CHECK-NEXT: shape=(4, 5)
63 | // CHECK-NEXT: pointers=(0, 1, 2, 4, 5, 6)
64 | // CHECK-NEXT: indices=(1, 0, 0, 3, 1, 1)
65 | // CHECK-NEXT: values=(3, 1, 2, 6, 4, 5)
66 | //
67 | %20 = graphblas.convert_layout %m_csc : tensor<?x?xf64, #CSC> to tensor<?x?xf64, #CSC>
68 | graphblas.print_tensor %20 { level=5 } : tensor<?x?xf64, #CSC>
69 |
70 | // CSR -> CSR (should be unchanged)
71 | //
72 | // CHECK: rev=(0, 1)
73 | // CHECK-NEXT: shape=(4, 5)
74 | // CHECK-NEXT: pointers=(0, 2, 5, 5, 6)
75 | // CHECK-NEXT: indices=(1, 2, 0, 3, 4, 2)
76 | // CHECK-NEXT: values=(1, 2, 3, 4, 5, 6)
77 | //
78 | %30 = graphblas.convert_layout %10 : tensor<?x?xf64, #CSR> to tensor<?x?xf64, #CSR>
79 | graphblas.print_tensor %30 { level=5 } : tensor<?x?xf64, #CSR>
80 |
81 | return
82 | }
83 | }
84 |
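The CSR -> CSC expectations can be derived by hand: the six stored entries fall in columns (1, 2, 0, 3, 4, 2), so the per-column counts are (1, 1, 2, 1, 1) and their prefix sum gives pointers=(0, 1, 2, 4, 5, 6); indices=(1, 0, 0, 3, 1, 1) are the row numbers of the entries re-sorted by column, and the values follow the same reordering.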
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_diag_mat.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV64 = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | module {
24 | func @main() -> () {
25 | %dense_mat = arith.constant dense<[
26 | [0, 1, 2, 0],
27 | [0, 0, 0, 3],
28 | [4, 0, 5, 0],
29 | [0, 6, 0, 7]
30 | ]> : tensor<4x4xi64>
31 | %mat_csr = sparse_tensor.convert %dense_mat : tensor<4x4xi64> to tensor<?x?xi64, #CSR64>
32 | %mat_csc = sparse_tensor.convert %dense_mat : tensor<4x4xi64> to tensor<?x?xi64, #CSC64>
33 |
34 | %answer_via_csr = graphblas.diag %mat_csr : tensor<?x?xi64, #CSR64> to tensor<?xi64, #CV64>
35 | // CHECK: %answer_via_csr [_, _, 5, 7]
36 | graphblas.print %answer_via_csr { strings = ["%answer_via_csr "] } : tensor<?xi64, #CV64>
37 |
38 | %answer_via_csc = graphblas.diag %mat_csc : tensor<?x?xi64, #CSC64> to tensor<?xi64, #CV64>
39 | // CHECK: %answer_via_csc [_, _, 5, 7]
40 | graphblas.print %answer_via_csc { strings = ["%answer_via_csc "] } : tensor<?xi64, #CV64>
41 |
42 | return
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_diag_vec.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec main | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV64 = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | module {
25 | func @main() -> () {
26 | %dense_vec = arith.constant dense<[0.0, 7.0, 4.0, 0.0, 5.0, 0.0, 6.0, 8.0]> : tensor<8xf64>
27 | %vec = sparse_tensor.convert %dense_vec : tensor<8xf64> to tensor<?xf64, #CV64>
28 |
29 | %answer_csr = graphblas.diag %vec : tensor<?xf64, #CV64> to tensor<?x?xf64, #CSR64>
30 | // CHECK: %answer_csr [
31 | // CHECK-NEXT: [_, _, _, _, _, _, _, _],
32 | // CHECK-NEXT: [_, 7, _, _, _, _, _, _],
33 | // CHECK-NEXT: [_, _, 4, _, _, _, _, _],
34 | // CHECK-NEXT: [_, _, _, _, _, _, _, _],
35 | // CHECK-NEXT: [_, _, _, _, 5, _, _, _],
36 | // CHECK-NEXT: [_, _, _, _, _, _, _, _],
37 | // CHECK-NEXT: [_, _, _, _, _, _, 6, _],
38 | // CHECK-NEXT: [_, _, _, _, _, _, _, 8]
39 | // CHECK-NEXT: ]
40 | graphblas.print %answer_csr { strings = ["%answer_csr "] } : tensor<?x?xf64, #CSR64>
41 |
42 | %answer_csc = graphblas.diag %vec : tensor<?xf64, #CV64> to tensor<?x?xf64, #CSC64>
43 | // CHECK: %answer_csc [
44 | // CHECK-NEXT: [_, _, _, _, _, _, _, _],
45 | // CHECK-NEXT: [_, 7, _, _, _, _, _, _],
46 | // CHECK-NEXT: [_, _, 4, _, _, _, _, _],
47 | // CHECK-NEXT: [_, _, _, _, _, _, _, _],
48 | // CHECK-NEXT: [_, _, _, _, 5, _, _, _],
49 | // CHECK-NEXT: [_, _, _, _, _, _, _, _],
50 | // CHECK-NEXT: [_, _, _, _, _, _, 6, _],
51 | // CHECK-NEXT: [_, _, _, _, _, _, _, 8]
52 | // CHECK-NEXT: ]
53 | graphblas.print %answer_csc { strings = ["%answer_csc "] } : tensor<?x?xf64, #CSC64>
54 |
55 | return
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_dup.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | module {
24 | func @entry() {
25 | %c0 = arith.constant 0 : index
26 | %c1 = arith.constant 1 : index
27 |
28 | ///////////////
29 | // Test Matrix
30 | ///////////////
31 |
32 | %m = arith.constant sparse<[
33 | [0, 1], [0, 2],
34 | [1, 0], [1, 3], [1, 4],
35 | [3, 2]
36 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
37 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSR>
38 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSC>
39 |
40 | // CSR dup: make a duplicate, modify a value, print and check modified and original
41 | //
42 | // CHECK: pointers=(0, 2, 5, 5, 6)
43 | // CHECK-NEXT: indices=(1, 2, 0, 3, 4, 2)
44 | // CHECK-NEXT: values=(33.33, 2, 3, 4, 5, 6)
45 | // CHECK-NEXT: values=(1, 2, 3, 4, 5, 6)
46 | //
47 | %0 = graphblas.dup %m_csr : tensor<?x?xf64, #CSR>
48 | %1 = sparse_tensor.values %0 : tensor<?x?xf64, #CSR> to memref<?xf64>
49 | %2 = arith.constant 33.33 : f64
50 | memref.store %2, %1[%c0] : memref<?xf64>
51 | graphblas.print_tensor %0 { level=3 } : tensor<?x?xf64, #CSR>
52 | graphblas.print_tensor %m_csr { level=1 } : tensor<?x?xf64, #CSR>
53 |
54 | // CSC dup: make a duplicate, modify a value, print and check modified and original
55 | //
56 | // CHECK: pointers=(0, 1, 2, 4, 5, 6)
57 | // CHECK-NEXT: indices=(1, 0, 0, 3, 1, 1)
58 | // CHECK-NEXT: values=(3, 44.44, 2, 6, 4, 5)
59 | // CHECK-NEXT: values=(3, 1, 2, 6, 4, 5)
60 | //
61 | %10 = graphblas.dup %m_csc : tensor<?x?xf64, #CSC>
62 | %11 = sparse_tensor.values %10 : tensor<?x?xf64, #CSC> to memref<?xf64>
63 | %12 = arith.constant 44.44 : f64
64 | memref.store %12, %11[%c1] : memref<?xf64>
65 | graphblas.print_tensor %10 { level=3 } : tensor<?x?xf64, #CSC>
66 | graphblas.print_tensor %m_csc { level=1 } : tensor<?x?xf64, #CSC>
67 |
68 |
69 | ///////////////
70 | // Test Vector
71 | ///////////////
72 |
73 | %v = arith.constant sparse<[
74 | [1], [2], [4], [7]
75 | ], [1, 2, 3, 4]> : tensor<9xi32>
76 | %v_cv = sparse_tensor.convert %v : tensor<9xi32> to tensor<?xi32, #CV>
77 |
78 | // CV dup: make a duplicate, modify an index, print and check modified and original
79 | //
80 | // CHECK: indices=(1, 3, 4, 7)
81 | // CHECK-NEXT: values=(1, 2, 3, 4)
82 | // CHECK-NEXT: indices=(1, 2, 4, 7)
83 | // CHECK-NEXT: values=(1, 2, 3, 4)
84 | //
85 | %20 = graphblas.dup %v_cv : tensor<?xi32, #CV>
86 | %21 = sparse_tensor.indices %20, %c0 : tensor<?xi32, #CV> to memref<?xi64>
87 | %22 = arith.constant 3 : i64
88 | memref.store %22, %21[%c1] : memref<?xi64>
89 | graphblas.print_tensor %20 { level=2 } : tensor<?xi32, #CV>
90 | graphblas.print_tensor %v_cv { level=2 } : tensor<?xi32, #CV>
91 |
92 | return
93 | }
94 | }
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_from_coo.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CV64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "compressed" ],
12 | pointerBitWidth = 64,
13 | indexBitWidth = 64
14 | }>
15 |
16 | module {
17 | func @entry() {
18 | %c3 = arith.constant 3 : index
19 | %c4 = arith.constant 4 : index
20 |
21 | %indices = arith.constant dense<[
22 | [0, 1],
23 | [2, 3]
24 | ]> : tensor<2x2xindex>
25 | %vals = arith.constant dense<[11.1, 22.2]> : tensor<2xf64>
26 |
27 | %csr = graphblas.from_coo %indices, %vals [%c3, %c4] : tensor<2x2xindex>, tensor<2xf64> to tensor<?x?xf64, #CSR64>
28 | // CHECK: %csr [
29 | // CHECK-NEXT: [_, 11.1, _, _],
30 | // CHECK-NEXT: [_, _, _, _],
31 | // CHECK-NEXT: [_, _, _, 22.2]
32 | // CHECK-NEXT: ]
33 | graphblas.print %csr { strings=["%csr "] } : tensor<?x?xf64, #CSR64>
34 |
35 | return
36 | }
37 | }
38 |
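Worked check: the two COO entries land at (0, 1) = 11.1 and (2, 3) = 22.2 in the 3x4 result, leaving row 1 empty; in CSR form this would correspond to pointers=(0, 1, 1, 2), indices=(1, 3), values=(11.1, 22.2), in agreement with the dense view the CHECK lines expect.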
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_intersect_generic_mat.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | func @main() -> () {
18 | %dense_m1 = arith.constant dense<[
19 | [0, 1, 2, 0],
20 | [0, 0, 0, 3],
21 | [4, 0, 5, 0],
22 | [0, 6, 0, 7]
23 | ]> : tensor<4x4xi64>
24 | %m1_csr = sparse_tensor.convert %dense_m1 : tensor<4x4xi64> to tensor
25 | %m1_csc = sparse_tensor.convert %dense_m1 : tensor<4x4xi64> to tensor
26 |
27 | %dense_m2 = arith.constant dense<[
28 | [0, 0, 9, 0],
29 | [0, 0, 0, 8],
30 | [7, 0, 6, 0],
31 | [0, 0, 0, 5]
32 | ]> : tensor<4x4xi64>
33 | %m2_csr = sparse_tensor.convert %dense_m2 : tensor<4x4xi64> to tensor
34 | %m2_csc = sparse_tensor.convert %dense_m2 : tensor<4x4xi64> to tensor
35 |
36 | %csr_csr = graphblas.intersect %m1_csr, %m2_csr { intersect_operator = "times" } : (tensor, tensor) to tensor
37 | // CHECK: %csr_csr [
38 | // CHECK-NEXT: [_, _, 18, _],
39 | // CHECK-NEXT: [_, _, _, 24],
40 | // CHECK-NEXT: [28, _, 30, _],
41 | // CHECK-NEXT: [_, _, _, 35]
42 | // CHECK-NEXT: ]
43 | graphblas.print %csr_csr { strings = ["%csr_csr "] } : tensor
44 |
45 | %csc_csc = graphblas.intersect %m1_csc, %m2_csc { intersect_operator = "times" } : (tensor, tensor) to tensor
46 | // CHECK: %csc_csc [
47 | // CHECK-NEXT: [_, _, 18, _],
48 | // CHECK-NEXT: [_, _, _, 24],
49 | // CHECK-NEXT: [28, _, 30, _],
50 | // CHECK-NEXT: [_, _, _, 35]
51 | // CHECK-NEXT: ]
52 | graphblas.print %csc_csc { strings = ["%csc_csc "] } : tensor
53 |
54 | return
55 | }
56 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_intersect_generic_vec.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 |
3 | #CV64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "compressed" ],
5 | pointerBitWidth = 64,
6 | indexBitWidth = 64
7 | }>
8 |
9 |
10 | func @main() -> () {
11 | %dense_v1 = arith.constant dense<[0.0, 7.0, 4.0, 0.0, 5.0, 0.0, 6.0, 8.0]> : tensor<8xf64>
12 | %v1 = sparse_tensor.convert %dense_v1 : tensor<8xf64> to tensor<?xf64, #CV64>
13 |
14 | %dense_v2 = arith.constant dense<[0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 0.0, 3.0]> : tensor<8xf64>
15 | %v2 = sparse_tensor.convert %dense_v2 : tensor<8xf64> to tensor<?xf64, #CV64>
16 |
17 | %result = graphblas.intersect_generic %v1, %v2 : (tensor<?xf64, #CV64>, tensor<?xf64, #CV64>) to tensor<?xf64, #CV64> {
18 | ^bb0(%arg0: f64, %arg1: f64):
19 | %3 = arith.mulf %arg0, %arg1 : f64
20 | graphblas.yield mult %3 : f64
21 | }
22 | // CHECK: %result [_, 7, _, _, 10, _, _, 24]
23 | graphblas.print %result { strings = ["%result "] } : tensor<?xf64, #CV64>
24 |
25 | return
26 | }
27 |
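The block form above is the fully spelled-out equivalent of the fused op used in test_intersect_vec.mlir below, which produces the same CHECK output. A sketch of the shorthand, with the tensor types reconstructed as an assumption:

// Shorthand form; presumably lowered to the same element-wise intersection loop.
%result2 = graphblas.intersect %v1, %v2 { intersect_operator = "times" } : (tensor<?xf64, #CV64>, tensor<?xf64, #CV64>) to tensor<?xf64, #CV64>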
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_intersect_mat.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec main | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | func @main() -> () {
19 | %dense_m1 = arith.constant dense<[
20 | [0, 1, 2, 0],
21 | [0, 0, 0, 3],
22 | [4, 0, 5, 0],
23 | [0, 6, 0, 7]
24 | ]> : tensor<4x4xi64>
25 | %m1_csr = sparse_tensor.convert %dense_m1 : tensor<4x4xi64> to tensor
26 | %m1_csc = sparse_tensor.convert %dense_m1 : tensor<4x4xi64> to tensor
27 |
28 | %dense_m2 = arith.constant dense<[
29 | [0, 0, 9, 0],
30 | [0, 0, 0, 8],
31 | [7, 0, 6, 0],
32 | [0, 0, 0, 5]
33 | ]> : tensor<4x4xi64>
34 | %m2_csr = sparse_tensor.convert %dense_m2 : tensor<4x4xi64> to tensor
35 | %m2_csc = sparse_tensor.convert %dense_m2 : tensor<4x4xi64> to tensor
36 |
37 | %csr_csr = graphblas.intersect %m1_csr, %m2_csr { intersect_operator = "times" } : (tensor, tensor) to tensor
38 | // CHECK: %csr_csr [
39 | // CHECK-NEXT: [_, _, 18, _],
40 | // CHECK-NEXT: [_, _, _, 24],
41 | // CHECK-NEXT: [28, _, 30, _],
42 | // CHECK-NEXT: [_, _, _, 35]
43 | // CHECK-NEXT: ]
44 | graphblas.print %csr_csr { strings = ["%csr_csr "] } : tensor
45 |
46 | %csc_csc = graphblas.intersect %m1_csc, %m2_csc { intersect_operator = "times" } : (tensor, tensor) to tensor
47 | // CHECK: %csc_csc [
48 | // CHECK-NEXT: [_, _, 18, _],
49 | // CHECK-NEXT: [_, _, _, 24],
50 | // CHECK-NEXT: [28, _, 30, _],
51 | // CHECK-NEXT: [_, _, _, 35]
52 | // CHECK-NEXT: ]
53 | graphblas.print %csc_csc { strings = ["%csc_csc "] } : tensor
54 |
55 | return
56 | }
57 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_intersect_vec.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec main | FileCheck %s
3 |
4 | #CV64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "compressed" ],
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 |
11 | func @main() -> () {
12 | %dense_v1 = arith.constant dense<[0.0, 7.0, 4.0, 0.0, 5.0, 0.0, 6.0, 8.0]> : tensor<8xf64>
13 | %v1 = sparse_tensor.convert %dense_v1 : tensor<8xf64> to tensor<?xf64, #CV64>
14 |
15 | %dense_v2 = arith.constant dense<[0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 0.0, 3.0]> : tensor<8xf64>
16 | %v2 = sparse_tensor.convert %dense_v2 : tensor<8xf64> to tensor<?xf64, #CV64>
17 |
18 | %result = graphblas.intersect %v1, %v2 { intersect_operator = "times" } : (tensor<?xf64, #CV64>, tensor<?xf64, #CV64>) to tensor<?xf64, #CV64>
19 | // CHECK: %result [_, 7, _, _, 10, _, _, 24]
20 | graphblas.print %result { strings = ["%result "] } : tensor<?xf64, #CV64>
21 |
22 | return
23 | }
24 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_matrix_multiply_mxv.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec main | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV64 = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | func @main() -> () {
25 | %a_dense = arith.constant dense<[
26 | [0, 1, 2, 0],
27 | [0, 0, 0, 3]
28 | ]> : tensor<2x4xi64>
29 | %a_csr = sparse_tensor.convert %a_dense : tensor<2x4xi64> to tensor
30 | %a_csc = sparse_tensor.convert %a_dense : tensor<2x4xi64> to tensor
31 |
32 | %b_dense = arith.constant dense<[0, 7, 0, 8]> : tensor<4xi64>
33 | %b = sparse_tensor.convert %b_dense : tensor<4xi64> to tensor
34 |
35 | %mask_dense = arith.constant dense<[9, 0]> : tensor<2xi64>
36 | %mask = sparse_tensor.convert %mask_dense : tensor<2xi64> to tensor
37 |
38 | %answer_1 = graphblas.matrix_multiply %a_csr, %b { semiring = "plus_times" } : (tensor, tensor) to tensor
39 | // CHECK: answer_1 [7, 24]
40 | graphblas.print %answer_1 { strings = ["answer_1 "] } : tensor
41 |
42 | %answer_2 = graphblas.matrix_multiply %a_csr, %b { semiring = "min_plus" } : (tensor, tensor) to tensor
43 | // CHECK-NEXT: answer_2 [8, 11]
44 | graphblas.print %answer_2 { strings = ["answer_2 "] } : tensor
45 |
46 | %answer_4 = graphblas.matrix_multiply %a_csc, %b { semiring = "any_pair" } : (tensor, tensor) to tensor
47 | // CHECK-NEXT: answer_4 [1, 1]
48 | graphblas.print %answer_4 { strings = ["answer_4 "] } : tensor
49 |
50 | %answer_5 = graphblas.matrix_multiply %a_csr, %b, %mask { semiring = "plus_times" } : (tensor, tensor, tensor) to tensor
51 | // CHECK-NEXT: answer_5 [7, _]
52 | graphblas.print %answer_5 { strings = ["answer_5 "] } : tensor
53 |
54 | %answer_6 = graphblas.matrix_multiply %a_csr, %b, %mask { semiring = "min_plus" } : (tensor, tensor, tensor) to tensor
55 | // CHECK-NEXT: answer_6 [8, _]
56 | graphblas.print %answer_6 { strings = ["answer_6 "] } : tensor
57 |
58 | %answer_8 = graphblas.matrix_multiply %a_csc, %b, %mask { semiring = "any_pair" } : (tensor, tensor, tensor) to tensor
59 | // CHECK-NEXT: answer_8 [1, _]
60 | graphblas.print %answer_8 { strings = ["answer_8 "] } : tensor
61 |
62 | %answer_9 = graphblas.matrix_multiply %a_csr, %b, %mask { semiring = "plus_times" } : (tensor, tensor, tensor) to tensor
63 | // CHECK-NEXT: answer_9 [7, _]
64 | graphblas.print %answer_9 { strings = ["answer_9 "] } : tensor
65 |
66 | %answer_10 = graphblas.matrix_multiply %a_csr, %b, %mask { semiring = "min_plus" } : (tensor, tensor, tensor) to tensor
67 | // CHECK-NEXT: answer_10 [8, _]
68 | graphblas.print %answer_10 { strings = ["answer_10 "] } : tensor
69 |
70 | return
71 | }
72 |
73 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_matrix_multiply_mxv_extra.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV64 = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | // COM: linalg-lowering does not yet support:
24 | // COM: - including indices inside intersect block
25 | // COM: - mask_complement=true
26 |
27 | func @main() -> () {
28 | %a_dense = arith.constant dense<[
29 | [0, 1, 2, 0],
30 | [0, 0, 0, 3]
31 | ]> : tensor<2x4xi64>
32 | %a_csr = sparse_tensor.convert %a_dense : tensor<2x4xi64> to tensor
33 | %a_csc = sparse_tensor.convert %a_dense : tensor<2x4xi64> to tensor
34 |
35 | %b_dense = arith.constant dense<[0, 7, 0, 8]> : tensor<4xi64>
36 | %b = sparse_tensor.convert %b_dense : tensor<4xi64> to tensor
37 |
38 | %mask_dense = arith.constant dense<[9, 0]> : tensor<2xi64>
39 | %mask = sparse_tensor.convert %mask_dense : tensor<2xi64> to tensor
40 |
41 | %answer_3 = graphblas.matrix_multiply %a_csc, %b { semiring = "any_overlapi" } : (tensor, tensor) to tensor
42 | // CHECK: answer_3 [1, 3]
43 | graphblas.print %answer_3 { strings = ["answer_3 "] } : tensor
44 |
45 | %answer_7 = graphblas.matrix_multiply %a_csc, %b, %mask { semiring = "any_overlapi" } : (tensor, tensor, tensor) to tensor
46 | // CHECK-NEXT: answer_7 [1, _]
47 | graphblas.print %answer_7 { strings = ["answer_7 "] } : tensor
48 |
49 | %answer_11 = graphblas.matrix_multiply %a_csc, %b, %mask { semiring = "any_overlapi" } : (tensor, tensor, tensor) to tensor
50 | // CHECK-NEXT: answer_11 [1, _]
51 | graphblas.print %answer_11 { strings = ["answer_11 "] } : tensor
52 |
53 | %answer_12 = graphblas.matrix_multiply %a_csc, %b, %mask { semiring = "any_pair", mask_complement = true } : (tensor, tensor, tensor) to tensor
54 | // CHECK-NEXT: answer_12 [_, 1]
55 | graphblas.print %answer_12 { strings = ["answer_12 "] } : tensor
56 |
57 | return
58 | }
59 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_matrix_multiply_vxm.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec main | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV64 = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | func @main() -> () {
25 | %mat_dense = arith.constant dense<[
26 | [0, 1, 2, 0],
27 | [0, 0, 0, 3]
28 | ]> : tensor<2x4xi64>
29 | %mat_csr = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
30 | %mat_csc = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
31 |
32 | %vec_dense = arith.constant dense<[6, 8]> : tensor<2xi64>
33 | %vec = sparse_tensor.convert %vec_dense : tensor<2xi64> to tensor
34 |
35 | %mask_dense = arith.constant dense<[9, 0, 0, 2]> : tensor<4xi64>
36 | %mask = sparse_tensor.convert %mask_dense : tensor<4xi64> to tensor
37 |
38 | %answer_1 = graphblas.matrix_multiply %vec, %mat_csr { semiring = "plus_times" } : (tensor, tensor) to tensor
39 | // CHECK: answer_1 [_, 6, 12, 24]
40 | graphblas.print %answer_1 { strings = ["answer_1 "] } : tensor
41 |
42 | %answer_2 = graphblas.matrix_multiply %vec, %mat_csr { semiring = "min_plus" } : (tensor, tensor) to tensor
43 | // CHECK-NEXT: answer_2 [_, 7, 8, 11]
44 | graphblas.print %answer_2 { strings = ["answer_2 "] } : tensor
45 |
46 | %answer_4 = graphblas.matrix_multiply %vec, %mat_csc { semiring = "any_pair" } : (tensor, tensor) to tensor
47 | // CHECK-NEXT: answer_4 [_, 1, 1, 1]
48 | graphblas.print %answer_4 { strings = ["answer_4 "] } : tensor
49 |
50 | %answer_5 = graphblas.matrix_multiply %vec, %mat_csr, %mask { semiring = "plus_times" } : (tensor, tensor, tensor) to tensor
51 | // CHECK-NEXT: answer_5 [_, _, _, 24]
52 | graphblas.print %answer_5 { strings = ["answer_5 "] } : tensor
53 |
54 | %answer_6 = graphblas.matrix_multiply %vec, %mat_csr, %mask { semiring = "min_plus" } : (tensor, tensor, tensor) to tensor
55 | // CHECK-NEXT: answer_6 [_, _, _, 11]
56 | graphblas.print %answer_6 { strings = ["answer_6 "] } : tensor
57 |
58 | %answer_8 = graphblas.matrix_multiply %vec, %mat_csc, %mask { semiring = "any_pair" } : (tensor, tensor, tensor) to tensor
59 | // CHECK-NEXT: answer_8 [_, _, _, 1]
60 | graphblas.print %answer_8 { strings = ["answer_8 "] } : tensor
61 |
62 | %answer_9 = graphblas.matrix_multiply %vec, %mat_csr, %mask { semiring = "plus_times" } : (tensor, tensor, tensor) to tensor
63 | // CHECK-NEXT: answer_9 [_, _, _, 24]
64 | graphblas.print %answer_9 { strings = ["answer_9 "] } : tensor
65 |
66 | %answer_10 = graphblas.matrix_multiply %vec, %mat_csr, %mask { semiring = "min_plus" } : (tensor, tensor, tensor) to tensor
67 | // CHECK-NEXT: answer_10 [_, _, _, 11]
68 | graphblas.print %answer_10 { strings = ["answer_10 "] } : tensor
69 |
70 | return
71 | }
72 |
73 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_matrix_multiply_vxm_extra.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV64 = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | // COM: linalg-lowering does not yet support:
24 | // COM: - including indices inside intersect block
25 | // COM: - mask_complement=true
26 |
27 | func @main() -> () {
28 | %mat_dense = arith.constant dense<[
29 | [0, 1, 2, 0],
30 | [0, 0, 0, 3]
31 | ]> : tensor<2x4xi64>
32 | %mat_csr = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
33 | %mat_csc = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
34 |
35 | %vec_dense = arith.constant dense<[6, 8]> : tensor<2xi64>
36 | %vec = sparse_tensor.convert %vec_dense : tensor<2xi64> to tensor
37 |
38 | %mask_dense = arith.constant dense<[9, 0, 0, 2]> : tensor<4xi64>
39 | %mask = sparse_tensor.convert %mask_dense : tensor<4xi64> to tensor
40 |
41 | %answer_3 = graphblas.matrix_multiply %vec, %mat_csc { semiring = "any_overlapi" } : (tensor<?xi64, #CV64>, tensor<?x?xi64, #CSC64>) to tensor<?xi64, #CV64>
42 | // CHECK: answer_3 [_, 0, 0, 1]
43 | graphblas.print %answer_3 { strings = ["answer_3 "] } : tensor
44 |
45 | %answer_7 = graphblas.matrix_multiply %vec, %mat_csc, %mask { semiring = "any_overlapi" } : (tensor, tensor, tensor) to tensor
46 | // CHECK-NEXT: answer_7 [_, _, _, 1]
47 | graphblas.print %answer_7 { strings = ["answer_7 "] } : tensor
48 |
49 | %answer_11 = graphblas.matrix_multiply %vec, %mat_csc, %mask { semiring = "any_overlapi" } : (tensor, tensor, tensor) to tensor
50 | // CHECK-NEXT: answer_11 [_, _, _, 1]
51 | graphblas.print %answer_11 { strings = ["answer_11 "] } : tensor
52 |
53 | %answer_12 = graphblas.matrix_multiply %vec, %mat_csc, %mask { semiring = "any_pair", mask_complement = true } : (tensor, tensor, tensor) to tensor
54 | // CHECK-NEXT: answer_12 [_, 1, 1, _]
55 | graphblas.print %answer_12 { strings = ["answer_12 "] } : tensor
56 |
57 | return
58 | }
59 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_matrix_multiply_vxv.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec main | FileCheck %s
3 |
4 | #CV64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "compressed" ],
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | func @main() -> () {
11 | %a_dense = arith.constant dense<[4, 0, 2, 3]> : tensor<4xi64>
12 | %a = sparse_tensor.convert %a_dense : tensor<4xi64> to tensor<?xi64, #CV64>
13 |
14 | %b_dense = arith.constant dense<[0, 7, 0, 8]> : tensor<4xi64>
15 | %b = sparse_tensor.convert %b_dense : tensor<4xi64> to tensor<?xi64, #CV64>
16 |
17 | %answer = graphblas.matrix_multiply %a, %b { semiring = "plus_times" } : (tensor<?xi64, #CV64>, tensor<?xi64, #CV64>) to i64
18 | // CHECK: answer 24
19 | graphblas.print %answer { strings = ["answer "] } : i64
20 |
21 | return
22 | }
23 |
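Worked check: %a stores positions (0, 2, 3) and %b stores (1, 3), so the vectors overlap only at index 3 and the plus_times dot product reduces to 3 * 8 = 24, matching the CHECK.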
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_reduce_to_scalar.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec main | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV64 = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | func @main() -> () {
25 | %mat_dense = arith.constant dense<[
26 | [0, 1, 2, 0],
27 | [0, 0, 0, 3]
28 | ]> : tensor<2x4xi64>
29 | %mat_csr = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
30 | %mat_csc = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
31 |
32 | %dense_vec = arith.constant dense<[0, 7, 4, 0, 8, 0, 6, 5]> : tensor<8xi64>
33 | %vec = sparse_tensor.convert %dense_vec : tensor<8xi64> to tensor
34 |
35 | %answer_1 = graphblas.reduce_to_scalar %mat_csr { aggregator = "plus" } : tensor to i64
36 | // CHECK: answer_1 6
37 | graphblas.print %answer_1 { strings = ["answer_1 "] } : i64
38 |
39 | %answer_2 = graphblas.reduce_to_scalar %mat_csc { aggregator = "max" } : tensor to i64
40 | // CHECK: answer_2 3
41 | graphblas.print %answer_2 { strings = ["answer_2 "] } : i64
42 |
43 | return
44 | }
45 |
46 | // COM: TODO write tests for all tensor element types
47 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_reduce_to_scalar_extra.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV64 = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | // COM: linalg-lowering does not yet support:
24 | // COM: - including indices inside reduce block
25 |
26 | func @main() -> () {
27 | %mat_dense = arith.constant dense<[
28 | [0, 1, 2, 0],
29 | [0, 0, 0, 3]
30 | ]> : tensor<2x4xi64>
31 | %mat_csr = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
32 | %mat_csc = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
33 |
34 | %dense_vec = arith.constant dense<[0, 7, 4, 0, 8, 0, 6, 5]> : tensor<8xi64>
35 | %vec = sparse_tensor.convert %dense_vec : tensor<8xi64> to tensor
36 |
37 | %answer_3 = graphblas.reduce_to_scalar %vec { aggregator = "argmax" } : tensor to i64
38 | // CHECK: answer_3 4
39 | graphblas.print %answer_3 { strings = ["answer_3 "] } : i64
40 |
41 | return
42 | }
43 |
44 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_reduce_to_scalar_generic.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec main | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV64 = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | func @main() -> () {
25 | %c0_i64 = arith.constant 0 : i64
26 | %c_min_i64 = arith.constant -9223372036854775808 : i64
27 |
28 | %mat_dense = arith.constant dense<[
29 | [0, 1, 2, 0],
30 | [0, 0, 0, 3]
31 | ]> : tensor<2x4xi64>
32 | %mat_csr = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
33 | %mat_csc = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
34 |
35 | %dense_vec = arith.constant dense<[0, 7, 4, 0, 8, 0, 6, 5]> : tensor<8xi64>
36 | %vec = sparse_tensor.convert %dense_vec : tensor<8xi64> to tensor
37 |
38 | %answer_1 = graphblas.reduce_to_scalar_generic %mat_csr : tensor to i64 {
39 | graphblas.yield agg_identity %c0_i64 : i64
40 | }, {
41 | ^bb0(%arg0: i64, %arg1: i64):
42 | %13 = arith.addi %arg0, %arg1 : i64
43 | graphblas.yield agg %13 : i64
44 | }
45 | // CHECK: answer_1 6
46 | graphblas.print %answer_1 { strings = ["answer_1 "] } : i64
47 |
48 | %answer_2 = graphblas.reduce_to_scalar_generic %mat_csc : tensor to i64 {
49 | graphblas.yield agg_identity %c_min_i64 : i64
50 | }, {
51 | ^bb0(%arg0: i64, %arg1: i64):
52 | %13 = arith.cmpi sgt, %arg0, %arg1 : i64
53 | %14 = arith.select %13, %arg0, %arg1 : i64
54 | graphblas.yield agg %14 : i64
55 | }
56 | // CHECK: answer_2 3
57 | graphblas.print %answer_2 { strings = ["answer_2 "] } : i64
58 |
59 | %answer_3 = graphblas.reduce_to_scalar_generic %vec : tensor to i64 {
60 | graphblas.yield agg_identity %c0_i64 : i64
61 | }, {
62 | ^bb0(%a : i64, %b : i64):
63 | %result = arith.addi %a, %b : i64
64 | graphblas.yield agg %result : i64
65 | }
66 | // CHECK: answer_3 30
67 | graphblas.print %answer_3 { strings = ["answer_3 "] } : i64
68 |
69 | return
70 | }
71 |
72 | // COM: TODO write tests for all tensor element types
73 |
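The identity block is what makes each aggregator correct: "plus" seeds with 0, while "max" seeds with the smallest i64 (-9223372036854775808) so that any stored value wins the comparison. By the same pattern, a "min" reduction would presumably seed with the largest i64; a sketch mirroring the blocks above (this exact variant is an assumption, not a test in this suite, and the tensor type is reconstructed):

%c_max_i64 = arith.constant 9223372036854775807 : i64
%answer_min = graphblas.reduce_to_scalar_generic %mat_csr : tensor<?x?xi64, #CSR64> to i64 {
  // Identity for "min": larger than any stored value.
  graphblas.yield agg_identity %c_max_i64 : i64
}, {
^bb0(%arg0: i64, %arg1: i64):
  // Keep the smaller of the running value and the next element.
  %cmp = arith.cmpi slt, %arg0, %arg1 : i64
  %sel = arith.select %cmp, %arg0, %arg1 : i64
  graphblas.yield agg %sel : i64
}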
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_reduce_to_vector.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec main | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV64 = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | func @main() -> () {
25 | %ci0 = arith.constant 0 : i64
26 |
27 | %mat_dense = arith.constant dense<[
28 | [0, 1, 2, 0],
29 | [0, 0, 3, 4]
30 | ]> : tensor<2x4xi64>
31 | %mat_csr = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
32 | %mat_csc = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
33 |
34 | %mask_2 = arith.constant sparse<[
35 | [1]
36 | ], [111]> : tensor<2xi64>
37 | %mask_2_cv = sparse_tensor.convert %mask_2 : tensor<2xi64> to tensor
38 |
39 | %mask_4 = arith.constant sparse<[
40 | [0], [2]
41 | ], [0, 200]> : tensor<4xi64>
42 | %mask_4_cv = sparse_tensor.convert %mask_4 : tensor<4xi64> to tensor
43 |
44 | %answer_1 = graphblas.reduce_to_vector %mat_csr { aggregator = "plus", axis = 0 } : tensor to tensor
45 | // CHECK: answer_1 [_, 1, 5, 4]
46 | graphblas.print %answer_1 { strings = ["answer_1 "] } : tensor
47 |
48 | %answer_3 = graphblas.reduce_to_vector %mat_csr { aggregator = "plus", axis = 1 } : tensor to tensor
49 | // CHECK: answer_3 [3, 7]
50 | graphblas.print %answer_3 { strings = ["answer_3 "] } : tensor
51 |
52 | %answer_4 = graphblas.reduce_to_vector %mat_csc { aggregator = "max", axis = 1 } : tensor to tensor
53 | // CHECK: answer_4 [2, 4]
54 | graphblas.print %answer_4 { strings = ["answer_4 "] } : tensor
55 |
56 | return
57 | }
58 |
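Worked check: for [[0, 1, 2, 0], [0, 0, 3, 4]], axis = 0 sums each column ((_, 1, 2+3, 4) = (_, 1, 5, 4)) and axis = 1 sums each row ((1+2, 3+4) = (3, 7)). Column 0 stays empty rather than becoming an explicit 0 because the reduction only visits stored values.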
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_reduce_to_vector_generic.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV64 = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | // COM: linalg-lower has a bug which affects reduce_to_vector with lex_insert order
24 | // COM: it should work like CSR@CSC, but it doesn't
25 |
26 | func @main() -> () {
27 | %ci0 = arith.constant 0 : i64
28 |
29 | %mat_dense = arith.constant dense<[
30 | [0, 1, 2, 0],
31 | [0, 0, 3, 4]
32 | ]> : tensor<2x4xi64>
33 | %mat_csr = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
34 | %mat_csc = sparse_tensor.convert %mat_dense : tensor<2x4xi64> to tensor
35 |
36 | %mask_2 = arith.constant sparse<[
37 | [1]
38 | ], [111]> : tensor<2xi64>
39 | %mask_2_cv = sparse_tensor.convert %mask_2 : tensor<2xi64> to tensor
40 |
41 | %mask_4 = arith.constant sparse<[
42 | [0], [2]
43 | ], [0, 200]> : tensor<4xi64>
44 | %mask_4_cv = sparse_tensor.convert %mask_4 : tensor<4xi64> to tensor
45 |
46 | %answer_1 = graphblas.reduce_to_vector_generic %mat_csr, %mask_2_cv { axis = 1, mask_complement = true } : tensor, tensor to tensor {
47 | graphblas.yield agg_identity %ci0 : i64
48 | }, {
49 | ^bb0(%a : i64, %b : i64):
50 | %result = arith.addi %a, %b : i64
51 | graphblas.yield agg %result : i64
52 | }
53 | // CHECK: answer_1 [3, _]
54 | graphblas.print %answer_1 { strings = ["answer_1 "] } : tensor
55 |
56 | %answer_2 = graphblas.reduce_to_vector_generic %mat_csc, %mask_4_cv { axis = 0 } : tensor, tensor to tensor {
57 | graphblas.yield agg_identity %ci0 : i64
58 | }, {
59 | ^bb0(%a : i64, %b : i64):
60 | %result = arith.addi %a, %b : i64
61 | graphblas.yield agg %result : i64
62 | }
63 | // CHECK-NEXT: answer_2 [_, _, 5, _]
64 | graphblas.print %answer_2 { strings = ["answer_2 "] } : tensor
65 |
66 | %answer_20 = graphblas.reduce_to_vector %mat_csc { aggregator = "max", axis = 0 } : tensor to tensor
67 | // CHECK-NEXT: answer_20 [_, 1, 3, 4]
68 | graphblas.print %answer_20 { strings = ["answer_20 "] } : tensor
69 |
70 | return
71 | }
72 |
73 | // COM: TODO write tests for all tensor element types
74 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_select_generic.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(d0, d1) -> (d0, d1)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(d0, d1) -> (d1, d0)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV64 = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | module {
24 | func @entry() {
25 | ///////////////
26 | // Test Matrix
27 | ///////////////
28 |
29 | %m = arith.constant sparse<[
30 | [0, 1], [0, 2],
31 | [1, 0], [1, 3], [1, 4],
32 | [3, 2]
33 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
34 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor
35 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor
36 |
37 | // CSR select upper triangle
38 | //
39 | // CHECK: shape=(4, 5)
40 | // CHECK-NEXT: pointers=(0, 2, 4, 4, 4)
41 | // CHECK-NEXT: indices=(1, 2, 3, 4)
42 | // CHECK-NEXT: values=(1, 2, 4, 5)
43 | //
44 | %2 = graphblas.select_generic %m_csr : tensor to tensor {
45 | ^bb0(%arg0: f64, %arg1: index, %arg2: index):
46 | %ans = arith.cmpi ugt, %arg2, %arg1 : index
47 | graphblas.yield select_out %ans : i1
48 | }
49 | graphblas.print_tensor %2 {level = 4 : i64} : tensor
50 |
51 | // CSC select lt thunk
52 | //
53 | // CHECK: shape=(4, 5)
54 | // CHECK-NEXT: pointers=(0, 1, 2, 3, 3, 3)
55 | // CHECK-NEXT: indices=(1, 0, 0)
56 | // CHECK-NEXT: values=(3, 1, 2)
57 | //
58 | %c3_5_f64 = arith.constant 3.500000e+00 : f64
59 | %3 = graphblas.select_generic %m_csc : tensor to tensor {
60 | ^bb0(%arg0: f64):
61 | %ans = arith.cmpf olt, %arg0, %c3_5_f64 : f64
62 | graphblas.yield select_out %ans : i1
63 | }
64 | graphblas.print_tensor %3 {level = 4 : i64} : tensor
65 |
66 | ///////////////
67 | // Test Vector
68 | ///////////////
69 |
70 | %v = arith.constant sparse<[
71 | [1], [2], [4], [7]
72 | ], [1, 2, 3, 4]> : tensor<9xi32>
73 | %v_cv = sparse_tensor.convert %v : tensor<9xi32> to tensor
74 |
75 | // CV select eq thunk with empty result
76 | //
77 | // CHECK: shape=(9)
78 | // CHECK-NEXT: pointers=(0, 0)
79 | // CHECK-NEXT: indices=()
80 | // CHECK-NEXT: values=()
81 | //
82 | %c6_i32 = arith.constant 6 : i32
83 | %21 = graphblas.select_generic %v_cv : tensor to tensor {
84 | ^bb0(%arg0: i32):
85 | %ans = arith.cmpi eq, %arg0, %c6_i32 : i32
86 | graphblas.yield select_out %ans : i1
87 | }
88 | graphblas.print_tensor %21 { level=4 } : tensor
89 |
90 | return
91 | }
92 | }
93 |
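The first select_generic block above (keep an entry when its column index %arg2 is strictly greater than its row index %arg1) is the spelled-out form of the built-in selector = "triu" exercised in test_select_index.mlir below; both tests expect identical pointers, indices, and values.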
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_select_index.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec entry | FileCheck %s
3 |
4 | #CSR = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | module {
25 | func @entry() {
26 | ///////////////
27 | // Test Matrix
28 | ///////////////
29 |
30 | %m = arith.constant sparse<[
31 | [0, 1], [0, 2],
32 | [1, 0], [1, 3], [1, 4],
33 | [3, 2]
34 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
35 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor
36 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor
37 |
38 | // CSR select upper triangle
39 | //
40 | // CHECK: shape=(4, 5)
41 | // CHECK-NEXT: pointers=(0, 2, 4, 4, 4)
42 | // CHECK-NEXT: indices=(1, 2, 3, 4)
43 | // CHECK-NEXT: values=(1, 2, 4, 5)
44 | //
45 | %0 = graphblas.select %m_csr { selector="triu" } : tensor to tensor
46 | graphblas.print_tensor %0 { level=4 } : tensor
47 |
48 | return
49 | }
50 | }
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_select_mask.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec entry | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV64 = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | module {
25 | func @entry() {
26 |
27 | ///////////////
28 | // Test Matrix
29 | ///////////////
30 |
31 | %m = arith.constant sparse<[
32 | [0, 1], [0, 2],
33 | [1, 0], [1, 3], [1, 4],
34 | [3, 2]
35 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
36 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor
37 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor
38 |
39 | %mask = arith.constant sparse<[
40 | [0, 1], [0, 3], [0, 4],
41 | [1, 3],
42 | [2, 2],
43 | [3, 2]
44 | ], [100., 200., 300., 400., 500., 600.]> : tensor<4x5xf64>
45 | %mask_csr = sparse_tensor.convert %mask : tensor<4x5xf64> to tensor
46 |
47 | // CSR select mask
48 | //
49 | // CHECK: shape=(4, 5)
50 | // CHECK: pointers=(0, 1, 2, 2, 3)
51 | // CHECK-NEXT: indices=(1, 3, 2)
52 | // CHECK-NEXT: values=(1, 4, 6)
53 | //
54 | %0 = graphblas.select_mask %m_csr, %mask_csr : tensor, tensor to tensor
55 | graphblas.print_tensor %0 { level=4 } : tensor
56 |
57 | ///////////////
58 | // Test Vector
59 | ///////////////
60 |
61 | %v = arith.constant sparse<[
62 | [1], [2], [4], [7]
63 | ], [1., 2., 3., 4.]> : tensor<9xf64>
64 | %v_cv = sparse_tensor.convert %v : tensor<9xf64> to tensor
65 |
66 | %mask3 = arith.constant sparse<[
67 | [2], [3], [4]
68 | ], [200., 300., 400.]> : tensor<9xf64>
69 | %mask_cv = sparse_tensor.convert %mask3 : tensor<9xf64> to tensor
70 |
71 | // Vector select mask
72 | //
73 | // CHECK: shape=(9)
74 | // CHECK: pointers=(0, 2)
75 | // CHECK-NEXT: indices=(2, 4)
76 | // CHECK-NEXT: values=(2, 3)
77 | //
78 | %20 = graphblas.select_mask %v_cv, %mask_cv : tensor, tensor to tensor
79 | graphblas.print_tensor %20 { level=4 } : tensor
80 |
81 | return
82 | }
83 | }
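Worked check: select_mask keeps an entry of %m_csr only where the mask also stores an entry. The overlap of the two sparsity patterns is {(0, 1), (1, 3), (3, 2)}, giving values=(1, 4, 6) as expected; the mask's stored values (100., 200., ...) apparently play no role, only its pattern does, and test_select_mask_extra.mlir below even applies an i32 mask to an f64 matrix.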
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_select_mask_extra.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV64 = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | // COM: linalg-lower cannot yet handle mask_complement=true
24 | // COM: linalg-lower cannot handle intersection with different dtype
25 |
26 | module {
27 | func @entry() {
28 |
29 | ///////////////
30 | // Test Matrix
31 | ///////////////
32 |
33 | %m = arith.constant sparse<[
34 | [0, 1], [0, 2],
35 | [1, 0], [1, 3], [1, 4],
36 | [3, 2]
37 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
38 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor
39 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor
40 |
41 | %mask = arith.constant sparse<[
42 | [0, 1], [0, 3], [0, 4],
43 | [1, 3],
44 | [2, 2],
45 | [3, 2]
46 | ], [100., 200., 300., 400., 500., 600.]> : tensor<4x5xf64>
47 | %mask_csr = sparse_tensor.convert %mask : tensor<4x5xf64> to tensor
48 |
49 | // CSR select mask complement
50 | //
51 | // CHECK: shape=(4, 5)
52 | // CHECK: pointers=(0, 1, 3, 3, 3)
53 | // CHECK-NEXT: indices=(2, 0, 4)
54 | // CHECK-NEXT: values=(2, 3, 5)
55 | %1 = graphblas.select_mask %m_csr, %mask_csr {mask_complement=true} : tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSR64> to tensor<?x?xf64, #CSR64>
56 | graphblas.print_tensor %1 { level=4 } : tensor<?x?xf64, #CSR64>
57 |
58 | // CSC select mask different element type
59 | //
60 | // CHECK: rev=(1, 0)
61 | // CHECK: shape=(4, 5)
62 | // CHECK: pointers=(0, 0, 1, 2, 3, 3)
63 | // CHECK-NEXT: indices=(0, 3, 1)
64 | // CHECK-NEXT: values=(1, 6, 4)
65 | //
66 | %mask2 = arith.constant sparse<[
67 | [0, 1], [0, 3], [0, 4],
68 | [1, 3],
69 | [2, 2],
70 | [3, 2]
71 | ], [100, 200, 300, 400, 500, 600]> : tensor<4x5xi32>
72 | %mask_csc = sparse_tensor.convert %mask2 : tensor<4x5xi32> to tensor<?x?xi32, #CSC64>
73 | %10 = graphblas.select_mask %m_csc, %mask_csc : tensor<?x?xf64, #CSC64>, tensor<?x?xi32, #CSC64> to tensor<?x?xf64, #CSC64>
74 | graphblas.print_tensor %10 { level=5 } : tensor<?x?xf64, #CSC64>
75 |
76 | ///////////////
77 | // Test Vector
78 | ///////////////
79 |
80 | %v = arith.constant sparse<[
81 | [1], [2], [4], [7]
82 | ], [1., 2., 3., 4.]> : tensor<9xf64>
83 | %v_cv = sparse_tensor.convert %v : tensor<9xf64> to tensor<?xf64, #CV64>
84 |
85 | %mask3 = arith.constant sparse<[
86 | [2], [3], [4]
87 | ], [200., 300., 400.]> : tensor<9xf64>
88 | %mask_cv = sparse_tensor.convert %mask3 : tensor<9xf64> to tensor<?xf64, #CV64>
89 |
90 | // Vector select mask complement
91 | //
92 | // CHECK: shape=(9)
93 | // CHECK: pointers=(0, 2)
94 | // CHECK-NEXT: indices=(1, 7)
95 | // CHECK-NEXT: values=(1, 4)
96 | //
97 | %21 = graphblas.select_mask %v_cv, %mask_cv {mask_complement=true} : tensor<?xf64, #CV64>, tensor<?xf64, #CV64> to tensor<?xf64, #CV64>
98 | graphblas.print_tensor %21 { level=4 } : tensor<?xf64, #CV64>
99 |
100 | return
101 | }
102 | }
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_select_probability.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CV64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "compressed" ],
5 | pointerBitWidth = 64,
6 | indexBitWidth = 64
7 | }>
8 |
9 | module {
10 | func private @create_choose_uniform_context(i64) -> !llvm.ptr<i8>
11 | func private @destroy_choose_uniform_context(!llvm.ptr<i8>)
12 | func private @random_double(!llvm.ptr<i8>) -> f64
13 |
14 | func @entry() {
15 | %c7 = arith.constant 7 : index
16 | %cf10 = arith.constant 0.10 : f64
17 | %cf90 = arith.constant 0.90 : f64
18 | %seed = arith.constant 123456789 : i64
19 | %ctx = call @create_choose_uniform_context(%seed) : (i64) -> !llvm.ptr<i8>
20 |
21 | ///////////////
22 | // Test random select by assuming
23 | // Prob=10% will always select less than half the values
24 | // Prob=90% will always select more than half the values
25 | ///////////////
26 |
27 | %v = arith.constant dense<
28 | [ 1.0, 2.0, 0.0, -4.0, 5.0, 0.0, 7.0, 8.0, 9.0, 2.1, 2.2, 2.3, 0.0, 2.5 ]
29 | > : tensor<14xf64>
30 | %v_cv = sparse_tensor.convert %v : tensor<14xf64> to tensor<?xf64, #CV64>
31 |
32 | // P10
33 | //
34 | // CHECK: (10) size<=7? 1
35 | //
36 | %10 = graphblas.select %v_cv, %cf10, %ctx { selector = "probability" } : tensor<?xf64, #CV64>, f64, !llvm.ptr<i8> to tensor<?xf64, #CV64>
37 | %11 = graphblas.num_vals %10 : tensor<?xf64, #CV64>
38 | %12 = arith.cmpi "ule", %11, %c7 : index
39 | graphblas.print %12 { strings=["(10) size<=7? "] } : i1
40 |
41 | // P90
42 | //
43 | // CHECK: (20) size>=7? 1
44 | //
45 | %20 = graphblas.select %v_cv, %cf90, %ctx { selector = "probability" } : tensor<?xf64, #CV64>, f64, !llvm.ptr<i8> to tensor<?xf64, #CV64>
46 | %21 = graphblas.num_vals %20 : tensor<?xf64, #CV64>
47 | %22 = arith.cmpi "uge", %21, %c7 : index
48 | graphblas.print %22 { strings=["(20) size>=7? "] } : i1
49 |
50 | call @destroy_choose_uniform_context(%ctx) : (!llvm.ptr<i8>) -> ()
51 |
52 | return
53 | }
54 | }
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_select_value.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | module {
24 | func @entry() {
25 | ///////////////
26 | // Test Matrix
27 | ///////////////
28 |
29 | %m = arith.constant sparse<[
30 | [0, 1], [0, 2],
31 | [1, 0], [1, 3], [1, 4],
32 | [3, 2]
33 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
34 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSR>
35 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSC>
36 |
37 | // CSC select lt thunk
38 | //
39 | // CHECK: shape=(4, 5)
40 | // CHECK-NEXT: pointers=(0, 1, 2, 3, 3, 3)
41 | // CHECK-NEXT: indices=(1, 0, 0)
42 | // CHECK-NEXT: values=(3, 1, 2)
43 | //
44 | %10 = arith.constant 3.5 : f64
45 | %11 = graphblas.select %m_csc, %10 { selector="lt" } : tensor<?x?xf64, #CSC>, f64 to tensor<?x?xf64, #CSC>
46 | graphblas.print_tensor %11 { level=4 } : tensor<?x?xf64, #CSC>
47 |
48 |
49 | ///////////////
50 | // Test Vector
51 | ///////////////
52 |
53 | %v = arith.constant sparse<[
54 | [1], [2], [4], [7]
55 | ], [1, 2, 3, 4]> : tensor<9xi32>
56 | %v_cv = sparse_tensor.convert %v : tensor<9xi32> to tensor<?xi32, #CV>
57 |
58 | // CV select eq thunk with empty result
59 | //
60 | // CHECK: shape=(9)
61 | // CHECK-NEXT: pointers=(0, 0)
62 | // CHECK-NEXT: indices=()
63 | // CHECK-NEXT: values=()
64 | //
65 | %20 = arith.constant 6 : i32
66 | %21 = graphblas.select %v_cv, %20 { selector="eq" } : tensor<?xi32, #CV>, i32 to tensor<?xi32, #CV>
67 | graphblas.print_tensor %21 { level=4 } : tensor<?xi32, #CV>
68 |
69 | return
70 | }
71 | }
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_size_num.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | module {
24 | func @entry() {
25 | ///////////////
26 | // Test Matrix
27 | ///////////////
28 |
29 | %m = arith.constant sparse<[
30 | [0, 1], [0, 2],
31 | [1, 0], [1, 3], [1, 4],
32 | [3, 2]
33 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
34 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSR>
35 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSC>
36 |
37 | // CSR num rows, cols, vals
38 | //
39 | // CHECK: nrows=4 ncols=5 nvals=6
40 | //
41 | %0 = graphblas.num_rows %m_csr : tensor<?x?xf64, #CSR>
42 | %1 = graphblas.num_cols %m_csr : tensor<?x?xf64, #CSR>
43 | %2 = graphblas.num_vals %m_csr : tensor<?x?xf64, #CSR>
44 | graphblas.print %0, %1, %2 { strings=["nrows=", " ncols=", " nvals="] } : index, index, index
45 |
46 | // CSC num rows, cols, vals
47 | //
48 | // CHECK-NEXT: nrows=4 ncols=5 nvals=6
49 | //
50 | %10 = graphblas.num_rows %m_csc : tensor<?x?xf64, #CSC>
51 | %11 = graphblas.num_cols %m_csc : tensor<?x?xf64, #CSC>
52 | %12 = graphblas.num_vals %m_csc : tensor<?x?xf64, #CSC>
53 | graphblas.print %10, %11, %12 { strings=["nrows=", " ncols=", " nvals="] } : index, index, index
54 |
55 |
56 | ///////////////
57 | // Test Vector
58 | ///////////////
59 |
60 | %v = arith.constant sparse<[
61 | [1], [2], [4], [7]
62 | ], [1, 2, 3, 4]> : tensor<9xi32>
63 | %v_cv = sparse_tensor.convert %v : tensor<9xi32> to tensor<?xi32, #CV>
64 |
65 | // CV size, num vals
66 | //
67 | // CHECK: size=9 nvals=4
68 | //
69 | %20 = graphblas.size %v_cv : tensor<?xi32, #CV>
70 | %21 = graphblas.num_vals %v_cv : tensor<?xi32, #CV>
71 | graphblas.print %20, %21 { strings=["size=", " nvals="] } : index, index
72 |
73 | return
74 | }
75 | }
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_to_coo_vals.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CV64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "compressed" ],
12 | pointerBitWidth = 64,
13 | indexBitWidth = 64
14 | }>
15 |
16 | module {
17 | func @entry() {
18 | %mat = arith.constant dense<[
19 | [0, 0, 9, 0],
20 | [0, 0, 0, 8],
21 | [7, 0, 6, 0],
22 | [0, 0, 0, 5]
23 | ]> : tensor<4x4xi64>
24 | %csr = sparse_tensor.convert %mat : tensor<4x4xi64> to tensor<?x?xi64, #CSR64>
25 |
26 | %indices_dense, %vals_dense = graphblas.to_coo %csr : tensor<?x?xi64, #CSR64> to tensor<?x?xi64>, tensor<?xi64>
27 | %indices = sparse_tensor.convert %indices_dense : tensor<?x?xi64> to tensor<?x?xi64, #CSR64>
28 | %vals = sparse_tensor.convert %vals_dense : tensor<?xi64> to tensor<?xi64, #CV64>
29 | // CHECK: %vals [9, 8, 7, 6, 5]
30 | graphblas.print %vals { strings=["%vals "] } : tensor<?xi64, #CV64>
31 |
32 | return
33 | }
34 | }
35 |
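36 | // Note: for a CSR input, to_coo emits the stored entries in row-major
37 | // order, which is why the values above print as (9, 8, 7, 6, 5): row 0
38 | // holds 9, row 1 holds 8, row 2 holds 7 and 6, and row 3 holds 5.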
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_transpose.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec functional_transpose | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec functional_transpose | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | module {
19 | func @functional_transpose() -> () {
20 | %dense_mat = arith.constant dense<[
21 | [0, 1, 2, 0],
22 | [0, 0, 0, 3]
23 | ]> : tensor<2x4xi64>
24 | %mat = sparse_tensor.convert %dense_mat : tensor<2x4xi64> to tensor<?x?xi64, #CSR64>
25 | %mat_csc = sparse_tensor.convert %dense_mat : tensor<2x4xi64> to tensor<?x?xi64, #CSC64>
26 |
27 | // CHECK: pointers=(0, 2, 3)
28 | // CHECK-NEXT: indices=(1, 2, 3)
29 | // CHECK-NEXT: values=(1, 2, 3)
30 | %csr_to_csc_transpose = graphblas.transpose %mat : tensor<?x?xi64, #CSR64> to tensor<?x?xi64, #CSC64>
31 | graphblas.print_tensor %csr_to_csc_transpose { level=3 } : tensor<?x?xi64, #CSC64>
32 |
33 | // CHECK: pointers=(0, 0, 1, 2, 3)
34 | // CHECK-NEXT: indices=(0, 0, 1)
35 | // CHECK-NEXT: values=(1, 2, 3)
36 | %csc_to_csr_transpose = graphblas.transpose %mat_csc : tensor<?x?xi64, #CSC64> to tensor<?x?xi64, #CSR64>
37 | graphblas.print_tensor %csc_to_csr_transpose { level=3 } : tensor<?x?xi64, #CSR64>
38 |
39 | // CHECK: pointers=(0, 0, 1, 2, 3)
40 | // CHECK-NEXT: indices=(0, 0, 1)
41 | // CHECK-NEXT: values=(1, 2, 3)
42 | %csr_to_csr_transpose = graphblas.transpose %mat : tensor<?x?xi64, #CSR64> to tensor<?x?xi64, #CSR64>
43 | graphblas.print_tensor %csr_to_csr_transpose { level=3 } : tensor<?x?xi64, #CSR64>
44 |
45 | // CHECK: pointers=(0, 2, 3)
46 | // CHECK-NEXT: indices=(1, 2, 3)
47 | // CHECK-NEXT: values=(1, 2, 3)
48 | %csc_to_csc_transpose = graphblas.transpose %mat_csc : tensor<?x?xi64, #CSC64> to tensor<?x?xi64, #CSC64>
49 | graphblas.print_tensor %csc_to_csc_transpose { level=3 } : tensor<?x?xi64, #CSC64>
50 |
51 | return
52 | }
53 | }
54 |
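55 | // Note: the CSR->CSC and CSC->CSR transposes can reuse the input's stored
56 | // arrays unchanged (only the layout interpretation flips), while CSR->CSR
57 | // and CSC->CSC must actually rebuild the pointers and indices; the CHECK
58 | // lines above show both cases.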
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_uniform_complement.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 |
3 | #CSR64 = #sparse_tensor.encoding<{
4 | dimLevelType = [ "dense", "compressed" ],
5 | dimOrdering = affine_map<(i,j) -> (i,j)>,
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | #CSC64 = #sparse_tensor.encoding<{
11 | dimLevelType = [ "dense", "compressed" ],
12 | dimOrdering = affine_map<(i,j) -> (j,i)>,
13 | pointerBitWidth = 64,
14 | indexBitWidth = 64
15 | }>
16 |
17 | #CV64 = #sparse_tensor.encoding<{
18 | dimLevelType = [ "compressed" ],
19 | pointerBitWidth = 64,
20 | indexBitWidth = 64
21 | }>
22 |
23 | module {
24 | func @entry() {
25 |
26 | ///////////////
27 | // Test Matrix
28 | ///////////////
29 |
30 | %m = arith.constant sparse<[
31 | [0, 1], [0, 2],
32 | [1, 0], [1, 3], [1, 4],
33 | [3, 2]
34 | ], [1., 2., 3., 4., 5., 6.]> : tensor<4x5xf64>
35 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSR64>
36 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSC64>
37 |
38 | // CSR uniform complement
39 | //
40 | // CHECK: shape=(4, 5)
41 | // CHECK: pointers=(0, 3, 5, 10, 14)
42 | // CHECK-NEXT: indices=(0, 3, 4, 1, 2, 0, 1, 2, 3, 4, 0, 1, 3, 4)
43 | // CHECK-NEXT: values=(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)
44 | //
45 | %cf2 = arith.constant 2.0 : f64
46 | %0 = graphblas.uniform_complement %m_csr, %cf2 : tensor<?x?xf64, #CSR64>, f64 to tensor<?x?xf64, #CSR64>
47 | graphblas.print_tensor %0 { level=4 } : tensor<?x?xf64, #CSR64>
48 |
49 | // CSC uniform_complement different element type
50 | //
51 | // CHECK: rev=(1, 0)
52 | // CHECK: shape=(4, 5)
53 | // CHECK: pointers=(0, 3, 6, 8, 11, 14)
54 | // CHECK-NEXT: indices=(0, 2, 3, 1, 2, 3, 1, 2, 0, 2, 3, 0, 2, 3)
55 | // CHECK-NEXT: values=(9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9)
56 | //
57 | %ci9 = arith.constant 9 : i64
58 | %10 = graphblas.uniform_complement %m_csc, %ci9 : tensor<?x?xf64, #CSC64>, i64 to tensor<?x?xi64, #CSC64>
59 | graphblas.print_tensor %10 { level=5 } : tensor<?x?xi64, #CSC64>
60 |
61 | ///////////////
62 | // Test Vector
63 | ///////////////
64 |
65 | %v = arith.constant sparse<[
66 | [1], [2], [4], [7]
67 | ], [1., 2., 3., 4.]> : tensor<9xf64>
68 | %v_cv = sparse_tensor.convert %v : tensor<9xf64> to tensor<?xf64, #CV64>
69 |
70 | // Vector uniform_complement
71 | //
72 | // CHECK: shape=(9)
73 | // CHECK: pointers=(0, 5)
74 | // CHECK-NEXT: indices=(0, 3, 5, 6, 8)
75 | // CHECK-NEXT: values=(1, 1, 1, 1, 1)
76 | //
77 | %ci1 = arith.constant 1 : i32
78 | %20 = graphblas.uniform_complement %v_cv, %ci1 : tensor<?xf64, #CV64>, i32 to tensor<?xi32, #CV64>
79 | graphblas.print_tensor %20 { level=4 } : tensor<?xi32, #CV64>
80 |
81 | return
82 | }
83 | }
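84 |
85 | // Note: uniform_complement produces an entry at every position *missing*
86 | // from the input, all set to the supplied scalar; the result's element type
87 | // therefore follows the scalar (f64, i64, i32 above), not the input tensor.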
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_union.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec entry | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV64 = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | module {
25 | func @entry() {
26 | %c0 = arith.constant 0 : index
27 | %c1 = arith.constant 1 : index
28 | %ci0 = arith.constant 0 : i64
29 | %cf0 = arith.constant 0.0 : f64
30 |
31 | ///////////////
32 | // Test Matrix
33 | ///////////////
34 |
35 | %m = arith.constant dense<[
36 | [ 1.0, 0.0, 2.0, 0.0, 0.0],
37 | [ 0.0, 0.0, 0.0, 0.0, 0.0],
38 | [ 0.0, 3.0, 4.0, 0.0, 0.0],
39 | [ 0.0, 0.0, 0.0, 0.0, 0.0]
40 | ]> : tensor<4x5xf64>
41 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSR64>
42 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSC64>
43 |
44 | %m2 = arith.constant dense<[
45 | [ 3.2, 0.0, 0.0, 0.0, 0.0],
46 | [ 0.0, 0.0, 0.0, 0.0, 0.0],
47 | [ 0.0, 0.0, -4.0, 0.0, 12.0],
48 | [ 0.0, 1.0, 0.0, 0.0, 0.0]
49 | ]> : tensor<4x5xf64>
50 | %m2_csr = sparse_tensor.convert %m2 : tensor<4x5xf64> to tensor<?x?xf64, #CSR64>
51 | %m2_csc = sparse_tensor.convert %m2 : tensor<4x5xf64> to tensor<?x?xf64, #CSC64>
52 |
53 | // CSR union plus
54 | //
55 | // CHECK: pointers=(0, 2, 2, 5, 6)
56 | // CHECK-NEXT: indices=(0, 2, 1, 2, 4, 1)
57 | // CHECK-NEXT: values=(4.2, 2, 3, 0, 12, 1)
58 | //
59 | %0 = graphblas.union %m_csr, %m2_csr { union_operator = "plus" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSR64>) to tensor<?x?xf64, #CSR64>
60 | graphblas.print_tensor %0 { level=3 } : tensor<?x?xf64, #CSR64>
61 |
62 | // CSC union min
63 | //
64 | // CHECK: pointers=(0, 1, 3, 5, 5, 6)
65 | // CHECK-NEXT: indices=(0, 2, 3, 0, 2, 2)
66 | // CHECK-NEXT: values=(1, 3, 1, 2, -4, 12)
67 | //
68 | %10 = graphblas.union %m_csc, %m2_csc { union_operator = "min" } : (tensor<?x?xf64, #CSC64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSC64>
69 | graphblas.print_tensor %10 { level=3 } : tensor<?x?xf64, #CSC64>
70 |
71 | ///////////////
72 | // Test Vector
73 | ///////////////
74 |
75 | %v = arith.constant dense<
76 | [ 1.0, 2.0, 0.0, 0.0, -4.0, 0.0 ]
77 | > : tensor<6xf64>
78 | %v_cv = sparse_tensor.convert %v : tensor<6xf64> to tensor<?xf64, #CV64>
79 |
80 | %v2 = arith.constant dense<
81 | [ 0.0, 3.0, 0.0, 6.2, 4.0, 0.0 ]
82 | > : tensor<6xf64>
83 | %v2_cv = sparse_tensor.convert %v2 : tensor<6xf64> to tensor<?xf64, #CV64>
84 |
85 | // Union second
86 | //
87 | // CHECK: pointers=(0, 4)
88 | // CHECK-NEXT: indices=(0, 1, 3, 4)
89 | // CHECK-NEXT: values=(1, 3, 6.2, 4)
90 | //
91 | %20 = graphblas.union %v_cv, %v2_cv { union_operator = "second" } : (tensor<?xf64, #CV64>, tensor<?xf64, #CV64>) to tensor<?xf64, #CV64>
92 | graphblas.print_tensor %20 { level=3 } : tensor<?xf64, #CV64>
93 |
94 | return
95 | }
96 | }
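97 |
98 | // Note: union keeps entries that appear in only one operand as-is and
99 | // applies union_operator only where both operands have an entry, e.g.
100 | // 1.0 + 3.2 = 4.2 at (0, 0) and min(4.0, -4.0) = -4 at (2, 2) above.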
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_union_generic.mlir:
--------------------------------------------------------------------------------
1 | // RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
2 | // RUN: graphblas-opt %s | graphblas-linalg-exec entry | FileCheck %s
3 |
4 | #CSR64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "dense", "compressed" ],
6 | dimOrdering = affine_map<(i,j) -> (i,j)>,
7 | pointerBitWidth = 64,
8 | indexBitWidth = 64
9 | }>
10 |
11 | #CSC64 = #sparse_tensor.encoding<{
12 | dimLevelType = [ "dense", "compressed" ],
13 | dimOrdering = affine_map<(i,j) -> (j,i)>,
14 | pointerBitWidth = 64,
15 | indexBitWidth = 64
16 | }>
17 |
18 | #CV64 = #sparse_tensor.encoding<{
19 | dimLevelType = [ "compressed" ],
20 | pointerBitWidth = 64,
21 | indexBitWidth = 64
22 | }>
23 |
24 | module {
25 | func @entry() {
26 | %c0 = arith.constant 0 : index
27 | %c1 = arith.constant 1 : index
28 | %ci0 = arith.constant 0 : i64
29 | %cf0 = arith.constant 0.0 : f64
30 |
31 | ///////////////
32 | // Test Matrix
33 | ///////////////
34 |
35 | %m = arith.constant dense<[
36 | [ 1.0, 0.0, 2.0, 0.0, 0.0],
37 | [ 0.0, 0.0, 0.0, 0.0, 0.0],
38 | [ 0.0, 3.0, 4.0, 0.0, 0.0],
39 | [ 0.0, 0.0, 0.0, 0.0, 0.0]
40 | ]> : tensor<4x5xf64>
41 | %m_csr = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSR64>
42 | %m_csc = sparse_tensor.convert %m : tensor<4x5xf64> to tensor<?x?xf64, #CSC64>
43 |
44 | %m2 = arith.constant dense<[
45 | [ 3.2, 0.0, 0.0, 0.0, 0.0],
46 | [ 0.0, 0.0, 0.0, 0.0, 0.0],
47 | [ 0.0, 0.0, -4.0, 0.0, 12.0],
48 | [ 0.0, 1.0, 0.0, 0.0, 0.0]
49 | ]> : tensor<4x5xf64>
50 | %m2_csr = sparse_tensor.convert %m2 : tensor<4x5xf64> to tensor<?x?xf64, #CSR64>
51 | %m2_csc = sparse_tensor.convert %m2 : tensor<4x5xf64> to tensor<?x?xf64, #CSC64>
52 |
53 | // CSR union plus
54 | //
55 | // CHECK: pointers=(0, 2, 2, 5, 6)
56 | // CHECK-NEXT: indices=(0, 2, 1, 2, 4, 1)
57 | // CHECK-NEXT: values=(4.2, 2, 3, 0, 12, 1)
58 | //
59 | %0 = graphblas.union_generic %m_csr, %m2_csr : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSR64>) to tensor<?x?xf64, #CSR64> {
60 | ^bb0(%arg0: f64, %arg1: f64):
61 | %9 = arith.addf %arg0, %arg1 : f64
62 | graphblas.yield mult %9 : f64
63 | }
64 | graphblas.print_tensor %0 { level=3 } : tensor<?x?xf64, #CSR64>
65 |
66 | // CSC union min
67 | //
68 | // CHECK: pointers=(0, 1, 3, 5, 5, 6)
69 | // CHECK-NEXT: indices=(0, 2, 3, 0, 2, 2)
70 | // CHECK-NEXT: values=(1, 3, 1, 2, -4, 12)
71 | //
72 | %10 = graphblas.union_generic %m_csc, %m2_csc : (tensor<?x?xf64, #CSC64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSC64> {
73 | ^bb0(%arg0: f64, %arg1: f64):
74 | %9 = arith.cmpf olt, %arg0, %arg1 : f64
75 | %10 = arith.select %9, %arg0, %arg1 : f64
76 | graphblas.yield mult %10 : f64
77 | }
78 | graphblas.print_tensor %10 { level=3 } : tensor<?x?xf64, #CSC64>
79 |
80 | ///////////////
81 | // Test Vector
82 | ///////////////
83 |
84 | %v = arith.constant dense<
85 | [ 1.0, 2.0, 0.0, 0.0, -4.0, 0.0 ]
86 | > : tensor<6xf64>
87 | %v_cv = sparse_tensor.convert %v : tensor<6xf64> to tensor<?xf64, #CV64>
88 |
89 | %v2 = arith.constant dense<
90 | [ 0.0, 3.0, 0.0, 6.2, 4.0, 0.0 ]
91 | > : tensor<6xf64>
92 | %v2_cv = sparse_tensor.convert %v2 : tensor<6xf64> to tensor<?xf64, #CV64>
93 |
94 | // Union second
95 | //
96 | // CHECK: pointers=(0, 4)
97 | // CHECK-NEXT: indices=(0, 1, 3, 4)
98 | // CHECK-NEXT: values=(1, 3, 6.2, 4)
99 | //
100 | %20 = graphblas.union_generic %v_cv, %v2_cv : (tensor<?xf64, #CV64>, tensor<?xf64, #CV64>) to tensor<?xf64, #CV64> {
101 | ^bb0(%arg0: f64, %arg1: f64):
102 | graphblas.yield mult %arg1 : f64
103 | }
104 | graphblas.print_tensor %20 { level=3 } : tensor<?xf64, #CV64>
105 |
106 | return
107 | }
108 | }
109 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_vector_update_accumulate.mlir:
--------------------------------------------------------------------------------
1 |
2 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
3 |
4 | #CV64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "compressed" ],
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | func @main() {
11 |
12 | %input_dense = arith.constant dense<[9, 0, 0, 2]> : tensor<4xi64>
13 | %input = sparse_tensor.convert %input_dense : tensor<4xi64> to tensor<?xi64, #CV64>
14 |
15 | %output_dense = arith.constant dense<[0, 0, 5, 6]> : tensor<4xi64>
16 | %output = sparse_tensor.convert %output_dense : tensor<4xi64> to tensor<?xi64, #CV64>
17 |
18 | graphblas.print %input { strings = ["input "] } : tensor<?xi64, #CV64>
19 | // CHECK: input [9, _, _, 2]
20 | graphblas.print %output { strings = ["output "] } : tensor<?xi64, #CV64>
21 | // CHECK: output [_, _, 5, 6]
22 |
23 | graphblas.update %input -> %output { accumulate_operator = "plus" } : tensor<?xi64, #CV64> -> tensor<?xi64, #CV64>
24 |
25 | graphblas.print %input { strings = ["input "] } : tensor<?xi64, #CV64>
26 | // CHECK: input [9, _, _, 2]
27 | graphblas.print %output { strings = ["output "] } : tensor<?xi64, #CV64>
28 | // CHECK: output [9, _, 5, 8]
29 |
30 | return
31 | }
32 |
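33 | // Note: update with accumulate_operator = "plus" merges %input into %output
34 | // over the union of their patterns: 9 is inserted (only in %input), 5 is
35 | // kept (only in %output), and 2 + 6 = 8 where both are present.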
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/GraphBLAS/test_vector_update_generic.mlir:
--------------------------------------------------------------------------------
1 |
2 | // RUN: graphblas-opt %s | graphblas-exec main | FileCheck %s
3 |
4 | #CV64 = #sparse_tensor.encoding<{
5 | dimLevelType = [ "compressed" ],
6 | pointerBitWidth = 64,
7 | indexBitWidth = 64
8 | }>
9 |
10 | func @main() {
11 |
12 | %input_dense = arith.constant dense<[9, 0, 0, 2]> : tensor<4xi64>
13 | %input = sparse_tensor.convert %input_dense : tensor<4xi64> to tensor<?xi64, #CV64>
14 |
15 | %output_dense = arith.constant dense<[0, 0, 5, 6]> : tensor<4xi64>
16 | %output = sparse_tensor.convert %output_dense : tensor<4xi64> to tensor<?xi64, #CV64>
17 |
18 | graphblas.print %input { strings = ["input "] } : tensor<?xi64, #CV64>
19 | // CHECK: input [9, _, _, 2]
20 | graphblas.print %output { strings = ["output "] } : tensor<?xi64, #CV64>
21 | // CHECK: output [_, _, 5, 6]
22 |
23 | graphblas.update_generic %input -> %output {mask_complement = false, replace = false} : tensor<?xi64, #CV64> -> tensor<?xi64, #CV64> {
24 | ^bb0(%arg0: i64, %arg1: i64):
25 | %2 = arith.addi %arg0, %arg1 : i64
26 | graphblas.yield accumulate %2 : i64
27 | }
28 |
29 | graphblas.print %input { strings = ["input "] } : tensor<?xi64, #CV64>
30 | // CHECK: input [9, _, _, 2]
31 | graphblas.print %output { strings = ["output "] } : tensor<?xi64, #CV64>
32 | // CHECK: output [9, _, 5, 8]
33 |
34 | return
35 | }
36 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/lit.cfg.py:
--------------------------------------------------------------------------------
1 | # -*- Python -*-
2 |
3 | import glob
4 | import os
5 | import sys
6 | import platform
7 | import re
8 | import subprocess
9 | import tempfile
10 |
11 | import lit.formats
12 | import lit.util
13 |
14 | from lit.llvm import llvm_config
15 | from lit.llvm.subst import ToolSubst
16 | from lit.llvm.subst import FindTool
17 |
18 |
19 | def _build_graphblas_exec():
20 | from mlir_graphblas.engine import EXTERNAL_LIBS
21 | from mlir_graphblas.mlir_builder import GRAPHBLAS_PASSES
22 |
23 | ex = ["graphblas-opt"] + list(GRAPHBLAS_PASSES) + ["|", "mlir-cpu-runner"]
24 | for ext_lib in EXTERNAL_LIBS:
25 | ex.append(f"-shared-libs={ext_lib}")
26 | bin_dir = os.path.dirname(sys.executable)
27 | lib_dir = os.path.join(os.path.dirname(bin_dir), "lib")
28 | ex.append(f"-shared-libs={lib_dir}/libmlir_c_runner_utils{config.llvm_shlib_ext}")
29 | ex.append("-entry-point-result=void")
30 | # This comes last because the name of the function to run comes after `graphblas-exec`
31 | ex.append("-e")
32 | return " ".join(ex)
33 |
34 |
35 | def _build_graphblas_linalg_exec():
36 | from mlir_graphblas.engine import EXTERNAL_LIBS
37 | from mlir_graphblas.mlir_builder import GRAPHBLAS_LINALG_PASSES
38 |
39 | ex = ["graphblas-opt"] + list(GRAPHBLAS_LINALG_PASSES) + ["|", "mlir-cpu-runner"]
40 | for ext_lib in EXTERNAL_LIBS:
41 | ex.append(f"-shared-libs={ext_lib}")
42 | bin_dir = os.path.dirname(sys.executable)
43 | lib_dir = os.path.join(os.path.dirname(bin_dir), "lib")
44 | ex.append(f"-shared-libs={lib_dir}/libmlir_c_runner_utils{config.llvm_shlib_ext}")
45 | ex.append("-entry-point-result=void")
46 | ex.append("-e")
47 | return " ".join(ex)
48 |
49 |
50 | # Configuration file for the 'lit' test runner.
51 |
52 | # name: The name of this test suite.
53 | config.name = "GRAPHBLAS"
54 |
55 | config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
56 |
57 | # suffixes: A list of file extensions to treat as test files.
58 | config.suffixes = [".mlir"]
59 |
60 | # test_source_root: The root path where tests are located.
61 | config.test_source_root = os.path.dirname(__file__)
62 |
63 | # test_exec_root: The root path where tests should be run.
64 | config.test_exec_root = os.path.join(config.graphblas_obj_root, "test")
65 |
66 | config.substitutions.append(("%PATH%", config.environment["PATH"]))
67 | config.substitutions.append(("%shlibext", config.llvm_shlib_ext))
68 | config.substitutions.append(("graphblas-exec", _build_graphblas_exec()))
69 | config.substitutions.append(("graphblas-linalg-exec", _build_graphblas_linalg_exec()))
70 |
71 |
72 | _SCRIPT_DIR = os.path.dirname(__file__)
73 | _BUILD_DIR = os.path.join(_SCRIPT_DIR, "..", "..")
74 | _BUILD_DIR = os.path.abspath(_BUILD_DIR)
75 | _SPARSE_UTILS_SO_PATTERN = os.path.join(_BUILD_DIR, "SparseTensorUtils*.so")
76 | [_SPARSE_UTILS_SO] = glob.glob(_SPARSE_UTILS_SO_PATTERN)
77 | config.substitutions.append(
78 | (
79 | "%sparse_utils_so",
80 | _SPARSE_UTILS_SO,
81 | )
82 | )
83 |
84 | llvm_config.with_system_environment(["HOME", "INCLUDE", "LIB", "TMP", "TEMP"])
85 |
86 | llvm_config.use_default_substitutions()
87 |
88 | # excludes: A list of directories to exclude from the testsuite. The 'Inputs'
89 | # subdirectories contain auxiliary inputs for various tests in their parent
90 | # directories.
91 | config.excludes = ["Inputs", "Examples", "CMakeLists.txt", "README.txt", "LICENSE.txt"]
92 |
93 | # test_source_root: The root path where tests are located.
94 | config.test_source_root = os.path.dirname(__file__)
95 |
96 | # test_exec_root: The root path where tests should be run.
97 | config.test_exec_root = os.path.join(config.graphblas_obj_root, "test")
98 | config.graphblas_tools_dir = os.path.join(config.graphblas_obj_root, "bin")
99 |
100 | # Tweak the PATH to include the tools dir.
101 | llvm_config.with_environment("PATH", config.llvm_tools_dir, append_path=True)
102 |
103 | tool_dirs = [config.graphblas_tools_dir, config.llvm_tools_dir]
104 | tools = ["graphblas-opt"]
105 |
106 | llvm_config.add_tool_substitutions(tools, tool_dirs)
107 |
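108 | # For illustration: since the substitution built above itself begins with
109 | # "graphblas-opt <GRAPHBLAS_PASSES> | mlir-cpu-runner ...", a RUN line such as
110 | #   RUN: graphblas-opt %s | graphblas-exec entry | FileCheck %s
111 | # expands to roughly (shared-libs paths are environment-specific)
112 | #   graphblas-opt %s | graphblas-opt <passes> | mlir-cpu-runner \
113 | #     -shared-libs=... -entry-point-result=void -e entry | FileCheck %s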
--------------------------------------------------------------------------------
/mlir_graphblas/src/test/lit.site.cfg.py.in:
--------------------------------------------------------------------------------
1 | @LIT_SITE_CFG_IN_HEADER@
2 |
3 | import sys
4 |
5 | config.host_triple = "@LLVM_HOST_TRIPLE@"
6 | config.target_triple = "@TARGET_TRIPLE@"
7 | config.llvm_src_root = "@LLVM_SOURCE_DIR@"
8 | config.llvm_obj_root = "@LLVM_BINARY_DIR@"
9 | config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
10 | config.llvm_lib_dir = "@LLVM_LIBS_DIR@"
11 | config.llvm_shlib_dir = "@SHLIBDIR@"
12 | config.llvm_shlib_ext = "@SHLIBEXT@"
13 | config.llvm_exe_ext = "@EXEEXT@"
14 | config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@"
15 | config.python_executable = "@PYTHON_EXECUTABLE@"
16 | config.gold_executable = "@GOLD_EXECUTABLE@"
17 | config.ld64_executable = "@LD64_EXECUTABLE@"
18 | config.enable_shared = @ENABLE_SHARED@
19 | config.enable_assertions = @ENABLE_ASSERTIONS@
20 | config.targets_to_build = "@TARGETS_TO_BUILD@"
21 | config.native_target = "@LLVM_NATIVE_ARCH@"
22 | config.llvm_bindings = "@LLVM_BINDINGS@".split(' ')
23 | config.host_os = "@HOST_OS@"
24 | config.host_cc = "@HOST_CC@"
25 | config.host_cxx = "@HOST_CXX@"
26 | config.enable_libcxx = "@LLVM_ENABLE_LIBCXX@"
27 | # Note: ldflags can contain double-quoted paths, so must use single quotes here.
28 | config.host_ldflags = '@HOST_LDFLAGS@'
29 | config.llvm_use_sanitizer = "@LLVM_USE_SANITIZER@"
30 | config.llvm_host_triple = '@LLVM_HOST_TRIPLE@'
31 | config.host_arch = "@HOST_ARCH@"
32 | config.graphblas_src_root = "@CMAKE_SOURCE_DIR@"
33 | config.graphblas_obj_root = "@CMAKE_BINARY_DIR@"
34 |
35 | # Support substitution of the tools_dir with user parameters. This is
36 | # used when we can't determine the tool dir at configuration time.
37 | try:
38 | config.llvm_tools_dir = config.llvm_tools_dir % lit_config.params
39 | config.llvm_lib_dir = config.llvm_lib_dir % lit_config.params
40 | config.llvm_shlib_dir = config.llvm_shlib_dir % lit_config.params
41 | except KeyError:
42 | e = sys.exc_info()[1]
43 | key, = e.args
44 | lit_config.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key,key))
45 |
46 |
47 | import lit.llvm
48 | lit.llvm.initialize(lit_config, config)
49 |
50 | # Let the main config do the real work.
51 | lit_config.load_config(config, "@CMAKE_SOURCE_DIR@/test/lit.cfg.py")
52 |
--------------------------------------------------------------------------------
/mlir_graphblas/src/triangle_count.mlir:
--------------------------------------------------------------------------------
1 | #CSR64 = #sparse_tensor.encoding<{
2 | dimLevelType = [ "dense", "compressed" ],
3 | dimOrdering = affine_map<(i,j) -> (i,j)>,
4 | pointerBitWidth = 64,
5 | indexBitWidth = 64
6 | }>
7 |
8 | #CSC64 = #sparse_tensor.encoding<{
9 | dimLevelType = [ "dense", "compressed" ],
10 | dimOrdering = affine_map<(i,j) -> (j,i)>,
11 | pointerBitWidth = 64,
12 | indexBitWidth = 64
13 | }>
14 |
15 | module {
16 | func @triangle_count(%A: tensor<?x?xf64, #CSR64>) -> f64 {
17 | %U = graphblas.matrix_select %A { selectors = ["triu"] } : tensor<?x?xf64, #CSR64> to tensor<?x?xf64, #CSR64>
18 | %L = graphblas.matrix_select %A { selectors = ["tril"] } : tensor<?x?xf64, #CSR64> to tensor<?x?xf64, #CSR64>
19 | %U_csc = graphblas.convert_layout %U : tensor<?x?xf64, #CSR64> to tensor<?x?xf64, #CSC64>
20 | %C = graphblas.matrix_multiply %L, %U_csc, %L { semiring = "plus_pair" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>, tensor<?x?xf64, #CSR64>) to tensor<?x?xf64, #CSR64>
21 | %reduce_result = graphblas.matrix_reduce_to_scalar %C { aggregator = "sum" } : tensor<?x?xf64, #CSR64> to f64
22 | return %reduce_result : f64
23 | }
24 | }
25 |
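26 | // Sketch of the algorithm (the standard L * U masked formulation): with the
27 | // plus_pair semiring, C[i, j] counts the vertices k with both L[i, k] and
28 | // U[k, j] present; the L mask restricts those counts to actual edges, and
29 | // the final sum reduction adds them up into the triangle count.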
--------------------------------------------------------------------------------
/mlir_graphblas/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metagraph-dev/mlir-graphblas/8552a4b794c2da411b1b83fc3e380b50ece3493e/mlir_graphblas/tests/__init__.py
--------------------------------------------------------------------------------
/mlir_graphblas/tests/__main__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | HERE = os.path.dirname(__file__)
5 |
6 | if __name__ == "__main__":
7 | import pytest
8 |
9 | errcode = pytest.main([HERE] + sys.argv[1:])
10 | sys.exit(errcode)
11 |
--------------------------------------------------------------------------------
/mlir_graphblas/tests/data/application_classification.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/metagraph-dev/mlir-graphblas/8552a4b794c2da411b1b83fc3e380b50ece3493e/mlir_graphblas/tests/data/application_classification.npz
--------------------------------------------------------------------------------
/mlir_graphblas/tests/jit_engine_test_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | MLIR_TYPE_TO_NP_TYPE = {
4 | "i8": np.int8,
5 | "i16": np.int16,
6 | "i32": np.int32,
7 | "i64": np.int64,
8 | # 'f16': np.float16, # 16-bit floats don't seem to be supported in ctypes
9 | "f32": np.float32,
10 | "f64": np.float64,
11 | }
12 |
--------------------------------------------------------------------------------
/mlir_graphblas/tests/test_algo_utils.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | from mlir_graphblas import algo_utils, algorithms as mlalgo
4 | from mlir_graphblas.sparse_utils import MLIRSparseTensor
5 |
6 |
7 | def test_haversine_distance():
8 | # Sanity check
9 | # https://www.igismap.com/haversine-formula-calculate-geographic-distance-earth/
10 | # Nebraska
11 | # v1 = Vector.from_values([0], [41.507483])
12 | # w1 = Vector.from_values([0], [-99.436554])
13 | # Kansas
14 | # v2 = Vector.from_values([0], [38.504048])
15 | # w2 = Vector.from_values([0], [-98.315949])
16 |
17 | # Build a function to call the haversine_distance utility
18 | from mlir_graphblas.mlir_builder import MLIRFunctionBuilder
19 |
20 | irb = MLIRFunctionBuilder(
21 | "haversine_distance",
22 | input_types=[
23 | "tensor<?xf64, #CV64>",
24 | "tensor<?xf64, #CV64>",
25 | "tensor<?xf64, #CV64>",
26 | "tensor<?xf64, #CV64>",
27 | ],
28 | return_types=["tensor<?xf64, #CV64>"],
29 | aliases=mlalgo._build_common_aliases(),
30 | )
31 | v1, w1, v2, w2 = irb.inputs
32 | result = algo_utils.haversine_distance(irb, v1, w1, v2, w2)
33 | irb.return_vars(result)
34 | compiled_func = irb.compile()
35 |
36 | # haversine_distance(v1, w1, v2, w2)[0].new().isclose(347.3, abs_tol=0.1)
37 | v1 = MLIRSparseTensor(
38 | np.array([[0]], dtype=np.uint64),
39 | np.array([41.507483], dtype=np.float64),
40 | np.array([1], dtype=np.uint64),
41 | np.array([True], dtype=np.bool8),
42 | )
43 | w1 = MLIRSparseTensor(
44 | np.array([[0]], dtype=np.uint64),
45 | np.array([-99.436554], dtype=np.float64),
46 | np.array([1], dtype=np.uint64),
47 | np.array([True], dtype=np.bool8),
48 | )
49 | v2 = MLIRSparseTensor(
50 | np.array([[0]], dtype=np.uint64),
51 | np.array([38.504048], dtype=np.float64),
52 | np.array([1], dtype=np.uint64),
53 | np.array([True], dtype=np.bool8),
54 | )
55 | w2 = MLIRSparseTensor(
56 | np.array([[0]], dtype=np.uint64),
57 | np.array([-98.315949], dtype=np.float64),
58 | np.array([1], dtype=np.uint64),
59 | np.array([True], dtype=np.bool8),
60 | )
61 |
62 | dist = compiled_func(v1, w1, v2, w2)
63 | assert math.isclose(dist.values[0], 347.3, abs_tol=0.1)
64 |
--------------------------------------------------------------------------------
/mlir_graphblas/tests/test_cli.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from mlir_graphblas import MlirOptCli, MlirOptError
3 | from mlir_graphblas.cli import DebugResult
4 |
5 |
6 | @pytest.fixture
7 | def cli_input():
8 | return b"""\
9 | #trait_1d_scalar = {
10 | indexing_maps = [
11 | affine_map<(i) -> (i)>, // A
12 | affine_map<(i) -> (i)> // X (out)
13 | ],
14 | iterator_types = ["parallel"],
15 | doc = "X(i) = A(i) OP Scalar"
16 | }
17 | func @scale_func(%input: tensor<?xf32>, %scale: f32) -> tensor<?xf32> {
18 | %0 = linalg.generic #trait_1d_scalar
19 | ins(%input: tensor<?xf32>)
20 | outs(%input: tensor<?xf32>) {
21 | ^bb(%a: f32, %s: f32):
22 | %0 = arith.mulf %a, %scale : f32
23 | linalg.yield %0 : f32
24 | } -> tensor<?xf32>
25 | return %0 : tensor<?xf32>
26 | }
27 | """
28 |
29 |
30 | def test_apply_passes(cli_input):
31 | cli = MlirOptCli()
32 | passes = [
33 | "--linalg-bufferize",
34 | "--func-bufferize",
35 | "--finalizing-bufferize",
36 | "--convert-linalg-to-affine-loops",
37 | "--lower-affine",
38 | "--convert-scf-to-cf",
39 | ]
40 | result = cli.apply_passes(cli_input, passes)
41 | assert (
42 | result
43 | == """\
44 | module {
45 | func @scale_func(%arg0: memref<?xf32>, %arg1: f32) -> memref<?xf32> {
46 | %c0 = arith.constant 0 : index
47 | %0 = memref.dim %arg0, %c0 : memref<?xf32>
48 | %1 = memref.alloc(%0) : memref<?xf32>
49 | %2 = memref.dim %arg0, %c0 : memref<?xf32>
50 | %c0_0 = arith.constant 0 : index
51 | %c1 = arith.constant 1 : index
52 | cf.br ^bb1(%c0_0 : index)
53 | ^bb1(%3: index): // 2 preds: ^bb0, ^bb2
54 | %4 = arith.cmpi slt, %3, %2 : index
55 | cf.cond_br %4, ^bb2, ^bb3
56 | ^bb2: // pred: ^bb1
57 | %5 = memref.load %arg0[%3] : memref<?xf32>
58 | %6 = arith.mulf %5, %arg1 : f32
59 | memref.store %6, %1[%3] : memref<?xf32>
60 | %7 = arith.addi %3, %c1 : index
61 | cf.br ^bb1(%7 : index)
62 | ^bb3: // pred: ^bb1
63 | return %1 : memref<?xf32>
64 | }
65 | }
66 |
67 | """
68 | )
69 |
70 |
71 | def test_apply_passes_fails(cli_input):
72 | cli = MlirOptCli()
73 | passes = ["--linalg-bufferize"]
74 | cli_input = cli_input.replace(
75 | b"return %0 : tensor", b"return %0 : memref"
76 | )
77 | with pytest.raises(MlirOptError) as excinfo:
78 | cli.apply_passes(cli_input, passes)
79 | err = excinfo.value
80 | assert hasattr(err, "debug_result")
81 | assert isinstance(err.debug_result, DebugResult)
82 |
83 |
84 | def test_debug_passes(cli_input):
85 | cli = MlirOptCli()
86 | passes = [
87 | "--linalg-bufferize",
88 | "--func-bufferize",
89 | "--finalizing-bufferize",
90 | "--convert-linalg-to-affine-loops",
91 | "--lower-affine",
92 | "--convert-scf-to-std",
93 | ]
94 | result = cli.debug_passes(cli_input, passes)
95 | assert isinstance(result, DebugResult)
96 | assert result.passes == [p[2:] for p in passes]
97 | assert len(result.stages) == len(passes) + 1
98 | assert result.stages[0] == cli_input.decode()
99 |
--------------------------------------------------------------------------------
/mlir_graphblas/tests/test_mlir_builder_bad_inputs.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from mlir_graphblas import MlirJitEngine
3 | from mlir_graphblas.mlir_builder import MLIRVar, MLIRFunctionBuilder
4 |
5 |
6 | @pytest.fixture(scope="module")
7 | def engine():
8 | return MlirJitEngine()
9 |
10 |
11 | def test_ir_builder_bad_input_multi_value_mlir_variable():
12 | ir_builder = MLIRFunctionBuilder("some_func", input_types=[], return_types=("i8",))
13 |
14 | iter_i8_var = ir_builder.new_var("i8")
15 | lower_i8_var = ir_builder.arith.constant(1, "i8")
16 | iter_i64_var = ir_builder.new_var("i64")
17 | lower_i64_var = ir_builder.arith.constant(1, "i64")
18 | with ir_builder.for_loop(
19 | 0, 1, 1, iter_vars=[(iter_i8_var, lower_i8_var), (iter_i64_var, lower_i64_var)]
20 | ) as for_vars:
21 | constant_i8_var = ir_builder.arith.constant(8, "i8")
22 | constant_i64_var = ir_builder.arith.constant(64, "i64")
23 |
24 | # Raise when yielding too few values
25 | with pytest.raises(ValueError, match="Expected 2 yielded values, but got 1."):
26 | for_vars.yield_vars(constant_i8_var)
27 |
28 | # Raise when yielding too many values
29 | with pytest.raises(ValueError, match="Expected 2 yielded values, but got 3."):
30 | for_vars.yield_vars(constant_i8_var, constant_i64_var, lower_i64_var)
31 |
32 | # Raise when yielding incorrect types
33 | with pytest.raises(TypeError, match=" have different types."):
34 | for_vars.yield_vars(constant_i64_var, constant_i8_var)
35 |
36 | for_vars.yield_vars(constant_i8_var, constant_i64_var)
37 |
38 | # Raise when returning multiple valued variable
39 | with pytest.raises(TypeError, match=" is not a valid return value"):
40 | ir_builder.return_vars(for_vars.returned_variable)
41 |
42 | # Raise when using multiple valued variable as operand
43 | assigned_to_i8_var = ir_builder.new_var("i8")
44 | c1_i8_var = ir_builder.arith.constant(1, "i8")
45 | with pytest.raises(
46 | TypeError,
47 | match="Cannot access MLIRTuple .+ directly. Use index notation to access an element.",
48 | ):
49 | ir_builder.add_statement(
50 | f"{assigned_to_i8_var.assign} = arith.addi {c1_i8_var}, {for_vars.returned_variable} : i8"
51 | )
52 |
53 | with pytest.raises(
54 | TypeError,
55 | match="Cannot access MLIRTuple .+ directly. Use index notation to access an element.",
56 | ):
57 | ir_builder.arith.addi(c1_i8_var, for_vars.returned_variable)
58 |
59 | with pytest.raises(
60 | TypeError,
61 | match="Cannot access MLIRTuple .+ directly. Use index notation to access an element.",
62 | ):
63 | ir_builder.arith.addi(for_vars.returned_variable, c1_i8_var)
64 |
65 | # Raise when using multiple valued variable indexed via out-of-bound int index as operand
66 | with pytest.raises(IndexError):
67 | for_vars.returned_variable[999]
68 |
69 | # Raise when indexing into multiple valued variable via slice
70 | with pytest.raises(TypeError, match="Expects int, not"):
71 | ir_builder.return_vars(for_vars.returned_variable[:])
72 |
73 | # Raise when returning a non-MLIRVar
74 | with pytest.raises(
75 | TypeError, match="10 is not a valid return value, expected MLIRVar."
76 | ):
77 | ir_builder.return_vars(10)
78 |
79 | # Raise when returning value incompatible with return type.
80 | c1_i64_var = ir_builder.arith.constant(1, "i64")
81 | with pytest.raises(
82 | TypeError,
83 | match=r"Return type of MLIRVar\(name=.+, type=i64\) does not match i8",
84 | ):
85 | ir_builder.return_vars(c1_i64_var)
86 |
87 | # Raise when iterator variables have incompatible types
88 | with pytest.raises(TypeError, match=" have different types."):
89 | with ir_builder.for_loop(
90 | 0,
91 | 1,
92 | 1,
93 | iter_vars=[(iter_i8_var, lower_i64_var), (iter_i64_var, lower_i8_var)],
94 | ) as bad_for_vars:
95 | pass
96 |
97 | ir_builder.return_vars(for_vars.returned_variable[0])
98 |
--------------------------------------------------------------------------------
/mlir_graphblas/tools/__init__.py:
--------------------------------------------------------------------------------
1 | from .tersify_mlir import *
2 | from . import utils
3 |
--------------------------------------------------------------------------------
/mlir_graphblas/tools/tersify_mlir.py:
--------------------------------------------------------------------------------
1 | import fileinput
2 | import subprocess
3 | from collections import OrderedDict
4 |
5 | from ..types import AliasMap, SparseEncodingType, AffineMap
6 | from ..cli import MlirOptCli
7 |
8 |
9 | DEFAULT_ALIASES = AliasMap()
10 | csr64 = SparseEncodingType(["dense", "compressed"], [0, 1], 64, 64)
11 | csc64 = SparseEncodingType(["dense", "compressed"], [1, 0], 64, 64)
12 | cv64 = SparseEncodingType(["compressed"], None, 64, 64)
13 | DEFAULT_ALIASES["CSR64"] = csr64
14 | DEFAULT_ALIASES["CSC64"] = csc64
15 | DEFAULT_ALIASES["CV64"] = cv64
16 | DEFAULT_ALIASES["map1d"] = AffineMap("(d0)[s0, s1] -> (d0 * s1 + s0)")
17 |
18 | CLI = None
19 |
20 |
21 | def tersify_mlir(input_string: str, alias_map=None) -> str:
22 | global CLI
23 | if CLI is None:
24 | # Lazily initialize CLI to avoid circular import in MlirOptCli.__init__
25 | CLI = MlirOptCli()
26 | terse_string = CLI.apply_passes(input_string.encode(), [])
27 | if not isinstance(terse_string, str):
28 | raise terse_string
29 | if alias_map is None:
30 | alias_map = DEFAULT_ALIASES
31 | for alias_name, alias_type in reversed(alias_map.items()):
32 | alias_text = str(alias_type)
33 | if alias_text in terse_string:
34 | # Make a pretty version of the string
35 | terse_string = terse_string.replace(alias_text, "#" + alias_name)
36 | terse_string = (
37 | f"#{alias_name} = {alias_type.to_pretty_string()}\n\n" + terse_string
38 | )
39 | return terse_string
40 |
41 |
42 | def tersify_mlir_cli():
43 | input_string = "\n".join(fileinput.input())
44 | output_string = tersify_mlir(input_string)
45 | print(output_string)
46 | return
47 |
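48 |
49 | # For illustration: given verbose MLIR containing a fully spelled-out
50 | # #sparse_tensor.encoding<{...}> attribute matching one of the aliases
51 | # above, tersify_mlir() replaces each occurrence with the short name and
52 | # prepends the alias definition, e.g. "tensor<?x?xf64, #sparse_tensor.
53 | # encoding<{...}>>" becomes "tensor<?x?xf64, #CSR64>" under a
54 | # "#CSR64 = ..." header line.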
--------------------------------------------------------------------------------
/mlir_graphblas/tools/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from mlir_graphblas.sparse_utils import MLIRSparseTensor
3 |
4 | from typing import Sequence
5 |
6 |
7 | def sparsify_array(
8 | input_array: np.ndarray, sparsity_values: Sequence[bool], missing=0
9 | ) -> MLIRSparseTensor:
10 | """Converts a numpy array into a MLIRSparseTensor."""
11 |
12 | indices = np.array(
13 | list(zip(*(input_array != missing).nonzero())),
14 | dtype=np.uint64,
15 | )
16 | values = np.array(
17 | [input_array[coordinate] for coordinate in map(tuple, indices)],
18 | dtype=input_array.dtype,
19 | )
20 | sizes = np.array(input_array.shape, dtype=np.uint64)
21 | sparsity = np.array(sparsity_values, dtype=np.bool8)
22 |
23 | sparse_tensor = MLIRSparseTensor(indices, values, sizes, sparsity)
24 | return sparse_tensor
25 |
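26 |
27 | # For illustration (hypothetical values):
28 | #   sparsify_array(np.array([0.0, 1.5, 0.0, 2.5]), [True])
29 | # yields a 1-D MLIRSparseTensor with indices [[1], [3]], values [1.5, 2.5],
30 | # sizes [4], and a single compressed dimension.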
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", "wheel", "numpy >= 1.15", "cython"]
3 | [tool.black]
4 | extend-exclude = '''
5 | # A regex preceded with ^/ will apply only to files and directories
6 | # in the root of the project.
7 | ^/run-clang-format.py  # exclude run-clang-format.py in the root of the project (in addition to the defaults)
8 | '''
9 |
--------------------------------------------------------------------------------
/run_tests.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Run local tests
3 | pytest
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [tool:pytest]
2 | addopts = --cov-report term-missing:skip-covered --cov=mlir_graphblas
3 | testpaths = mlir_graphblas/tests
4 |
5 | [versioneer]
6 | VCS = git
7 | style = pep440
8 | versionfile_source = mlir_graphblas/_version.py
9 | versionfile_build = mlir_graphblas/_version.py
10 | tag_prefix=
11 | parentdir_prefix=mlir_graphblas-
12 |
13 | [coverage:run]
14 | omit =
15 | mlir_graphblas/_version.py
16 |
17 | [flake8]
18 | max-line-length = 120
19 | ignore =
20 | E402, # module level import not at top of file
21 | F401, # module imported but unused
22 | W503 # line break before binary operator
23 | exclude =
24 | versioneer.py,
25 | mlir_graphblas/_version.py
26 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import subprocess
4 | import distutils
5 | import numpy as np
6 | from setuptools import setup, find_packages, Extension
7 | from Cython.Build import cythonize
8 | from Cython.Compiler.Options import get_directive_defaults
9 | import versioneer
10 |
11 | if sys.platform == "darwin":
12 | # Create a dynamiclib instead of bundles so that it may be linked.
13 | # https://stackoverflow.com/questions/32419594/how-to-create-a-dylib-c-extension-on-mac-os-x-with-distutils-and-or-setuptools/32765319#32765319
14 | from distutils import sysconfig
15 |
16 | vars = sysconfig.get_config_vars()
17 | vars["LDSHARED"] = vars["LDSHARED"].replace("-bundle", "-dynamiclib")
18 |
19 | ########################################
20 | # SparseTensorUtils.cpp Cython Wrapper  #
21 | ########################################
22 |
23 | directive_defaults = get_directive_defaults()
24 | directive_defaults["binding"] = True
25 | directive_defaults["language_level"] = 3
26 |
27 | environment_include_dir = os.path.join(sys.exec_prefix, "include")
28 | include_dirs = [np.get_include(), environment_include_dir]
29 | define_macros = [("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")]
30 | annotate = True # Creates html file
31 |
32 | #########
33 | # setup #
34 | #########
35 |
36 | ext_modules = cythonize(
37 | [
38 | Extension(
39 | "mlir_graphblas.sparse_utils",
40 | language="c++",
41 | sources=["mlir_graphblas/sparse_utils.pyx"],
42 | extra_compile_args=["-std=c++11"],
43 | include_dirs=include_dirs,
44 | define_macros=define_macros,
45 | ),
46 | Extension(
47 | "mlir_graphblas.random_utils",
48 | language="c++",
49 | sources=["mlir_graphblas/random_utils.pyx"],
50 | extra_compile_args=["-std=c++11"],
51 | include_dirs=include_dirs,
52 | define_macros=define_macros,
53 | ),
54 | ],
55 | annotate=annotate,
56 | )
57 |
58 | ext_modules.append(
59 | Extension(
60 | "mlir_graphblas.SparseTensorUtils",
61 | sources=["mlir_graphblas/SparseTensorUtils.cpp"],
62 | include_dirs=[environment_include_dir],
63 | extra_compile_args=["-std=c++11"],
64 | )
65 | )
66 |
67 | ext_modules.append(
68 | Extension(
69 | "mlir_graphblas.RandomUtils",
70 | sources=["mlir_graphblas/RandomUtils.cpp"],
71 | include_dirs=[environment_include_dir],
72 | extra_compile_args=["-std=c++11"],
73 | )
74 | )
75 |
76 | setup(
77 | name="mlir-graphblas",
78 | version=versioneer.get_version(),
79 | cmdclass=versioneer.get_cmdclass(),
80 | description="MLIR dialect for GraphBLAS",
81 | author="Anaconda, Inc.",
82 | packages=find_packages(include=["mlir_graphblas", "mlir_graphblas.*"]),
83 | ext_modules=ext_modules,
84 | package_data={"mlir_graphblas": ["*.pyx"]},
85 | include_package_data=True,
86 | entry_points={
87 | "console_scripts": [
88 | "tersify_mlir=mlir_graphblas.tools:tersify_mlir_cli",
89 | ]
90 | },
91 | install_requires=["pymlir", "pygments"],
92 | )
93 |
--------------------------------------------------------------------------------