├── docs
│   ├── .gitignore
│   ├── sources
│   │   ├── sdc-sphinx-theme
│   │   │   ├── localtoc.html
│   │   │   ├── globaltoc.html
│   │   │   ├── static
│   │   │   │   ├── DPEP.png
│   │   │   │   ├── sdc_email.png
│   │   │   │   ├── intel_logo.png
│   │   │   │   ├── sdc_examples.png
│   │   │   │   ├── sdc_github.png
│   │   │   │   ├── sdc_issues.png
│   │   │   │   ├── copybutton.js
│   │   │   │   └── sidebar.js
│   │   │   ├── theme.conf
│   │   │   ├── searchbox.html
│   │   │   └── layout.html
│   │   ├── _images
│   │   │   ├── DPEP.png
│   │   │   ├── dpep-all.png
│   │   │   ├── dpep-ilp.png
│   │   │   ├── DPEP-large.png
│   │   │   ├── dpctl-logo.png
│   │   │   ├── dpep-cores.png
│   │   │   ├── dpep-simd.png
│   │   │   ├── dpnp-logo.png
│   │   │   ├── fp-cancellation.png
│   │   │   ├── hetero-devices.png
│   │   │   ├── numba-dpex-logo.png
│   │   │   ├── queue-exception1.png
│   │   │   ├── queue-exception2.png
│   │   │   ├── queue-exception3.png
│   │   │   ├── kernel-queue-device.png
│   │   │   └── advisor_roofline_gen9.png
│   │   ├── demos.rst
│   │   ├── index.rst
│   │   ├── examples.rst
│   │   ├── ext_links.txt
│   │   ├── jupyter_notebook.rst
│   │   ├── prerequisites_and_installation.rst
│   │   ├── useful_links.rst
│   │   ├── parallelism.rst
│   │   ├── benchmarks.rst
│   │   ├── conf.py
│   │   └── heterogeneous_computing.rst
│   ├── Makefile
│   └── make.bat
├── demos
│   ├── mcpi
│   │   ├── mcpi_demo
│   │   │   ├── __init__.py
│   │   │   ├── impl
│   │   │   │   ├── __init__.py
│   │   │   │   ├── impl_dpnp.py
│   │   │   │   ├── impl_numpy.py
│   │   │   │   ├── impl_numba.py
│   │   │   │   ├── impl_numba_dpex.py
│   │   │   │   ├── impl_versioner.py
│   │   │   │   └── arg_parser.py
│   │   │   ├── tests
│   │   │   │   ├── __init__.py
│   │   │   │   ├── test_dpnp.py
│   │   │   │   ├── test_numba.py
│   │   │   │   ├── test_numpy.py
│   │   │   │   └── _test_numba_dpex.py
│   │   │   ├── __main__.py
│   │   │   └── pi.py
│   │   ├── mcpi.py
│   │   ├── draw_points.png
│   │   ├── pi-animation.gif
│   │   ├── setup.py
│   │   ├── conda-recipe
│   │   │   ├── conda_build_config.yaml
│   │   │   └── meta.yaml
│   │   ├── .pre-commit-config.yaml
│   │   └── README.md
│   ├── game-of-life
│   │   ├── game_of_life_demo
│   │   │   ├── impl
│   │   │   │   ├── __init__.py
│   │   │   │   ├── impl_versioner.py
│   │   │   │   ├── impl_numpy.py
│   │   │   │   ├── impl_dpnp.py
│   │   │   │   ├── impl_numba.py
│   │   │   │   ├── impl_numba_dpex.py
│   │   │   │   ├── arg_parser.py
│   │   │   │   └── visualization.py
│   │   │   ├── tests
│   │   │   │   ├── __init__.py
│   │   │   │   ├── conftest.py
│   │   │   │   ├── test_parser.py
│   │   │   │   └── test_grid.py
│   │   │   ├── __main__.py
│   │   │   ├── __init__.py
│   │   │   └── game_of_life.py
│   │   ├── conda-recipe
│   │   │   ├── conda_build_config.yaml
│   │   │   └── meta.yaml
│   │   ├── setup.py
│   │   ├── game-of-life.gif
│   │   ├── game-of-life-lowres.gif
│   │   ├── game_of_life_demo.ipynb
│   │   └── README.md
│   └── mandelbrot
│       ├── mandelbrot_demo
│       │   ├── impl
│       │   │   ├── __init__.py
│       │   │   ├── settings.py
│       │   │   ├── impl_versioner.py
│       │   │   ├── arg_parser.py
│       │   │   ├── impl_numpy.py
│       │   │   ├── impl_dpnp.py
│       │   │   ├── impl_numba.py
│       │   │   ├── impl_numba_dpex.py
│       │   │   └── visualization.py
│       │   ├── tests
│       │   │   ├── __init__.py
│       │   │   ├── test_numba.py
│       │   │   ├── test_numpy.py
│       │   │   ├── test_dpnp.py
│       │   │   └── test_numba_dpex.py
│       │   ├── __main__.py
│       │   ├── __init__.py
│       │   └── mandelbrot.py
│       ├── mandelbrot_demo.py
│       ├── setup.py
│       ├── conda-recipe
│       │   ├── conda_build_config.yaml
│       │   └── meta.yaml
│       └── README.md
├── .gitignore
├── .flake8
├── environment.yml
├── SECURITY.md
├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── LICENSE
├── examples
│   ├── 01-hello_dpnp.py
│   ├── 04-dpctl_device_query.py
│   ├── 02-dpnp_device.py
│   └── 03-dpnp2numba-dpex.py
├── .github
│   └── workflows
│       ├── python_style_checks.yml
│       ├── scorecard.yml
│       ├── gh-pages.yml
│       ├── mandelbrot_build_test_deploy.yml
│       ├── mcpi_build_test_upload.yml
│       └── gol_build_test_upload.yml
├── README.md
└── notebooks
    ├── 02-dpnp_numpy_fallback.ipynb
    └── .ipynb_checkpoints
        └── 02-dpnp_numpy_fallback-checkpoint.ipynb
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | _build
2 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | _build
2 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/impl/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | *.xml
3 | *.iml
4 | *.pyc
5 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/impl/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/impl/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/impl/settings.py:
--------------------------------------------------------------------------------
1 | MAX_ITER = 30
2 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi.py:
--------------------------------------------------------------------------------
1 | from mcpi_demo.pi import main
2 |
3 | main()
4 |
--------------------------------------------------------------------------------
/demos/game-of-life/conda-recipe/conda_build_config.yaml:
--------------------------------------------------------------------------------
1 | numpy:
2 | - 1.23
3 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/__main__.py:
--------------------------------------------------------------------------------
1 | from mcpi_demo.pi import main
2 |
3 | main()
4 |
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/localtoc.html:
--------------------------------------------------------------------------------
1 | Page Contents
2 | {{ toc }}
3 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo.py:
--------------------------------------------------------------------------------
1 | from mandelbrot_demo.mandelbrot import main
2 |
3 | main()
4 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/__main__.py:
--------------------------------------------------------------------------------
1 | from mandelbrot_demo.mandelbrot import main
2 |
3 | main()
4 |
--------------------------------------------------------------------------------
/demos/mcpi/draw_points.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/demos/mcpi/draw_points.png
--------------------------------------------------------------------------------
/demos/mcpi/pi-animation.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/demos/mcpi/pi-animation.gif
--------------------------------------------------------------------------------
/demos/mcpi/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | if __name__ == "__main__":
4 | setup()
5 |
--------------------------------------------------------------------------------
/demos/game-of-life/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | if __name__ == "__main__":
4 | setup()
5 |
--------------------------------------------------------------------------------
/demos/mandelbrot/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | if __name__ == "__main__":
4 | setup()
5 |
--------------------------------------------------------------------------------
/docs/sources/_images/DPEP.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/DPEP.png
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/__main__.py:
--------------------------------------------------------------------------------
1 | from game_of_life_demo.game_of_life import main
2 |
3 | main()
4 |
--------------------------------------------------------------------------------
/docs/sources/_images/dpep-all.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/dpep-all.png
--------------------------------------------------------------------------------
/docs/sources/_images/dpep-ilp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/dpep-ilp.png
--------------------------------------------------------------------------------
/demos/game-of-life/game-of-life.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/demos/game-of-life/game-of-life.gif
--------------------------------------------------------------------------------
/demos/mcpi/conda-recipe/conda_build_config.yaml:
--------------------------------------------------------------------------------
1 | python:
2 | - 3.8
3 | - 3.9
4 | - 3.10
5 | numpy:
6 | - 1.23
7 |
--------------------------------------------------------------------------------
/docs/sources/_images/DPEP-large.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/DPEP-large.png
--------------------------------------------------------------------------------
/docs/sources/_images/dpctl-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/dpctl-logo.png
--------------------------------------------------------------------------------
/docs/sources/_images/dpep-cores.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/dpep-cores.png
--------------------------------------------------------------------------------
/docs/sources/_images/dpep-simd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/dpep-simd.png
--------------------------------------------------------------------------------
/docs/sources/_images/dpnp-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/dpnp-logo.png
--------------------------------------------------------------------------------
/demos/mandelbrot/conda-recipe/conda_build_config.yaml:
--------------------------------------------------------------------------------
1 | python:
2 | - 3.8
3 | - 3.9
4 | - 3.10
5 | numpy:
6 | - 1.23
7 |
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/globaltoc.html:
--------------------------------------------------------------------------------
1 | Table of Contents
2 | {{ toctree(maxdepth=-1, titles_only=true) }}
3 |
--------------------------------------------------------------------------------
/docs/sources/_images/fp-cancellation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/fp-cancellation.png
--------------------------------------------------------------------------------
/docs/sources/_images/hetero-devices.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/hetero-devices.png
--------------------------------------------------------------------------------
/docs/sources/_images/numba-dpex-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/numba-dpex-logo.png
--------------------------------------------------------------------------------
/demos/game-of-life/game-of-life-lowres.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/demos/game-of-life/game-of-life-lowres.gif
--------------------------------------------------------------------------------
/docs/sources/_images/queue-exception1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/queue-exception1.png
--------------------------------------------------------------------------------
/docs/sources/_images/queue-exception2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/queue-exception2.png
--------------------------------------------------------------------------------
/docs/sources/_images/queue-exception3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/queue-exception3.png
--------------------------------------------------------------------------------
/docs/sources/_images/kernel-queue-device.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/kernel-queue-device.png
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/static/DPEP.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/sdc-sphinx-theme/static/DPEP.png
--------------------------------------------------------------------------------
/docs/sources/_images/advisor_roofline_gen9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/_images/advisor_roofline_gen9.png
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/static/sdc_email.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/sdc-sphinx-theme/static/sdc_email.png
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/static/intel_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/sdc-sphinx-theme/static/intel_logo.png
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/static/sdc_examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/sdc-sphinx-theme/static/sdc_examples.png
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/static/sdc_github.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/sdc-sphinx-theme/static/sdc_github.png
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/static/sdc_issues.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IntelPython/DPEP/HEAD/docs/sources/sdc-sphinx-theme/static/sdc_issues.png
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore = E203, E266, E501, W503, F403, F401
3 | max-line-length = 89
4 | max-complexity = 18
5 | select = B,C,E,F,W,T4,B9
6 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/impl/impl_dpnp.py:
--------------------------------------------------------------------------------
1 | import dpnp as np
2 |
3 |
4 | def monte_carlo_pi_batch(batch_size):
5 | x = np.random.random(batch_size)
6 | y = np.random.random(batch_size)
7 | acc = np.count_nonzero(x * x + y * y <= 1.0)
8 | return 4.0 * acc / batch_size
9 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/impl/impl_numpy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def monte_carlo_pi_batch(batch_size):
5 | x = np.random.random(batch_size)
6 | y = np.random.random(batch_size)
7 | acc = np.count_nonzero(x * x + y * y <= 1.0)
8 | return 4.0 * acc / batch_size
9 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: docs
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | - python=3.9
6 | - ipython
7 | - pip
8 | - sphinx
9 | - sphinxcontrib-programoutput
10 | - nbsphinx
11 | - nbconvert=7.3.1
12 | - pip:
13 | - sphinxcontrib-googleanalytics
14 |
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/theme.conf:
--------------------------------------------------------------------------------
1 | # Intel(R) SDC theme based on Bootstrap-AstroPy CSS
2 | [theme]
3 | inherit = basic
4 | stylesheet = sdc.css
5 | pygments_style = sphinx
6 |
7 | [options]
8 | logotext1 = astro
9 | logotext2 = py
10 | logotext3 = :docs
11 | astropy_project_menubar = False
12 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/tests/test_dpnp.py:
--------------------------------------------------------------------------------
1 | from math import fabs
2 |
3 | import dpnp as np
4 | from mcpi_demo.impl.impl_dpnp import monte_carlo_pi_batch
5 |
6 |
7 | def test_dpnp():
8 | batch_size = 100000
9 | np.random.seed(7777777)
10 | pi = monte_carlo_pi_batch(batch_size)
11 | assert fabs(pi - 3.14) <= 0.1
12 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/tests/test_numba.py:
--------------------------------------------------------------------------------
1 | from math import fabs
2 |
3 | import numpy as np
4 | from mcpi_demo.impl.impl_numba import monte_carlo_pi_batch
5 |
6 |
7 | def test_numba():
8 | batch_size = 100000
9 | np.random.seed(7777777)
10 | pi = monte_carlo_pi_batch(batch_size)
11 | assert fabs(pi - 3.14) <= 0.1
12 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/tests/test_numpy.py:
--------------------------------------------------------------------------------
1 | from math import fabs
2 |
3 | import numpy as np
4 | from mcpi_demo.impl.impl_numpy import monte_carlo_pi_batch
5 |
6 |
7 | def test_numpy():
8 | batch_size = 100000
9 | np.random.seed(7777777)
10 | pi = monte_carlo_pi_batch(batch_size)
11 | assert fabs(pi - 3.14) <= 0.1
12 |
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/searchbox.html:
--------------------------------------------------------------------------------
1 | {%- if pagename != "search" %}
2 |
7 | {%- endif %}
8 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/tests/_test_numba_dpex.py:
--------------------------------------------------------------------------------
1 | from math import fabs
2 |
3 | import dpnp as np
4 | from mcpi_demo.impl.impl_numba_dpex import monte_carlo_pi_batch
5 |
6 |
7 | def test_numba_dpex():
8 | batch_size = 100000
9 | np.random.seed(7777777)
10 | pi = monte_carlo_pi_batch(batch_size)
11 | assert fabs(pi - 3.14) <= 0.1
12 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/tests/conftest.py:
--------------------------------------------------------------------------------
1 | def pytest_addoption(parser):
2 | parser.addoption("--variant", action="store", help="Implementation variant")
3 | parser.addoption(
4 | "--parallel", action="store_true", help="@njit(parallel=True/False) setting"
5 | )
6 | parser.addoption(
7 | "--no-parallel", action="store_false", help="@njit(parallel=True/False) setting"
8 | )
9 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 | Intel is committed to rapidly addressing security vulnerabilities affecting our customers and providing clear guidance on the solution, impact, severity and mitigation.
3 |
4 | ## Reporting a Vulnerability
5 | Please report any security vulnerabilities in this project utilizing the guidelines [here](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html).
6 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/tests/test_numba.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from mandelbrot_demo.impl.impl_numba import mandelbrot
3 |
4 |
5 | def test_numba():
6 | w = 2
7 | h = 2
8 | zoom = 1.0
9 | offset = (0.0, 0.0)
10 | colors = np.full((w, h, 3), 0, dtype=np.uint8)
11 |
12 | colors = mandelbrot(w, h, zoom, offset, colors)
13 | s = colors.astype(np.int32).sum()
14 | assert s == 1405
15 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/tests/test_numpy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from mandelbrot_demo.impl.impl_numpy import mandelbrot
3 |
4 |
5 | def test_numpy():
6 | w = 2
7 | h = 2
8 | zoom = 1.0
9 | offset = (0.0, 0.0)
10 | colors = np.full((w, h, 3), 0, dtype=np.uint8)
11 |
12 | colors = mandelbrot(w, h, zoom, offset, colors)
13 | s = colors.astype(np.int32).sum()
14 | assert s == 2055
15 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/tests/test_dpnp.py:
--------------------------------------------------------------------------------
1 | import dpnp as np
2 | from mandelbrot_demo.impl.impl_dpnp import mandelbrot
3 |
4 |
5 | def test_dpnp():
6 | w = 2
7 | h = 2
8 | zoom = 1.0
9 | offset = (0.0, 0.0)
10 | colors = np.full((w, h, 3), 0, dtype=np.int32)
11 |
12 | colors = mandelbrot(w, h, zoom, offset, colors)
13 | s = colors.astype(np.int32).sum()
14 | assert np.asnumpy(s) == 2055
15 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/impl/impl_numba.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numba import njit
3 | from numba import prange
4 |
5 |
6 | @njit(parallel=True)
7 | def monte_carlo_pi_batch(batch_size):
8 | x = np.random.random(batch_size)
9 | y = np.random.random(batch_size)
10 | acc = 0
11 | for i in prange(batch_size):
12 | if x[i] * x[i] + y[i] * y[i] <= 1.0:
13 | acc += 1
14 | return 4.0 * acc / batch_size
15 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/tests/test_numba_dpex.py:
--------------------------------------------------------------------------------
1 | import dpnp as np
2 | from mandelbrot_demo.impl.impl_numba_dpex import mandelbrot
3 |
4 |
5 | def _test_numba_dpex():
6 | w = 2
7 | h = 2
8 | zoom = 1.0
9 | offset = (0.0, 0.0)
10 | colors = np.full((w, h, 3), 0, dtype=np.int32)
11 |
12 | colors = mandelbrot(w, h, zoom, offset, colors)
13 | s = colors.astype(np.int32).sum()
14 | assert np.asnumpy(s) == 1405
15 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/impl/impl_numba_dpex.py:
--------------------------------------------------------------------------------
1 | import dpnp as np
2 | from numba_dpex import dpjit
3 | from numba_dpex import prange
4 |
5 |
6 | @dpjit(parallel=True)
7 | def monte_carlo_pi_batch(batch_size):
8 | x = np.random.random(batch_size)
9 | y = np.random.random(batch_size)
10 | acc = 0
11 | for i in prange(batch_size):
12 | if x[i] * x[i] + y[i] * y[i] <= 1.0:
13 | acc += 1
14 | return 4.0 * acc / batch_size
15 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/__init__.py:
--------------------------------------------------------------------------------
1 | from time import time
2 |
3 |
4 | def time_meter(last, total):
5 | def _time_meter(func):
6 | def impl(self, *args, **kwargs):
7 | start = time()
8 | res = func(self, *args, **kwargs)
9 | end = time()
10 | self.time[last] = end - start
11 | self.time[total] += end - start
12 |
13 | return res
14 |
15 | return impl
16 |
17 | return _time_meter
18 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/pi.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from mcpi_demo.impl.arg_parser import parse_args
4 | from mcpi_demo.impl.impl_versioner import monte_carlo_pi
5 |
6 |
7 | def main():
8 | args = parse_args()
9 | batch_size = args.batch_size
10 | n_batches = args.n_batches
11 | print(
12 | f"Estimating Pi with {args.variant} for {n_batches} "
13 | f"Sbatches of size {batch_size}..."
14 | )
15 | t1 = time.time()
16 | pi, pi_std = monte_carlo_pi(batch_size, n_batches)
17 | t2 = time.time()
18 | print(f"Pi={pi}, std={pi_std}")
19 | print("Done in ", t2 - t1, "seconds...")
20 |
21 |
22 | if __name__ == "__main__":
23 | main()
24 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 | repos:
4 | - repo: https://github.com/pre-commit/pre-commit-hooks
5 | rev: v4.3.0
6 | hooks:
7 | - id: end-of-file-fixer
8 | - id: trailing-whitespace
9 | - repo: https://github.com/psf/black
10 | rev: 23.1.0
11 | hooks:
12 | - id: black
13 | - repo: https://github.com/pycqa/isort
14 | rev: 5.12.0
15 | hooks:
16 | - id: isort
17 | name: isort (python)
18 | args: ["--profile", "black", "--filter-files", "-sl"]
19 | - repo: https://github.com/pycqa/flake8
20 | rev: 3.9.2
21 | hooks:
22 | - id: flake8
23 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/__init__.py:
--------------------------------------------------------------------------------
1 | from time import time
2 |
3 | PROB_ON = 0.2
4 | MAX_FRAMES = 2000
5 |
6 |
7 | def int_tuple(tuple_str):
8 | return tuple(map(int, tuple_str.split(",")))
9 |
10 |
11 | def time_meter(last, total):
12 | def _time_meter(func):
13 | def impl(self, *args, **kwargs):
14 | start = time()
15 | res = func(self, *args, **kwargs)
16 | end = time()
17 | self.time[last] = end - start
18 | self.time[total] += end - start
19 |
20 | return res
21 |
22 | return impl
23 |
24 | return _time_meter
25 |
26 |
27 | def get_task_size_string(w, h):
28 | return f"Task size {w}x{h}"
29 |
--------------------------------------------------------------------------------
/demos/mcpi/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 | repos:
4 | - repo: https://github.com/pre-commit/pre-commit-hooks
5 | rev: v4.3.0
6 | hooks:
7 | - id: end-of-file-fixer
8 | - id: trailing-whitespace
9 | - repo: https://github.com/psf/black
10 | rev: 23.1.0
11 | hooks:
12 | - id: black
13 | - repo: https://github.com/pycqa/isort
14 | rev: 5.12.0
15 | hooks:
16 | - id: isort
17 | name: isort (python)
18 | args: ["--profile", "black", "--filter-files", "-sl"]
19 | - repo: https://github.com/pycqa/flake8
20 | rev: 3.9.2
21 | hooks:
22 | - id: flake8
23 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = ./sources
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/impl/impl_versioner.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from mcpi_demo.impl.arg_parser import parse_args
3 |
4 | RUN_VERSION = parse_args().variant
5 |
6 | if RUN_VERSION == "Numba".casefold():
7 | from mcpi_demo.impl.impl_numba import monte_carlo_pi_batch
8 | elif RUN_VERSION == "NumPy".casefold():
9 | from mcpi_demo.impl.impl_numpy import monte_carlo_pi_batch
10 | elif RUN_VERSION == "DPNP".casefold():
11 | from mcpi_demo.impl.impl_dpnp import monte_carlo_pi_batch
12 | elif RUN_VERSION == "Numba-DPEX".casefold():
13 | from mcpi_demo.impl.impl_numba_dpex import monte_carlo_pi_batch
14 |
15 |
16 | def monte_carlo_pi(batch_size, n_batches):
17 | s = np.empty(n_batches)
18 | for i in range(n_batches):
19 | print(f"Batch #{i}")
20 | s[i] = monte_carlo_pi_batch(batch_size)
21 | return s.mean(), s.std()
22 |
--------------------------------------------------------------------------------
/docs/sources/demos.rst:
--------------------------------------------------------------------------------
1 | .. _demos:
2 | .. include:: ./ext_links.txt
3 |
4 | Demos
5 | =====
6 |
7 | There are several demo applications illustrating the power of the **Data Parallel Extensions for Python**:
8 |
9 | - `Monte Carlo Pi <https://github.com/IntelPython/DPEP/tree/main/demos/mcpi>`_
10 | The Monte Carlo method to estimate the value of :math:`\pi`.
11 |
12 | - `Mandelbrot Set <https://github.com/IntelPython/DPEP/tree/main/demos/mandelbrot>`_
13 | Visualization of the breathtaking process of diving into the famous Mandelbrot fractal.
14 |
15 | - `Game of Life <https://github.com/IntelPython/DPEP/tree/main/demos/game-of-life>`_
16 | Visualization of the evolution of life using Conway's famous model.
17 |
18 | All demos are located in the `GitHub repository <https://github.com/IntelPython/DPEP/tree/main/demos>`_.
19 | For more details, please refer to the documentation in each individual demo directory.
20 |
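21 | For example, after installing a demo's conda package (the package name, channel, and options below
22 | are those documented for the Monte Carlo Pi demo), it can be run directly from the console:
23 |
24 | .. code-block:: console
25 |
26 | conda install -c pycoddiy/label/dev mcpi-demo
27 | mcpi --batch-size 1000000 --n-batches 4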
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=./sources
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/demos/mcpi/mcpi_demo/impl/arg_parser.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 |
4 | def int_tuple(tuple_str):
5 | return tuple(map(int, tuple_str.split(",")))
6 |
7 |
8 | def parse_args(argv=None):
9 | parser = argparse.ArgumentParser(description="Monte Carlo Pi")
10 | parser.add_argument(
11 | "--variant",
12 | help="Implementation variant",
13 | type=str.casefold,
14 | choices=["numpy", "numba", "dpnp", "numba-dpex"],
15 | default="numpy",
16 | )
17 | batch_size = 102400000
18 | parser.add_argument(
19 | "--batch-size",
20 | help=f"Number of trial points in a batch. E.g. 102400000. Default {batch_size}",
21 | type=int,
22 | default=batch_size,
23 | )
24 | n_batches = 8
25 | parser.add_argument(
26 | "--n-batches",
27 | help=f"Number of batches. E.g. 8. Default {n_batches}",
28 | type=int,
29 | default=n_batches,
30 | )
31 |
32 | args = parser.parse_args(argv)
33 | return args
34 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/impl/impl_versioner.py:
--------------------------------------------------------------------------------
1 | from mandelbrot_demo.impl.arg_parser import parse_args
2 |
3 | RUN_VERSION = parse_args().variant
4 |
5 | if RUN_VERSION == "Numba".casefold():
6 | from mandelbrot_demo.impl.impl_numba import asnumpy
7 | from mandelbrot_demo.impl.impl_numba import init_values
8 | from mandelbrot_demo.impl.impl_numba import mandelbrot
9 | elif RUN_VERSION == "NumPy".casefold():
10 | from mandelbrot_demo.impl.impl_numpy import asnumpy
11 | from mandelbrot_demo.impl.impl_numpy import init_values
12 | from mandelbrot_demo.impl.impl_numpy import mandelbrot
13 | elif RUN_VERSION == "DPNP".casefold():
14 | from mandelbrot_demo.impl.impl_dpnp import asnumpy
15 | from mandelbrot_demo.impl.impl_dpnp import init_values
16 | from mandelbrot_demo.impl.impl_dpnp import mandelbrot
17 | elif RUN_VERSION == "Numba-DPEX".casefold():
18 | from mandelbrot_demo.impl.impl_numba_dpex import asnumpy
19 | from mandelbrot_demo.impl.impl_numba_dpex import init_values
20 | from mandelbrot_demo.impl.impl_numba_dpex import mandelbrot
21 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/impl/arg_parser.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 |
4 | def int_tuple(tuple_str):
5 | return tuple(map(int, tuple_str.split(",")))
6 |
7 |
8 | def parse_args(argv=None):
9 | parser = argparse.ArgumentParser(description="Mandelbrot Set")
10 | parser.add_argument(
11 | "--variant",
12 | help="Implementation variant",
13 | type=str.casefold,
14 | choices=["numpy", "numba", "dpnp", "numba-dpex"],
15 | default="numpy",
16 | )
17 | parser.add_argument(
18 | "--max-frames",
19 | help="Stop game after specified amount of frames "
20 | "(default 0 - no stop frame)",
21 | type=int,
22 | default=0,
23 | )
24 | parser.add_argument(
25 | "--gui",
26 | help="Render the evolution of the grid or do computation only and "
27 | "print statistics in the end. Default --no-gui",
28 | action="store_true",
29 | default=False,
30 | )
31 | w = 1024
32 | h = 800
33 | parser.add_argument(
34 | "--task-size",
35 | help=f"Window size. E.g. 800,600. Default {w},{h}",
36 | type=int_tuple,
37 | default=int_tuple(f"{w},{h}"),
38 | )
39 |
40 | args, _ = parser.parse_known_args(argv)
41 | return args
42 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | ## Documentation
4 |
5 | ### Conda environment for documentation
6 |
7 | Install miniconda first.
8 |
9 | ```bash
10 | conda env create -f environment.yml
11 | conda activate docs
12 | ```
13 |
14 | ### Generating documentation
15 |
16 | Install Sphinx and plugins:
17 | ```bash
18 | pip install sphinx autodoc recommonmark sphinx-rtd-theme
19 | ```
20 |
21 | Generate HTML:
22 | ```bash
23 | cd docs
24 | make html
25 | ```
26 |
27 | Run HTTP server:
28 | ```bash
29 | cd docs/_build/html
30 | python -m http.server 8000
31 | ```
32 |
33 | Don't forget to change the version in `docs/sources/conf.py` before generating.
34 | ```python
35 | release = ""
36 | ```
37 |
38 | Generated documentation will be in `docs/_build/html`.
39 |
40 | ### Uploading to GitHub Pages
41 |
42 | Documentation for GitHub Pages is placed in the following branch:
43 | [`gh-pages`](https://github.com/IntelPython/DPEP/tree/gh-pages).
44 |
45 | Folders:
46 | - `dev` folder contains current documentation for default branch.
47 | - `0.12.0` folder and other similar folders contain documentation for releases.
48 | - `latest` folder is a link to the latest release folder.
49 |
50 | Copy generated documentation into corresponding folder and create pull request
51 | to `gh-pages` branch.
52 |
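53 | For example, assuming the `gh-pages` branch is checked out as a separate working copy in
54 | `../DPEP-gh-pages` (an illustrative path), updating the development documentation looks like:
55 |
56 | ```bash
57 | # Copy the freshly generated HTML into the dev folder of the gh-pages checkout
58 | cp -r docs/_build/html/* ../DPEP-gh-pages/dev/
59 | ```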
--------------------------------------------------------------------------------
/demos/mcpi/conda-recipe/meta.yaml:
--------------------------------------------------------------------------------
1 | package:
2 | name: mcpi-demo
3 | version: {{ GIT_DESCRIBE_TAG }}
4 |
5 | source:
6 | path: ../../..
7 |
8 | build:
9 | entry_points:
10 | - mcpi = mcpi_demo.pi:main
11 | number: {{ GIT_DESCRIBE_NUMBER }}
12 | script: {{ PYTHON }} -m pip install ./demos/mcpi --no-deps --ignore-installed --no-cache-dir -vvv
13 |
14 | requirements:
15 | host:
16 | - python
17 | run:
18 | - python
19 | - matplotlib >=3.7.1
20 | - numpy >=1.21
21 | - numba >=0.55
22 | - dpnp
23 | - numba-dpex
24 |
25 | test:
26 | requires:
27 | - pytest
28 | commands:
29 | - mcpi --help
30 | - python -m mcpi_demo --variant numba
31 | - mcpi --batch-size 10 --n-batches 1
32 | imports:
33 | - mcpi_demo
34 | - mcpi_demo.impl.impl_versioner
35 | - mcpi_demo.impl.arg_parser
36 |
37 | about:
38 | home: https://intelpython.github.io/DPEP/main/
39 | license: BSD-2-Clause
40 | license_family: BSD
41 | license_file:
42 | - LICENSE
43 | summary: Monte Carlo demo to compute Pi using numpy, numba, dpnp, and numba-dpex
44 | description: |
45 | This is a Hello, World application in Monte Carlo methods. It stresses random number generation along
46 | with some other math required for implementation of the Acceptance-Rejection technique.
47 |
48 | extra:
49 | recipe-maintainers:
50 | - samaid
51 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 2-Clause License
2 |
3 | Copyright (c) 2023, Sergey Maydanov
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/impl/impl_numpy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from mandelbrot_demo.impl.settings import MAX_ITER
3 |
4 | c1 = np.asarray([0.0, 0.0, 0.2])
5 | c2 = np.asarray([1.0, 0.7, 0.9])
6 | c3 = np.asarray([0.6, 1.0, 0.2])
7 |
8 |
9 | def color_by_intensity(intensity):
10 | intensity = np.broadcast_to(intensity[:, :, np.newaxis], intensity.shape + (3,))
11 | return np.where(
12 | intensity < 0.5,
13 | c3 * intensity + c2 * (1.0 - intensity),
14 | c1 * intensity + c2 * (1.0 - intensity),
15 | )
16 |
17 |
18 | def mandelbrot(w, h, zoom, offset, values):
19 | x = np.linspace(0, w, num=w, dtype=np.float32)
20 | y = np.linspace(0, h, num=h, dtype=np.float32)
21 | xx = (x - offset[0]) * zoom
22 | yy = (y - offset[1]) * zoom
23 | c = xx + 1j * yy[:, np.newaxis]
24 |
25 | n_iter = np.full(c.shape, 0) # 2d array
26 | z = np.zeros(c.shape, np.csingle) # 2d array too
27 | mask = n_iter < MAX_ITER # Initialize with True
28 | for i in range(MAX_ITER):  # escape-time iteration
29 | z[mask] = z[mask] ** 2 + c[mask]  # advance only the points that have not escaped yet
30 | mask = mask & (np.abs(z) <= 2.0)  # points with |z| > 2 have escaped
31 | n_iter[mask] = i  # record the last iteration at which a point was still bounded
32 |
33 | intensity = n_iter.T / MAX_ITER
34 | values = (color_by_intensity(intensity) * 255).astype(np.uint8)
35 | return values
36 |
37 |
38 | def init_values(w, h):
39 | return np.full((w, h, 3), 0, dtype=np.uint8)
40 |
41 |
42 | def asnumpy(values):
43 | return values
44 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/impl/impl_versioner.py:
--------------------------------------------------------------------------------
1 | from game_of_life_demo.impl.arg_parser import parse_args
2 |
3 | RUN_VERSION = parse_args().variant
4 |
5 | if RUN_VERSION == "Numba".casefold():
6 | from game_of_life_demo.impl.impl_numba import asnumpy
7 | from game_of_life_demo.impl.impl_numba import grid_update
8 | from game_of_life_demo.impl.impl_numba import init_grid
9 | from numba import config
10 |
11 | config.THREADING_LAYER = parse_args().threading_layer
12 | elif RUN_VERSION == "NumPy".casefold():
13 | from game_of_life_demo.impl.impl_numpy import asnumpy
14 | from game_of_life_demo.impl.impl_numpy import grid_update
15 | from game_of_life_demo.impl.impl_numpy import init_grid
16 | elif RUN_VERSION == "DPNP".casefold():
17 | from game_of_life_demo.impl.impl_dpnp import asnumpy
18 | from game_of_life_demo.impl.impl_dpnp import grid_update
19 | from game_of_life_demo.impl.impl_dpnp import init_grid
20 | elif RUN_VERSION == "Numba-DPEX".casefold():
21 | from game_of_life_demo.impl.impl_numba_dpex import asnumpy
22 | from game_of_life_demo.impl.impl_numba_dpex import grid_update
23 | from game_of_life_demo.impl.impl_numba_dpex import init_grid
24 |
25 |
26 | def get_variant_string():
27 | if RUN_VERSION == "Numba".casefold():
28 | return f"Numba, threading layer: {parse_args().threading_layer}, parallel: {parse_args().parallel}"
29 | else:
30 | return "NumPy"
31 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/impl/impl_dpnp.py:
--------------------------------------------------------------------------------
1 | import dpnp as np
2 | from mandelbrot_demo.impl.settings import MAX_ITER
3 |
4 | c1 = np.asarray([0.0, 0.0, 0.2])
5 | c2 = np.asarray([1.0, 0.7, 0.9])
6 | c3 = np.asarray([0.6, 1.0, 0.2])
7 |
8 |
9 | def color_by_intensity(intensity):
10 | intensity = np.broadcast_to(intensity[:, :, np.newaxis], intensity.shape + (3,))
11 | return np.where(
12 | intensity < 0.5,
13 | c3 * intensity + c2 * (1.0 - intensity),
14 | c1 * intensity + c2 * (1.0 - intensity),
15 | )
16 |
17 |
18 | def mandelbrot(w, h, zoom, offset, values):
19 | x = np.linspace(0, w, num=w, dtype=np.float32)
20 | y = np.linspace(0, h, num=h, dtype=np.float32)
21 | xx = (x - offset[0]) * zoom
22 | yy = (y - offset[1]) * zoom
23 | c = xx + 1j * yy[:, np.newaxis]
24 |
25 | n_iter = np.full(c.shape, 0) # 2d array
26 | z = np.zeros(c.shape, dtype=np.csingle) # 2d array too
27 | mask = n_iter < MAX_ITER # Initialize with True
28 | for i in range(MAX_ITER):
29 | z[mask] = z[mask] ** 2 + c[mask]
30 | mask = mask & (np.abs(z) <= 2.0)
31 | n_iter[mask] = i
32 |
33 | intensity = n_iter.T / MAX_ITER
34 | values = (color_by_intensity(intensity) * 255).astype(np.int32)
35 | return values
36 |
37 |
38 | def init_values(w, h):
39 | return np.full((w, h, 3), 0, dtype=np.int32)
40 |
41 |
42 | def asnumpy(values):
43 | return np.asnumpy(values)
44 |
--------------------------------------------------------------------------------
/demos/game-of-life/conda-recipe/meta.yaml:
--------------------------------------------------------------------------------
1 | package:
2 | name: game-of-life-demo
3 | version: {{ GIT_DESCRIBE_TAG }}
4 |
5 | source:
6 | path: ../../..
7 |
8 | build:
9 | entry_points:
10 | - game_of_life = game_of_life_demo.game_of_life:main
11 | number: {{ GIT_DESCRIBE_NUMBER }}
12 | script: {{ PYTHON }} -m pip install ./demos/game-of-life --no-deps --ignore-installed --no-cache-dir -vvv
13 |
14 | test:
15 | requires:
16 | - pytest
17 | commands:
18 | - game_of_life --help
19 | - python -m game_of_life_demo --variant numba --threading-layer tbb --no-parallel --no-gui
20 | - game_of_life --frame-count 5 --no-gui --no-stats --task-size 10,10
21 | imports:
22 | - game_of_life_demo
23 | - game_of_life_demo.impl.impl_versioner
24 | - game_of_life_demo.impl.arg_parser
25 |
26 | requirements:
27 | host:
28 | - python
29 | run:
30 | - numpy <1.24
31 | - scipy
32 | - numba >=0.56.4
33 | - opencv =4.6.0
34 | - dpnp
35 | - numba-dpex
36 |
37 | about:
38 | home: https://intelpython.github.io/DPEP/main/
39 | summary: Conway's Game-Of-Life demo with numpy, numba, dpnp, numba-dpex
40 | description: |
41 | The Game of Life is a cellular automaton devised by John Horton Conway in 1970.
42 | This demo provides multiple implementations using different libraries/tools for CPU and GPU.
43 | license: BSD-2-Clause
44 | license_file: LICENSE
45 |
46 | extra:
47 | recipe-maintainers:
48 | - samaid
49 |
--------------------------------------------------------------------------------
/demos/mcpi/README.md:
--------------------------------------------------------------------------------
1 | # MCPI - Monte Carlo estimation of Pi using numpy, numba, dpnp, numba-dpex
2 |
3 | This is a "Hello, World" application in Monte Carlo methods. It stresses random number generation
4 | along with some other math required for implementation of the Acceptance-Rejection technique.
5 |
6 | 
7 |
8 | For details please refer to [Wikipedia](https://en.wikipedia.org/wiki/Monte_Carlo_method)
9 |
10 | ## How to run
11 |
12 | `python -m mcpi_demo [options]`
13 |
14 | The demo can be invoked in several ways:
15 |
16 | 1. Cloning the GitHub repo and running `python mcpi.py [options]`
17 | 2. Cloning the GitHub repo and running `python -m mcpi_demo [options]`
18 | 3. Installing the conda package and invoking the executable:
19 | * `conda install -c pycoddiy/label/dev mcpi-demo`
20 | * `mcpi [options]`
21 |
22 | ### Options
23 |
24 | The following options are allowed:
25 | * `--variant [numpy, numba, dpnp, numba-dpex]` (default `numpy`): Implementation variant
26 | * `--batch-size`: Number of trial points in the batch
27 | * `--n-batches`: Number of batches
28 |
29 | ## Jupyter Notebook
30 | The Monte Carlo Pi demo is also supplemented with a [Jupyter Notebook](https://github.com/IntelPython/DPEP/blob/main/demos/mcpi/mcpi.ipynb),
31 | which illustrates the idea of the algorithm step by step.
32 |
33 | 
34 |
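35 | ### Example
36 |
37 | A typical invocation looks as follows (the values below are illustrative):
38 |
39 | ```bash
40 | # Estimate Pi with the dpnp variant using 4 batches of 1,000,000 points each
41 | python -m mcpi_demo --variant dpnp --batch-size 1000000 --n-batches 4
42 | ```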
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/impl/impl_numpy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def init_grid(w, h, p):
5 | return np.random.choice((0, 1), w * h, p=(1.0 - p, p)).reshape(h, w)
6 |
7 |
8 | def grid_update(grid):
9 | m, n = grid.shape
10 |
11 | grid_neighbor = np.zeros((m + 2, n + 2), dtype=grid.dtype)  # padded accumulator of live-neighbor counts
12 |
13 | grid_neighbor[0:-2, 0:-2] = grid  # sum shifted copies of the grid over all 8 neighbor offsets
14 | grid_neighbor[1:-1, 0:-2] += grid
15 | grid_neighbor[2:, 0:-2] += grid
16 | grid_neighbor[0:-2, 1:-1] += grid
17 | grid_neighbor[2:, 1:-1] += grid
18 | grid_neighbor[0:-2, 2:] += grid
19 | grid_neighbor[1:-1, 2:] += grid
20 | grid_neighbor[2:, 2:] += grid
21 |
22 | grid_neighbor[1, 1:-1] += grid_neighbor[-1, 1:-1]  # wrap edges (toroidal boundary)
23 | grid_neighbor[-2, 1:-1] += grid_neighbor[0, 1:-1]
24 | grid_neighbor[1:-1, 1] += grid_neighbor[1:-1, -1]
25 | grid_neighbor[1:-1, -2] += grid_neighbor[1:-1, 0]
26 |
27 | grid_neighbor[1, 1] += grid_neighbor[-1, -1]  # wrap the four corners
28 | grid_neighbor[-2, -2] += grid_neighbor[0, 0]
29 | grid_neighbor[1, -2] += grid_neighbor[-1, 0]
30 | grid_neighbor[-2, 1] += grid_neighbor[0, -1]
31 |
32 | dead_rules = np.logical_and(grid == 0, grid_neighbor[1:-1, 1:-1] == 3)  # birth: dead cell with exactly 3 neighbors
33 | alive_rules = np.logical_and(  # survival: live cell with 2 or 3 neighbors
34 | grid == 1,
35 | np.logical_or(grid_neighbor[1:-1, 1:-1] == 2, grid_neighbor[1:-1, 1:-1] == 3),
36 | )
37 |
38 | grid_out = np.logical_or(alive_rules, dead_rules)
39 |
40 | return grid_out.astype(grid.dtype)
41 |
42 |
43 | def asnumpy(x):
44 | return x
45 |
--------------------------------------------------------------------------------
/docs/sources/index.rst:
--------------------------------------------------------------------------------
1 | .. _index:
2 | .. include:: ./ext_links.txt
3 |
4 | .. image:: ./_images/DPEP-large.png
5 | :width: 400px
6 | :align: center
7 | :alt: Data Parallel Extensions for Python
8 |
9 | Data Parallel Extensions for Python
10 | ===================================
11 |
12 | Data Parallel Extensions for Python* extend numerical Python capabilities beyond CPU and allow even higher performance
13 | gains on data parallel devices, such as GPUs. They consist of three foundational packages:
14 |
15 | * **dpnp** - Data Parallel Extensions for `NumPy*`_ - a library that implements a subset of
16 | NumPy that can be executed on any data parallel device. The subset is a drop-in replacement
17 | for core NumPy functions and numerical data types.
18 | * **numba_dpex** - Data Parallel Extensions for `Numba*`_ - an extension for the Numba compiler
19 | that lets you program data parallel devices as you program CPUs with Numba.
20 | * **dpctl** - Data Parallel Control library - a library that provides utilities for device selection,
21 | allocation of data on devices, a tensor data structure along with a `Python* Array API Standard`_ implementation, and support for creation of user-defined data-parallel extensions.
22 |
23 | Table of Contents
24 | *****************
25 | .. toctree::
26 | :maxdepth: 2
27 |
28 | prerequisites_and_installation
29 | parallelism
30 | heterogeneous_computing
31 | programming_dpep
32 | examples
33 | jupyter_notebook
34 | benchmarks
35 | demos
36 | useful_links
37 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/impl/impl_numba.py:
--------------------------------------------------------------------------------
1 | import numba as nb
2 | import numpy as np
3 | from mandelbrot_demo.impl.settings import MAX_ITER
4 |
5 | nb.config.THREADING_LAYER = "omp"
6 |
7 |
8 | @nb.jit(fastmath=True, nopython=True)
9 | def color_by_intensity(intensity, c1, c2, c3):
10 | if intensity < 0.5:
11 | return c3 * intensity + c2 * (1.0 - intensity)
12 | else:
13 | return c1 * intensity + c2 * (1.0 - intensity)
14 |
15 |
16 | @nb.jit(fastmath=True, nopython=True)
17 | def mandel(x, y):
18 | c = complex(x, y)
19 | z = 0.0j
20 | for i in range(MAX_ITER):
21 | z = z * z + c
22 | if (z.real * z.real + z.imag * z.imag) > 4.0:
23 | return i
24 | return MAX_ITER
25 |
26 |
27 | @nb.jit(fastmath=True, nopython=True, parallel=True)
28 | def mandelbrot(w, h, zoom, offset, values):
29 | c1 = np.asarray([0.0, 0.0, 0.2])
30 | c2 = np.asarray([1.0, 0.7, 0.9])
31 | c3 = np.asarray([0.6, 1.0, 0.2])
32 |
33 | for x in nb.prange(w):
34 | for y in range(h):
35 | xx = (x - offset[0]) * zoom
36 | yy = (y - offset[1]) * zoom
37 | intensity = mandel(xx, yy) / MAX_ITER
38 | for c in range(3):
39 | color = color_by_intensity(intensity, c1[c], c2[c], c3[c])
40 | color = int(color * 255.0)
41 | values[x, y, c] = color
42 | return values
43 |
44 |
45 | def init_values(w, h):
46 | return np.full((w, h, 3), 0, dtype=np.uint8)
47 |
48 |
49 | def asnumpy(values):
50 | return values
51 |
--------------------------------------------------------------------------------
/docs/sources/examples.rst:
--------------------------------------------------------------------------------
1 | .. _examples:
2 | .. include:: ./ext_links.txt
3 |
4 | Examples
5 | ========
6 | All examples are located in the `GitHub repository <https://github.com/IntelPython/DPEP/tree/main/examples>`_.
7 | Their names start with a two-digit number followed by a descriptive name.
8 | You can run the examples in any order; however, if you are new to the
9 | Data Parallel Extensions for Python, it is recommended to follow the order in which the examples are enumerated.
10 |
11 | The following command will run the very first example of using the Data Parallel Extensions for Python:
12 |
13 | .. code-block:: console
14 |
15 | python ./examples/01-hello_dpnp.py
16 |
17 | Below are the listings of these examples:
18 |
19 | .. literalinclude:: ../../examples/01-hello_dpnp.py
20 | :language: python
21 | :lines: 27-
22 | :caption: **EXAMPLE 01:** Your first NumPy code running on GPU
23 | :name: examples_01_hello_dpnp
24 |
25 | .. literalinclude:: ../../examples/02-dpnp_device.py
26 | :language: python
27 | :lines: 27-
28 | :caption: **EXAMPLE 02:** Select device type while creating array
29 | :name: examples_02_dpnp_device
30 |
31 | .. literalinclude:: ../../examples/03-dpnp2numba-dpex.py
32 | :language: python
33 | :lines: 27-
34 | :caption: **EXAMPLE 03:** Compile dpnp code with numba-dpex
35 | :name: examples_03_dpnp2numba_dpex
36 |
37 | .. literalinclude:: ../../examples/04-dpctl_device_query.py
38 | :language: python
39 | :lines: 27-
40 | :caption: **EXAMPLE 04:** Get information about devices
41 | :name: examples_04_dpctl_device_query
42 |
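43 | If you want a taste of Example 01 without cloning the repository, a minimal "hello dpnp"
44 | program looks roughly like the sketch below (an illustrative snippet, not the actual listing
45 | from the repository):
46 |
47 | .. code-block:: python
48 |
49 | import dpnp as np
50 |
51 | # The array is allocated on the default data parallel device
52 | x = np.arange(10.0)
53 | print(x.sum())
54 | print(x.device)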
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/impl/impl_dpnp.py:
--------------------------------------------------------------------------------
1 | import dpnp as np
2 |
3 |
4 | def init_grid(w, h, p):
5 | u = np.random.random(w * h) # Working around the lack of random.choice
6 | return np.where(u <= p, 1, 0).reshape(h, w)
7 |
8 |
9 | def grid_update(grid):
10 | m, n = grid.shape
11 |
12 | grid_neighbor = np.zeros((m + 2, n + 2), dtype=grid.dtype)
13 |
14 | grid_neighbor[0:-2, 0:-2] = grid
15 | grid_neighbor[1:-1, 0:-2] += grid
16 | grid_neighbor[2:, 0:-2] += grid
17 | grid_neighbor[0:-2, 1:-1] += grid
18 | grid_neighbor[2:, 1:-1] += grid
19 | grid_neighbor[0:-2, 2:] += grid
20 | grid_neighbor[1:-1, 2:] += grid
21 | grid_neighbor[2:, 2:] += grid
22 |
23 | grid_neighbor[1, 1:-1] += grid_neighbor[-1, 1:-1]
24 | grid_neighbor[-2, 1:-1] += grid_neighbor[0, 1:-1]
25 | grid_neighbor[1:-1, 1] += grid_neighbor[1:-1, -1]
26 | grid_neighbor[1:-1, -2] += grid_neighbor[1:-1, 0]
27 |
28 | grid_neighbor[1, 1] += grid_neighbor[-1, -1]
29 | grid_neighbor[-2, -2] += grid_neighbor[0, 0]
30 | grid_neighbor[1, -2] += grid_neighbor[-1, 0]
31 | grid_neighbor[-2, 1] += grid_neighbor[0, -1]
32 |
33 | dead_rules = np.logical_and(grid == 0, grid_neighbor[1:-1, 1:-1] == 3)
34 | alive_rules = np.logical_and(
35 | grid == 1,
36 | np.logical_or(grid_neighbor[1:-1, 1:-1] == 2, grid_neighbor[1:-1, 1:-1] == 3),
37 | )
38 |
39 | grid_out = np.logical_or(alive_rules, dead_rules)
40 |
41 | return grid_out.astype(grid.dtype)
42 |
43 |
44 | def asnumpy(x):
45 | return np.asnumpy(x)
46 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/impl/impl_numba_dpex.py:
--------------------------------------------------------------------------------
1 | import dpnp as np
2 | import numba_dpex as nb
3 | from mandelbrot_demo.impl.settings import MAX_ITER
4 |
5 | nb.config.THREADING_LAYER = "omp"
6 |
7 |
8 | @nb.dpjit(fastmath=True, nopython=True)
9 | def color_by_intensity(intensity, c1, c2, c3):
10 | if intensity < 0.5:
11 | return c3 * intensity + c2 * (1.0 - intensity)
12 | else:
13 | return c1 * intensity + c2 * (1.0 - intensity)
14 |
15 |
16 | @nb.dpjit(fastmath=True, nopython=True)
17 | def mandel(x, y):
18 | c = complex(x, y)
19 | z = 0.0j
20 | for i in range(MAX_ITER):
21 | z = z * z + c
22 | if (z.real * z.real + z.imag * z.imag) > 4.0:
23 | return i
24 | return MAX_ITER
25 |
26 |
27 | @nb.dpjit(fastmath=True, nopython=True, parallel=True)
28 | def mandelbrot(w, h, zoom, offset, values):
29 | c1 = np.asarray([0.0, 0.0, 0.2])
30 | c2 = np.asarray([1.0, 0.7, 0.9])
31 | c3 = np.asarray([0.6, 1.0, 0.2])
32 |
33 | for x in nb.prange(w):
34 | for y in range(h):
35 | xx = (x - offset[0]) * zoom
36 | yy = (y - offset[1]) * zoom
37 | intensity = mandel(xx, yy) / MAX_ITER
38 | for c in range(3):
39 | color = color_by_intensity(intensity, c1[c], c2[c], c3[c])
40 | color = int(color * 255.0)
41 | values[x, y, c] = color
42 | return values
43 |
44 |
45 | def init_values(w, h):
46 | return np.full((w, h, 3), 0, dtype=np.uint8)
47 |
48 |
49 | def asnumpy(values):
50 | return np.asnumpy(values)
51 |
--------------------------------------------------------------------------------
/docs/sources/ext_links.txt:
--------------------------------------------------------------------------------
1 | ..
2 | **********************************************************
3 | THESE ARE EXTERNAL PROJECT LINKS USED IN THE DOCUMENTATION
4 | **********************************************************
5 | .. _NumPy*: https://numpy.org/
6 | .. _Numba*: https://numba.pydata.org/
7 | .. _Python* Array API Standard: https://data-apis.org/array-api/
8 | .. _Intel Distribution for Python*: https://www.intel.com/content/www/us/en/developer/tools/oneapi/distribution-for-python.html
9 | .. _OpenCL*: https://www.khronos.org/opencl/
10 | .. _DPC++: https://www.apress.com/gp/book/9781484255735
11 | .. _Data Parallel Extension for Numba*: https://intelpython.github.io/numba-dpex/latest/index.html
12 | .. _SYCL*: https://www.khronos.org/sycl/
13 | .. _Data Parallel Control: https://intelpython.github.io/dpctl/latest/index.html
14 | .. _Data Parallel Extension for Numpy*: https://intelpython.github.io/dpnp/
15 | .. _IEEE 754-2019 Standard for Floating-Point Arithmetic: https://standards.ieee.org/ieee/754/6210/
16 | .. _David Goldberg, What every computer scientist should know about floating-point arithmetic: https://www.itu.dk/~sestoft/bachelor/IEEE754_article.pdf
17 | .. _Intel oneAPI Base Toolkit: https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html
18 | .. _Intel VTune Profiler: https://www.intel.com/content/www/us/en/developer/tools/oneapi/vtune-profiler.html
19 | .. _Intel Advisor: https://www.intel.com/content/www/us/en/developer/tools/oneapi/advisor.html
20 | .. _Data Parallel Extensions for Python GitHub repository: https://github.com/IntelPython/dpep
21 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/impl/impl_numba.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from game_of_life_demo.impl.arg_parser import parse_args
3 | from numba import njit
4 | from numba import prange
5 |
6 | rules = np.array(
7 | [
8 | # 0 1 2 3 4 5 6 7 8 # Number of alive cell neighbors
9 | [0, 0, 0, 1, 0, 0, 0, 0, 0], # Rule for dead cells
10 | [0, 0, 1, 1, 0, 0, 0, 0, 0], # Rule for alive cells
11 | ]
12 | )
13 |
14 |
15 | def init_grid(w, h, p):
16 | return np.random.choice((0, 1), w * h, p=(1.0 - p, p)).reshape(h, w)
17 |
18 |
19 | @njit(
20 | ["int32[:,:](int32[:,:])", "int64[:,:](int64[:,:])"], parallel=parse_args().parallel
21 | )
22 | def grid_update(grid):
23 | m, n = grid.shape
24 | grid_out = np.empty_like(grid)
25 | grid_padded = np.empty((m + 2, n + 2), dtype=grid.dtype)
26 | grid_padded[1:-1, 1:-1] = grid # copy input grid into the center of padded one
27 | grid_padded[0, 1:-1] = grid[-1] # top row of padded grid
28 | grid_padded[-1, 1:-1] = grid[0] # bottom
29 | grid_padded[1:-1, 0] = grid[:, -1]
30 | grid_padded[1:-1, -1] = grid[:, 0]
31 | grid_padded[0, 0] = grid[-1, -1]
32 | grid_padded[-1, -1] = grid[0, 0]
33 | grid_padded[0, -1] = grid[-1, 0]
34 | grid_padded[-1, 0] = grid[0, -1]
35 | for i in prange(m):
36 | for j in range(n):
37 | v_self = grid[i, j]
38 | neighbor_population = grid_padded[i : i + 3, j : j + 3].sum() - v_self
39 | grid_out[i, j] = rules[v_self, neighbor_population]
40 | return grid_out
41 |
42 |
43 | def asnumpy(x):
44 | return x
45 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/impl/impl_numba_dpex.py:
--------------------------------------------------------------------------------
1 | import dpnp as np
2 | from game_of_life_demo.impl.arg_parser import parse_args
3 | from numba_dpex import dpjit as njit
4 | from numba_dpex import prange
5 |
6 | rules = np.array(
7 | [
8 | # 0 1 2 3 4 5 6 7 8 # Number of alive cell neighbors
9 | [0, 0, 0, 1, 0, 0, 0, 0, 0], # Rule for dead cells
10 | [0, 0, 1, 1, 0, 0, 0, 0, 0], # Rule for alive cells
11 | ]
12 | )
13 |
14 |
15 | def init_grid(w, h, p):
16 | u = np.random.random(w * h) # Working around the lack of random.choice()
17 | return np.where(u <= p, 1, 0).reshape(h, w)
18 |
19 |
20 | @njit(
21 | ["int32[:,:](int32[:,:])", "int64[:,:](int64[:,:])"], parallel=parse_args().parallel
22 | )
23 | def grid_update(grid):
24 | m, n = grid.shape
25 | grid_out = np.empty_like(grid)
26 | grid_padded = np.empty((m + 2, n + 2), dtype=grid.dtype)
27 | grid_padded[1:-1, 1:-1] = grid # copy input grid into the center of padded one
28 | grid_padded[0, 1:-1] = grid[-1] # top row of padded grid
29 | grid_padded[-1, 1:-1] = grid[0] # bottom
30 | grid_padded[1:-1, 0] = grid[:, -1]
31 | grid_padded[1:-1, -1] = grid[:, 0]
32 | grid_padded[0, 0] = grid[-1, -1]
33 | grid_padded[-1, -1] = grid[0, 0]
34 | grid_padded[0, -1] = grid[-1, 0]
35 | grid_padded[-1, 0] = grid[0, -1]
36 | for i in prange(m):
37 | for j in range(n):
38 | v_self = grid[i, j]
39 | neighbor_population = grid_padded[i : i + 3, j : j + 3].sum() - v_self
40 | grid_out[i, j] = rules[v_self, neighbor_population]
41 | return grid_out
42 |
43 |
44 | def asnumpy(x):
45 | return np.asnumpy(x)
46 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/tests/test_parser.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from game_of_life_demo import int_tuple
3 | from game_of_life_demo.impl.arg_parser import parse_args
4 |
5 |
6 | @pytest.mark.parametrize("variant_str", ["numpy", "numba", "dpnp", "numba-dpex"])
7 | def test_variant(variant_str):
8 | args = parse_args(["--variant", variant_str])
9 | assert args.variant == variant_str
10 |
11 |
12 | @pytest.mark.parametrize("threading_str", ["omp", "tbb", "workqueue"])
13 | def test_threading(threading_str):
14 | args = parse_args(["--threading-layer", threading_str])
15 | assert args.threading_layer == threading_str
16 |
17 |
18 | @pytest.mark.parametrize("frames_count", [0, 100])
19 | def test_frames_count(frames_count):
20 |     args = parse_args(["--frames-count", str(frames_count)])  # argv entries must be strings
21 | assert args.frames_count == frames_count
22 |
23 |
24 | @pytest.mark.parametrize("task_size", ["3, 3", "100, 100"])
25 | def test_task_size(task_size):
26 | args = parse_args(["--task-size", task_size])
27 | assert args.task_size == int_tuple(task_size)
28 |
29 |
30 | def test_parallel_true():
31 | args = parse_args(["--parallel"])
32 | assert args.parallel
33 |
34 |
35 | def test_parallel_false():
36 | args = parse_args(["--no-parallel"])
37 | assert not args.parallel
38 |
39 |
40 | def test_gui_true():
41 | args = parse_args(["--gui"])
42 | assert args.gui
43 |
44 |
45 | def test_gui_false():
46 | args = parse_args(["--no-gui"])
47 | assert not args.gui
48 |
49 |
50 | def test_stats_true():
51 | args = parse_args(["--stats"])
52 | assert args.stats
53 |
54 |
55 | def test_stats_false():
56 | args = parse_args(["--no-stats"])
57 | assert not args.stats
58 |
--------------------------------------------------------------------------------
/docs/sources/jupyter_notebook.rst:
--------------------------------------------------------------------------------
1 | .. _jupyter_notebook:
2 | .. include:: ./ext_links.txt
3 |
4 | Tutorials
5 | *********
6 |
7 | Getting-started Jupyter* Notebooks illustrating the usage of **Data Parallel Extensions for Python** are located
8 | in the `Data Parallel Extensions for Python GitHub repository`_.
9 |
10 | To run the tutorials, type the following at the command prompt:
11 |
12 | .. code-block:: console
13 |
14 | jupyter notebook
15 |
16 | This will print some information about the notebook server in your terminal, including the URL of
17 | the web application (by default, ``http://localhost:8888``):
18 |
19 |
20 | .. code-block:: console
21 |
22 | $ jupyter notebook
23 | [I 08:58:24.417 NotebookApp] Serving notebooks from local directory: /Users/catherine
24 | [I 08:58:24.417 NotebookApp] 0 active kernels
25 | [I 08:58:24.417 NotebookApp] The Jupyter Notebook is running at: http://localhost:8888/
26 | [I 08:58:24.417 NotebookApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
27 | It will then open your default web browser to this URL.
28 |
29 | When the notebook opens in your browser, you will see the Notebook Dashboard,
30 | which will show a list of the notebooks, files, and subdirectories
31 | in the directory where the notebook server was started.
32 | Navigate to the notebook of your interest and open it in the dashboard.
33 |
34 | For more information, please refer to the `Jupyter documentation <https://docs.jupyter.org/>`_.
35 |
36 | .. toctree::
37 | :maxdepth: 1
38 |
39 | Getting Started
40 | Controlling `dpnp` fallback to `numpy`
41 |
--------------------------------------------------------------------------------
/demos/mandelbrot/conda-recipe/meta.yaml:
--------------------------------------------------------------------------------
1 | package:
2 | name: mandelbrot-demo
3 | version: {{ GIT_DESCRIBE_TAG }}
4 |
5 | source:
6 | path: ../../..
7 |
8 | build:
9 | entry_points:
10 | - mandelbrot = mandelbrot_demo.mandelbrot:main
11 | number: {{ GIT_DESCRIBE_NUMBER }}
12 | script: {{ PYTHON }} -m pip install ./demos/mandelbrot --no-deps --ignore-installed --no-cache-dir -vvv
13 |
14 | requirements:
15 | host:
16 | - python
17 | run:
18 | - python
19 | - pygame >=2.1
20 | - numpy >=1.21
21 | - numba >=0.55
22 | - dpnp
23 | - numba-dpex
24 |
25 | test:
26 | requires:
27 | - pytest
28 | commands:
29 | - mandelbrot --help
30 | - python -m mandelbrot_demo --variant numba --max-frames 1
31 | - mandelbrot --gui --max-frames 1
32 | - mandelbrot --no-gui --max-frames 1
33 | imports:
34 | - mandelbrot_demo
35 | - mandelbrot_demo.impl
36 | - mandelbrot_demo.impl.impl_versioner
37 | - mandelbrot_demo.impl.visualization
38 | - mandelbrot_demo.impl.arg_parser
39 |
40 | about:
41 | home: https://intelpython.github.io/DPEP/main/
42 | license: BSD-2-Clause
43 | license_family: BSD
44 | license_file:
45 | - LICENSE
46 |   summary: Mandelbrot Set demo using numpy, numba, dpnp, numba-dpex
47 | description: |
48 | The Mandelbrot set is the set of complex numbers c for which the function f(z)=z*z + c
49 | does not diverge to infinity when iterated from z=0, i.e., for which the sequence
50 | f(0), f(f(0)), etc., remains bounded in absolute value. Images of the Mandelbrot set exhibit
51 | an elaborate and infinitely complicated boundary that reveals progressively ever-finer
52 |     recursive detail at increasing magnifications.
53 |
54 | extra:
55 | recipe-maintainers:
56 | - samaid
57 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/impl/arg_parser.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from game_of_life_demo import int_tuple
4 |
5 |
6 | def parse_args(argv=None):
7 | parser = argparse.ArgumentParser(description="Conway's Game of Life")
8 | parser.add_argument(
9 | "--variant",
10 | help="Implementation variant",
11 | type=str.casefold,
12 | choices=["numpy", "numba", "dpnp", "numba-dpex"],
13 | default="numpy",
14 | )
15 | parser.add_argument(
16 | "--threading-layer",
17 | help="Threading layer",
18 | choices=["omp", "tbb", "workqueue"],
19 | default="omp",
20 | )
21 | parser.add_argument(
22 | "--parallel",
23 | help="Keyword argument parallel= for @njit. Used along with --variant numba. Default --no-parallel",
24 | action="store_true",
25 | default=False,
26 | )
27 | parser.add_argument(
28 | "--frames-count",
29 |         help="Stop the game after the specified number of frames (default 0 - no stop frame)",
30 | type=int,
31 | default=0,
32 | )
33 | parser.add_argument(
34 | "--gui",
35 |         help="Render the evolution of the grid or do the computation only and "
36 |         "print statistics at the end. Default --no-gui",
37 | action="store_true",
38 | default=False,
39 | )
40 | parser.add_argument(
41 | "--stats",
42 |         help="Whether to display statistics in the GUI while running. Default --no-stats",
43 | action="store_true",
44 | default=False,
45 | )
46 | w = 960
47 | h = 540
48 | parser.add_argument(
49 | "--task-size",
50 | help=f"Size of the grid. E.g. 1200,800. Default {w},{h}",
51 | type=int_tuple,
52 | default=int_tuple(f"{w},{h}"),
53 | )
54 |
55 | args, _ = parser.parse_known_args(argv)
56 | return args
57 |
--------------------------------------------------------------------------------
/examples/01-hello_dpnp.py:
--------------------------------------------------------------------------------
1 | # *****************************************************************************
2 | # Copyright (c) 2022, Intel Corporation All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # Redistributions of source code must retain the above copyright notice,
8 | # this list of conditions and the following disclaimer.
9 | #
10 | # Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
18 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 | # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 | # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 | # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 | # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 | # *****************************************************************************
26 |
27 | import dpnp as np
28 |
29 | x = np.asarray([1, 2, 3])
30 | print("Array x allocated on the device:", x.device)
31 |
32 | y = np.sum(x)
33 |
34 | print("Result y is located on the device:", y.device) # The same device as x
35 | print("Shape of y is:", y.shape) # 0-dimensional array
36 | print("y=", y) # Expect 6
37 |
--------------------------------------------------------------------------------
/examples/04-dpctl_device_query.py:
--------------------------------------------------------------------------------
1 | # *****************************************************************************
2 | # Copyright (c) 2022, Intel Corporation All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # Redistributions of source code must retain the above copyright notice,
8 | # this list of conditions and the following disclaimer.
9 | #
10 | # Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
18 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 | # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 | # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 | # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 | # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 | # *****************************************************************************
26 |
27 | import dpctl
28 |
29 | dpctl.lsplatform() # Print platform information
30 | print(
31 | "GPU devices:", dpctl.get_devices(device_type="gpu")
32 | ) # Get the list of all GPU devices
33 | print(
34 | "Number of GPU devices", dpctl.get_num_devices(device_type="gpu")
35 | ) # Get the number of GPU devices
36 | print("Has CPU devices?", dpctl.has_cpu_devices()) # Check if there are CPU devices
37 |
--------------------------------------------------------------------------------
/.github/workflows/python_style_checks.yml:
--------------------------------------------------------------------------------
1 | # This is a workflow to check Python code style with the black formatter, isort, and flake8
2 |
3 | name: coding style
4 |
5 | # Declare default permissions as read only.
6 | permissions: read-all
7 |
8 | # Controls when the action will run. Triggers the workflow on pull request
9 | # events, and on push events for the main branch only
10 | on:
11 | pull_request:
12 | push:
13 | branches: [main]
14 |
15 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel
16 | jobs:
17 | # The isort job sorts all imports in .py, .pyx, .pxd files
18 | isort:
19 | runs-on: ubuntu-latest
20 | steps:
21 | - uses: actions/checkout@v3
22 | - uses: actions/setup-python@v4
23 | with:
24 | python-version: '3.10'
25 | - uses: jamescurtin/isort-action@master
26 | with:
27 | configuration: "--check-only --profile black --filter-files -sl"
28 |
29 | black:
30 | # The type of runner that the job will run on
31 | runs-on: ubuntu-latest
32 |
33 | # Steps represent a sequence of tasks that will be executed as part of the job
34 | steps:
35 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
36 | - uses: actions/checkout@v3
37 | # Set up a Python environment for use in actions
38 | - uses: actions/setup-python@v4
39 | with:
40 | python-version: '3.10'
41 |
42 | # Run black code formatter
43 | - uses: psf/black@stable
44 | with:
45 | src: "."
46 | options: "--check"
47 | version: "22.12.0"
48 |
49 | flake8:
50 | runs-on: ubuntu-latest
51 |
52 | steps:
53 | - uses: actions/checkout@v3
54 | - name: Set up Python
55 | uses: actions/setup-python@v4
56 | with:
57 | python-version: '3.10'
58 | - name: Install dependencies
59 | run: |
60 | python -m pip install --upgrade pip
61 | pip install flake8
62 | - name: Lint with flake8
63 | uses: py-actions/flake8@v2
64 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/tests/test_grid.py:
--------------------------------------------------------------------------------
1 | from game_of_life_demo.impl.impl_versioner import RUN_VERSION
2 | from game_of_life_demo.impl.impl_versioner import asnumpy
3 |
4 | if RUN_VERSION in ["dpnp", "numba-dpex"]:
5 | import dpnp as np
6 | import numpy.testing as testing
7 | else:
8 | import numpy as np
9 | import numpy.testing as testing
10 |
11 | import pytest
12 | from game_of_life_demo.game_of_life import Grid
13 |
14 | grids = [
15 | (
16 | # Input
17 | [[0, 1, 0], [1, 0, 0], [0, 1, 0]],
18 | # Expected
19 | [[1, 1, 1], [1, 1, 1], [1, 1, 1]],
20 | ),
21 | (
22 | # Input
23 | [[0, 1, 0], [0, 0, 0], [0, 1, 0]],
24 | # Expected
25 | [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
26 | ),
27 | (
28 | # Input
29 | [[0, 1, 0], [1, 1, 1], [0, 1, 0]],
30 | # Expected
31 | [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
32 | ),
33 | (
34 | # Input
35 | [[0, 1, 0], [0, 1, 0], [0, 1, 0]],
36 | # Expected
37 | [[1, 1, 1], [1, 1, 1], [1, 1, 1]],
38 | ),
39 | (
40 | # Input
41 | [[1, 1, 1], [0, 1, 0], [1, 1, 1]],
42 | # Expected
43 | [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
44 | ),
45 | (
46 | # Input
47 | [[0, 0, 1], [0, 1, 0], [1, 0, 0]],
48 | # Expected
49 | [[1, 1, 1], [1, 1, 1], [1, 1, 1]],
50 | ),
51 | (
52 | # Input
53 | [[1, 0, 1], [0, 1, 0], [1, 0, 0]],
54 | # Expected
55 | [[1, 0, 1], [0, 1, 0], [1, 0, 0]],
56 | ),
57 | ]
58 |
59 |
60 | @pytest.mark.parametrize("input_grid, expected_grid", grids)
61 | def test_grid(mocker, input_grid, expected_grid):
62 | def mock_init_grid(w, h, p):
63 | return np.array(input_grid).reshape(h, w)
64 |
65 | mocker.patch("game_of_life_demo.game_of_life.init_grid", mock_init_grid)
66 |
67 | grid = Grid(3, 3, 1.0)
68 | grid.update()
69 |
70 | testing.assert_array_equal(asnumpy(grid.grid), asnumpy(expected_grid))
71 |
--------------------------------------------------------------------------------
/examples/02-dpnp_device.py:
--------------------------------------------------------------------------------
1 | # *****************************************************************************
2 | # Copyright (c) 2022, Intel Corporation All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # Redistributions of source code must retain the above copyright notice,
8 | # this list of conditions and the following disclaimer.
9 | #
10 | # Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
18 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 | # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 | # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 | # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 | # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 | # *****************************************************************************
26 |
27 | import dpnp as np
28 |
29 | x = np.empty(3)
30 | try:
31 | x = np.asarray([1, 2, 3], device="gpu")
32 | except Exception:
33 | print("GPU device is not available")
34 |
35 | print("Array x allocated on the device:", x.device)
36 |
37 | y = np.sum(x)
38 |
39 | print("Result y is located on the device:", y.device) # The same device as x
40 | print("Shape of y is:", y.shape) # 0-dimensional array
41 | print("y=", y) # Expect 6
42 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/impl/visualization.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from mandelbrot_demo.impl.arg_parser import parse_args
4 | from mandelbrot_demo.impl.impl_versioner import asnumpy
5 |
6 | GUI = parse_args().gui
7 |
8 | if GUI:
9 | import pygame as pg
10 |
11 | DISPLAY_RES = DISPLAY_W, DISPLAY_H = 1024, 800
12 | FPS = 60
13 |
14 | frames = parse_args().max_frames
15 | if frames > 0:
16 | N_FRAMES = frames
17 | else:
18 | N_FRAMES = 1000000
19 |
20 | OFFSET_X = 1.4 * DISPLAY_W // 2
21 | OFFSET_Y = DISPLAY_H // 2
22 | OFFSET = (OFFSET_X, OFFSET_Y)
23 | ZOOM = 2.5 / DISPLAY_H
24 |
25 | if GUI:
26 |
27 | def set_display():
28 | os.environ["SDL_VIDEO_CENTERED"] = "1"
29 |
30 | pg.init()
31 | surface = pg.display.set_mode(DISPLAY_RES, pg.SCALED)
32 | clock = pg.time.Clock()
33 |
34 | return surface, clock
35 |
36 | def pg_init():
37 | surface, clock = set_display()
38 | return surface, clock
39 |
40 | def pg_draw(surface, fractal):
41 | surface.fill(pg.Color("black"))
42 | pg.surfarray.blit_array(surface, asnumpy(fractal.values))
43 |
44 | def pg_test_quit():
45 | do_game = True
46 | for event in pg.event.get():
47 | if event.type == pg.QUIT:
48 | do_game = False
49 | return do_game
50 |
51 | def pg_update_fps(clk, frames):
52 | pg.display.set_caption(f"FPS: {clk.get_fps():2.1f}, FRAMES:{frames}")
53 |
54 | def pg_prep_next_frame(frames, clk):
55 | pg.display.flip()
56 | clk.tick(FPS)
57 | frames += 1
58 | return frames, frames < N_FRAMES
59 |
60 | def pg_finalize():
61 | pg.quit()
62 |
63 | else:
64 |
65 | def pg_init():
66 | return None, None
67 |
68 | def pg_draw(surface, fractal):
69 | pass
70 |
71 | def pg_test_quit():
72 | return True
73 |
74 | def pg_update_fps(clk, frames):
75 | pass
76 |
77 | def pg_prep_next_frame(frames, clk):
78 | frames += 1
79 | return frames, frames < N_FRAMES
80 |
81 | def pg_finalize():
82 | pass
83 |
--------------------------------------------------------------------------------
/examples/03-dpnp2numba-dpex.py:
--------------------------------------------------------------------------------
1 | # *****************************************************************************
2 | # Copyright (c) 2022, Intel Corporation All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # Redistributions of source code must retain the above copyright notice,
8 | # this list of conditions and the following disclaimer.
9 | #
10 | # Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
18 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 | # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 | # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 | # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 | # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 | # *****************************************************************************
26 |
27 | import dpnp as np
28 | from numba_dpex import dpjit as njit
29 |
30 |
31 | @njit()
32 | def sum_it(x): # Device queue is inferred from x. The kernel is submitted to that queue
33 | return np.sum(x)
34 |
35 |
36 | x = np.empty(3)
37 | try:
38 | x = np.asarray([1, 2, 3], device="gpu")
39 | except Exception:
40 | print("GPU device is not available")
41 |
42 | print("Array x allocated on the device:", x.device)
43 |
44 | y = sum_it(x)
45 |
46 | print("Result y is located on the device:", y.device) # The same device as x
47 | print("Shape of y is:", y.shape) # 0-dimensional array
48 | print("y=", y) # Expect 6
49 |
--------------------------------------------------------------------------------
/docs/sources/prerequisites_and_installation.rst:
--------------------------------------------------------------------------------
1 | .. _prerequisites_and_installation:
2 | .. include:: ./ext_links.txt
3 |
4 | .. |copy| unicode:: U+000A9
5 |
6 | .. |trade| unicode:: U+2122
7 |
8 | Prerequisites and Installation
9 | ==============================
10 |
11 | 1. Device Drivers
12 | ******************
13 |
14 | To start programming data parallel devices beyond the CPU, you will need appropriate hardware.
15 | For example, Data Parallel Extensions for Python work fine on Intel |copy| laptops with integrated graphics.
16 | In the majority of cases, your Windows*-based laptop already has all the necessary device drivers installed. But if you want the most
17 | up-to-date driver, you can always
18 | `update it to the latest one `_.
19 | Follow device driver installation instructions
20 | to complete this step.
21 |
22 | All other necessary components for programming data parallel devices will be installed with
23 | Data Parallel Extensions for Python.
24 |
25 | 2. Python Interpreter
26 | **********************
27 |
28 | You will need Python 3.8, 3.9, or 3.10 installed on your system. If you do not have one yet, the easiest way to get
29 | it is to install `Intel Distribution for Python*`_.
30 | It installs all essential Python numerical and machine
31 | learning packages optimized for the Intel hardware, including Data Parallel Extensions for Python*.
32 | If you have a Python installation from another vendor, that is fine too. All you need to do is install Data Parallel
33 | Extensions for Python manually as shown in the next section.
34 |
35 | 3. Data Parallel Extensions for Python
36 | ***************************************
37 |
38 | Skip this step if you already installed Intel |copy| Distribution for Python.
39 |
40 | The easiest way to install Data Parallel Extensions for Python is to install numba-dpex:
41 |
42 | * Conda: ``conda install numba-dpex``
43 |
44 | * Pip: ``pip install numba-dpex``
45 |
46 | These commands install ``numba-dpex`` along with its dependencies, including ``dpnp``, ``dpctl``,
47 | and required compiler runtimes.
48 |
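Once the installation completes, you can run a quick sanity check. The following is a
minimal sketch based on the examples in this repository; the device it reports depends
on your hardware and drivers:

.. code-block:: python

    import dpctl
    import dpnp

    dpctl.lsplatform()  # Print information about available platforms

    x = dpnp.asarray([1, 2, 3])  # Allocated on the default device
    print("Array x allocated on the device:", x.device)
    print("y =", dpnp.sum(x))  # Expect 6
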
49 | .. WARNING::
50 |    Before installing with conda or pip, it is strongly advised to update ``conda`` and ``pip`` to their latest versions.
51 |
--------------------------------------------------------------------------------
/docs/sources/useful_links.rst:
--------------------------------------------------------------------------------
1 | .. _useful_links:
2 | .. include:: ./ext_links.txt
3 |
4 | Useful links
5 | ============
6 |
7 | .. list-table:: **Companion documentation**
8 | :widths: 70 200
9 | :header-rows: 1
10 |
11 | * - Document
12 | - Description
13 | * - `Data Parallel Extension for Numpy*`_
14 | - Documentation for programming NumPy-like codes on data-parallel devices
15 | * - `Data Parallel Extension for Numba*`_
16 | - Documentation for programming Numba codes on data-parallel devices as you program Numba on CPU
17 | * - `Data Parallel Control`_
18 |     - Documentation on how to manage data and devices, how to interchange data between different tensor implementations,
19 | and how to write data parallel extensions
20 | * - `Intel VTune Profiler`_
21 |     - Performance profiler supporting analysis of bottlenecks from the function level down to low-level instructions.
22 | Supports Python and Numba
23 | * - `Intel Advisor`_
24 | - Analyzes native and Python codes and provides the advice for better composition of heterogeneous algorithms
25 | * - `Python* Array API Standard`_
26 | - Standard for writing portable Numpy-like codes targeting different hardware vendors and frameworks
27 | operating with tensor data
28 | * - `SYCL*`_
29 | - Standard for writing C++-like codes for heterogeneous computing
30 | * - `DPC++`_
31 | - Free e-book on how to program data-parallel devices using Data Parallel C++
32 |   * - `OpenCL*`_
33 |     - OpenCL* standard for heterogeneous programming
34 | * - `IEEE 754-2019 Standard for Floating-Point Arithmetic`_
35 | - Standard for floating-point arithmetic, essential for writing robust numerical codes
36 | * - `David Goldberg, What every computer scientist should know about floating-point arithmetic`_
37 | - Scientific paper. Important for understanding how to write robust numerical code
38 | * - `Numpy*`_
39 | - Documentation for Numpy - foundational CPU library for array programming. Used in conjunction with
40 | `Data Parallel Extension for Numpy*`_.
41 | * - `Numba*`_
42 | - Documentation for Numba - Just-In-Time compiler for Numpy-like codes. Used in conjunction with
43 | `Data Parallel Extension for Numba*`_.
44 |
45 |
46 | To-Do
47 | =====
48 | .. todolist::
49 |
--------------------------------------------------------------------------------
/docs/sources/parallelism.rst:
--------------------------------------------------------------------------------
1 | .. _parallelism:
2 | .. include:: ./ext_links.txt
3 |
4 | Parallelism in Modern Data-Parallel Architectures
5 | =================================================
6 |
7 | Python is loved for its productivity and interactivity. But when it comes to
8 | computationally heavy code, performance cannot be compromised. Intel and Python numerical
9 | computing communities, such as `NumFOCUS `_, dedicate attention to
10 | optimizing core numerical and data science packages for leveraging parallelism available in modern CPUs:
11 |
12 | * **Multiple computational cores:** Several computational cores allow data to be processed concurrently.
13 |   Compared to a single-core CPU, *N* cores can process *N* times more data in a fixed time, or
14 |   reduce the computation time by a factor of *N* for a fixed amount of data.
15 |
16 | .. image:: ./_images/dpep-cores.png
17 | :width: 600px
18 | :align: center
19 | :alt: Multiple CPU Cores
20 |
21 | * **SIMD parallelism:** SIMD (Single Instruction Multiple Data) is a special type of instructions
22 | that perform operations on vectors of data elements at the same time. The size of vectors is called the SIMD width.
23 | If a SIMD width is *K* then a SIMD instruction can process *K* data elements in parallel.
24 |
25 | In the following diagram, the SIMD width is 2, which means that a single instruction processes two elements simultaneously.
26 |   Compared to regular instructions that process one element at a time, a 2-wide SIMD instruction processes
27 |   twice as much data in a fixed time or, equivalently, a fixed amount of data twice as fast.
28 |
29 | .. image:: ./_images/dpep-simd.png
30 | :width: 150px
31 | :align: center
32 | :alt: SIMD
33 |
34 | * **Instruction-Level Parallelism:** Modern CISC architectures, such as x86, allow data-independent
35 |   instructions to execute in parallel. In the following example, consider computing :math:`a * b + (c - d)`.
36 |   The operations :math:`*` and :math:`-` can be executed in parallel, while the last instruction,
37 |   :math:`+`, depends on the availability of :math:`a * b` and :math:`c - d` and therefore cannot be executed
38 |   in parallel with :math:`*` and :math:`-`.
39 |
40 | .. image:: ./_images/dpep-ilp.png
41 | :width: 150px
42 | :align: center
43 |     :alt: Instruction-Level Parallelism
44 |
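As an illustration, the following minimal sketch (assuming Numba* is installed) uses
``prange`` to spread loop iterations across multiple cores, while ``fastmath=True``
permits the compiler to vectorize the reduction with SIMD instructions. The achievable
speed-up depends on your CPU:

.. code-block:: python

    import numpy as np
    from numba import njit, prange

    @njit(parallel=True, fastmath=True)
    def dot(a, b):
        # prange distributes iterations across CPU cores;
        # fastmath allows SIMD vectorization of the reduction
        acc = 0.0
        for i in prange(a.shape[0]):
            acc += a[i] * b[i]
        return acc

    x = np.random.rand(1_000_000)
    y = np.random.rand(1_000_000)
    print(dot(x, y))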
--------------------------------------------------------------------------------
/demos/mandelbrot/README.md:
--------------------------------------------------------------------------------
1 | # Mandelbrot Set
2 |
3 | [](https://github.com/psf/black)
4 | [](https://github.com/pre-commit/pre-commit)
5 | [](https://app.gitter.im/#/room/#Data-Parallel-Python_community:gitter.im)
6 |
7 | Mandelbrot set demo implemented using NumPy, Numba, DPNP, and Numba-DPEx.
8 |
9 | ## What it is
10 |
11 | The Mandelbrot set is the set of complex numbers $c$ for which the function
12 | $f_{c}(z)=z^{2}+c$ does not diverge to infinity when iterated from $z=0$, i.e.,
13 | for which the sequence $f_{c}(0)$, $f_{c}(f_{c}(0))$, etc., remains bounded in absolute value.
14 |
15 | Images of the Mandelbrot set exhibit an elaborate and infinitely complicated boundary
16 | that reveals progressively ever-finer recursive detail at increasing magnifications.
17 |
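In code, each pixel is colored by how quickly this iteration escapes. The escape-time
loop below is essentially the one shared by all implementation variants in this demo
(the real `MAX_ITER` is defined in `mandelbrot_demo/impl/settings.py`; the value here
is only illustrative):

```python
MAX_ITER = 30  # illustrative; the demo reads it from impl/settings.py

def mandel(x, y):
    c = complex(x, y)
    z = 0.0j
    for i in range(MAX_ITER):
        z = z * z + c
        if (z.real * z.real + z.imag * z.imag) > 4.0:
            return i  # escaped after i iterations
    return MAX_ITER  # treated as inside the set
```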
18 | For further details, please visit the [Wikipedia article](https://en.wikipedia.org/wiki/Mandelbrot_set).
19 |
20 | ## How to run demo
21 |
22 | ### Running from installed conda package
23 |
24 | Install the demo as follows:
25 | `conda install -c pycoddiy/label/dev mandelbrot-demo`
26 |
27 | From the command line, type:
28 | `mandelbrot [command line options]`
29 |
30 | * `--variant [numba, numpy, dpnp, numba-dpex]` (default `numpy`) - implementation variant
31 | * `--max-frames` - stop rendering after a specified number of frames. Default 0, meaning the demo
32 | does not stop until user action, e.g. closing the window
33 | * `--gui` (default) or `--no-gui` - render the fractal animation or do the computation only and
34 | print performance statistics at the end.
35 | * `--task-size` - window size WIDTH, HEIGHT. Example: 1024,800
36 |
37 | ### Running from GitHub sources
38 | Clone repository to a local project directory:
39 | ```
40 | git clone https://github.com/samaid/Mandelbrot.git
41 | cd ./Mandelbrot
42 | ```
43 |
44 | From the command line, type:
45 | `python mandelbrot_demo.py [command line options]`
46 |
47 | * `--variant [numba, numpy, dpnp, numba-dpex]` (default `numpy`) - implementation variant
48 | * `--max-frames` - stop rendering after a specified number of frames. Default 0, meaning the demo
49 | does not stop until user action, e.g. closing the window
50 | * `--gui` (default) or `--no-gui` - render the fractal animation or do the computation only and
51 | print performance statistics at the end.
52 | * `--task-size` - window size WIDTH, HEIGHT. Example: 1024,800
53 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "id": "b9309c4f-415d-4e7f-a169-6f9c7e28e018",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "import dpctl"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 3,
16 | "id": "0a55e346-69e7-4f8e-b395-8d632fcaf558",
17 | "metadata": {},
18 | "outputs": [
19 | {
20 | "name": "stdout",
21 | "output_type": "stream",
22 | "text": [
23 | "Intel(R) OpenCL HD Graphics OpenCL 3.0 \n",
24 | "Intel(R) Level-Zero 1.3\n"
25 | ]
26 | }
27 | ],
28 | "source": [
29 | "dpctl.lsplatform()"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 8,
35 | "id": "db0abcf7-2308-4869-9bc7-0f70566785ab",
36 | "metadata": {},
37 | "outputs": [
38 | {
39 | "name": "stderr",
40 | "output_type": "stream",
41 | "text": [
42 | "usage: ipykernel_launcher.py [-h] [--variant {numpy,numba}] [--threading-layer {omp,tbb,workqueue}] [--parallel | --no-parallel] [--frames-count FRAMES_COUNT] [--gui | --no-gui] [--stats | --no-stats] [--task-size TASK_SIZE]\n",
43 | "ipykernel_launcher.py: error: unrecognized arguments: -f C:\\Users\\smaidano\\AppData\\Roaming\\jupyter\\runtime\\kernel-6523de26-e036-47b6-9619-07b0e0d470b1.json\n"
44 | ]
45 | },
46 | {
47 | "ename": "SystemExit",
48 | "evalue": "2",
49 | "output_type": "error",
50 | "traceback": [
51 | "An exception has occurred, use %tb to see the full traceback.\n",
52 | "\u001b[1;31mSystemExit\u001b[0m\u001b[1;31m:\u001b[0m 2\n"
53 | ]
54 | }
55 | ],
56 | "source": [
57 | "import game_of_life_demo"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "id": "81eb3455-dae4-42c1-a1fa-66af205e3021",
64 | "metadata": {},
65 | "outputs": [],
66 | "source": []
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "id": "de662eb4-896b-447e-9fa1-9c7cc39d7178",
72 | "metadata": {},
73 | "outputs": [],
74 | "source": []
75 | }
76 | ],
77 | "metadata": {
78 | "kernelspec": {
79 | "display_name": "Python [conda env:ndpex-examples] *",
80 | "language": "python",
81 | "name": "conda-env-ndpex-examples-py"
82 | },
83 | "language_info": {
84 | "codemirror_mode": {
85 | "name": "ipython",
86 | "version": 3
87 | },
88 | "file_extension": ".py",
89 | "mimetype": "text/x-python",
90 | "name": "python",
91 | "nbconvert_exporter": "python",
92 | "pygments_lexer": "ipython3",
93 | "version": "3.9.15"
94 | }
95 | },
96 | "nbformat": 4,
97 | "nbformat_minor": 5
98 | }
99 |
--------------------------------------------------------------------------------
/demos/mandelbrot/mandelbrot_demo/mandelbrot.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from mandelbrot_demo.impl.impl_versioner import init_values
4 | from mandelbrot_demo.impl.impl_versioner import mandelbrot
5 | from mandelbrot_demo.impl.visualization import DISPLAY_H
6 | from mandelbrot_demo.impl.visualization import DISPLAY_W
7 | from mandelbrot_demo.impl.visualization import OFFSET
8 | from mandelbrot_demo.impl.visualization import ZOOM
9 | from mandelbrot_demo.impl.visualization import pg_draw
10 | from mandelbrot_demo.impl.visualization import pg_finalize
11 | from mandelbrot_demo.impl.visualization import pg_init
12 | from mandelbrot_demo.impl.visualization import pg_prep_next_frame
13 | from mandelbrot_demo.impl.visualization import pg_test_quit
14 | from mandelbrot_demo.impl.visualization import pg_update_fps
15 |
16 |
17 | class Fractal:
18 | def __init__(self, w, h, zoom, offset):
19 | self.w = w
20 | self.h = h
21 | self.values = init_values(w, h)
22 | self.zoom = zoom
23 | self.offset = offset
24 | self.need_recalculate = True
25 |
26 | def set_zoom(self, zoom):
27 | old_zoom = self.zoom
28 | if self.zoom != zoom:
29 | self.need_recalculate = True
30 | self.zoom = zoom
31 | return old_zoom
32 |
33 | def set_offset(self, offset):
34 | old_offset = self.offset
35 | if self.offset != offset:
36 | self.need_recalculate = True
37 | self.offset = offset
38 | return old_offset
39 |
40 | def calculate(self):
41 | self.values = mandelbrot(self.w, self.h, self.zoom, self.offset, self.values)
42 | self.need_recalculate = False
43 |
44 | def update(self):
45 | if self.need_recalculate:
46 | self.calculate()
47 |
48 |
49 | def main():
50 | ds, clk = pg_init()
51 |
52 | zoom = ZOOM
53 | scale = 1.01
54 | incr = -5.0
55 | offset_x = OFFSET[0]
56 | offset_y = OFFSET[1]
57 |
58 | fractal = Fractal(DISPLAY_W, DISPLAY_H, zoom, (offset_x, offset_y))
59 |
60 | frames = 0
61 | do_game = True
62 | t1 = time.time()
63 | while do_game:
64 | # Test for windows close event
65 | do_game = pg_test_quit()
66 |
67 | # Draw objects
68 | pg_draw(ds, fractal)
69 |
70 | # Perform updates
71 | if frames % 300 == 0:
72 | scale = 1.0 / scale
73 | incr = -incr
74 | zoom *= scale
75 | offset_x += incr
76 | offset_y += incr
77 |
78 | fractal.set_zoom(zoom)
79 | fractal.set_offset((offset_x, offset_y))
80 | fractal.update()
81 | pg_update_fps(clk, frames)
82 |
83 | # Prepare for next frame
84 | frames, more_frames_flag = pg_prep_next_frame(frames, clk)
85 | do_game = do_game and more_frames_flag
86 | t2 = time.time()
87 | pg_finalize()
88 | print("Avg.fps:", frames / (t2 - t1))
89 |
90 |
91 | if __name__ == "__main__":
92 | main()
93 |
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/static/copybutton.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function() {
2 | /* Add a [>>>] button on the top-right corner of code samples to hide
3 | * the >>> and ... prompts and the output and thus make the code
4 | * copyable. */
5 | var div = $('.highlight-python .highlight,' +
6 | '.highlight-python3 .highlight,' +
7 | '.highlight-default .highlight')
8 | var pre = div.find('pre');
9 |
10 | // get the styles from the current theme
11 | pre.parent().parent().css('position', 'relative');
12 | var hide_text = 'Hide the prompts and output';
13 | var show_text = 'Show the prompts and output';
14 | var border_width = pre.css('border-top-width');
15 | var border_style = pre.css('border-top-style');
16 | var border_color = pre.css('border-top-color');
17 | var button_styles = {
18 | 'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0',
19 | 'border-color': border_color, 'border-style': border_style,
20 | 'border-width': border_width, 'color': border_color, 'text-size': '75%',
21 | 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em',
22 | 'border-radius': '0 3px 0 0'
23 | }
24 |
25 | // create and add the button to all the code blocks that contain >>>
26 | div.each(function(index) {
27 | var jthis = $(this);
28 | if (jthis.find('.gp').length > 0) {
29 |             var button = $('<span class="copybutton">&gt;&gt;&gt;</span>');
30 | button.css(button_styles)
31 | button.attr('title', hide_text);
32 | button.data('hidden', 'false');
33 | jthis.prepend(button);
34 | }
35 | // tracebacks (.gt) contain bare text elements that need to be
36 | // wrapped in a span to work with .nextUntil() (see later)
37 | jthis.find('pre:has(.gt)').contents().filter(function() {
38 | return ((this.nodeType == 3) && (this.data.trim().length > 0));
39 |     }).wrap('<span>');
40 | });
41 |
42 | // define the behavior of the button when it's clicked
43 | $('.copybutton').click(function(e){
44 | e.preventDefault();
45 | var button = $(this);
46 | if (button.data('hidden') === 'false') {
47 | // hide the code output
48 | button.parent().find('.go, .gp, .gt').hide();
49 | button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
50 | button.css('text-decoration', 'line-through');
51 | button.attr('title', show_text);
52 | button.data('hidden', 'true');
53 | } else {
54 | // show the code output
55 | button.parent().find('.go, .gp, .gt').show();
56 | button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
57 | button.css('text-decoration', 'none');
58 | button.attr('title', hide_text);
59 | button.data('hidden', 'false');
60 | }
61 | });
62 | });
63 |
--------------------------------------------------------------------------------
/docs/sources/benchmarks.rst:
--------------------------------------------------------------------------------
1 | .. _benchmarks:
2 | .. include:: ./ext_links.txt
3 |
4 | Benchmarks
5 | ==========
6 |
7 | **Data Parallel Extensions for Python** provide a set of
8 | `benchmarks `_ illustrating different aspects of
9 | implementing performant code with Data Parallel Extensions for Python.
10 | Each benchmark represents a real-life numerical problem or an important part (kernel) of a real-life application.
11 | Each application/kernel is implemented in several of the following variants (not necessarily all of them):
12 |
13 | - Pure Python: Typically the slowest and used just as a reference implementation
14 |
15 | - ``numpy``: Same application/kernel implemented using NumPy library
16 |
17 | - ``dpnp``: Modified numpy implementation to run on a specific device. You can use numpy as a baseline while evaluating the dpnp implementation and its performance
18 |
19 | - ``numba @njit`` array-style: Same application/kernel implemented using NumPy and compiled with Numba. You can use numpy as a baseline when evaluating the numba @njit array-style implementation and its performance
20 |
21 | - ``numba @njit`` direct loops (prange): Same application/kernel implemented with the Numba compiler using direct loops. Sometimes array-style programming is cumbersome and inefficient; direct-loop programming may lead to more readable and performant code. Thus, while evaluating the performance of the direct-loop implementation, it is useful to use the array-style Numba implementation as a baseline (see the sketch after this list)
22 |
23 | - ``numba-dpex @dpjit`` array-style: Modified numba @njit array-style implementation to compile and run on a specific device. You can use vanilla Numba implementation as a baseline while comparing numba-dpex implementation details and performance. You can also compare it against dpnp implementation to see how much extra performance numba-dpex can bring when you compile NumPy code for a given device
24 |
25 | - ``numba-dpex @dpjit`` direct loops (prange): Modified numba @njit direct loop implementation to compile and run on a specific device. You can use vanilla Numba implementation as a baseline while comparing numba-dpex implementation details and performance. You can also compare it against dpnp implementation to see how much extra performance numba-dpex can bring when you compile NumPy code for a given device
26 |
27 | - ``numba-dpex @dpjit`` kernel: Kernel-style programming, which is close to @cuda.jit programming model used in vanilla Numba
28 |
29 | - ``numba-mlir``: Array-style, direct loops and kernel-style implementations for experimental MLIR-based backend for Numba
30 |
31 | - ``cupy``: NumPy-like implementation using CuPy to run on CUDA-compatible devices
32 |
33 | - ``@cuda.jit``: Kernel-style Numba implementation to run on CUDA-compatible devices
34 |
35 | - Native SYCL: Most applications/kernels also have a DPC++ implementation, which can be used to compare the performance of the above implementations to DPC++-compiled code.
36 |
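To make the distinction between the array-style and direct-loop variants concrete, here
is a minimal illustrative sketch (not taken from ``dpbench``) of the same kernel written
in both styles:

.. code-block:: python

    import numpy as np
    from numba import njit, prange

    @njit
    def l2_norm_arrays(x):
        # Array-style: the kernel is expressed with whole-array NumPy operations
        return np.sqrt(np.sum(x * x))

    @njit(parallel=True)
    def l2_norm_loops(x):
        # Direct loops: the same kernel written as an explicit parallel loop
        s = 0.0
        for i in prange(x.shape[0]):
            s += x[i] * x[i]
        return np.sqrt(s)
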
37 | These benchmarks are implemented in the ``dpbench`` framework, which allows you to run all or selected benchmarks and variants to evaluate their performance on different hardware.
38 |
39 | For more details please refer to ``dpbench``
40 | `documentation `_.
41 |
--------------------------------------------------------------------------------
/.github/workflows/scorecard.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub. They are provided
2 | # by a third-party and are governed by separate terms of service, privacy
3 | # policy, and support documentation.
4 |
5 | name: Scorecard supply-chain security
6 | on:
7 | # For Branch-Protection check. Only the default branch is supported. See
8 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
9 | branch_protection_rule:
10 | # To guarantee Maintained check is occasionally updated. See
11 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
12 | schedule:
13 | - cron: '21 1 * * 0'
14 | push:
15 | branches: [ "main" ]
16 |
17 | # Declare default permissions as read only.
18 | permissions: read-all
19 |
20 | jobs:
21 | analysis:
22 | name: Scorecard analysis
23 | runs-on: ubuntu-latest
24 | permissions:
25 | # Needed to upload the results to code-scanning dashboard.
26 | security-events: write
27 | # Needed to publish results and get a badge (see publish_results below).
28 | id-token: write
29 | # Uncomment the permissions below if installing in a private repository.
30 | # contents: read
31 | # actions: read
32 |
33 | steps:
34 | - name: "Checkout code"
35 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
36 | with:
37 | persist-credentials: false
38 |
39 | - name: "Run analysis"
40 | uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
41 | with:
42 | results_file: results.sarif
43 | results_format: sarif
44 | # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
45 | # - you want to enable the Branch-Protection check on a *public* repository, or
46 | # - you are installing Scorecard on a *private* repository
47 | # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
48 | # repo_token: ${{ secrets.SCORECARD_TOKEN }}
49 |
50 | # Public repositories:
51 | # - Publish results to OpenSSF REST API for easy access by consumers
52 | # - Allows the repository to include the Scorecard badge.
53 | # - See https://github.com/ossf/scorecard-action#publishing-results.
54 | # For private repositories:
55 | # - `publish_results` will always be set to `false`, regardless
56 | # of the value entered here.
57 | publish_results: true
58 |
59 | # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
60 | # format to the repository Actions tab.
61 | # - name: "Upload artifact"
62 | # uses: actions/upload-artifact@97a0fba1372883ab732affbe8f94b823f91727db # v3.pre.node20
63 | # with:
64 | # name: SARIF file
65 | # path: results.sarif
66 | # retention-days: 5
67 |
68 | # # Upload the results to GitHub's code scanning dashboard (optional).
69 | # # Commenting out will disable upload of results to your repo's Code Scanning dashboard
70 | # - name: "Upload to code-scanning"
71 | # uses: github/codeql-action/upload-sarif@v3
72 | # with:
73 | # sarif_file: results.sarif
74 |
--------------------------------------------------------------------------------
/demos/game-of-life/README.md:
--------------------------------------------------------------------------------
1 | # Conway's Game Of Life
2 | [](https://github.com/psf/black)
3 | [](https://github.com/pre-commit/pre-commit)
4 | [](https://app.gitter.im/#/room/#Data-Parallel-Python_community:gitter.im)
5 |
6 | The Game of Life is a cellular automaton devised by John Horton Conway in 1970.
7 | It is a zero-player game, meaning that its evolution is determined by its initial state.
8 | One interacts with the Game of Life by creating an initial configuration and observing how it evolves.
9 |
10 | 
11 |
12 | The universe of the Game of Life is an infinite, two-dimensional orthogonal grid of square cells, each of which is in one of two possible states,
13 | live or dead (or populated and unpopulated, respectively). Every cell interacts with its eight neighbours, which are the cells that are horizontally,
14 | vertically, or diagonally adjacent. At each step in time, the following transitions occur:
15 |
16 | * Any live cell with fewer than two live neighbours dies, as if by underpopulation.
17 | * Any live cell with two or three live neighbours lives on to the next generation.
18 | * Any live cell with more than three live neighbours dies, as if by overpopulation.
19 | * Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
20 |
21 | These rules, which compare the behaviour of the automaton to real life, can be condensed into the following:
22 |
23 | * Any live cell with two or three live neighbours survives.
24 | * Any dead cell with three live neighbours becomes a live cell.
25 | * All other live cells die in the next generation. Similarly, all other dead cells stay dead.
26 |
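Internally, the demo encodes these rules as a lookup table indexed by a cell's current
state and its number of live neighbors (this mirrors the `rules` array in the demo's
Numba implementation); a minimal sketch:

```python
import numpy as np

#          0  1  2  3  4  5  6  7  8   <- number of live neighbors
rules = np.array(
    [
        [0, 0, 0, 1, 0, 0, 0, 0, 0],  # dead cell: born with exactly 3 neighbors
        [0, 0, 1, 1, 0, 0, 0, 0, 0],  # live cell: survives with 2 or 3 neighbors
    ]
)

def next_state(state, live_neighbors):
    # state is 0 (dead) or 1 (alive)
    return rules[state, live_neighbors]
```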
27 | The initial pattern constitutes the seed of the system.
28 | The first generation is created by applying the above rules simultaneously to every cell in the seed,
29 | live or dead; births and deaths occur simultaneously, and the discrete moment at which this happens is
30 | sometimes called a tick.
31 | Each generation is a pure function of the preceding one.
32 | The rules continue to be applied repeatedly to create further generations.
33 |
34 | For further details, please refer to [Wikipedia](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life).
35 |
36 | Installation
37 | ------------
38 | `conda install -c "pycoddiy/label/dev" game-of-life-demo`
39 |
40 | Also, if you are using the GUI, you will need to add
41 | `opencv-python` to the environment:
42 |
43 | `pip install opencv-python`
44 |
45 | Running demo
46 | ------------
47 |
48 | From the command line, type:
49 | `game_of_life [command line options]`
50 |
51 | * `--variant [numba, numpy, dpnp, numba-dpex]` (default `numpy`) - implementation variant
52 | * `--threading-layer [omp, tbb, workqueue]` (default `omp`) - threading layer for the `numba` implementation
53 | * `--parallel` (default) or `--no-parallel` - keyword argument `parallel=` for `@njit`.
54 | Used along with `--variant numba`
55 | * `--frames-count` - stop rendering after the specified number of frames. The default is 0, meaning the demo
56 | does not stop until a user action, e.g. closing the window
57 | * `--gui` (default) or `--no-gui` - render the evolution of the grid, or do the computation only and
58 | print performance statistics at the end
59 | * `--stats` (default) or `--no-stats` - display statistics in the GUI while running
60 | * `--task-size` - size of the grid as WIDTH,HEIGHT. Example: `960,540` (default)
61 |
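For example, an illustrative invocation that benchmarks the Numba variant headlessly for 100 frames, composed from the options above:

```
game_of_life --variant numba --threading-layer tbb --no-gui --frames-count 100 --task-size 960,540
```
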
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/game_of_life.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from game_of_life_demo import MAX_FRAMES
3 | from game_of_life_demo import PROB_ON
4 | from game_of_life_demo import get_task_size_string
5 | from game_of_life_demo import time_meter
6 | from game_of_life_demo.impl.arg_parser import parse_args
7 | from game_of_life_demo.impl.impl_versioner import get_variant_string
8 | from game_of_life_demo.impl.impl_versioner import grid_update
9 | from game_of_life_demo.impl.impl_versioner import init_grid
10 | from game_of_life_demo.impl.visualization import VISUALIZE_GAME
11 | from game_of_life_demo.impl.visualization import create_window
12 | from game_of_life_demo.impl.visualization import draw
13 | from game_of_life_demo.impl.visualization import test_esc_key_pressed
14 | from game_of_life_demo.impl.visualization import test_window_closed
15 |
16 |
17 | class Grid:
18 | draw_last = "draw_time_last"
19 | draw_total = "draw_time_total"
20 |
21 | update_last = "update_time_last"
22 | update_total = "update_time_total"
23 |
24 | def __init__(self, w, h, p):
25 | self.w = w
26 | self.h = h
27 | self.time = {
28 | self.draw_last: 0,
29 | self.draw_total: 0,
30 | self.update_last: 0,
31 | self.update_total: 0,
32 | }
33 | self.grid = init_grid(w, h, p)
34 |
35 | def get_statistics(self, frame_count):
36 | update_time = self.time[self.update_last]
37 | update_tpf = (
38 | self.time[self.update_total] / frame_count if frame_count > 0 else 0.0
39 | )
40 | draw_time = self.time[self.draw_last]
41 | draw_tpf = self.time[self.draw_total] / frame_count if frame_count > 0 else 0.0
42 | total_time = update_time + draw_time
43 | total_tpf = update_tpf + draw_tpf
44 | return update_time, update_tpf, draw_time, draw_tpf, total_time, total_tpf
45 |
46 | @time_meter(draw_last, draw_total)
47 | def draw(self, show_statistics, frame_count):
48 | (
49 | update_time,
50 | update_tpf,
51 | draw_time,
52 | draw_tpf,
53 | total_time,
54 | total_tpf,
55 | ) = self.get_statistics(frame_count)
56 | draw(
57 | self.grid,
58 | show_statistics,
59 | frame_count,
60 | update_time,
61 | update_tpf,
62 | draw_time,
63 | draw_tpf,
64 | total_time,
65 | total_tpf,
66 | )
67 |
68 | @time_meter(update_last, update_total)
69 | def update(self):
70 | self.grid = grid_update(self.grid)
71 |
72 |
73 | def main(argv=None):
74 | np.random.seed(777777777)
75 |
76 | w, h = parse_args(argv).task_size
77 | grid = Grid(w, h, PROB_ON)
78 |
79 | create_window()
80 |
81 | frames = 0
82 | do_game = True
83 |
84 | stop_frame = parse_args(argv).frames_count
85 | if stop_frame == 0:
86 | stop_frame = MAX_FRAMES
87 |
88 | print(get_variant_string())
89 | print(get_task_size_string(w, h))
90 |
91 | while do_game:
92 | # Checks for game termination
93 | esc_pressed = test_esc_key_pressed()
94 | window_closed = test_window_closed()
95 |
96 | # Draw objects
97 | grid.draw(parse_args(argv).stats, frames)
98 |
99 | # Perform updates
100 | grid.update()
101 |
102 | frames += 1
103 | do_game = (0 < frames <= stop_frame) and not (esc_pressed or window_closed)
104 |
105 | _, update_tpf, _, draw_tpf, _, total_tpf = grid.get_statistics(frames)
106 | print(f"Total frames {frames}")
107 | print("Average fps:")
108 | print(f" Computation {1/update_tpf:4.1f}")
109 | if VISUALIZE_GAME:
110 | print(f" Draw {1/draw_tpf:4.1f}")
111 | print(f" Total {1/total_tpf:4.1f}")
112 |
113 |
114 | if __name__ == "__main__":
115 | main()
116 |
--------------------------------------------------------------------------------
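A note on the `time_meter` decorator used by `Grid.draw` and `Grid.update` above: it is imported from the package `__init__`, which is not included in this section. Judging from its usage, it is a decorator factory that stores each call's duration under one key of `self.time` and accumulates it under another. A minimal sketch consistent with that usage (hypothetical, not the package's actual code):

```python
import functools
import time


def time_meter(last_key, total_key):
    # Store the duration of each call in self.time[last_key]
    # and accumulate it in self.time[total_key]
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            start = time.perf_counter()
            result = func(self, *args, **kwargs)
            elapsed = time.perf_counter() - start
            self.time[last_key] = elapsed
            self.time[total_key] += elapsed
            return result

        return wrapper

    return decorator
```
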
/docs/sources/sdc-sphinx-theme/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "basic/layout.html" %}
2 |
3 | {%- block scripts %}
4 | {{ super() }}
5 |
6 | {% if not embedded %}{% endif %}
7 | {%- endblock %}
8 |
9 | {# Add the google webfonts needed for the logo #}
10 | {% block extrahead %}
11 |
12 | {% endblock %}
13 |
14 | {% block header %}
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 | -
25 | {% block sidebarsearch %}
26 | {% include "searchbox.html" %}
27 | {% endblock %}
28 |
29 |
30 |
31 | {% endblock %}
32 |
33 | {% block relbar1 %}
34 |
66 | {% endblock %}
67 |
68 | {# Silence the bottom relbar. #}
69 | {% block relbar2 %}{% endblock %}
70 |
71 |
72 | {%- block footer %}
73 |
99 | {%- endblock %}
100 |
--------------------------------------------------------------------------------
/.github/workflows/gh-pages.yml:
--------------------------------------------------------------------------------
1 | name: github pages
2 |
3 | # Declare default permissions as read only.
4 | permissions: read-all
5 |
6 | on:
7 | push:
8 | branches:
9 | - main
10 | pull_request:
11 | types: [opened, synchronize, reopened, closed]
12 |
13 | jobs:
14 | main:
15 | if: ${{ !(github.event.pull_request && github.event.action == 'closed') }}
16 | runs-on: ubuntu-latest
17 | defaults:
18 | run:
19 | shell: bash -el {0}
20 |
21 | steps:
22 | - uses: actions/checkout@v2
23 | with:
24 | fetch-depth: 0
25 |
26 | - name: Setup conda
27 | uses: conda-incubator/setup-miniconda@v2
28 | with:
29 | auto-update-conda: true
30 | run-post: false
31 | miniforge-variant: Mambaforge
32 | miniforge-version: latest
33 | environment-file: environment.yml
34 |
35 | - name: Conda info
36 | run: |
37 | conda info
38 | conda list
39 |
40 | - name: Sphinx
41 | run: |
42 | cd docs
43 | make html
44 |
45 | - name: GitHub Pages [main]
46 | uses: peaceiris/actions-gh-pages@v3
47 | if: ${{ github.ref == 'refs/heads/main' }}
48 | with:
49 | github_token: ${{ secrets.GITHUB_TOKEN }}
50 | publish_dir: ./docs/_build/html/
51 | destination_dir: ./main
52 |           allow_empty_commit: true
53 | commit_message: ${{ github.event.head_commit.message }}
54 | publish_branch: gh-pages
55 | user_name: 'github-actions[bot]'
56 | user_email: 'github-actions[bot]@users.noreply.github.com'
57 |
58 | - name: GitHub Pages [PR]
59 | uses: peaceiris/actions-gh-pages@v3
60 | if: ${{ github.event.pull_request && github.event.action != 'closed' }}
61 | with:
62 | github_token: ${{ secrets.GITHUB_TOKEN }}
63 | publish_dir: ./docs/_build/html/
64 | destination_dir: ./pull/${{ github.event.number }}
66 |           allow_empty_commit: true
66 | commit_message: ${{ github.event.head_commit.message }}
67 | publish_branch: gh-pages
68 | user_name: 'github-actions[bot]'
69 | user_email: 'github-actions[bot]@users.noreply.github.com'
70 |
71 | - name: Comment PR [docs created]
72 | if: ${{ github.event.pull_request && github.event.action != 'closed' }}
73 | env:
74 | PR_NUM: ${{ github.event.number }}
75 | uses: mshick/add-pr-comment@v1
76 | with:
77 | message: |
78 | Documentation preview: [show](https://intelpython.github.io/DPEP/pull/${{ env.PR_NUM }}).
79 | repo-token: ${{ secrets.GITHUB_TOKEN }}
80 | repo-token-user-login: 'github-actions[bot]'
81 | allow-repeats: true
82 |
83 | clean:
84 | if: ${{ github.event.pull_request && github.event.action == 'closed' }}
85 | runs-on: ubuntu-latest
86 |
87 | steps:
88 | - uses: actions/checkout@v2
89 | with:
90 | fetch-depth: 0
91 |
92 | - name: GitHub Pages [PR closed]
93 | env:
94 | PR_NUM: ${{ github.event.number }}
95 | shell: bash -l {0}
96 | run: |
97 | git remote add tokened_docs https://IntelPython:${{ secrets.GITHUB_TOKEN }}@github.com/IntelPython/DPEP.git
98 | git fetch tokened_docs
99 | git checkout --track tokened_docs/gh-pages
100 | echo `pwd`
101 | [ -d pull/${PR_NUM} ] && git rm -rf pull/${PR_NUM}
102 | git config --global user.name 'github-actions[bot]'
103 | git config --global user.email 'github-actions[bot]@users.noreply.github.com'
104 | git commit -m "Removing docs for closed pull request ${PR_NUM}"
105 | git push tokened_docs gh-pages
106 |
107 | - name: Comment PR [docs removed]
108 | uses: mshick/add-pr-comment@v1
109 | with:
110 | message: |
111 | Documentation preview removed.
112 | repo-token: ${{ secrets.GITHUB_TOKEN }}
113 | repo-token-user-login: 'github-actions[bot]'
114 | allow-repeats: true
115 |
--------------------------------------------------------------------------------
/docs/sources/conf.py:
--------------------------------------------------------------------------------
1 | # *****************************************************************************
2 | # Copyright (c) 2020-2023, Intel Corporation All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # Redistributions of source code must retain the above copyright notice,
8 | # this list of conditions and the following disclaimer.
9 | #
10 | # Redistributions in binary form must reproduce the above copyright notice,
11 | # this list of conditions and the following disclaimer in the documentation
12 | # and/or other materials provided with the distribution.
13 | #
14 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
18 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 | # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 | # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 | # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 | # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 | # *****************************************************************************
26 |
27 | # coding: utf-8
28 | # Configuration file for the Sphinx documentation builder.
29 |
30 | import errno
31 | import shutil
32 | from pathlib import Path
33 |
34 | # -- Project information -----------------------------------------------------
35 |
36 | project = "Data Parallel Extensions for Python*"
37 | copyright = "2020-2023, Intel Corporation"
38 | author = "Intel Corporation"
39 |
40 | # The full version, including alpha/beta/rc tags
41 | release = "0.1"
42 |
43 | # -- General configuration ----------------------------------------------------
44 |
45 | # Add any Sphinx extension module names here, as strings. They can be
46 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
47 | # ones.
48 | extensions = [
49 | "sphinx.ext.todo",
50 | "sphinx.ext.intersphinx",
51 | "sphinx.ext.extlinks",
52 | "sphinx.ext.githubpages",
53 | "sphinx.ext.napoleon",
54 | "sphinx.ext.autosectionlabel",
55 | "sphinxcontrib.programoutput",
56 | "sphinxcontrib.googleanalytics",
57 | "nbsphinx",
58 | "IPython.sphinxext.ipython_console_highlighting",
59 | ]
60 |
61 | googleanalytics_id = "G-KVSVYMBQ0W"
62 | googleanalytics_enabled = True
63 |
64 | # Add any paths that contain templates here, relative to this directory.
65 | # templates_path = ['_templates']
66 | templates_path = []
67 |
68 | # List of patterns, relative to source directory, that match files and
69 | # directories to ignore when looking for source files.
70 | # This pattern also affects html_static_path and html_extra_path.
71 | exclude_patterns = ["_build", "**.ipynb_checkpoints"]
72 |
73 |
74 | # -- Options for HTML output -------------------------------------------------
75 |
76 | # The theme to use for HTML and HTML Help pages. See the documentation for
77 | # a list of builtin themes.
78 | #
79 | html_theme = "sdc-sphinx-theme"
80 |
81 | html_theme_path = ["."]
82 |
83 | html_theme_options = {}
84 |
85 | # Add any paths that contain custom static files (such as style sheets) here,
86 | # relative to this directory. They are copied after the builtin static files,
87 | # so a file named "default.css" will overwrite the builtin "default.css".
88 | html_static_path = []
89 |
90 | html_sidebars = {
91 | "**": ["globaltoc.html", "sourcelink.html", "searchbox.html", "relations.html"],
92 | }
93 |
94 | html_show_sourcelink = False
95 |
96 | # -- Todo extension configuration ----------------------------------------------
97 | todo_include_todos = True
98 | todo_link_only = True
99 |
100 | intersphinx_mapping = {}
101 |
102 | # -- Prepend module name to an object name or not -----------------------------------
103 | add_module_names = False
104 |
105 | # -- Copy notebooks into ./docs/sources/notebooks -----------------------------------
106 | notebooks_src_path = "../../notebooks"
107 | notebooks_dst_path = "notebooks"
108 |
109 | dirpath = Path(notebooks_dst_path)
110 | if dirpath.exists() and dirpath.is_dir():
111 | shutil.rmtree(dirpath)
112 |
113 | try:
114 | shutil.copytree(notebooks_src_path, notebooks_dst_path)
115 | except OSError as exc: # python >2.5
116 | if exc.errno in (errno.ENOTDIR, errno.EINVAL):
117 | shutil.copy(notebooks_src_path, notebooks_dst_path)
118 | else:
119 | raise
120 |
--------------------------------------------------------------------------------
/demos/game-of-life/game_of_life_demo/impl/visualization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from game_of_life_demo import get_task_size_string
3 | from game_of_life_demo.impl.arg_parser import parse_args
4 | from game_of_life_demo.impl.impl_versioner import asnumpy
5 | from game_of_life_demo.impl.impl_versioner import get_variant_string
6 |
7 | DISPLAY_RES = DISPLAY_W, DISPLAY_H = 1920, 1080 # Window width and height
8 | CELL_COLOR = (0, 255, 0) # Color to be used for alive cells # BGR
9 | CELL_SIZE = 2 # Cell size in pixels
10 |
11 | FONT_SCALE = 0.5
12 | FONT_COLOR = (255, 255, 255) # BGR(A)
13 | FONT_HEIGHT = 15
14 | FONT_THICKNESS = 1
15 | FONT = None
16 |
17 | # Text box with performance statistics
18 | TEXT_BOX_TOP_LEFT = (5, 7) # Top-left corner
19 | TEXT_BOX_BOTTOM_RIGHT = (420, 110) # Bottom-right corner
20 | TEXT_BOX_FONT_OFFSET = 10 # Offset from the top-left corner in pixels for drawing text
21 | TEXT_BOX_COL2_OFFSET = (
22 | 150 # Offset from the left (in pixels) for drawing text in second column
23 | )
24 | TEXT_BOX_COL3_OFFSET = (
25 | 300 # Offset from the left (in pixels) for drawing text in third column
26 | )
27 | TEXT_BOX_ALPHA = 0.5 # Transparency of the text box background
28 |
29 | # Game field size (GRID_W - number of cells horizontally, GRID_H - number of cells vertically)
30 | GRID_W, GRID_H = DISPLAY_W // CELL_SIZE, DISPLAY_H // CELL_SIZE
31 |
32 | ESC_KEYCODE = 27 # Finish demo by pressing Esc key
33 | WINDOW_NAME = "Conway's Game of Life" # Window name
34 |
35 |
36 | # *********************************************************************************************
37 | # CONDITIONAL INITIALIZATIONS
38 | # *********************************************************************************************
39 |
40 | GUI_FLAG = parse_args().gui # If user selected --gui CLI option
41 |
42 | if GUI_FLAG:
43 | try: # Importing OpenCV
44 | import cv2
45 |
46 | VISUALIZE_GAME = True
47 | FONT = cv2.FONT_HERSHEY_TRIPLEX
48 | except ModuleNotFoundError:
49 | VISUALIZE_GAME = False
50 | else:
51 | VISUALIZE_GAME = False
52 |
53 |
54 | def line_to_y(line_number):
55 | return TEXT_BOX_TOP_LEFT[1] + FONT_HEIGHT * line_number + TEXT_BOX_FONT_OFFSET
56 |
57 |
58 | def draw_text(img, text, line_number, x_pos=TEXT_BOX_FONT_OFFSET):
59 | y_pos = line_to_y(line_number)
60 | cv2.putText(img, text, (x_pos, y_pos), FONT, FONT_SCALE, FONT_COLOR, FONT_THICKNESS)
61 |
62 |
63 | def draw_statistics_line(img, name, line_number, fps, time):
64 | # no monospace fonts in OpenCV
65 | draw_text(img, name, line_number)
66 | draw_text(img, "FPS|time(ms)", line_number, TEXT_BOX_COL2_OFFSET)
67 | draw_text(img, f"{fps:4.1f}|{int(1000 * time)}", line_number, TEXT_BOX_COL3_OFFSET)
68 |
69 |
70 | def draw_statistics(
71 | img,
72 | w,
73 | h,
74 | frame_count,
75 | update_time,
76 | update_tpf,
77 | draw_time,
78 | draw_tpf,
79 | total_time,
80 | total_tpf,
81 | ):
82 | p1 = TEXT_BOX_TOP_LEFT
83 | p2 = TEXT_BOX_BOTTOM_RIGHT
84 |
85 | sub_img = img[p1[1] : p2[1], p1[0] : p2[0]]
86 | black_bg = np.zeros(sub_img.shape, dtype=np.uint8)
87 | img[p1[1] : p2[1], p1[0] : p2[0]] = cv2.addWeighted(
88 | sub_img, TEXT_BOX_ALPHA, black_bg, 1.0 - TEXT_BOX_ALPHA, 1.0
89 | )
90 | draw_text(img, get_variant_string(), 0)
91 | draw_text(img, get_task_size_string(w, h), 1)
92 | draw_text(img, f"Frames: {(frame_count // 10) * 10}", 2)
93 | draw_statistics_line(img, "Computation", 3, 1 / update_tpf, update_time)
94 | draw_statistics_line(img, "Draw", 4, 1 / draw_tpf, draw_time)
95 | draw_statistics_line(img, "Total", 5, 1 / total_tpf, total_time)
96 |
97 |
98 | def draw(
99 | grid,
100 | show_statistics,
101 | frame_count,
102 | update_time,
103 | update_tpf,
104 | draw_time,
105 | draw_tpf,
106 | total_time,
107 | total_tpf,
108 | ):
109 | if VISUALIZE_GAME:
110 | h, w = grid.shape
111 | img = np.zeros(shape=grid.shape + (3,), dtype=np.uint8)
112 | img[:, :, 1] = 255 * asnumpy(
113 | grid
114 | ) # The asnumpy() transfers data from device to host as needed
115 | img = cv2.resize(img, (DISPLAY_W, DISPLAY_H), interpolation=cv2.INTER_NEAREST)
116 |
117 | if show_statistics and frame_count > 0:
118 | draw_statistics(
119 | img,
120 | w,
121 | h,
122 | frame_count,
123 | update_time,
124 | update_tpf,
125 | draw_time,
126 | draw_tpf,
127 | total_time,
128 | total_tpf,
129 | )
130 |
131 | cv2.imshow(WINDOW_NAME, img)
132 | cv2.resizeWindow(WINDOW_NAME, DISPLAY_W, DISPLAY_H)
133 |
134 |
135 | def test_esc_key_pressed():
136 | if VISUALIZE_GAME:
137 | return cv2.pollKey() == ESC_KEYCODE
138 | else:
139 | return False
140 |
141 |
142 | def test_window_closed():
143 | if VISUALIZE_GAME:
144 | return not cv2.getWindowProperty(WINDOW_NAME, cv2.WND_PROP_VISIBLE)
145 | else:
146 | return False
147 |
148 |
149 | def create_window():
150 | if VISUALIZE_GAME:
151 | cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)
152 | cv2.resizeWindow(WINDOW_NAME, DISPLAY_W, DISPLAY_H)
153 |
--------------------------------------------------------------------------------
/docs/sources/sdc-sphinx-theme/static/sidebar.js:
--------------------------------------------------------------------------------
1 | /*
2 | * sidebar.js
3 | * ~~~~~~~~~~
4 | *
5 | * This script makes the Sphinx sidebar collapsible.
6 | *
7 | * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds
8 | * in .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton
9 | * used to collapse and expand the sidebar.
10 | *
11 | * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden
12 | * and the width of the sidebar and the margin-left of the document
13 | * are decreased. When the sidebar is expanded the opposite happens.
14 | * This script saves a per-browser/per-session cookie used to
15 | * remember the position of the sidebar among the pages.
16 | * Once the browser is closed the cookie is deleted and the position
17 | * reset to the default (expanded).
18 | *
19 | * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
20 | * :license: BSD, see LICENSE for details.
21 | *
22 | */
23 |
24 | $(function() {
25 | // global elements used by the functions.
26 | // the 'sidebarbutton' element is defined as global after its
27 | // creation, in the add_sidebar_button function
28 | var bodywrapper = $('.bodywrapper');
29 | var sidebar = $('.sphinxsidebar');
30 | var sidebarwrapper = $('.sphinxsidebarwrapper');
31 |
32 | // for some reason, the document has no sidebar; do not run into errors
33 | if (!sidebar.length) return;
34 |
35 | // original margin-left of the bodywrapper and width of the sidebar
36 | // with the sidebar expanded
37 | var bw_margin_expanded = bodywrapper.css('margin-left');
38 | var ssb_width_expanded = sidebar.width();
39 |
40 | // margin-left of the bodywrapper and width of the sidebar
41 | // with the sidebar collapsed
42 | var bw_margin_collapsed = 12;
43 | var ssb_width_collapsed = 12;
44 |
45 | // custom colors
46 | var dark_color = '#003a66';
47 | var light_color = '#0070c5';
48 |
49 | function sidebar_is_collapsed() {
50 | return sidebarwrapper.is(':not(:visible)');
51 | }
52 |
53 | function toggle_sidebar() {
54 | if (sidebar_is_collapsed())
55 | expand_sidebar();
56 | else
57 | collapse_sidebar();
58 | }
59 |
60 | function collapse_sidebar() {
61 | sidebarwrapper.hide();
62 | sidebar.css('width', ssb_width_collapsed);
63 | bodywrapper.css('margin-left', bw_margin_collapsed);
64 | sidebarbutton.css({
65 | 'margin-left': '-1px',
66 | 'height': bodywrapper.height(),
67 | 'border-radius': '3px'
68 | });
69 | sidebarbutton.find('span').text('»');
70 | sidebarbutton.attr('title', _('Expand sidebar'));
71 | document.cookie = 'sidebar=collapsed';
72 | }
73 |
74 | function expand_sidebar() {
75 | bodywrapper.css('margin-left', bw_margin_expanded);
76 | sidebar.css('width', ssb_width_expanded);
77 | sidebarwrapper.show();
78 | sidebarbutton.css({
79 | 'margin-left': ssb_width_expanded - 12,
80 | 'height': bodywrapper.height(),
81 | 'border-radius': '0px 3px 3px 0px'
82 | });
83 | sidebarbutton.find('span').text('«');
84 | sidebarbutton.attr('title', _('Collapse sidebar'));
85 | document.cookie = 'sidebar=expanded';
86 | }
87 |
88 | function add_sidebar_button() {
89 | sidebarwrapper.css({
90 | 'float': 'left',
91 | 'margin-right': '0',
92 | 'width': ssb_width_expanded - 18
93 | });
94 | // create the button
95 | sidebar.append('<div id="sidebarbutton"><span>&laquo;</span></div>');
96 | var sidebarbutton = $('#sidebarbutton');
97 |
98 | // find the height of the viewport to center the '<<' in the page
99 | var viewport_height;
100 | if (window.innerHeight)
101 | viewport_height = window.innerHeight;
102 | else
103 | viewport_height = $(window).height();
104 | var sidebar_offset = sidebar.offset().top;
105 | var sidebar_height = Math.max(bodywrapper.height(), sidebar.height());
106 | sidebarbutton.find('span').css({
107 | 'font-family': '"Lucida Grande",Arial,sans-serif',
108 | 'display': 'block',
109 | 'top': Math.min(viewport_height/2, sidebar_height/2 + sidebar_offset) - 10,
110 | 'width': 12,
111 | 'position': 'fixed',
112 | 'text-align': 'center'
113 | });
114 |
115 | sidebarbutton.click(toggle_sidebar);
116 | sidebarbutton.attr('title', _('Collapse sidebar'));
117 | sidebarbutton.css({
118 | 'color': '#FFFFFF',
119 | 'background-color': light_color,
120 | 'border': '1px solid ' + light_color,
121 | 'border-radius': '0px 3px 3px 0px',
122 | 'font-size': '1.2em',
123 | 'cursor': 'pointer',
124 | 'height': sidebar_height,
125 | 'padding-top': '1px',
126 | 'margin': '-1px',
127 | 'margin-left': ssb_width_expanded - 12
128 | });
129 |
130 | sidebarbutton.hover(
131 | function () {
132 | $(this).css('background-color', dark_color);
133 | },
134 | function () {
135 | $(this).css('background-color', light_color);
136 | }
137 | );
138 | }
139 |
140 | function set_position_from_cookie() {
141 | if (!document.cookie)
142 | return;
143 | var items = document.cookie.split(';');
144 | for(var k=0; k<items.length; k++) {
145 | var key_val = items[k].split('=');
146 | var key = key_val[0].replace(/ /, "");  // strip leading spaces
147 | if (key == 'sidebar') {
148 | var value = key_val[1];
149 | if ((value == 'collapsed') && (!sidebar_is_collapsed()))
150 | collapse_sidebar();
151 | else if ((value == 'expanded') && (sidebar_is_collapsed()))
152 | expand_sidebar();
153 | }
154 | }
155 | }
156 |
157 | add_sidebar_button();
158 | var sidebarbutton = $('#sidebarbutton');
159 | set_position_from_cookie();
160 | });
161 |
--------------------------------------------------------------------------------
/docs/sources/demos.rst:
--------------------------------------------------------------------------------
75 | - [Monte Carlo Pi](https://github.com/IntelPython/DPEP/tree/main/demos/mcpi) -
76 | The Monte Carlo method to estimate the value of $\pi$.
77 |
78 | - [Mandelbrot Set](https://github.com/IntelPython/DPEP/tree/main/demos/mandelbrot) -
79 | Visualization of the breathtaking process of diving into the famous Mandelbrot fractal.
80 |
81 | - [Game of Life](https://github.com/IntelPython/DPEP/tree/main/demos/game-of-life) -
82 | Visualization of the evolution of life using Conway's famous model.
83 |
84 | For more details, please refer to the documentation located in each demo's directory.
85 |
--------------------------------------------------------------------------------
/docs/sources/heterogeneous_computing.rst:
--------------------------------------------------------------------------------
1 | .. _heterogeneous_computing:
2 | .. include:: ./ext_links.txt
3 |
4 | Heterogeneous Computing
5 | =======================
6 |
7 | Device Offload
8 | **************
9 |
10 | Python is an interpreted language, which implies that most of the Python script will run on CPU,
11 | and only a few data parallel regions will execute on data parallel devices.
12 | That is why the concept of the host and offload devices is helpful when it comes to conceptualizing
13 | a heterogeneous programming model in Python.
14 |
15 | .. image:: ./_images/hetero-devices.png
16 | :width: 600px
17 | :align: center
18 | :alt: SIMD
19 |
20 | The above diagram illustrates the *host* (the CPU which runs Python interpreter) and three *devices*
21 | (two GPU devices and one attached accelerator device). **Data Parallel Extensions for Python**
22 | offer a programming model where a script executed by Python interpreter on the host can *offload* data-parallel
23 | kernels to a user-specified device. A *kernel* is the *data-parallel region* of a program submitted
24 | for execution on the device. There can be multiple data-parallel regions, and hence multiple *offload kernels*.
25 |
26 | Kernels can be pre-compiled into a library, such as ``dpnp``, or directly coded
27 | in a programming language for heterogeneous computing, such as `OpenCl*`_ or `DPC++`_ .
28 | **Data Parallel Extensions for Python** offer a way to write kernels directly in Python
29 | using the `Numba*`_ compiler along with ``numba-dpex``, the `Data Parallel Extension for Numba*`_.
30 |
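To give a flavor of a hand-written kernel, the sketch below adds two vectors with a
``numba-dpex`` kernel. It assumes the launch syntax of recent ``numba-dpex`` releases
(``Range`` and ``get_global_id``), which may differ between versions:

.. code-block:: python

    import dpnp
    import numba_dpex as dpex


    @dpex.kernel
    def vector_add(a, b, c):
        i = dpex.get_global_id(0)  # index of the current work item
        c[i] = a[i] + b[i]


    a = dpnp.arange(1024, dtype=dpnp.float32)
    b = dpnp.ones(1024, dtype=dpnp.float32)
    c = dpnp.empty_like(a)

    # Launch 1024 work items; the kernel runs on the device on which
    # a, b, and c are allocated (see Compute-Follows-Data below)
    vector_add[dpex.Range(1024)](a, b, c)
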
31 | One or more kernels are submitted for execution into a *queue* targeting an *offload device*.
32 | For each device, you can create one or more queues. In most cases, you do not need to work
33 | with device queues directly. Data Parallel Extensions for Python will do necessary underlying
34 | work with queues for you through the :ref:`Compute-Follows-Data`.
35 |
36 | Unified Shared Memory
37 | *********************
38 |
39 | Each device has its own memory, which is not necessarily accessible from another device.
40 |
41 | .. image:: ./_images/hetero-devices.png
42 | :width: 600px
43 | :align: center
44 | :alt: SIMD
45 |
46 | For example, **Device 1** memory may not be directly accessible from the host, but only through
47 | expensive copying performed by driver software. Similarly, depending on the architecture, direct data
48 | exchange between **Device 2** and **Device 1** may only be possible via expensive
49 | copying through the host memory. These aspects must be taken into consideration when programming
50 | data parallel devices.
51 |
52 | In the illustration above, **Device 2** logically consists of two sub-devices: **Sub-Device 1**
53 | and **Sub-Device 2**. The programming model allows accessing **Device 2** as a single logical device, or
54 | working with each individual sub-device. In the former case, a programmer needs to create
55 | a queue for **Device 2**. In the latter case, a programmer needs to create two queues, one for each sub-device.
56 |
57 | The `SYCL*`_ standard introduces the concept of *Unified Shared Memory* (USM). USM requires hardware support
58 | for a unified virtual address space, which allows coherency between host and device
59 | pointers. All USM memory is allocated by the host, but there are three distinct allocation types:
60 |
61 | * **Host: located on the host, accessible by the host or device.** This type of memory is useful
62 | when you need to stream read-only data from the host to the device once.
63 |
64 | * **Device: located on the device, accessible by the device only.** The fastest type of memory.
65 | Useful when most of the data crunching happens on the device.
66 |
67 | * **Shared: located on both the host and device, accessible by the host and device**.
68 | Shared allocations are useful when both the host and device access the data,
69 | since the user does not need to manage data migration explicitly.
70 | However, shared memory is much slower than the USM device memory type.
71 |
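In ``dpnp`` and ``dpctl`` the allocation type is requested through the ``usm_type``
keyword of array-creation functions. A small illustration (the default is ``"device"``):

.. code-block:: python

    import dpnp

    # Request a shared USM allocation instead of the default device allocation
    x = dpnp.zeros(1024, usm_type="shared")
    print(x.usm_type)  # -> 'shared'
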
72 | Compute-Follows-Data
73 | ********************
74 | Since data copying between devices is typically very expensive, for performance reasons it is essential
75 | to process data close to where it is allocated. This is the premise of the *Compute-Follows-Data* programming model,
76 | which states that the compute happens where the data resides. Tensors implemented in ``dpctl`` and ``dpnp``
77 | carry information about allocation queues, and hence, about the device on which an array is allocated.
78 | Based on tensor input arguments of the offload kernel, it deduces the queue on which the execution happens.
79 |
80 | .. image:: ./_images/kernel-queue-device.png
81 | :width: 600px
82 | :align: center
83 | :alt: SIMD
84 |
85 | The picture above illustrates the *Compute-Follows-Data* concept. Arrays ``A`` and ``B`` are inputs to the
86 | **Offload Kernel**. These arrays carry information about their *allocation queue* (**Device Queue**) and the
87 | *device* (**Device 1**) where they were created. According to the Compute-Follows-Data paradigm
88 | the **Offload Kernel** will be submitted to this **Device Queue**, and the resulting array ``C`` will
89 | be created on the **Device Queue** associated with **Device 1**.
90 |
91 | **Data Parallel Extensions for Python** require all input tensor arguments to have the **same** allocation queue.
92 | Otherwise, an exception is thrown. For example, the following usages will result in an exception.
93 |
94 | .. figure:: ./_images/queue-exception1.png
95 | :width: 600px
96 | :align: center
97 | :alt: SIMD
98 |
99 | Input tensors are on different devices and different queues. Exception is thrown.
100 |
101 | .. figure:: ./_images/queue-exception2.png
102 | :width: 600px
103 | :align: center
104 | :alt: SIMD
105 |
106 | Input tensors are on the same device, but queues are different. Exception is thrown.
107 |
108 | .. figure:: ./_images/queue-exception3.png
109 | :width: 600px
110 | :align: center
111 | :alt: SIMD
112 |
113 | Data belongs to the same device, but the queues are different and associated with different sub-devices. Exception is thrown.
114 |
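In code, mixing allocation queues looks like this (a sketch assuming both a CPU and
a GPU device are available):

.. code-block:: python

    import dpnp

    a = dpnp.ones(10, device="cpu")
    b = dpnp.ones(10, device="gpu")

    c = a + b  # raises an exception: the inputs are on different queues
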
115 | Copying Data Between Devices and Queues
116 | ***************************************
117 |
118 | **Data Parallel Extensions for Python** create **one** *canonical queue* per device. Normally,
119 | you do not need to directly manage queues. Having one canonical queue per device
120 | allows you to copy data between devices using the ``to_device()`` method:
121 |
122 | .. code-block:: python
123 |
124 | a_new = a.to_device(b.device)
125 |
126 | Array ``a`` is copied to the device associated with array ``b`` into the new array ``a_new``.
127 | The same queue is associated with ``b`` and ``a_new``.
128 |
129 | Alternatively, you can do this as follows:
130 |
131 | .. code-block:: python
132 | :caption: DPNP array
133 |
134 | a_new = dpnp.asarray(a, device=b.device)
135 |
136 | .. code-block:: python
137 | :caption: DPCtl array
138 |
139 | a_new = dpctl.tensor.asarray(a, device=b.device)
140 |
141 | Creating Additional Queues
142 | **************************
143 |
144 | As noted before, **Data Parallel Extensions for Python** automatically create one canonical queue per device,
145 | and you normally work with this queue implicitly. However, you can always create as many additional queues per device
146 | as needed and work explicitly with them, for example, for profiling purposes.
147 |
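For instance, a profiling-enabled queue can be created explicitly with ``dpctl`` and
passed to array-creation functions (a sketch; the filter string depends on your hardware):

.. code-block:: python

    import dpctl
    import dpnp

    # An additional queue on the default GPU device with profiling enabled
    q = dpctl.SyclQueue("gpu", property="enable_profiling")

    # Arrays created with this queue carry it with them (Compute-Follows-Data)
    x = dpnp.arange(100, sycl_queue=q)
    print(x.sycl_queue)
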
148 | Read `Data Parallel Control`_ documentation for more details about queues.
149 |
--------------------------------------------------------------------------------
/notebooks/02-dpnp_numpy_fallback.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "be340585",
6 | "metadata": {},
7 | "source": [
8 | "# Jupyter Notebook 2: Falling back to NumPy"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "9008dbe5",
14 | "metadata": {},
15 | "source": [
16 | "**Data Parallel Extensions for Python** are still in active development and have not yet reached production quality. This in particular means that some functional and performance gaps may exist in certain `dpnp` functions that require workarounds. The functional gaps include:\n",
17 | "\n",
18 | "* Certain NumPy APIs are not supported. For complete reference please see the [comparison table](https://intelpython.github.io/dpnp/reference/comparison.html) for **Data Parallel Extension for NumPy**.\n",
19 | "* Limited support of `dtypes`. At present the **Data Parallel Extension for NumPy** supports `float32`, `float64`, `int32`, and `int64` types.\n",
20 | "* Certain keyword arguments are supported for default settings only. Please refer to the [Data Parallel Extension for NumPy API Reference](https://intelpython.github.io/dpnp/reference/index.html) for details.\n",
21 | "\n",
22 | "Since NumPy API is versatily there are typically some workaround options exist. Please refer to [Data Parallel Extension for NumPy API Reference](https://intelpython.github.io/dpnp/reference/index.html) and [NumPy documentation](https://numpy.org/doc/stable/) for hints for possible alternatives.\n",
23 | "\n",
24 | "Another possible workaround is to fall back to the host and execute the script via NumPy. This is a discouraged use because it typically results in significant performance penalties associated with copying data from the device to the host and back. However, for debugging purposes you may still want to enable this behavior.\n",
25 | "\n",
26 | "To do so you must set the `DPNP_RAISE_EXCEPION_ON_NUMPY_FALLBACK` environment variable to `0`.\n",
27 | "\n",
28 | "Let's take the following example:"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": 3,
34 | "id": "c78511cd",
35 | "metadata": {},
36 | "outputs": [
37 | {
38 | "name": "stdout",
39 | "output_type": "stream",
40 | "text": [
41 | "[[3. 3. 3. ... 3. 3. 3.]\n",
42 | " [3. 3. 3. ... 3. 3. 3.]\n",
43 | " [3. 3. 3. ... 3. 3. 3.]\n",
44 | " ...\n",
45 | " [3. 3. 3. ... 3. 3. 3.]\n",
46 | " [3. 3. 3. ... 3. 3. 3.]\n",
47 | " [3. 3. 3. ... 3. 3. 3.]]\n"
48 | ]
49 | },
50 | {
51 | "ename": "NotImplementedError",
52 | "evalue": "Requested funtion=full with args=((20000, 20000), 3.0, None, 'C') and kwargs={'like': array([])} isn't currently supported and would fall back on NumPy implementation. Define enviroment variable `DPNP_RAISE_EXCEPION_ON_NUMPY_FALLBACK` to `0` if the fall back is required to be supported without rasing an exception.",
53 | "output_type": "error",
54 | "traceback": [
55 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
56 | "\u001b[1;31mNotImplementedError\u001b[0m Traceback (most recent call last)",
57 | "Cell \u001b[1;32mIn[3], line 9\u001b[0m\n\u001b[0;32m 7\u001b[0m a_np \u001b[38;5;241m=\u001b[39m numpy\u001b[38;5;241m.\u001b[39mfull((N, N), \u001b[38;5;241m3.\u001b[39m, like \u001b[38;5;241m=\u001b[39m numpy\u001b[38;5;241m.\u001b[39mzeros((N, \u001b[38;5;241m0\u001b[39m)))\n\u001b[0;32m 8\u001b[0m \u001b[38;5;28mprint\u001b[39m(a_np)\n\u001b[1;32m----> 9\u001b[0m a_dp \u001b[38;5;241m=\u001b[39m \u001b[43mdpnp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfull\u001b[49m\u001b[43m(\u001b[49m\u001b[43m(\u001b[49m\u001b[43mN\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mN\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m3.\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlike\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mdpnp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mzeros\u001b[49m\u001b[43m(\u001b[49m\u001b[43m(\u001b[49m\u001b[43mN\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 10\u001b[0m \u001b[38;5;28mprint\u001b[39m(a_dp)\n",
58 | "File \u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\envs\\ndpex-examples\\lib\\site-packages\\dpnp\\dpnp_iface_arraycreation.py:747\u001b[0m, in \u001b[0;36mfull\u001b[1;34m(shape, fill_value, dtype, order, like, device, usm_type, sycl_queue)\u001b[0m\n\u001b[0;32m 738\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m 739\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m dpnp_container\u001b[38;5;241m.\u001b[39mfull(shape,\n\u001b[0;32m 740\u001b[0m fill_value,\n\u001b[0;32m 741\u001b[0m dtype\u001b[38;5;241m=\u001b[39mdtype,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 744\u001b[0m usm_type\u001b[38;5;241m=\u001b[39musm_type,\n\u001b[0;32m 745\u001b[0m sycl_queue\u001b[38;5;241m=\u001b[39msycl_queue)\n\u001b[1;32m--> 747\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcall_origin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnumpy\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfull\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mshape\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfill_value\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43morder\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlike\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlike\u001b[49m\u001b[43m)\u001b[49m\n",
59 | "File \u001b[1;32mdpnp\\dpnp_utils\\dpnp_algo_utils.pyx:132\u001b[0m, in \u001b[0;36mdpnp.dpnp_utils.dpnp_algo_utils.call_origin\u001b[1;34m()\u001b[0m\n",
60 | "\u001b[1;31mNotImplementedError\u001b[0m: Requested funtion=full with args=((20000, 20000), 3.0, None, 'C') and kwargs={'like': array([])} isn't currently supported and would fall back on NumPy implementation. Define enviroment variable `DPNP_RAISE_EXCEPION_ON_NUMPY_FALLBACK` to `0` if the fall back is required to be supported without rasing an exception."
61 | ]
62 | }
63 | ],
64 | "source": [
65 | "import dpnp\n",
66 | "import numpy\n",
67 | "\n",
68 | "N = 200*100\n",
69 | "\n",
70 | "# Create an two dimencial array with singular element and array like option\n",
71 | "a_np = numpy.full((N, N), 3., like = numpy.zeros((N, 0)))\n",
72 | "print(a_np)\n",
73 | "a_dp = dpnp.full((N, N), 3., like = dpnp.zeros((N, 0)))\n",
74 | "print(a_dp)"
75 | ]
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "id": "ae026021",
80 | "metadata": {},
81 | "source": [
82 | "We are attempting to call `dpnp.full()` function with the keyword argument `like` set to another `dpnp` array, which is currently unsupported. A possible alternative would be to use `dpnp.full_like` function:"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": 4,
88 | "id": "5e588080-484a-44f4-a168-7d07f760c7a0",
89 | "metadata": {},
90 | "outputs": [
91 | {
92 | "name": "stdout",
93 | "output_type": "stream",
94 | "text": [
95 | "Elapsed 1494.33 ms\n",
96 | "[[3. 3. 3. ... 3. 3. 3.]\n",
97 | " [3. 3. 3. ... 3. 3. 3.]\n",
98 | " [3. 3. 3. ... 3. 3. 3.]\n",
99 | " ...\n",
100 | " [3. 3. 3. ... 3. 3. 3.]\n",
101 | " [3. 3. 3. ... 3. 3. 3.]\n",
102 | " [3. 3. 3. ... 3. 3. 3.]]\n",
103 | "Array a is located on the device: Device(level_zero:gpu:0)\n"
104 | ]
105 | }
106 | ],
107 | "source": [
108 | "from time import time\n",
109 | "\n",
110 | "t = time()\n",
111 | "a = dpnp.full_like(dpnp.zeros((N, 0)), 3., shape=(N, N))\n",
112 | "print(f\"Elapsed {(time()-t)*1000:.2f} ms\")\n",
113 | "print(a)\n",
114 | "print(\"Array a is located on the device:\", a.device)"
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "id": "210440c3",
120 | "metadata": {},
121 | "source": [
122 | "Now we enable `DPNP_RAISE_EXCEPION_ON_NUMPY_FALLBACK` to fall back to NumPy in case of `NotImplementedError`."
123 | ]
124 | },
125 | {
126 | "cell_type": "markdown",
127 | "id": "bca7490d",
128 | "metadata": {},
129 | "source": [
130 | "**Note: Restart Jupyter Notebook kernel before running the following code** "
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "execution_count": 1,
136 | "id": "05ad1565",
137 | "metadata": {},
138 | "outputs": [
139 | {
140 | "name": "stdout",
141 | "output_type": "stream",
142 | "text": [
143 | "DPNP_RAISE_EXCEPION_ON_NUMPY_FALLBACK = 0\n",
144 | "Elapsed 2231.62 ms\n",
145 | "[[3. 3. 3. ... 3. 3. 3.]\n",
146 | " [3. 3. 3. ... 3. 3. 3.]\n",
147 | " [3. 3. 3. ... 3. 3. 3.]\n",
148 | " ...\n",
149 | " [3. 3. 3. ... 3. 3. 3.]\n",
150 | " [3. 3. 3. ... 3. 3. 3.]\n",
151 | " [3. 3. 3. ... 3. 3. 3.]]\n",
152 | "Array a is located on the device: Device(level_zero:gpu:0)\n"
153 | ]
154 | }
155 | ],
156 | "source": [
157 | "import os\n",
158 | "os.environ[\"DPNP_RAISE_EXCEPION_ON_NUMPY_FALLBACK\"] = \"0\" \n",
159 | "\n",
160 | "import dpnp as np\n",
161 | "print (\"DPNP_RAISE_EXCEPION_ON_NUMPY_FALLBACK =\", np.config.__DPNP_RAISE_EXCEPION_ON_NUMPY_FALLBACK__) # Expect 0\n",
162 | "\n",
163 | "from time import time\n",
164 | "\n",
165 | "N = 200*100\n",
166 | "\n",
167 | "t = time()\n",
168 | "a = np.full((N, N), 3., like = np.zeros((N, 0)))\n",
169 | "print(f\"Elapsed {(time()-t)*1000:.2f} ms\")\n",
170 | "print(a)\n",
171 | "print(\"Array a is located on the device:\", a.device)"
172 | ]
173 | }
174 | ],
175 | "metadata": {
176 | "kernelspec": {
177 | "display_name": "Python 3 (ipykernel)",
178 | "language": "python",
179 | "name": "python3"
180 | },
181 | "language_info": {
182 | "codemirror_mode": {
183 | "name": "ipython",
184 | "version": 3
185 | },
186 | "file_extension": ".py",
187 | "mimetype": "text/x-python",
188 | "name": "python",
189 | "nbconvert_exporter": "python",
190 | "pygments_lexer": "ipython3",
191 | "version": "3.9.10"
192 | }
193 | },
194 | "nbformat": 4,
195 | "nbformat_minor": 5
196 | }
197 |
--------------------------------------------------------------------------------
/.github/workflows/mandelbrot_build_test_deploy.yml:
--------------------------------------------------------------------------------
1 | name: mandelbrot package
2 |
3 | # Declare default permissions as read only.
4 | permissions: read-all
5 |
6 | on:
7 | push:
8 | branches:
9 | - main
10 | pull_request:
11 |
12 | env:
13 | PACKAGE_NAME: mandelbrot-demo
14 | MODULE_NAME: mandelbrot_demo
15 |
16 | jobs:
17 | build_linux:
18 | runs-on: ubuntu-20.04
19 |
20 | strategy:
21 | matrix:
22 | python: ['3.8', '3.9', '3.10']
23 | steps:
24 | - uses: actions/checkout@v3
25 | with:
26 | fetch-depth: 0
27 |
28 | - name: Set pkgs_dirs
29 | run: |
30 | echo "pkgs_dirs: [~/.conda/pkgs]" >> ~/.condarc
31 | - name: Cache conda packages
32 | uses: actions/cache@v3
33 | env:
34 | CACHE_NUMBER: 0 # Increase to reset cache
35 | with:
36 | path: ~/.conda/pkgs
37 | key:
38 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{hashFiles('**/meta.yaml') }}
39 | restore-keys: |
40 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
41 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
42 | - name: Add conda to system path
43 | run: echo $CONDA/bin >> $GITHUB_PATH
44 | - name: Install conda-build
45 | run: conda install conda-build -c conda-forge --override-channels
46 | - name: Build conda package
47 | run: |
48 | CHANNELS="-c dppy/label/dev -c intel -c conda-forge --override-channels"
49 | VERSIONS="--python ${{ matrix.python }}"
50 | TEST=""
51 | cd ./demos/mandelbrot
52 | conda build $TEST $VERSIONS $CHANNELS conda-recipe
53 | - name: Upload artifact
54 | uses: actions/upload-artifact@v3
55 | with:
56 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
57 | path: /usr/share/miniconda/conda-bld/linux-64/${{ env.PACKAGE_NAME }}-*.tar.bz2
58 |
59 | test_linux:
60 | needs: build_linux
61 | runs-on: ${{ matrix.runner }}
62 |
63 | strategy:
64 | matrix:
65 | python: ['3.8', '3.9', '3.10']
66 | experimental: [true]
67 | runner: [ubuntu-20.04]
68 | continue-on-error: ${{ matrix.experimental }}
69 | env:
70 | CHANNELS: -c dppy/label/dev -c intel -c conda-forge --override-channels
71 |
72 | steps:
73 | - uses: actions/checkout@v3
74 | with:
75 | fetch-depth: 0
76 | - name: Download artifact
77 | uses: actions/download-artifact@v3
78 | with:
79 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
80 | - name: Add conda to system path
81 | run: echo $CONDA/bin >> $GITHUB_PATH
82 | - name: Install conda-build
83 | # Needed to be able to run conda index
84 | run: conda install conda-build -c conda-forge --override-channels
85 | - name: Create conda channel
86 | run: |
87 | mkdir -p $GITHUB_WORKSPACE/channel/linux-64
88 | # conda index $GITHUB_WORKSPACE/channel || exit 1
89 | mv ${PACKAGE_NAME}-*.tar.bz2 $GITHUB_WORKSPACE/channel/linux-64 || exit 1
90 | conda index $GITHUB_WORKSPACE/channel || exit 1
91 | # Test channel
92 | conda search $PACKAGE_NAME -c $GITHUB_WORKSPACE/channel --override-channels --info --json > $GITHUB_WORKSPACE/ver.json
93 | cat ver.json
94 | - name: Collect dependencies
95 | run: |
96 | CHANNELS="-c $GITHUB_WORKSPACE/channel ${{ env.CHANNELS }}"
97 | conda create -n test-env $PACKAGE_NAME python=${{ matrix.python }} $CHANNELS --only-deps --dry-run > lockfile
98 | cat lockfile
99 | - name: Set pkgs_dirs
100 | run: |
101 | echo "pkgs_dirs: [~/.conda/pkgs]" >> ~/.condarc
102 | - name: Cache conda packages
103 | uses: actions/cache@v3
104 | env:
105 | CACHE_NUMBER: 0 # Increase to reset cache
106 | with:
107 | path: ~/.conda/pkgs
108 | key:
109 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{hashFiles('lockfile') }}
110 | restore-keys: |
111 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
112 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
113 | - name: Install package
114 | run: |
115 | export CHANNELS="-c $GITHUB_WORKSPACE/channel ${{ env.CHANNELS }}"
116 | conda create -n test-env $PACKAGE_NAME pytest python=${{ matrix.python }} ${CHANNELS}
117 | . $CONDA/etc/profile.d/conda.sh
118 | conda activate test-env
119 | # Test installed packages
120 | conda list -n test-env
121 | - name: Run tests
122 | run: |
123 | . $CONDA/etc/profile.d/conda.sh
124 | conda activate test-env
125 | pushd ./demos/mandelbrot/${{ env.MODULE_NAME }}
126 | pytest
127 | popd
128 |
129 | upload_linux:
130 | needs: test_linux
131 | if: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || (github.event_name == 'push' && contains(github.ref, 'refs/tags/')) }}
132 | runs-on: ubuntu-20.04
133 | strategy:
134 | matrix:
135 | python: ['3.8', '3.9', '3.10']
136 | steps:
137 | - name: Download artifact
138 | uses: actions/download-artifact@v3
139 | with:
140 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
141 |
142 | - name: Install anaconda-client
143 | run: conda install anaconda-client -c conda-forge --override-channels
144 | - name: Add conda to system path
145 | run: echo $CONDA/bin >> $GITHUB_PATH
146 |
147 | - name: Upload
148 | env:
149 | ANACONDA_TOKEN: ${{ secrets.PYCODDIYTOKEN }}
150 | run: |
151 | anaconda --token ${{ env.ANACONDA_TOKEN }} upload --user pycoddiy --label main ${PACKAGE_NAME}-*.tar.bz2
152 |
153 | build_windows:
154 | runs-on: windows-latest
155 |
156 | strategy:
157 | matrix:
158 | python: ['3.8', '3.9', '3.10']
159 | env:
160 | conda-bld: C:\Miniconda\conda-bld\win-64\
161 | steps:
162 | - uses: actions/checkout@v3
163 | with:
164 | fetch-depth: 0
165 | - uses: conda-incubator/setup-miniconda@v2
166 | with:
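167 | # IMPORTANT: use-only-tar-bz2 keeps packages as .tar.bz2 so actions/cache can restore them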
167 | use-only-tar-bz2: true
168 | auto-activate-base: true
169 | conda-build-version: "*"
170 | activate-environment: true
171 | python-version: ${{ matrix.python }}
172 |
173 | - name: Cache conda packages
174 | uses: actions/cache@v3
175 | env:
176 | CACHE_NUMBER: 0 # Increase to reset cache
177 | with:
178 | path: ~/conda_pkgs_dir  # package cache dir used by setup-miniconda on this runner
179 | key:
180 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('**/meta.yaml') }}
181 | restore-keys: |
182 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
183 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
184 | - name: Build conda package
185 | run: |
186 | cd ./demos/mandelbrot
187 | conda build --keep-old-work --dirty --no-test --python ${{ matrix.python }} -c dppy/label/dev -c intel -c conda-forge --override-channels conda-recipe
188 | - name: Upload artifact
189 | uses: actions/upload-artifact@v3
190 | with:
191 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
192 | path: ${{ env.conda-bld }}${{ env.PACKAGE_NAME }}-*.tar.bz2
193 |
194 | test_windows:
195 | needs: build_windows
196 | runs-on: ${{ matrix.runner }}
197 | defaults:
198 | run:
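199 | # custom shell; the runner replaces {0} with the path of the generated step script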
199 | shell: cmd /C CALL {0}
200 | strategy:
201 | matrix:
202 | python: ['3.8', '3.9', '3.10']
203 | experimental: [false]
204 | runner: [windows-latest]
205 | continue-on-error: ${{ matrix.experimental }}
206 | env:
207 | workdir: '${{ github.workspace }}'
208 | CHANNELS: -c dppy/label/dev -c intel -c conda-forge --override-channels
209 | steps:
210 | - name: Checkout sources
211 | uses: actions/checkout@v3
212 | with:
213 | fetch-depth: 0
214 | - name: Download artifact
215 | uses: actions/download-artifact@v3
216 | with:
217 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
218 | - uses: conda-incubator/setup-miniconda@v2
219 | with:
220 | use-only-tar-bz2: false
221 | auto-update-conda: true
222 | conda-build-version: '*'
223 | miniconda-version: 'latest'
224 | activate-environment: test-env
225 | python-version: ${{ matrix.python }}
226 | - name: conda info
227 | shell: cmd /C CALL {0}
228 | run: |
229 | conda info
230 | - name: conda config --show
231 | shell: cmd /C CALL {0}
232 | run: |
233 | conda config --show
234 | - name: Create conda channel with the downloaded artifact
235 | shell: cmd /C CALL {0}
236 | run: |
237 | echo ${{ env.workdir }}
238 | mkdir ${{ env.workdir }}\channel\win-64
239 | move ${{ env.PACKAGE_NAME }}-*.tar.bz2 ${{ env.workdir }}\channel\win-64
240 | dir ${{ env.workdir }}\channel\win-64
241 | - name: Index the channel
242 | shell: cmd /C CALL {0}
243 | run: conda index ${{ env.workdir }}\channel
244 | - name: Collect dependencies
245 | shell: cmd /C CALL {0}
246 | run: |
247 | conda install -n test-env ${{ env.PACKAGE_NAME }} python=${{ matrix.python }} -c ${{ env.workdir }}/channel ${{ env.CHANNELS }} --only-deps --dry-run > lockfile && conda activate test-env
248 | - name: Display lockfile content
249 | shell: pwsh
250 | run: Get-Content -Path .\lockfile
251 | - name: Cache conda packages
252 | uses: actions/cache@v3
253 | env:
254 | CACHE_NUMBER: 0 # Increase to reset cache
255 | with:
256 | path: ~/conda_pkgs_dir  # package cache dir used by setup-miniconda on this runner
257 | key:
258 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('lockfile') }}
259 | restore-keys: |
260 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
261 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
262 | - name: Install dependencies
263 | shell: cmd /C CALL {0}
264 | run: |
265 | conda install -n test-env ${{ env.PACKAGE_NAME }} pytest python=${{ matrix.python }} -c ${{ env.workdir }}/channel ${{ env.CHANNELS }}
266 | - name: Install opencl_rt
267 | shell: cmd /C CALL {0}
268 | run: conda install -n test-env opencl_rt -c intel --override-channels
269 | - name: Configure Intel OpenCL CPU RT
270 | shell: pwsh
271 | run: |
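272 | # register the Intel OpenCL CPU runtime ICD in the Windows registry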
272 | $script_path="$env:CONDA_PREFIX\Scripts\set-intel-ocl-icd-registry.ps1"
273 | &$script_path
274 | # Show the cl.cfg entry that points the OpenCL CPU driver at the TBB DLLs, which are not in its default search path
275 | $cl_cfg="$env:CONDA_PREFIX\Library\lib\cl.cfg"
276 | Get-Content -Tail 5 -Path $cl_cfg
277 | - name: Run tests
278 | shell: pwsh
279 | run: |
280 | ls
281 | conda activate test-env
282 | pushd ./demos/mandelbrot/${{ env.MODULE_NAME }}
283 | pytest
284 | popd
285 |
286 | upload_windows:
287 | needs: test_windows
288 | if: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || (github.event_name == 'push' && contains(github.ref, 'refs/tags/')) }}
289 | runs-on: windows-latest
290 | strategy:
291 | matrix:
292 | python: ['3.8', '3.9', '3.10']
293 | steps:
294 | - name: Download artifact
295 | uses: actions/download-artifact@v3
296 | with:
297 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
298 | - uses: conda-incubator/setup-miniconda@v2
299 | with:
300 | auto-activate-base: true
301 | activate-environment: ""
302 | - name: Install anaconda-client
303 | run: conda install anaconda-client -c conda-forge --override-channels
304 |
305 | - name: Upload
306 | env:
307 | ANACONDA_TOKEN: ${{ secrets.PYCODDIYTOKEN }}
308 | run: |
309 | anaconda --token ${{ env.ANACONDA_TOKEN }} upload --user pycoddiy --label main ${{ env.PACKAGE_NAME }}-*.tar.bz2
310 |
--------------------------------------------------------------------------------
/.github/workflows/mcpi_build_test_upload.yml:
--------------------------------------------------------------------------------
1 | name: mcpi package
2 |
3 | # Declare default permissions as read only.
4 | permissions: read-all
5 |
6 | on:
7 | push:
8 | branches:
9 | - main
10 | pull_request:
11 |
12 | env:
13 | PACKAGE_NAME: mcpi-demo
14 | MODULE_NAME: mcpi_demo
15 |
16 | jobs:
17 | build_linux:
18 | runs-on: ubuntu-20.04
19 |
20 | strategy:
21 | matrix:
22 | python: ['3.8', '3.9', '3.10']
23 | steps:
24 | - uses: actions/checkout@v3
25 | with:
26 | fetch-depth: 0
27 |
28 | - name: Set pkgs_dirs
29 | run: |
30 | echo "pkgs_dirs: [~/.conda/pkgs]" >> ~/.condarc
31 | - name: Cache conda packages
32 | uses: actions/cache@v3
33 | env:
34 | CACHE_NUMBER: 0 # Increase to reset cache
35 | with:
36 | path: ~/.conda/pkgs
37 | key:
38 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('**/meta.yaml') }}
39 | restore-keys: |
40 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
41 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
42 | - name: Add conda to system path
43 | run: echo $CONDA/bin >> $GITHUB_PATH
44 | - name: Install conda-build
45 | run: conda install conda-build -c conda-forge --override-channels
46 | - name: Build conda package
47 | run: |
48 | CHANNELS="-c intel -c main --override-channels"
49 | VERSIONS="--python ${{ matrix.python }}"
50 | TEST=""
51 | ls
52 | cd ./demos/mcpi
53 | conda build $TEST $VERSIONS $CHANNELS conda-recipe
54 | - name: Upload artifact
55 | uses: actions/upload-artifact@v3
56 | with:
57 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
58 | path: /usr/share/miniconda/conda-bld/linux-64/${{ env.PACKAGE_NAME }}-*.tar.bz2
59 |
60 | test_linux:
61 | needs: build_linux
62 | runs-on: ${{ matrix.runner }}
63 |
64 | strategy:
65 | matrix:
66 | python: ['3.8', '3.9', '3.10']
67 | experimental: [true]
68 | runner: [ubuntu-20.04]
69 | continue-on-error: ${{ matrix.experimental }}
70 | env:
71 | CHANNELS: -c dppy/label/dev -c intel -c main --override-channels
72 |
73 | steps:
74 | - uses: actions/checkout@v3
75 | with:
76 | fetch-depth: 0
77 | - name: Download artifact
78 | uses: actions/download-artifact@v3
79 | with:
80 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
81 | - name: Add conda to system path
82 | run: echo $CONDA/bin >> $GITHUB_PATH
83 | - name: Install conda-build
84 | # Needed to be able to run conda index
85 | run: conda install conda-build -c conda-forge --override-channels
86 | - name: Create conda channel
87 | run: |
88 | mkdir -p $GITHUB_WORKSPACE/channel/linux-64
89 | # conda index $GITHUB_WORKSPACE/channel || exit 1
90 | mv ${PACKAGE_NAME}-*.tar.bz2 $GITHUB_WORKSPACE/channel/linux-64 || exit 1
91 | conda index $GITHUB_WORKSPACE/channel || exit 1
92 | # Test channel
93 | conda search $PACKAGE_NAME -c $GITHUB_WORKSPACE/channel --override-channels --info --json > $GITHUB_WORKSPACE/ver.json
94 | cat ver.json
95 | - name: Collect dependencies
96 | run: |
97 | CHANNELS="-c $GITHUB_WORKSPACE/channel ${{ env.CHANNELS }}"
98 | conda create -n test-env $PACKAGE_NAME python=${{ matrix.python }} $CHANNELS --only-deps --dry-run > lockfile
99 | cat lockfile
100 | - name: Set pkgs_dirs
101 | run: |
102 | echo "pkgs_dirs: [~/.conda/pkgs]" >> ~/.condarc
103 | - name: Cache conda packages
104 | uses: actions/cache@v3
105 | env:
106 | CACHE_NUMBER: 0 # Increase to reset cache
107 | with:
108 | path: ~/.conda/pkgs
109 | key:
110 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('lockfile') }}
111 | restore-keys: |
112 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
113 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
114 | - name: Install package
115 | run: |
116 | export CHANNELS="-c $GITHUB_WORKSPACE/channel ${{ env.CHANNELS }}"
117 | conda create -n test-env $PACKAGE_NAME pytest python=${{ matrix.python }} ${CHANNELS}
118 | . $CONDA/etc/profile.d/conda.sh
119 | conda activate test-env
120 | # Test installed packages
121 | conda list -n test-env
122 | - name: Run tests
123 | run: |
124 | . $CONDA/etc/profile.d/conda.sh
125 | conda activate test-env
126 | pushd ./demos/mcpi/${{ env.MODULE_NAME }}
127 | pytest
128 | popd
129 |
130 | upload_linux:
131 | needs: test_linux
132 | if: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || (github.event_name == 'push' && contains(github.ref, 'refs/tags/')) }}
133 | runs-on: ubuntu-20.04
134 | strategy:
135 | matrix:
136 | python: ['3.8', '3.9', '3.10']
137 | steps:
138 | - name: Download artifact
139 | uses: actions/download-artifact@v3
140 | with:
141 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
142 |
143 | - name: Install anaconda-client
144 | run: conda install anaconda-client -c conda-forge --override-channels
145 | - name: Add conda to system path
146 | run: echo $CONDA/bin >> $GITHUB_PATH
147 |
148 | - name: Upload
149 | env:
150 | ANACONDA_TOKEN: ${{ secrets.PYCODDIYTOKEN }}
151 | run: |
152 | anaconda --token ${{ env.ANACONDA_TOKEN }} upload --user pycoddiy --label main ${PACKAGE_NAME}-*.tar.bz2
153 |
154 | build_windows:
155 | runs-on: windows-latest
156 |
157 | strategy:
158 | matrix:
159 | python: ['3.8', '3.9', '3.10']
160 | env:
161 | conda-bld: C:\Miniconda\conda-bld\win-64\
162 | steps:
163 | - uses: actions/checkout@v3
164 | with:
165 | fetch-depth: 0
166 | - uses: conda-incubator/setup-miniconda@v2
167 | with:
168 | use-only-tar-bz2: true
169 | auto-activate-base: true
170 | conda-build-version: "*"
171 | activate-environment: true
172 | python-version: ${{ matrix.python }}
173 |
174 | - name: Cache conda packages
175 | uses: actions/cache@v3
176 | env:
177 | CACHE_NUMBER: 0 # Increase to reset cache
178 | with:
179 | path: ~/conda_pkgs_dir  # package cache dir used by setup-miniconda on this runner
180 | key:
181 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('**/meta.yaml') }}
182 | restore-keys: |
183 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
184 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
185 | - name: Build conda package
186 | run: |
187 | cd ./demos/mcpi
188 | conda build --keep-old-work --dirty --no-test --python ${{ matrix.python }} -c intel -c main --override-channels conda-recipe
189 | - name: Upload artifact
190 | uses: actions/upload-artifact@v3
191 | with:
192 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
193 | path: ${{ env.conda-bld }}${{ env.PACKAGE_NAME }}-*.tar.bz2
194 |
195 | test_windows:
196 | needs: build_windows
197 | runs-on: ${{ matrix.runner }}
198 | defaults:
199 | run:
200 | shell: cmd /C CALL {0}
201 | strategy:
202 | matrix:
203 | python: ['3.8', '3.9', '3.10']
204 | experimental: [false]
205 | runner: [windows-latest]
206 | continue-on-error: ${{ matrix.experimental }}
207 | env:
208 | workdir: '${{ github.workspace }}'
209 | CHANNELS: -c dppy/label/dev -c intel -c main --override-channels
210 | steps:
211 | - name: Checkout sources
212 | uses: actions/checkout@v3
213 | with:
214 | fetch-depth: 0
215 | - name: Download artifact
216 | uses: actions/download-artifact@v3
217 | with:
218 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
219 | - uses: conda-incubator/setup-miniconda@v2
220 | with:
221 | use-only-tar-bz2: false
222 | auto-update-conda: true
223 | conda-build-version: '*'
224 | miniconda-version: 'latest'
225 | activate-environment: test-env
226 | python-version: ${{ matrix.python }}
227 | - name: conda info
228 | shell: cmd /C CALL {0}
229 | run: |
230 | conda info
231 | - name: conda config --show
232 | shell: cmd /C CALL {0}
233 | run: |
234 | conda config --show
235 | - name: Create conda channel with the downloaded artifact
236 | shell: cmd /C CALL {0}
237 | run: |
238 | echo ${{ env.workdir }}
239 | mkdir ${{ env.workdir }}\channel\win-64
240 | move ${{ env.PACKAGE_NAME }}-*.tar.bz2 ${{ env.workdir }}\channel\win-64
241 | dir ${{ env.workdir }}\channel\win-64
242 | - name: Index the channel
243 | shell: cmd /C CALL {0}
244 | run: conda index ${{ env.workdir }}\channel
245 | - name: Collect dependencies
246 | shell: cmd /C CALL {0}
247 | run: |
248 | conda install -n test-env ${{ env.PACKAGE_NAME }} python=${{ matrix.python }} -c ${{ env.workdir }}/channel ${{ env.CHANNELS }} --only-deps --dry-run > lockfile && conda activate test-env
249 | - name: Display lockfile content
250 | shell: pwsh
251 | run: Get-Content -Path .\lockfile
252 | - name: Cache conda packages
253 | uses: actions/cache@v3
254 | env:
255 | CACHE_NUMBER: 0 # Increase to reset cache
256 | with:
257 | path: ~/conda_pkgs_dir  # package cache dir used by setup-miniconda on this runner
258 | key:
259 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('lockfile') }}
260 | restore-keys: |
261 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
262 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
263 | - name: Install dependencies
264 | shell: cmd /C CALL {0}
265 | run: |
266 | conda install -n test-env ${{ env.PACKAGE_NAME }} pytest python=${{ matrix.python }} -c ${{ env.workdir }}/channel ${{ env.CHANNELS }}
267 | - name: Install opencl_rt
268 | shell: cmd /C CALL {0}
269 | run: conda install -n test-env opencl_rt -c intel --override-channels
270 | - name: Configure Intel OpenCL CPU RT
271 | shell: pwsh
272 | run: |
273 | $script_path="$env:CONDA_PREFIX\Scripts\set-intel-ocl-icd-registry.ps1"
274 | &$script_path
275 | # Show the cl.cfg entry that points the OpenCL CPU driver at the TBB DLLs, which are not in its default search path
276 | $cl_cfg="$env:CONDA_PREFIX\Library\lib\cl.cfg"
277 | Get-Content -Tail 5 -Path $cl_cfg
278 | - name: Conda list
279 | shell: cmd /C CALL {0}
280 | run: conda list -n test-env
281 | - name: Run tests
282 | shell: pwsh
283 | run: |
284 | ls
285 | conda activate test-env
286 | pushd ./demos/mcpi/${{ env.MODULE_NAME }}
287 | pytest
288 | popd
289 |
290 | upload_windows:
291 | needs: test_windows
292 | if: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || (github.event_name == 'push' && contains(github.ref, 'refs/tags/')) }}
293 | runs-on: windows-latest
294 | strategy:
295 | matrix:
296 | python: ['3.8', '3.9', '3.10']
297 | steps:
298 | - name: Download artifact
299 | uses: actions/download-artifact@v3
300 | with:
301 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
302 | - uses: conda-incubator/setup-miniconda@v2
303 | with:
304 | auto-activate-base: true
305 | activate-environment: ""
306 | - name: Install anaconda-client
307 | run: conda install anaconda-client -c conda-forge --override-channels
308 |
309 | - name: Upload
310 | env:
311 | ANACONDA_TOKEN: ${{ secrets.PYCODDIYTOKEN }}
312 | run: |
313 | anaconda --token ${{ env.ANACONDA_TOKEN }} upload --user pycoddiy --label main ${{ env.PACKAGE_NAME }}-*.tar.bz2
314 |
--------------------------------------------------------------------------------
/.github/workflows/gol_build_test_upload.yml:
--------------------------------------------------------------------------------
1 | name: gol package
2 |
3 | # Declare default permissions as read only.
4 | permissions: read-all
5 |
6 | on:
7 | push:
8 | branches:
9 | - main
10 | pull_request:
11 |
12 | env:
13 | PACKAGE_NAME: game-of-life-demo
14 | MODULE_NAME: game_of_life_demo
15 |
16 | jobs:
17 | build_linux:
18 | runs-on: ubuntu-20.04
19 |
20 | strategy:
21 | matrix:
22 | python: ['3.8', '3.9', '3.10']
23 | steps:
24 | - uses: actions/checkout@v3
25 | with:
26 | fetch-depth: 0
27 |
28 | - name: Set pkgs_dirs
29 | run: |
30 | echo "pkgs_dirs: [~/.conda/pkgs]" >> ~/.condarc
31 | - name: Cache conda packages
32 | uses: actions/cache@v3
33 | env:
34 | CACHE_NUMBER: 4 # Increase to reset cache
35 | with:
36 | path: ~/.conda/pkgs
37 | key:
38 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('**/meta.yaml') }}
39 | restore-keys: |
40 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
41 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
42 | - name: Add conda to system path
43 | run: echo $CONDA/bin >> $GITHUB_PATH
44 | - name: Install conda-build
45 | run: conda install conda-build -c conda-forge --override-channels
46 | - name: Build conda package
47 | run: |
48 | CHANNELS="-c dppy/label/dev -c intel -c conda-forge --override-channels"
49 | VERSIONS="--python ${{ matrix.python }}"
50 | TEST="--no-test"
51 | cd ./demos/game-of-life
52 | conda build \
53 | $TEST \
54 | $VERSIONS \
55 | $CHANNELS \
56 | conda-recipe
57 | - name: Upload artifact
58 | uses: actions/upload-artifact@v3
59 | with:
60 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
61 | path: /usr/share/miniconda/conda-bld/linux-64/${{ env.PACKAGE_NAME }}-*.tar.bz2
62 |
63 | build_windows:
64 | runs-on: windows-latest
65 |
66 | strategy:
67 | matrix:
68 | python: ['3.8', '3.9', '3.10']
69 | env:
70 | conda-bld: C:\Miniconda\conda-bld\win-64\
71 | steps:
72 | - uses: actions/checkout@v3
73 | with:
74 | fetch-depth: 0
75 | - uses: conda-incubator/setup-miniconda@v2
76 | with:
77 | use-only-tar-bz2: false
78 | auto-activate-base: true
79 | conda-build-version: "*"
80 | activate-environment: true
81 | python-version: ${{ matrix.python }}
82 |
83 | - name: Cache conda packages
84 | uses: actions/cache@v3
85 | env:
86 | CACHE_NUMBER: 4 # Increase to reset cache
87 | with:
88 | path: ~/conda_pkgs_dir  # package cache dir used by setup-miniconda on this runner
89 | key:
90 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('**/meta.yaml') }}
91 | restore-keys: |
92 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
93 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
94 | - name: Build conda package
95 | run: |
96 | cd ./demos/game-of-life
97 | conda build --keep-old-work --dirty --no-test --python ${{ matrix.python }} -c dppy/label/dev -c intel -c conda-forge --override-channels conda-recipe
98 | - name: Upload artifact
99 | uses: actions/upload-artifact@v3
100 | with:
101 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
102 | path: ${{ env.conda-bld }}${{ env.PACKAGE_NAME }}-*.tar.bz2
103 |
104 | test_linux:
105 | needs: build_linux
106 | runs-on: ${{ matrix.runner }}
107 |
108 | strategy:
109 | matrix:
110 | python: ['3.8', '3.9', '3.10']
111 | experimental: [false]
112 | runner: [ubuntu-20.04]
113 | continue-on-error: ${{ matrix.experimental }}
114 | env:
115 | CHANNELS: -c dppy/label/dev -c intel -c conda-forge --override-channels
116 |
117 | steps:
118 | - uses: actions/checkout@v3
119 | with:
120 | fetch-depth: 0
121 | - name: Download artifact
122 | uses: actions/download-artifact@v3
123 | with:
124 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
125 | - name: Add conda to system path
126 | run: echo $CONDA/bin >> $GITHUB_PATH
127 | - name: Install conda-build
128 | # Needed to be able to run conda index
129 | run: conda install conda-build -c conda-forge --override-channels
130 | - name: Create conda channel
131 | run: |
132 | mkdir -p $GITHUB_WORKSPACE/channel/linux-64
133 | conda index $GITHUB_WORKSPACE/channel || exit 1
134 | mv ${PACKAGE_NAME}-*.tar.bz2 $GITHUB_WORKSPACE/channel/linux-64 || exit 1
135 | conda index $GITHUB_WORKSPACE/channel || exit 1
136 | # Test channel
137 | conda search $PACKAGE_NAME -c $GITHUB_WORKSPACE/channel --override-channels --info --json > $GITHUB_WORKSPACE/ver.json
138 | cat ver.json
139 | - name: Collect dependencies
140 | run: |
141 | CHANNELS="-c $GITHUB_WORKSPACE/channel ${{ env.CHANNELS }}"
142 | conda create -n test-env $PACKAGE_NAME python=${{ matrix.python }} $CHANNELS --only-deps --dry-run > lockfile
143 | cat lockfile
144 | - name: Set pkgs_dirs
145 | run: |
146 | echo "pkgs_dirs: [~/.conda/pkgs]" >> ~/.condarc
147 | - name: Cache conda packages
148 | uses: actions/cache@v3
149 | env:
150 | CACHE_NUMBER: 4 # Increase to reset cache
151 | with:
152 | path: ~/.conda/pkgs
153 | key:
154 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('lockfile') }}
155 | restore-keys: |
156 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
157 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
158 | - name: Install package
159 | run: |
160 | export CHANNELS="-c $GITHUB_WORKSPACE/channel ${{ env.CHANNELS }}"
161 | conda create -n test-env $PACKAGE_NAME pytest pytest-cov python=${{ matrix.python }} ${CHANNELS}
162 | . $CONDA/etc/profile.d/conda.sh
163 | conda activate test-env
164 | pip install pytest-mock
165 | # Test installed packages
166 | conda list -n test-env
167 | - name: Run tests (numpy)
168 | run: |
169 | . $CONDA/etc/profile.d/conda.sh
170 | conda activate test-env
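171 | # point the OpenCL ICD loader at the Intel CPU runtime library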
171 | export OCL_ICD_FILENAMES=libintelocl.so
172 | pytest ./demos/game-of-life/game_of_life_demo --variant numpy
173 | - name: Run tests (dpnp)
174 | run: |
175 | . $CONDA/etc/profile.d/conda.sh
176 | conda activate test-env
177 | export OCL_ICD_FILENAMES=libintelocl.so
178 | pytest ./demos/game-of-life/game_of_life_demo --variant dpnp
179 | - name: Run tests (numba parallel)
180 | run: |
181 | . $CONDA/etc/profile.d/conda.sh
182 | conda activate test-env
183 | export OCL_ICD_FILENAMES=libintelocl.so
184 | pytest ./demos/game-of-life/game_of_life_demo --variant numba --parallel
185 | - name: Run tests (numba no-parallel)
186 | run: |
187 | . $CONDA/etc/profile.d/conda.sh
188 | conda activate test-env
189 | export OCL_ICD_FILENAMES=libintelocl.so
190 | pytest ./demos/game-of-life/game_of_life_demo --variant numba --no-parallel
191 |
192 | test_windows:
193 | needs: build_windows
194 | runs-on: ${{ matrix.runner }}
195 | defaults:
196 | run:
197 | shell: cmd /C CALL {0}
198 | strategy:
199 | matrix:
200 | python: ['3.8', '3.9', '3.10']
201 | experimental: [false]
202 | runner: [windows-latest]
203 | continue-on-error: ${{ matrix.experimental }}
204 | env:
205 | workdir: '${{ github.workspace }}'
206 | CHANNELS: -c dppy/label/dev -c intel -c conda-forge --override-channels
207 | steps:
208 | - name: Checkout sources
209 | uses: actions/checkout@v3
210 | with:
211 | fetch-depth: 0
212 | - name: Download artifact
213 | uses: actions/download-artifact@v3
214 | with:
215 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
216 | - uses: conda-incubator/setup-miniconda@v2
217 | with:
218 | use-only-tar-bz2: false
219 | auto-update-conda: true
220 | conda-build-version: '*'
221 | miniconda-version: 'latest'
222 | activate-environment: test-env
223 | python-version: ${{ matrix.python }}
224 | - name: Create conda channel with the downloaded artifact
225 | shell: cmd /C CALL {0}
226 | run: |
227 | echo ${{ env.workdir }}
228 | mkdir ${{ env.workdir }}\channel\win-64
229 | move ${{ env.PACKAGE_NAME }}-*.tar.bz2 ${{ env.workdir }}\channel\win-64
230 | dir ${{ env.workdir }}\channel\win-64
231 | - name: Index the channel
232 | shell: cmd /C CALL {0}
233 | run: conda index ${{ env.workdir }}\channel
234 |
235 | - name: Dump package version info from created channel into ver.json
236 | shell: cmd /C CALL {0}
237 | run: |
238 | conda search ${{ env.PACKAGE_NAME }} -c ${{ env.workdir }}/channel --override-channels --info --json > ${{ env.workdir }}\ver.json
239 | - name: Output content of produced ver.json
240 | shell: pwsh
241 | run: Get-Content -Path ${{ env.workdir }}\ver.json
242 | - name: Collect dependencies
243 | shell: cmd /C CALL {0}
244 | run: |
245 | conda install -n test-env ${{ env.PACKAGE_NAME }} python=${{ matrix.python }} -c ${{ env.workdir }}/channel ${{ env.CHANNELS }} --only-deps --dry-run > lockfile && conda activate test-env && pip install pytest-mock
246 | - name: Display lockfile content
247 | shell: pwsh
248 | run: Get-Content -Path .\lockfile
249 | - name: Cache conda packages
250 | uses: actions/cache@v3
251 | env:
252 | CACHE_NUMBER: 4 # Increase to reset cache
253 | with:
254 | path: ~/conda_pkgs_dir  # package cache dir used by setup-miniconda on this runner
255 | key:
256 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-${{ hashFiles('lockfile') }}
257 | restore-keys: |
258 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-python-${{ matrix.python }}-
259 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-
260 | - name: Install opencl_rt
261 | shell: cmd /C CALL {0}
262 | run: conda install -n test-env opencl_rt -c intel --override-channels
263 | - name: Install dependencies
264 | shell: cmd /C CALL {0}
265 | run: |
266 | SET "TEST_DEPENDENCIES=pytest pytest-cov"
267 | conda install -n test-env ${{ env.PACKAGE_NAME }} %TEST_DEPENDENCIES% python=${{ matrix.python }} -c ${{ env.workdir }}/channel ${{ env.CHANNELS }}
268 | - name: Configure Intel OpenCL CPU RT
269 | shell: pwsh
270 | run: |
271 | $script_path="$env:CONDA_PREFIX\Scripts\set-intel-ocl-icd-registry.ps1"
272 | &$script_path
273 | # Show the cl.cfg entry that points the OpenCL CPU driver at the TBB DLLs, which are not in its default search path
274 | $cl_cfg="$env:CONDA_PREFIX\Library\lib\cl.cfg"
275 | Get-Content -Tail 5 -Path $cl_cfg
276 | # note: each step starts in the repository root, so the test steps below use full paths
277 | - name: Run tests (numpy)
278 | shell: cmd /C CALL {0}
279 | run: >-
280 | conda activate test-env && pytest ./demos/game-of-life/game_of_life_demo --variant numpy
281 | - name: Run tests (dpnp)
282 | shell: cmd /C CALL {0}
283 | run: >-
284 | conda activate test-env && pytest ./demos/game-of-life/game_of_life_demo --variant dpnp
285 | - name: Run tests (numba no-parallel)
286 | shell: cmd /C CALL {0}
287 | run: >-
288 | conda activate test-env && pytest ./demos/game-of-life/game_of_life_demo --variant numba --no-parallel
289 | - name: Run tests (numba parallel)
290 | shell: cmd /C CALL {0}
291 | run: >-
292 | conda activate test-env && pytest ./demos/game-of-life/game_of_life_demo --variant numba --parallel
293 |
294 |
295 | upload_linux:
296 | needs: test_linux
297 | if: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || (github.event_name == 'push' && contains(github.ref, 'refs/tags/')) }}
298 | runs-on: ubuntu-20.04
299 | strategy:
300 | matrix:
301 | python: ['3.8', '3.9', '3.10']
302 | steps:
303 | - name: Download artifact
304 | uses: actions/download-artifact@v3
305 | with:
306 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
307 |
308 | - name: Install anaconda-client
309 | run: conda install anaconda-client -c conda-forge --override-channels
310 | - name: Add conda to system path
311 | run: echo $CONDA/bin >> $GITHUB_PATH
312 |
313 | - name: Upload
314 | env:
315 | ANACONDA_TOKEN: ${{ secrets.PYCODDIYTOKEN }}
316 | run: |
317 | anaconda --token $ANACONDA_TOKEN upload --user pycoddiy --label main ${PACKAGE_NAME}-*.tar.bz2
318 |
319 | upload_windows:
320 | needs: test_windows
321 | if: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || (github.event_name == 'push' && contains(github.ref, 'refs/tags/')) }}
322 | runs-on: windows-latest
323 | strategy:
324 | matrix:
325 | python: ['3.8', '3.9', '3.10']
326 | steps:
327 | - name: Download artifact
328 | uses: actions/download-artifact@v3
329 | with:
330 | name: ${{ env.PACKAGE_NAME }} ${{ runner.os }} Python ${{ matrix.python }}
331 | - uses: conda-incubator/setup-miniconda@v2
332 | with:
333 | auto-activate-base: true
334 | activate-environment: ""
335 | - name: Install anaconda-client
336 | run: conda install anaconda-client -c conda-forge --override-channels
337 |
338 | - name: Upload
339 | env:
340 | ANACONDA_TOKEN: ${{ secrets.PYCODDIYTOKEN }}
341 | run: |
342 | anaconda --token ${{ env.ANACONDA_TOKEN }} upload --user pycoddiy --label main ${{ env.PACKAGE_NAME }}-*.tar.bz2
343 |
--------------------------------------------------------------------------------