├── .gitignore ├── Gruntfile.js ├── README.md ├── examples ├── cython │ ├── fib │ │ ├── fib-c-only │ │ │ ├── Makefile │ │ │ ├── cfib.c │ │ │ ├── cfib.h │ │ │ └── main.c │ │ ├── fib-exercise │ │ │ ├── Makefile │ │ │ ├── fib.pyx │ │ │ ├── fib_solution.pyx │ │ │ ├── pyfib.py │ │ │ ├── setup_fib.py │ │ │ └── setup_fib_solution.py │ │ ├── fib-handwritten-extension │ │ │ ├── Makefile │ │ │ ├── hand_fib.c │ │ │ └── hand_fib_setup.py │ │ ├── fib-ipython-notebook │ │ │ └── cython-fibonnaci-notebook.ipynb │ │ ├── fib-pyx │ │ │ ├── Makefile │ │ │ ├── fib.pyx │ │ │ ├── setup.py │ │ │ ├── test.py │ │ │ └── test_pyximport.py │ │ └── fib-wrap-c │ │ │ ├── Makefile │ │ │ ├── cfib.c │ │ │ ├── cfib.h │ │ │ ├── setup.py │ │ │ └── wrap_cfib.pyx │ ├── julia │ │ ├── LICENSE.txt │ │ ├── Makefile │ │ ├── README.rst │ │ ├── for-reference │ │ │ ├── cython-julia-set.ipynb │ │ │ ├── julia.py │ │ │ ├── julia_cython_solution1.pyx │ │ │ ├── julia_numpy.py │ │ │ └── julia_ui.py │ │ ├── julia_cython.pyx │ │ ├── julia_cython_solution.pyx │ │ ├── julia_pure_python.py │ │ ├── setup.py │ │ ├── timing.py │ │ └── utils.py │ ├── particle-class-example │ │ ├── Makefile │ │ ├── particle.pyx │ │ ├── particle_heap.pyx │ │ └── setup.py │ ├── wrap-c-example │ │ ├── Makefile │ │ ├── setup.py │ │ ├── test_time_extern.py │ │ └── time_extern.pyx │ └── wrap-cpp-particle │ │ ├── Makefile │ │ ├── particle.cpp │ │ ├── particle.h │ │ ├── setup.py │ │ ├── test_wrap_particle.py │ │ └── wrap_particle.pyx ├── data │ ├── numbers │ └── python.bib ├── intro │ ├── 01_hello_world.py │ ├── 02_write_numbers.py │ ├── 03_call_sys_commands.py │ ├── 04_regular_expressions.py │ ├── 05_debug.py │ ├── 06_simple_plot.py │ ├── 07_simple_plot_customize.py │ ├── 08_simple_plot_legend.py │ ├── 09_simple_plot_spline.py │ ├── 10_histogram.py │ └── 11_histogram_2.py ├── pyhpc-cython.zip └── scale │ ├── 2D Cavity Flow using petsc4py.ipynb │ ├── Bratu3D.pyx │ ├── Bratu3Dimpl.c │ ├── Bratu3Dimpl.h │ ├── CavityFlow2D.pyx │ ├── CavityFlow2Dimpl.c │ ├── CavityFlow2Dimpl.h │ ├── Quadrants Example.ipynb │ ├── cavity_flow2d.py │ ├── danumbering.gif │ ├── ghost.gif │ ├── makefile │ ├── mpi4py_mandelbrot.py │ ├── mpi4py_pi.py │ ├── quadrants.py │ └── setup.py ├── figures ├── TACC_logo.png ├── allgather_alltoall.png ├── books.png ├── broadcast_scatter_gather.png ├── careful.png ├── concurrency.png ├── concurrency_2.png ├── continuum.png ├── creative_commons_logo.png ├── does_it_scale.png ├── dpg.png ├── dsw.png ├── euler_weak_scaling.png ├── fem.png ├── free_lunch.png ├── intro │ ├── array1D.2.lightbg.png │ ├── ecosystem.lightbg.png │ ├── example_surface_from_irregular_data.jpg │ ├── frequency_plot.png │ ├── frequency_signal.png │ ├── hist.png │ ├── hist_legend_fit.png │ ├── random_c.jpg │ ├── simple_plot.png │ ├── simple_plot_cust.png │ ├── simple_plot_cust2.png │ ├── simple_plot_legend.png │ └── snapshot_ipython.png ├── kaust.png ├── log.png ├── molt.png ├── numpy │ ├── broadcasting.png │ └── threefundamental.png ├── reduce_scan.png ├── scale │ ├── danumbering.gif │ ├── euler_weak_scaling.png │ ├── ghost.gif │ └── pyclaw_architecture.png ├── semiconductors.png ├── struct.png └── tbl.png ├── html └── intro.html ├── index.html ├── markdown ├── intro │ ├── building_blocks.md │ ├── hpc_building_blocks.md │ ├── index.md │ ├── intro.md │ ├── ipython.md │ ├── matplotlib.md │ ├── numpy.md │ ├── scipy.md │ ├── tour.md │ └── why.md └── scale │ ├── Makefile │ ├── euler_weak_scaling.png │ ├── petsc4py-tutorial.md │ ├── pyclaw-anatomy.md │ └── pyclaw_architecture.png ├── notebooks ├── 
01_Introducing_Python.ipynb ├── 02_Speeding_Python.ipynb ├── 03.1_Distributed_Computing.ipynb ├── 03_Scaling_Python.ipynb ├── 04_yt_Introduction.ipynb ├── 05_Data_Inspection_with_yt.ipynb ├── 06_Data_Objects_in_yt.ipynb ├── 06_Simple_Visualization_with_yt.ipynb ├── 07_Derived_Fields_in_yt.ipynb ├── 08_Volume_Rendering_in_yt.ipynb ├── Appendix_00_Notebook_Tour.ipynb ├── Appendix_01_Resources.ipynb ├── Appendix_02_PETSc4Py.ipynb ├── Appendix_03_Launch_MPI_Engines.ipynb └── files ├── package.json └── pdf ├── 01_Introducing_Python.pdf ├── 01_Introducing_Python.tex ├── 02_Speeding_Python.pdf ├── 02_Speeding_Python.tex ├── 03.1_Distributed_Computing.pdf ├── 03.1_Distributed_Computing.tex ├── 03_Scaling_Python.pdf ├── 03_Scaling_Python.tex ├── 04_yt_Introduction.pdf ├── 04_yt_Introduction.tex ├── 05_Data_Inspection_with_yt.pdf ├── 05_Data_Inspection_with_yt.tex ├── 06_Data_Objects_in_yt.pdf ├── 06_Data_Objects_in_yt.tex ├── 06_Simple_Visualization_with_yt.tex ├── 07_Derived_Fields_in_yt.pdf ├── 07_Derived_Fields_in_yt.tex ├── 08_Volume_Rendering_in_yt.pdf ├── 08_Volume_Rendering_in_yt.tex ├── Appendix_00_Notebook_Tour.pdf ├── Appendix_00_Notebook_Tour.tex ├── Appendix_01_Resources.pdf ├── Appendix_01_Resources.tex ├── Appendix_02_PETSc4Py.pdf ├── Appendix_02_PETSc4Py.tex ├── Appendix_03_Launch_MPI_Engines.pdf ├── Appendix_03_Launch_MPI_Engines.tex └── python-speed-cython-sc14.pdf /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.pyc 3 | node_modules/* 4 | examples/data/f_numbers 5 | notebooks/.ipynb_checkpoints/01_Introducing_Python-checkpoint.ipynb 6 | notebooks/.ipynb_checkpoints/02_Speeding_Python-checkpoint.ipynb 7 | notebooks/.ipynb_checkpoints/03_Scaling_Python-checkpoint.ipynb 8 | notebooks/file.mat 9 | -------------------------------------------------------------------------------- /Gruntfile.js: -------------------------------------------------------------------------------- 1 | /* global module:false */ 2 | module.exports = function(grunt) { 3 | 4 | // Project configuration 5 | grunt.initConfig({ 6 | pkg: grunt.file.readJSON('package.json'), 7 | meta: { 8 | banner: 9 | '/*!\n' + 10 | ' * reveal.js <%= pkg.version %> (<%= grunt.template.today("yyyy-mm-dd, HH:MM") %>)\n' + 11 | ' * http://lab.hakim.se/reveal-js\n' + 12 | ' * MIT licensed\n' + 13 | ' *\n' + 14 | ' * Copyright (C) 2013 Hakim El Hattab, http://hakim.se\n' + 15 | ' */' 16 | }, 17 | 18 | // Tests will be added soon 19 | qunit: { 20 | files: [ 'test/**/*.html' ] 21 | }, 22 | 23 | uglify: { 24 | options: { 25 | banner: '<%= meta.banner %>\n' 26 | }, 27 | build: { 28 | src: 'js/reveal.js', 29 | dest: 'js/reveal.min.js' 30 | } 31 | }, 32 | 33 | cssmin: { 34 | compress: { 35 | files: { 36 | 'css/reveal.min.css': [ 'css/reveal.css' ] 37 | } 38 | } 39 | }, 40 | 41 | sass: { 42 | main: { 43 | files: { 44 | 'css/theme/default.css': 'css/theme/source/default.scss', 45 | 'css/theme/beige.css': 'css/theme/source/beige.scss', 46 | 'css/theme/night.css': 'css/theme/source/night.scss', 47 | 'css/theme/serif.css': 'css/theme/source/serif.scss', 48 | 'css/theme/simple.css': 'css/theme/source/simple.scss', 49 | 'css/theme/sky.css': 'css/theme/source/sky.scss', 50 | 'css/theme/moon.css': 'css/theme/source/moon.scss', 51 | 'css/theme/solarized.css': 'css/theme/source/solarized.scss' 52 | } 53 | } 54 | }, 55 | 56 | jshint: { 57 | options: { 58 | curly: false, 59 | eqeqeq: true, 60 | immed: true, 61 | latedef: true, 62 | newcap: true, 63 | noarg: true, 64 | sub: true, 65 | undef: true, 66 | 
eqnull: true, 67 | browser: true, 68 | expr: true, 69 | globals: { 70 | head: false, 71 | module: false, 72 | console: false 73 | } 74 | }, 75 | files: [ 'Gruntfile.js', 'js/reveal.js' ] 76 | }, 77 | 78 | connect: { 79 | server: { 80 | options: { 81 | port: 8000, 82 | base: '.' 83 | } 84 | } 85 | }, 86 | 87 | zip: { 88 | 'reveal-js-presentation.zip': [ 89 | 'index.html', 90 | 'css/**', 91 | 'js/**', 92 | 'lib/**', 93 | 'images/**', 94 | 'plugin/**' 95 | ] 96 | }, 97 | 98 | watch: { 99 | main: { 100 | files: [ 'Gruntfile.js', 'js/reveal.js', 'css/reveal.css' ], 101 | tasks: 'default' 102 | }, 103 | theme: { 104 | files: [ 'css/theme/source/*.scss', 'css/theme/template/*.scss' ], 105 | tasks: 'themes' 106 | } 107 | } 108 | 109 | }); 110 | 111 | // Dependencies 112 | grunt.loadNpmTasks( 'grunt-contrib-jshint' ); 113 | grunt.loadNpmTasks( 'grunt-contrib-cssmin' ); 114 | grunt.loadNpmTasks( 'grunt-contrib-uglify' ); 115 | grunt.loadNpmTasks( 'grunt-contrib-watch' ); 116 | grunt.loadNpmTasks( 'grunt-contrib-sass' ); 117 | grunt.loadNpmTasks( 'grunt-contrib-connect' ); 118 | grunt.loadNpmTasks( 'grunt-zip' ); 119 | 120 | // Default task 121 | grunt.registerTask( 'default', [ 'jshint', 'cssmin', 'uglify' ] ); 122 | 123 | // Theme task 124 | grunt.registerTask( 'themes', [ 'sass' ] ); 125 | 126 | // Package presentation to archive 127 | grunt.registerTask( 'package', [ 'default', 'zip' ] ); 128 | 129 | // Serve presentation locally 130 | grunt.registerTask( 'serve', [ 'connect', 'watch' ] ); 131 | 132 | }; 133 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Python in HPC Tutorial 2 | ============================================= 3 | 4 | Python is a versatile language for the HPC community, with tools as diverse as 5 | visualizing large amounts of data, creating innovative user interfaces, and 6 | running large distributed jobs. Unfortunately, Python has a reputation for 7 | being slow and unfit for HPC computing. HPC Python experts and Shaheen's sixty-five 8 | thousand cores disagree. As HPC increases its vision to big data and 9 | non-traditional applications, it must also use languages that are easier for 10 | the novice, more robust to general computing, and more productive for the 11 | expert. 12 | 13 | Using Python in a performant way moves HPC applications ever closer to 14 | these goals. This success has made Python a requirement for supporting users 15 | new to the HPC field and a good choice for practitioners to adopt. In this 16 | tutorial, we give students practical experience using Python for scientific 17 | computing tasks from leaders in the field of Scientific Python. 18 | 19 | Topics include linear algebra and array computing with NumPy, interactive 20 | and parallel software development with IPython, performance and painless 21 | low-level C linking with Cython, and the friendliest performant interfaces to 22 | MPI available. 23 | 24 | 25 | Using the material 26 | ------------------ 27 | 28 | This tutorial is developed as a set of IPython Notebooks. To run, you only need 29 | to install IPython Notebook, then navigate to this directory and launch the notebooks. 
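For example, from a shell (a minimal sketch: it assumes the IPython notebook server is installed and on your `PATH`; on current installs the equivalent command is `jupyter notebook`):

```
# launch the notebook server from the directory holding the tutorial notebooks
cd pyhpc-tutorial/notebooks
ipython notebook
```

The notebook dashboard that opens in your browser lists the tutorial notebooks (`01_Introducing_Python.ipynb`, `02_Speeding_Python.ipynb`, and so on).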
30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /examples/cython/fib/fib-c-only/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | gcc -O3 cfib.c main.c -o cfib.x 3 | 4 | clean: 5 | -rm *.o cfib.x 6 | -------------------------------------------------------------------------------- /examples/cython/fib/fib-c-only/cfib.c: -------------------------------------------------------------------------------- 1 | #include "cfib.h" 2 | 3 | int cfib(int n) 4 | { 5 | int a=0, b=1, i=0, tmp=0; 6 | for(i=0; i `lim`, where `z_n = z_{n-1}**2 + c`.\n", 30 | " '''\n", 31 | " count = 0\n", 32 | " while abs(z) < lim and count < cutoff:\n", 33 | " z = z * z + c\n", 34 | " count += 1\n", 35 | " return count\n", 36 | "\n", 37 | "\n", 38 | "def compute_julia(c, N, bound=2, lim=1000., kernel=kernel):\n", 39 | " ''' Pure Python calculation of the Julia set for a given `c`. No NumPy\n", 40 | " array operations are used.\n", 41 | " '''\n", 42 | " julia = np.empty((N, N), dtype=np.uint32)\n", 43 | " grid_x = np.linspace(-bound, bound, N)\n", 44 | " grid_y = grid_x * 1j\n", 45 | " c = complex(c)\n", 46 | " t0 = time()\n", 47 | " for i, x in enumerate(grid_x):\n", 48 | " for j, y in enumerate(grid_y):\n", 49 | " julia[i,j] = kernel(x+y, c, lim)\n", 50 | " return julia, time() - t0" 51 | ], 52 | "language": "python", 53 | "metadata": {}, 54 | "outputs": [] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "collapsed": false, 59 | "input": [ 60 | "import pylab as pl\n", 61 | "\n", 62 | "def plot_julia(kwargs, compute_julia):\n", 63 | " ''' Given parameters dict in `kwargs` and a function to compute the Julia\n", 64 | " set (`compute_julia`), plots the resulting Julia set with appropriately\n", 65 | " labeled axes.\n", 66 | " '''\n", 67 | " kwargs = kwargs.copy()\n", 68 | "\n", 69 | " def _plotter(kwargs):\n", 70 | " bound = kwargs['bound']\n", 71 | " julia, time = compute_julia(**kwargs)\n", 72 | " print \"execution time (s):\", time\n", 73 | " julia = np.log(julia)\n", 74 | " pl.imshow(julia, \n", 75 | " interpolation='nearest',\n", 76 | " extent=(-bound, bound)*2)\n", 77 | " pl.colorbar()\n", 78 | " title = r\"Julia set for $C={0.real:5.3f}+{0.imag:5.3f}i$ $[{1}\\times{1}]$\"\n", 79 | " pl.title(title.format(kwargs['c'], kwargs['N']))\n", 80 | " pl.xlabel(\"$Re(z)$\")\n", 81 | " pl.ylabel(\"$Im(z)$\")\n", 82 | "\n", 83 | " pl.figure(figsize=(14, 12))\n", 84 | "\n", 85 | " cvals = [0.285+0.01j, -0.1+0.651j, -0.4+0.6j, -0.8+0.156j]\n", 86 | " subplots = ['221', '222', '223', '224' ]\n", 87 | "\n", 88 | " for c, sp in zip(cvals, subplots):\n", 89 | " kwargs.update(c=c)\n", 90 | " pl.subplot(sp)\n", 91 | " _plotter(kwargs)\n", 92 | "\n", 93 | " pl.show()" 94 | ], 95 | "language": "python", 96 | "metadata": {}, 97 | "outputs": [] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "collapsed": false, 102 | "input": [ 103 | "kwargs = dict(N=100, bound=1.5)\n", 104 | "plot_julia(kwargs, compute_julia)" 105 | ], 106 | "language": "python", 107 | "metadata": {}, 108 | "outputs": [] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "collapsed": false, 113 | "input": [ 114 | "%%cython\n", 115 | "\n", 116 | "from time import time\n", 117 | "\n", 118 | "import numpy as np\n", 119 | "cimport cython\n", 120 | "cimport numpy as cnp\n", 121 | "\n", 122 | "ctypedef double complex cpx_t\n", 123 | "ctypedef double real_t\n", 124 | "\n", 125 | "cdef inline real_t cabs_sq(cpx_t z) nogil:\n", 126 | " ''' Helper inline function, computes the 
square of the abs. value of the\n", 127 | " complex number `z`.\n", 128 | " '''\n", 129 | " return z.real * z.real + z.imag * z.imag\n", 130 | " \n", 131 | "cpdef unsigned int kernel(cpx_t z, \n", 132 | " cpx_t c,\n", 133 | " real_t lim,\n", 134 | " real_t cutoff=1e6) nogil:\n", 135 | " ''' Cython implementation of the kernel computation.\n", 136 | "\n", 137 | " This is implemented so that no C-API calls are made inside the function\n", 138 | " body. Even still, there is some overhead as compared with a pure C\n", 139 | " implementation.\n", 140 | " '''\n", 141 | " cdef unsigned int count = 0\n", 142 | " cdef real_t lim_sq = lim * lim\n", 143 | " while cabs_sq(z) < lim_sq and count < cutoff:\n", 144 | " z = z * z + c\n", 145 | " count += 1\n", 146 | " return count\n", 147 | "\n", 148 | "@cython.boundscheck(False)\n", 149 | "@cython.wraparound(False)\n", 150 | "def compute_julia_opt(cpx_t c,\n", 151 | " unsigned int N,\n", 152 | " real_t bound=1.5,\n", 153 | " real_t lim=1000.):\n", 154 | " '''\n", 155 | " Cython `compute_julia()` implementation with Numpy array buffer\n", 156 | " declarations and appropriate compiler directives. The body of this\n", 157 | " function is nearly identical to the `compute_julia_no_opt()` function.\n", 158 | "\n", 159 | " '''\n", 160 | "\n", 161 | " cdef cnp.ndarray[cnp.uint32_t, ndim=2, mode='c'] julia \n", 162 | " cdef cnp.ndarray[real_t, ndim=1, mode='c'] grid\n", 163 | " cdef unsigned int i, j\n", 164 | " cdef real_t x, y\n", 165 | "\n", 166 | " julia = np.empty((N, N), dtype=np.uint32)\n", 167 | " grid = np.linspace(-bound, bound, N)\n", 168 | " t0 = time()\n", 169 | " for i in range(N):\n", 170 | " x = grid[i]\n", 171 | " for j in range(N):\n", 172 | " y = grid[j]\n", 173 | " julia[i,j] = kernel(x+y*1j, c, lim)\n", 174 | " return julia, time() - t0" 175 | ], 176 | "language": "python", 177 | "metadata": {}, 178 | "outputs": [] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "collapsed": false, 183 | "input": [ 184 | "kwargs = dict(N=1000, bound=1.5)\n", 185 | "plot_julia(kwargs, compute_julia_opt)" 186 | ], 187 | "language": "python", 188 | "metadata": {}, 189 | "outputs": [] 190 | }, 191 | { 192 | "cell_type": "code", 193 | "collapsed": false, 194 | "input": [], 195 | "language": "python", 196 | "metadata": {}, 197 | "outputs": [] 198 | } 199 | ], 200 | "metadata": {} 201 | } 202 | ] 203 | } -------------------------------------------------------------------------------- /examples/cython/julia/for-reference/julia.py: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------------------------- 2 | # Copyright (c) 2012, Enthought, Inc. 3 | # All rights reserved. See LICENSE.txt for details. 4 | # 5 | # Author: Kurt W. Smith 6 | # Date: 26 March 2012 7 | #----------------------------------------------------------------------------- 8 | 9 | ''' 10 | julia.py 11 | 12 | Compute and plot the Julia set. 13 | 14 | This provides a self-contained---if somewhat contrived---example for comparing 15 | the runtimes between pure Python, Numpy, Cython, and Cython-wrapped C versions 16 | of the julia set calculation. 17 | 18 | It is meant to be run from the command line; run 19 | 20 | $ python julia.py -h 21 | 22 | for details. 
23 | 24 | ''' 25 | 26 | # --- Python / Numpy imports ------------------------------------------------- 27 | import numpy as np 28 | import pylab as pl 29 | 30 | # --- Import the various julia set computation modules ----------------------- 31 | import julia_pure_python 32 | import julia_cython_solution as julia_cython 33 | # import julia_cython 34 | import julia_numpy 35 | # import julia_multiprocessing_solution as julia_multiprocessing 36 | 37 | def printer(label, runtime, speedup): 38 | ''' Given a label, the total runtime in seconds, and a speedup value, 39 | prints things nicely to stdout. 40 | ''' 41 | from sys import stdout 42 | print "{}:".format(label.strip()) 43 | fs = " {:.<15s} {: >6.2g}" 44 | print fs.format("runtime (s)", runtime) 45 | print fs.format("speedup", speedup) 46 | print 47 | stdout.flush() 48 | 49 | # some good c values: 50 | # (-0.1 + 0.651j) 51 | # (-0.4 + 0.6j) 52 | # (0.285 + 0.01j) 53 | 54 | def plot_julia(kwargs, compute_julia): 55 | ''' Given parameters dict in `kwargs` and a function to compute the Julia 56 | set (`compute_julia`), plots the resulting Julia set with appropriately 57 | labeled axes. 58 | ''' 59 | kwargs = kwargs.copy() 60 | 61 | def _plotter(kwargs): 62 | bound = kwargs['bound'] 63 | julia, _ = compute_julia(**kwargs) 64 | julia = np.log(julia) 65 | pl.imshow(julia, 66 | interpolation='nearest', 67 | extent=(-bound, bound)*2) 68 | pl.colorbar() 69 | title = r"Julia set for $C={0.real:5.3f}+{0.imag:5.3f}i$ $[{1}\times{1}]$" 70 | pl.title(title.format(kwargs['c'], kwargs['N'])) 71 | pl.xlabel("$Re(z)$") 72 | pl.ylabel("$Im(z)$") 73 | 74 | pl.figure(figsize=(14, 12)) 75 | 76 | cvals = [0.285+0.01j, -0.1+0.651j, -0.4+0.6j, -0.8+0.156j] 77 | subplots = ['221', '222', '223', '224' ] 78 | 79 | for c, sp in zip(cvals, subplots): 80 | kwargs.update(c=c) 81 | pl.subplot(sp) 82 | _plotter(kwargs) 83 | 84 | pl.show() 85 | 86 | def compare_runtimes(kwargs): 87 | ''' Given a parameter dict `kwargs`, runs different implementations of the 88 | Julia set computation and compares the runtimes of each. 
89 | ''' 90 | 91 | ref_julia, python_time = julia_pure_python.compute_julia(**kwargs) 92 | printer("Python only", python_time, 1.0) 93 | 94 | _, numpy_time = julia_numpy.compute_julia(**kwargs) 95 | # assert np.allclose(ref_julia, _) 96 | printer("Python only + Numpy expressions", numpy_time, 97 | python_time / numpy_time) 98 | 99 | _, cython_kernel_time = julia_pure_python.compute_julia( 100 | kernel=julia_cython.kernel, **kwargs) 101 | assert np.allclose(ref_julia, _) 102 | printer("Python + cythonized kernel", cython_kernel_time, 103 | python_time / cython_kernel_time) 104 | 105 | _, mp_time = julia_multiprocessing.compute_julia_block(kernel=julia_pure_python.kernel, **kwargs) 106 | assert np.allclose(ref_julia, _) 107 | printer("Multiprocessing + Python kernel", mp_time, python_time / mp_time) 108 | 109 | _, mp_time = julia_multiprocessing.compute_julia_block(kernel=julia_cython.kernel, **kwargs) 110 | assert np.allclose(ref_julia, _) 111 | printer("Multiprocessing + cythonized kernel", mp_time, python_time / mp_time) 112 | 113 | _, cython_no_opt_time = julia_cython.compute_julia_no_opt(**kwargs) 114 | assert np.allclose(ref_julia, _) 115 | printer("All Cython, no optimizations", cython_no_opt_time, 116 | python_time / cython_no_opt_time) 117 | 118 | _, cython_opt_time = julia_cython.compute_julia_opt(**kwargs) 119 | assert np.allclose(ref_julia, _) 120 | printer("All Cython, Numpy optimizations", cython_opt_time, 121 | python_time / cython_opt_time) 122 | 123 | _, ext_opt_time = julia_cython.compute_julia_ext(**kwargs) 124 | assert np.allclose(ref_julia, _) 125 | printer("All C version, wrapped with Cython", ext_opt_time, 126 | python_time / ext_opt_time) 127 | 128 | def main(args): 129 | ''' The main entry point; branches on whether `args.action` is "plot" or 130 | "compare". 131 | ''' 132 | bound = 1.5 133 | kwargs = dict(cr=0.285, ci=0.01, 134 | N=args.N, 135 | bound=bound) 136 | 137 | if args.action == 'plot': 138 | plot_julia(kwargs, julia_cython.compute_julia_ext) 139 | elif args.action == 'compare': 140 | compare_runtimes(kwargs) 141 | 142 | description = """ Explore the performance characteristics of Cython and Numpy 143 | when computing the Julia set.""" 144 | 145 | help_arg_n = """ The number of grid points in each dimension; larger for more 146 | resolution. (default 100)) """ 147 | 148 | help_arg_a = """ Either *plot* an approximation of a Julia set with resolution 149 | N (default), or *compare* the runtimes for different implementations.) 
""" 150 | 151 | if __name__ == '__main__': 152 | from argparse import ArgumentParser 153 | 154 | parser = ArgumentParser(description=description) 155 | 156 | parser.add_argument('-N', type=int, default=200, help=help_arg_n) 157 | parser.add_argument('-a', '--action', type=str, 158 | default='plot', 159 | choices=('plot', 'compare'), 160 | help=help_arg_a) 161 | 162 | args = parser.parse_args() 163 | main(args) 164 | -------------------------------------------------------------------------------- /examples/cython/julia/for-reference/julia_cython_solution1.pyx: -------------------------------------------------------------------------------- 1 | # --- Python std lib imports ------------------------------------------------- 2 | from time import time 3 | import numpy as np 4 | 5 | cdef float abs_sq(float zr, float zi): 6 | return zr * zr + zi * zi 7 | 8 | cdef int kernel(float zr, float zi, float cr, float ci, float lim, double cutoff): 9 | cdef: 10 | int count = 0 11 | float lim_sq = lim * lim 12 | while abs_sq(zr, zi) < lim_sq and count < cutoff: 13 | zr, zi = zr * zr - zi * zi + cr, 2 * zr * zi + ci 14 | count += 1 15 | return count 16 | 17 | def compute_julia(float cr, float ci, int N, float bound=1.5, float lim=1000., double cutoff=1e6): 18 | cdef: 19 | int i, j 20 | float x, y 21 | unsigned int[:,::1] julia 22 | float[::1] grid 23 | julia = np.empty((N, N), dtype=np.uint32) 24 | grid = np.array(np.linspace(-bound, bound, N), dtype=np.float32) 25 | t0 = time() 26 | for i in range(N): 27 | x = grid[i] 28 | for j in range(N): 29 | y = grid[j] 30 | julia[i,j] = kernel(x, y, cr, ci, lim, cutoff) 31 | return julia, time() - t0 32 | -------------------------------------------------------------------------------- /examples/cython/julia/for-reference/julia_numpy.py: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------------------------- 2 | # Copyright (c) 2012, Enthought, Inc. 3 | # All rights reserved. See LICENSE.txt for details. 4 | # 5 | # Author: Kurt W. Smith 6 | # Date: 26 March 2012 7 | #----------------------------------------------------------------------------- 8 | 9 | from time import time 10 | import numpy as np 11 | 12 | def compute_julia(cr, ci, N, bound=1.5, lim=4., cutoff=1e6): 13 | ''' Pure Python calculation of the Julia set for a given `c` using NumPy 14 | array operations. 
15 | ''' 16 | c = cr + 1j * ci 17 | orig_err = np.seterr() 18 | np.seterr(over='ignore', invalid='ignore') 19 | julia = np.zeros((N, N), dtype=np.uint32) 20 | X, Y = np.ogrid[-bound:bound:N*1j, -bound:bound:N*1j] 21 | iterations = X + Y * 1j 22 | count = 1 23 | t0 = time() 24 | while not np.all(julia) and count < cutoff: 25 | mask = np.logical_not(julia) & (np.abs(iterations) >= lim) 26 | julia[mask] = count 27 | count += 1 28 | iterations = iterations**2 + c 29 | if count == cutoff: 30 | julia[np.logical_not(julia)] = count 31 | np.seterr(**orig_err) 32 | return julia, time() - t0 33 | -------------------------------------------------------------------------------- /examples/cython/julia/for-reference/julia_ui.py: -------------------------------------------------------------------------------- 1 | # --- Imports ---------------------------------------------------------------- 2 | import numpy as np 3 | 4 | from traits.api import (HasTraits, Float, Instance, Array, on_trait_change, 5 | Property, Int, Enum, Callable) 6 | from traitsui.api import View, Item, RangeEditor, Controller, HGroup, Group 7 | from chaco import default_colormaps 8 | from chaco.api import Plot, ArrayPlotData, hot 9 | from enable.api import ComponentEditor 10 | import utils 11 | 12 | # --- Traits classes. -------------------------------------------------------- 13 | 14 | class Julia(HasTraits): 15 | 16 | resolution = Int(100) 17 | cr = Float(-0.1) 18 | ci = Float(0.651) 19 | cutoff = Int(100) 20 | runtime = Float() 21 | julia = Array() 22 | compute_julia = Callable() 23 | 24 | @on_trait_change('cr, ci, resolution, cutoff') 25 | def update_julia(self): 26 | self.julia = self.compute() 27 | 28 | def _julia_default(self): 29 | return self.compute() 30 | 31 | def compute(self): 32 | julia, self.runtime = self.compute_julia(self.cr, self.ci, 33 | self.resolution, 34 | lim=4., cutoff=self.cutoff) 35 | return np.log(julia) 36 | 37 | # --- Set up the colormaps to use -------------------------------------------- 38 | def colormaps(): 39 | cmnames = default_colormaps.color_map_name_dict.keys() 40 | colormaps = sorted(cmnames, key=str.lower) 41 | for boring in 'hot bone gray yarg gist_gray gist_yarg Greys'.split(): 42 | colormaps.remove(boring) 43 | # Make 'hot' the first colormap. 
44 | return ['hot'] + colormaps 45 | 46 | class JuliaUI(Controller): 47 | 48 | model = Instance(Julia) 49 | runtime = Property(depends_on=['model.runtime']) 50 | plot = Instance(Plot) 51 | colormap = Enum(colormaps()) 52 | 53 | traits_view = View(Item('controller.plot', editor=ComponentEditor(), show_label=False), 54 | Group( 55 | Item('cr', editor=RangeEditor(low=-2.0, high=2.0, low_label='-2', high_label='2'), show_label=False), 56 | Item('ci', editor=RangeEditor(low=-2.0, high=2.0, low_label='-2', high_label='2'), show_label=False), 57 | label='c_real / c_imaginary', show_border=True, 58 | ), 59 | HGroup( 60 | Item('resolution', editor=RangeEditor(low=50, high=1000, mode='slider')), 61 | Item('cutoff', editor=RangeEditor(low=100, high=300, mode='slider')), 62 | Item('controller.colormap'), 63 | ), 64 | Item('controller.runtime', style='readonly', show_label=False), 65 | width=800, height=900, resizable=True, 66 | title="Julia Set Explorer") 67 | 68 | @on_trait_change('model.runtime') 69 | def _get_runtime(self): 70 | return "Compute time: {:d} ms".format(int(round(self.model.runtime * 1000))) 71 | 72 | @on_trait_change('model.julia') 73 | def update_julia(self): 74 | self.plot.data.set_data('julia', self.model.julia) 75 | 76 | def _plot_default(self): 77 | julia = self.model.julia 78 | apd = ArrayPlotData(julia=julia[:-1,:-1]) 79 | grid = np.linspace(-2, 2, self.model.resolution-1) 80 | X, Y = np.meshgrid(grid, grid) 81 | plot = Plot(apd) 82 | plot.aspect_ratio = 1.0 83 | plot.img_plot("julia", xbounds=X, ybounds=Y, 84 | colormap=hot, interpolation='nearest') 85 | return plot 86 | 87 | def _colormap_changed(self): 88 | cmap = default_colormaps.color_map_name_dict[self.colormap] 89 | if self.plot is not None: 90 | value_range = self.plot.color_mapper.range 91 | self.plot.color_mapper = cmap(value_range) 92 | self.plot.request_redraw() 93 | 94 | 95 | # --- main entry point ------------------------------------------------------- 96 | 97 | def main(args): 98 | suffix = args.module.rsplit('.', 1)[-1] 99 | if suffix in ('so', 'pyd', 'pyx'): 100 | utils.compiler(args.setup) 101 | compute_julia = utils.importer(args.module, args.function) 102 | julia = Julia(compute_julia=compute_julia) 103 | jui = JuliaUI(model=julia) 104 | jui.configure_traits() 105 | 106 | if __name__ == '__main__': 107 | from argparse import ArgumentParser 108 | parser = ArgumentParser() 109 | parser.add_argument('module') 110 | parser.add_argument('-f', '--function', default='compute_julia') 111 | parser.add_argument('--setup', default='setup.py') 112 | main(parser.parse_args()) 113 | -------------------------------------------------------------------------------- /examples/cython/julia/julia_cython.pyx: -------------------------------------------------------------------------------- 1 | # --- Python std lib imports ------------------------------------------------- 2 | from time import time 3 | import numpy as np 4 | 5 | def abs_sq(zr, zi): 6 | return zr * zr + zi * zi 7 | 8 | def kernel(zr, zi, cr, ci, lim, cutoff): 9 | lim_sq = lim * lim 10 | count = 0 11 | while abs_sq(zr, zi) < lim_sq and count < cutoff: 12 | zr, zi = zr * zr - zi * zi + cr, 2 * zr * zi + ci 13 | count += 1 14 | return count 15 | 16 | def compute_julia(cr, ci, N, bound=1.5, lim=1000., cutoff=1e6): 17 | julia = np.empty((N, N), dtype=np.uint32) 18 | grid = np.array(np.linspace(-bound, bound, N), dtype=np.float32) 19 | t0 = time() 20 | for i in range(N): 21 | x = grid[i] 22 | for j in range(N): 23 | y = grid[j] 24 | julia[i,j] = kernel(x, y, cr, ci, lim, 
cutoff) 25 | return julia, time() - t0 26 | -------------------------------------------------------------------------------- /examples/cython/julia/julia_cython_solution.pyx: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------------------------- 2 | # Copyright (c) 2012, 2013, Enthought, Inc. 3 | # All rights reserved. Distributed under the terms of the 2-clause BSD 4 | # licence. See LICENSE.txt for details. 5 | # 6 | # Author: Kurt W. Smith 7 | # Date: 26 March 2012 8 | #----------------------------------------------------------------------------- 9 | 10 | # --- Python std lib imports ------------------------------------------------- 11 | from time import time 12 | import numpy as np 13 | 14 | # --- Cython cimports -------------------------------------------------------- 15 | cimport cython 16 | from libc.stdint cimport uint32_t, int32_t 17 | from cython.parallel cimport prange 18 | 19 | # --- Ctypedefs -------------------------------------------------------- 20 | ctypedef float real_t 21 | ctypedef uint32_t uint_t 22 | ctypedef int32_t int_t 23 | 24 | #----------------------------------------------------------------------------- 25 | # Cython functions 26 | #----------------------------------------------------------------------------- 27 | cdef real_t abs_sq(real_t zr, real_t zi) nogil: 28 | return zr * zr + zi * zi 29 | 30 | cdef uint_t kernel(real_t zr, real_t zi, 31 | real_t cr, real_t ci, 32 | real_t lim, real_t cutoff) nogil: 33 | cdef: 34 | uint_t count = 0 35 | real_t lim_sq = lim * lim 36 | 37 | while abs_sq(zr, zi) < lim_sq and count < cutoff: 38 | zr, zi = zr * zr - zi * zi + cr, 2 * zr * zi + ci 39 | count += 1 40 | return count 41 | 42 | @cython.boundscheck(False) 43 | @cython.wraparound(False) 44 | def compute_julia(real_t cr, real_t ci, 45 | uint32_t N, real_t bound=1.5, 46 | real_t lim=1000., real_t cutoff=1e6): 47 | cdef: 48 | uint_t[:,::1] julia 49 | real_t[::1] grid 50 | int i, j 51 | real_t x, y 52 | 53 | julia = np.empty((N, N), dtype=np.uint32) 54 | grid = np.asarray(np.linspace(-bound, bound, N), dtype=np.float32) 55 | t0 = time() 56 | for i in range(N): 57 | x = grid[i] 58 | for j in range(N): 59 | y = grid[j] 60 | julia[i,j] = kernel(x, y, cr, ci, lim, cutoff) 61 | return julia, time() - t0 62 | 63 | @cython.boundscheck(False) 64 | @cython.wraparound(False) 65 | def compute_julia_parallel(real_t cr, real_t ci, 66 | uint_t N, real_t bound=1.5, 67 | real_t lim=1000., real_t cutoff=1e6): 68 | cdef: 69 | uint_t[:,::1] julia 70 | real_t[::1] grid 71 | int_t i, j 72 | real_t x 73 | 74 | julia = np.empty((N, N), dtype=np.uint32) 75 | grid = np.asarray(np.linspace(-bound, bound, N), dtype=np.float32) 76 | t0 = time() 77 | for i in prange(N, nogil=True): 78 | x = grid[i] 79 | for j in range(N): 80 | julia[i,j] = kernel(x, grid[j], cr, ci, lim, cutoff) 81 | return julia, time() - t0 82 | -------------------------------------------------------------------------------- /examples/cython/julia/julia_pure_python.py: -------------------------------------------------------------------------------- 1 | # --- Python / Numpy imports ------------------------------------------------- 2 | import numpy as np 3 | from time import time 4 | 5 | def kernel(zr, zi, cr, ci, lim, cutoff): 6 | ''' Computes the number of iterations `n` such that 7 | |z_n| > `lim`, where `z_n = z_{n-1}**2 + c`. 
8 | ''' 9 | count = 0 10 | while ((zr*zr + zi*zi) < (lim*lim)) and count < cutoff: 11 | zr, zi = zr * zr - zi * zi + cr, 2 * zr * zi + ci 12 | count += 1 13 | return count 14 | 15 | def compute_julia(cr, ci, N, bound=1.5, lim=1000., cutoff=1e6): 16 | ''' Pure Python calculation of the Julia set for a given `c`. No NumPy 17 | array operations are used. 18 | ''' 19 | julia = np.empty((N, N), dtype=np.uint32) 20 | grid_x = np.linspace(-bound, bound, N) 21 | t0 = time() 22 | for i, x in enumerate(grid_x): 23 | for j, y in enumerate(grid_x): 24 | julia[i,j] = kernel(x, y, cr, ci, lim, cutoff=cutoff) 25 | return julia, time() - t0 26 | -------------------------------------------------------------------------------- /examples/cython/julia/setup.py: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------------------------- 2 | # Copyright (c) 2012, Enthought, Inc. 3 | # All rights reserved. See LICENSE.txt for details. 4 | # 5 | # Author: Kurt W. Smith 6 | # Date: 26 March 2012 7 | #----------------------------------------------------------------------------- 8 | 9 | from distutils.core import setup 10 | from distutils.extension import Extension 11 | from Cython.Distutils import build_ext 12 | 13 | extra_args = [] 14 | # Comment/Uncomment the following line to disable/enable OpenMP for GCC-ish 15 | # compilers. 16 | # extra_args = ["-fopenmp"] 17 | 18 | exts = [Extension("julia_cython", 19 | ["julia_cython.pyx"], 20 | extra_compile_args=extra_args, 21 | extra_link_args=extra_args), 22 | Extension("julia_cython_solution", 23 | ["julia_cython_solution.pyx"], 24 | extra_compile_args=extra_args, 25 | extra_link_args=extra_args), 26 | ] 27 | 28 | setup( 29 | cmdclass = {'build_ext': build_ext}, 30 | ext_modules = exts, 31 | ) 32 | -------------------------------------------------------------------------------- /examples/cython/julia/timing.py: -------------------------------------------------------------------------------- 1 | import utils 2 | # import pylab as pl 3 | import numpy as np 4 | 5 | def main(args): 6 | suffix = args.module.rsplit('.', 1)[-1] 7 | if suffix in ('so', 'pyd', 'pyx'): 8 | utils.compiler(args.setup) 9 | compute_julia = utils.importer(args.module, args.function) 10 | jla, time = compute_julia(args.cr, args.ci, args.N, 2.0, 4., args.cutoff) 11 | print "Compute time: %fs" % time 12 | # pl.imshow(np.log(jla), cmap=pl.cm.hot) 13 | # pl.show() 14 | 15 | if __name__ == '__main__': 16 | from argparse import ArgumentParser 17 | parser = ArgumentParser() 18 | parser.add_argument('module', help="""The module to use -- either a pure 19 | python module or a Cython .pyx file. 
If given a .pyx file, it will 20 | be compiled automatically.""") 21 | parser.add_argument('-f', '--function', default='compute_julia', help="The function from the module to call, default `compute_julia`") 22 | parser.add_argument('--setup', default='setup.py') 23 | parser.add_argument('-cr', type=float, default=-0.1, help='The real component of the C parameter.') 24 | parser.add_argument('-ci', type=float, default=0.651, help='The imaginary component of the C parameter.') 25 | parser.add_argument('-N', type=int, default=200, help='The number of grid points to use.') 26 | parser.add_argument('--cutoff', type=float, default=10**3, help='The cutoff value, controls the image detail.') 27 | main(parser.parse_args()) 28 | -------------------------------------------------------------------------------- /examples/cython/julia/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from subprocess import check_call 3 | import sys, platform 4 | 5 | def compiler(setup_name): 6 | # the Python binary full path. 7 | exe = sys.executable 8 | 9 | # figure out what platform we're on and adjust the commandline flags accordingly. 10 | extras = [] 11 | if platform.system() == 'Windows': 12 | extras = ['--compiler=mingw32'] 13 | 14 | # The distutils command to execute 15 | cmd = [exe, setup_name, 'build_ext', '--inplace'] + extras 16 | print(cmd) 17 | 18 | # runs the command and raises an exception on failure. 19 | check_call(cmd) 20 | 21 | def importer(module_name, function_name): 22 | 23 | # Remove any common ending, both for pure python and extension modules. 24 | for ending in ('.py', '.pyc', '.so', '.pyd'): 25 | module_name = module_name.rsplit(ending)[0] 26 | 27 | mod = __import__(module_name) 28 | 29 | # import the required function, re-raising an ImportError on failure.
30 | try: 31 | return getattr(mod, function_name) 32 | except AttributeError: 33 | raise ImportError("cannot import name %s" % function_name) 34 | -------------------------------------------------------------------------------- /examples/cython/particle-class-example/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | python setup.py build_ext --inplace 3 | 4 | clean: 5 | rm -r build particle{,_heap}.{c,so} *.pyc __pycache__ 6 | 7 | -------------------------------------------------------------------------------- /examples/cython/particle-class-example/particle.pyx: -------------------------------------------------------------------------------- 1 | cimport cython 2 | from libc.math cimport sqrt 3 | 4 | DEF _LEN = 3 5 | 6 | cdef float magnitude(float *vec): 7 | cdef float mag = 0.0 8 | cdef int idx 9 | for idx in range(_LEN): 10 | mag += vec[idx]*vec[idx] 11 | return sqrt(mag) 12 | 13 | cdef class Particle: 14 | 15 | cdef: 16 | float psn[_LEN] 17 | float vel[_LEN] 18 | public float mass, charge 19 | 20 | def __init__(self, psn=None, vel=None, mass=0.0, charge=0.0): 21 | zeros = (0.0,)*_LEN 22 | psn = psn or zeros 23 | vel = vel or zeros 24 | for i in range(_LEN): 25 | self.psn[i] = psn[i] 26 | self.vel[i] = vel[i] 27 | self.mass = mass 28 | self.charge = charge 29 | 30 | property position: 31 | 32 | def __get__(self): 33 | return tuple(self.psn[i] for i in range(_LEN)) 34 | 35 | def __set__(self, it): 36 | for i in range(_LEN): 37 | self.psn[i] = it[i] 38 | 39 | property velocity: 40 | 41 | def __get__(self): 42 | return tuple(self.vel[i] for i in range(_LEN)) 43 | 44 | def __set__(self, it): 45 | for i in range(_LEN): 46 | self.vel[i] = it[i] 47 | 48 | property momentum: 49 | 50 | "Particle object's momentum." 51 | 52 | def __get__(self): 53 | return tuple(self.vel[i] * self.mass for i in range(_LEN)) 54 | 55 | property speed: 56 | 57 | def __get__(self): 58 | return magnitude(self.vel) 59 | 60 | property direction: 61 | 62 | def __get__(self): 63 | cdef float spd = self.speed 64 | return tuple(self.vel[i] / spd for i in range(_LEN)) 65 | 66 | -------------------------------------------------------------------------------- /examples/cython/particle-class-example/particle_heap.pyx: -------------------------------------------------------------------------------- 1 | # from cython.view import array as cvarray 2 | cimport cython 3 | from libc.stdlib cimport malloc, free 4 | from libc.math cimport sqrt 5 | 6 | DEF _LEN = 3 7 | 8 | cdef class Particle: 9 | 10 | cdef: 11 | float *psn, *vel 12 | public float mass, charge 13 | 14 | def __cinit__(self): 15 | # allocate the psn and vel arrays on the heap. 16 | self.psn = malloc(_LEN * sizeof(float)) 17 | self.vel = malloc(_LEN * sizeof(float)) 18 | if not self.psn or not self.vel: 19 | raise MemoryError("Cannot allocate memory.") 20 | 21 | def __init__(self, psn=None, vel=None, mass=0.0, charge=0.0): 22 | # called after __cinit__() -- initialize all data. 23 | zeros = (0.0,)*_LEN 24 | psn = psn or zeros 25 | vel = vel or zeros 26 | for i in range(_LEN): 27 | self.psn[i] = psn[i] 28 | self.vel[i] = vel[i] 29 | self.mass = mass 30 | self.charge = charge 31 | 32 | def __dealloc__(self): 33 | # called when cleaning up the object; free malloc'd memory. 
34 | if self.psn: 35 | free(self.psn); self.psn = NULL 36 | if self.vel: 37 | free(self.vel); self.vel = NULL 38 | 39 | property position: 40 | 41 | def __get__(self): 42 | return tuple(self.psn[i] for i in range(_LEN)) 43 | 44 | def __set__(self, it): 45 | for i in range(_LEN): 46 | self.psn[i] = it[i] 47 | 48 | property velocity: 49 | 50 | def __get__(self): 51 | return tuple(self.vel[i] for i in range(_LEN)) 52 | 53 | def __set__(self, it): 54 | for i in range(_LEN): 55 | self.vel[i] = it[i] 56 | 57 | property momentum: 58 | 59 | "Particle object's momentum." 60 | 61 | def __get__(self): 62 | return tuple(self.vel[i] * self.mass for i in range(_LEN)) 63 | 64 | -------------------------------------------------------------------------------- /examples/cython/particle-class-example/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | 5 | exts = [Extension("particle", ["particle.pyx"]), 6 | Extension("particle_heap", ["particle_heap.pyx"])] 7 | 8 | setup( 9 | cmdclass = {'build_ext': build_ext}, 10 | ext_modules = exts, 11 | ) 12 | 13 | -------------------------------------------------------------------------------- /examples/cython/wrap-c-example/Makefile: -------------------------------------------------------------------------------- 1 | 2 | all: 3 | python setup.py build_ext -i 4 | 5 | clean: 6 | -rm -r build time_extern.{so,c} 7 | -------------------------------------------------------------------------------- /examples/cython/wrap-c-example/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | 5 | setup( 6 | ext_modules=[ Extension("time_extern", ["time_extern.pyx"]) ], 7 | cmdclass = {'build_ext': build_ext} 8 | ) 9 | -------------------------------------------------------------------------------- /examples/cython/wrap-c-example/test_time_extern.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import time_extern 4 | 5 | print "Time extern docstring:" 6 | print time_extern.__doc__ 7 | 8 | print "get_date() docstring:" 9 | print time_extern.get_date.__doc__ 10 | 11 | print "time_extern.get_date(): ", time_extern.get_date() 12 | -------------------------------------------------------------------------------- /examples/cython/wrap-c-example/time_extern.pyx: -------------------------------------------------------------------------------- 1 | ''' 2 | ============================================================================= 3 | time_extern extension module. Simple wrapper that calls into the localtime() 4 | and time() functions in the C standard library. 5 | 6 | Methods: 7 | -------- 8 | 9 | get_date() -- returns a tuple with the current day, month and year. 10 | ============================================================================= 11 | ''' 12 | 13 | 14 | cdef extern from "time.h": 15 | 16 | struct tm: 17 | int tm_mday 18 | int tm_mon 19 | int tm_year 20 | 21 | ctypedef long time_t 22 | tm* localtime(time_t *timer) 23 | time_t time(time_t *tloc) 24 | 25 | def get_date(): 26 | """Return a tuple with the current day, month and year.
27 | """ 28 | cdef: 29 | time_t t 30 | tm* ts 31 | 32 | t = time(NULL) 33 | ts = localtime(&t) 34 | return ts.tm_mday, ts.tm_mon + 1, ts.tm_year + 1900 35 | 36 | -------------------------------------------------------------------------------- /examples/cython/wrap-cpp-particle/Makefile: -------------------------------------------------------------------------------- 1 | 2 | 3 | all: 4 | python setup.py build_ext -i 5 | 6 | clean: 7 | -rm -r build wrap_particle.{cpp,so} 8 | -------------------------------------------------------------------------------- /examples/cython/wrap-cpp-particle/particle.cpp: -------------------------------------------------------------------------------- 1 | #include "particle.h" 2 | -------------------------------------------------------------------------------- /examples/cython/wrap-cpp-particle/particle.h: -------------------------------------------------------------------------------- 1 | #ifndef _PARTICLE_H_ 2 | #define _PARTICLE_H_ 3 | 4 | #include <cmath> 5 | #include 6 | 7 | template <typename T> 8 | inline const T norm2(const T x, const T y, const T z) 9 | { 10 | return sqrt(x * x + y * y + z * z); 11 | } 12 | 13 | class Particle 14 | { 15 | public: 16 | 17 | Particle() : _x(0), _y(0), _z(0), 18 | _vx(0), _vy(0), _vz(0), 19 | _mass(0), _charge(0) {}; 20 | 21 | Particle(float x, float y, float z, 22 | float vx, float vy, float vz, 23 | float mass, float charge) : 24 | _x(x), _y(y), _z(z), 25 | _vx(vx), _vy(vy), _vz(vz), 26 | _mass(mass), _charge(charge) {}; 27 | 28 | const float get_speed() const { 29 | return norm2(_vx, _vy, _vz); 30 | } 31 | 32 | const float& get_x() const { 33 | return _x; 34 | } 35 | 36 | private: 37 | float _x, _y, _z; 38 | float _vx, _vy, _vz; 39 | float _mass; 40 | float _charge; 41 | }; 42 | 43 | #endif 44 | -------------------------------------------------------------------------------- /examples/cython/wrap-cpp-particle/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | 5 | ext = Extension("wrap_particle", ["wrap_particle.pyx", "particle.cpp"], language="c++") 6 | 7 | setup( 8 | cmdclass = {'build_ext': build_ext}, 9 | ext_modules = [ext], 10 | ) 11 | -------------------------------------------------------------------------------- /examples/cython/wrap-cpp-particle/test_wrap_particle.py: -------------------------------------------------------------------------------- 1 | import wrap_particle 2 | import numpy as np 3 | 4 | assert np.allclose(wrap_particle.norm2(1, 2, 3), np.sqrt(14.0)) 5 | 6 | args = [0] * 8 7 | 8 | p = wrap_particle.Particle(*args) 9 | p1 = wrap_particle.Particle(*args) 10 | print "p.get_x():", p.get_x() 11 | 12 | class subparticle(wrap_particle.Particle): 13 | def get_x(self): 14 | return super(subparticle, self).get_x() + 10.
15 | 16 | subp = subparticle(*args) 17 | print "subparticle.get_x():", subp.get_x() 18 | -------------------------------------------------------------------------------- /examples/cython/wrap-cpp-particle/wrap_particle.pyx: -------------------------------------------------------------------------------- 1 | from libcpp.vector cimport vector 2 | from cython.operator cimport dereference as deref 3 | 4 | cdef extern from "particle.h": 5 | 6 | float _norm2 "norm2"(float x, float y, float z) 7 | 8 | cdef cppclass _Particle "Particle": 9 | _Particle() 10 | _Particle(float, float, float, 11 | float, float, float, 12 | float, float) 13 | float get_speed() 14 | float get_x() 15 | 16 | def norm2(float x, float y, float z): 17 | cdef float pn = _norm2(x, y, z) 18 | return pn 19 | 20 | cdef class Particle: 21 | 22 | cdef _Particle *_thisptr 23 | 24 | def __cinit__(self, x, y, z, vx, vy, vz, mass, charge): 25 | self._thisptr = new _Particle(x, y, z, vx, vy, vz, mass, charge) 26 | 27 | def __dealloc__(self): 28 | del self._thisptr 29 | 30 | cpdef float get_x(self): 31 | return self._thisptr.get_x() 32 | 33 | cpdef float get_speed(self): 34 | return self._thisptr.get_speed() 35 | 36 | 37 | if __name__ == '__main__': 38 | import numpy as np 39 | assert np.allclose(norm2(1, 2, 3), np.sqrt(14.0)) 40 | -------------------------------------------------------------------------------- /examples/data/numbers: -------------------------------------------------------------------------------- 1 | 1 2 2 | 3 4 3 | 5 6 4 | 7 8 5 | 9 10 6 | -------------------------------------------------------------------------------- /examples/data/python.bib: -------------------------------------------------------------------------------- 1 | @Book{Langtangen2011, 2 | author = {Hans Petter Langtangen}, 3 | title = {A Primer on Scientific Programming with Python}, 4 | publisher = {Springer}, 5 | year = {2011} 6 | } 7 | @Book{Langtangen2010, 8 | author = {Hans Petter Langtangen}, 9 | title = {Python Scripting for Computational Science}, 10 | publisher = {Springer}, 11 | year = {2010} 12 | } 13 | -------------------------------------------------------------------------------- /examples/intro/01_hello_world.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import math 4 | r = math.pi / 2.0 5 | s = math.sin(r) 6 | print "Hello world, sin(%f)=%f" % (r,s) 7 | -------------------------------------------------------------------------------- /examples/intro/02_write_numbers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import math 4 | import os 5 | 6 | data_dir = os.path.join(os.path.dirname(__file__), "..", "data") 7 | infile = os.path.join(data_dir, "numbers") 8 | outfile = os.path.join(data_dir, "f_numbers") 9 | 10 | f = open(infile, 'r') 11 | g = open(outfile, 'w') 12 | 13 | def func(y): 14 | if y >= 0.0: 15 | return y**5.0*math.exp(-y) 16 | else: 17 | return 0.0 18 | 19 | 20 | print "Read from", infile 21 | 22 | for line in f: 23 | line = line.split() 24 | x, y = float(line[0]), float(line[1]) 25 | g.write("%g %12.5e\n" % (x,func(y))) 26 | 27 | print "Wrote to", outfile 28 | f.close(); g.close() 29 | -------------------------------------------------------------------------------- /examples/intro/03_call_sys_commands.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | 6 | cmd = 'date' 7 | output = 
os.popen(cmd) 8 | lines = output.readlines() 9 | fail = output.close() 10 | 11 | if fail: print 'You do not have the date command'; sys.exit() 12 | 13 | for line in lines: 14 | line = line.split() 15 | print "The current time is %s on %s %s, %s" % (line[3],line[2],line[1],line[-1]) 16 | -------------------------------------------------------------------------------- /examples/intro/04_regular_expressions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import re 5 | 6 | data_dir = os.path.join(os.path.dirname(__file__), "..", "data") 7 | infile = os.path.join(data_dir, "python.bib") 8 | 9 | pattern1 = "@Book{(.*)," 10 | pattern2 = "\s+title\s+=\s+{(.*)}," 11 | 12 | print "Reading from", infile 13 | for line in file(infile): 14 | match = re.search(pattern1, line) 15 | if match: 16 | print "Found a book with the tag '%s'" % match.group(1) 17 | 18 | match = re.search(pattern2, line) 19 | if match: 20 | print "The title is '%s'" % match.group(1) 21 | -------------------------------------------------------------------------------- /examples/intro/05_debug.py: -------------------------------------------------------------------------------- 1 | l = range(10) 2 | 3 | l[10] = 5 4 | -------------------------------------------------------------------------------- /examples/intro/06_simple_plot.py: -------------------------------------------------------------------------------- 1 | import pylab as pl 2 | import numpy as np 3 | 4 | X = np.linspace(-np.pi, np.pi, 256, endpoint=True) 5 | C, S = np.cos(X), np.sin(X) 6 | 7 | pl.plot(X, C) 8 | pl.plot(X, S) 9 | 10 | #pl.show() 11 | pl.savefig("foo.png") 12 | -------------------------------------------------------------------------------- /examples/intro/07_simple_plot_customize.py: -------------------------------------------------------------------------------- 1 | import pylab as pl 2 | import numpy as np 3 | # Create a figure of size 8x6 points, 80 dots per inch 4 | pl.figure(figsize=(8, 6), dpi=80) 5 | # Create a new subplot from a grid of 1x1 6 | pl.subplot(1, 1, 1) 7 | X = np.linspace(-np.pi, np.pi, 256, endpoint=True) 8 | C, S = np.cos(X), np.sin(X) 9 | # Plot cosine with a blue continuous line of width 1 (pixels) 10 | pl.plot(X, C, color="blue", linewidth=2.5, linestyle="-") 11 | # Plot sine with a green continuous line of width 1 (pixels) 12 | pl.plot(X, S, color="green", linewidth=2.5, linestyle="--") # Set x limits 13 | pl.xlim(-4.0, 4.0) # Set x ticks 14 | pl.xticks(np.linspace(-4, 4, 9, endpoint=True)) # Set y limits 15 | pl.ylim(-1.0, 1.0) # Set y ticks 16 | pl.yticks(np.linspace(-1, 1, 5, endpoint=True)) # Save figure using 72 dots per inch 17 | # savefig("exercice_2.png", dpi=72) 18 | # Show result on screen 19 | pl.show() 20 | -------------------------------------------------------------------------------- /examples/intro/08_simple_plot_legend.py: -------------------------------------------------------------------------------- 1 | import pylab as pl 2 | import numpy as np 3 | # Create a figure of size 8x6 points, 80 dots per inch 4 | pl.figure(figsize=(8, 6), dpi=80) 5 | # Create a new subplot from a grid of 1x1 6 | pl.subplot(1, 1, 1) 7 | X = np.linspace(-np.pi, np.pi, 256, endpoint=True) 8 | C, S = np.cos(X), np.sin(X) 9 | # Plot cosine with a blue continuous line of width 1 (pixels) 10 | pl.plot(X, C, color="blue", linewidth=2.5, linestyle="-", label="cosine") 11 | # Plot sine with a green continuous line of width 1 (pixels) 12 | pl.plot(X, S, 
color="green", linewidth=2.5, linestyle="--", label="sine") # Set x limits 13 | pl.xlim(-4.0, 4.0) # Set x ticks 14 | pl.xticks(np.linspace(-4, 4, 9, endpoint=True)) # Set y limits 15 | pl.ylim(-1.0, 1.0) # Set y ticks 16 | pl.yticks(np.linspace(-1, 1, 5, endpoint=True)) # Save figure using 72 dots per inch 17 | 18 | pl.legend(loc='upper left') 19 | 20 | 21 | # savefig("exercice_2.png", dpi=72) 22 | # Show result on screen 23 | pl.show() 24 | -------------------------------------------------------------------------------- /examples/intro/09_simple_plot_spline.py: -------------------------------------------------------------------------------- 1 | import pylab as pl 2 | import numpy as np 3 | # Create a figure of size 8x6 points, 80 dots per inch 4 | pl.figure(figsize=(8, 6), dpi=80) 5 | # Create a new subplot from a grid of 1x1 6 | pl.subplot(1, 1, 1) 7 | X = np.linspace(-np.pi, np.pi, 256, endpoint=True) 8 | C, S = np.cos(X), np.sin(X) 9 | # Plot cosine with a blue continuous line of width 1 (pixels) 10 | pl.plot(X, C, color="blue", linewidth=2.5, linestyle="-", label="cosine") 11 | # Plot sine with a green continuous line of width 1 (pixels) 12 | pl.plot(X, S, color="green", linewidth=2.5, linestyle="--", label="sine") # Set x limits 13 | pl.xlim(-4.0, 4.0) # Set x ticks 14 | pl.xticks(np.linspace(-4, 4, 9, endpoint=True)) # Set y limits 15 | pl.ylim(-1.0, 1.0) # Set y ticks 16 | pl.yticks(np.linspace(-1, 1, 5, endpoint=True)) # Save figure using 72 dots per inch 17 | 18 | 19 | 20 | ax = pl.gca() # gca stands for 'get current axis' 21 | ax.spines['right'].set_color('none') 22 | ax.spines['top'].set_color('none') 23 | ax.xaxis.set_ticks_position('bottom') 24 | ax.spines['bottom'].set_position(('data',0)) 25 | ax.yaxis.set_ticks_position('left') 26 | ax.spines['left'].set_position(('data',0)) 27 | 28 | 29 | pl.legend(loc='upper left') 30 | 31 | 32 | # savefig("exercice_2.png", dpi=72) 33 | # Show result on screen 34 | pl.show() 35 | -------------------------------------------------------------------------------- /examples/intro/10_histogram.py: -------------------------------------------------------------------------------- 1 | import pylab 2 | from numpy import random 3 | 4 | pylab.plot(random.randn(10000), 100) 5 | pylab.show() 6 | -------------------------------------------------------------------------------- /examples/intro/11_histogram_2.py: -------------------------------------------------------------------------------- 1 | import pylab 2 | 3 | n, bins, patches = pylab.hist(pylab.randn(1000), 40, normed=1) 4 | l, = pylab.plot(bins, pylab.normpdf(bins, 0.0, 1.0), 'r--', label='fit', linewidth=3) 5 | pylab.legend([l, patches[0]], ['fit', 'hist']) 6 | 7 | pylab.show() 8 | -------------------------------------------------------------------------------- /examples/pyhpc-cython.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/examples/pyhpc-cython.zip -------------------------------------------------------------------------------- /examples/scale/Bratu3D.pyx: -------------------------------------------------------------------------------- 1 | from petsc4py.PETSc cimport Vec, PetscVec 2 | from petsc4py.PETSc cimport Mat, PetscMat 3 | from petsc4py.PETSc cimport DMDA, PetscDM 4 | from petsc4py.PETSc cimport SNES, PetscSNES 5 | 6 | from petsc4py.PETSc import Error 7 | 8 | cdef extern from "Bratu3Dimpl.h": 9 | ctypedef struct Params: 10 | double lambda_ 11 | int 
FormInitGuess(PetscDM da, PetscVec x, Params *p) 12 | int FormFunction (PetscDM da, PetscVec x, PetscVec F, Params *p) 13 | int FormJacobian (PetscDM da, PetscVec x, PetscMat J, Params *p) 14 | 15 | def formInitGuess(Vec x, DA da, double lambda_): 16 | cdef int ierr 17 | cdef Params p = {"lambda_" : lambda_} 18 | ierr = FormInitGuess(da.dm, x.vec, &p) 19 | if ierr != 0: raise Error(ierr) 20 | 21 | def formFunction(SNES snes, Vec x, Vec f, DA da, double lambda_): 22 | cdef int ierr 23 | cdef Params p = {"lambda_" : lambda_} 24 | ierr = FormFunction(da.dm, x.vec, f.vec, &p) 25 | if ierr != 0: raise Error(ierr) 26 | 27 | def formJacobian(SNES snes, Vec x, Mat J, Mat P, DA da, double lambda_): 28 | cdef int ierr 29 | cdef Params p = {"lambda_" : lambda_} 30 | ierr = FormJacobian(da.dm, x.vec, P.mat, &p) 31 | if ierr != 0: raise Error(ierr) 32 | if J != P: J.assemble() # for matrix-free operator 33 | return Mat.Structure.SAME_NONZERO_PATTERN 34 | -------------------------------------------------------------------------------- /examples/scale/Bratu3Dimpl.c: -------------------------------------------------------------------------------- 1 | /* ------------------------------------------------------------------------ 2 | 3 | Solid Fuel Ignition (SFI) problem. This problem is modeled by the 4 | partial differential equation 5 | 6 | -Laplacian(u) - lambda * exp(u) = 0, 0 < x,y,z < 1, 7 | 8 | with boundary conditions 9 | 10 | u = 0 for x = 0, x = 1, y = 0, y = 1, z = 0, z = 1 11 | 12 | A finite difference approximation with the usual 7-point stencil 13 | is used to discretize the boundary value problem to obtain a 14 | nonlinear system of equations. The problem is solved in a 3D 15 | rectangular domain, using distributed arrays (DAs) to partition 16 | the parallel grid. 17 | 18 | ------------------------------------------------------------------------- */ 19 | 20 | #include "Bratu3Dimpl.h" 21 | 22 | #if PETSC_VERSION_(3,1,0) 23 | #define DMDAGetInfo(da,dim,M,N,P,m,n,p,dof,s,bx,by,bz,st) \ 24 | DAGetInfo((DA)da,dim,M,N,P,m,n,p,dof,s,bx,st) 25 | #define DMDAGetCorners(da,x,y,z,m,n,p) DAGetCorners((DA)da,x,y,z,m,n,p) 26 | #define DMDAVecGetArray(da,v,a) DAVecGetArray((DA)da,v,a) 27 | #define DMDAVecRestoreArray(da,v,a) DAVecRestoreArray((DA)da,v,a) 28 | #endif 29 | 30 | #undef __FUNCT__ 31 | #define __FUNCT__ "FormInitGuess" 32 | PetscErrorCode FormInitGuess(DM da, Vec X, Params *p) 33 | { 34 | PetscInt i,j,k,Mx,My,Mz,xs,ys,zs,xm,ym,zm; 35 | PetscReal lambda,temp1,hx,hy,hz,tempk,tempj; 36 | PetscScalar ***x; 37 | PetscErrorCode ierr; 38 | 39 | PetscFunctionBegin; 40 | 41 | ierr = DMDAGetInfo(da,PETSC_IGNORE, 42 | &Mx,&My,&Mz, 43 | PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE, 44 | PETSC_IGNORE,PETSC_IGNORE, 45 | PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE, 46 | PETSC_IGNORE); 47 | 48 | lambda = p->lambda_; 49 | hx = 1.0/(PetscReal)(Mx-1); 50 | hy = 1.0/(PetscReal)(My-1); 51 | hz = 1.0/(PetscReal)(Mz-1); 52 | temp1 = lambda/(lambda + 1.0); 53 | 54 | /* 55 | Get a pointer to vector data. 56 | 57 | - For default PETSc vectors, VecGetArray() returns a pointer to 58 | the data array. Otherwise, the routine is implementation 59 | dependent. 60 | 61 | - You MUST call VecRestoreArray() when you no longer need access 62 | to the array. 
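     - DMDAVecGetArray() presents the vector as a C array x[k][j][i] addressed
       with global grid indices, which is what keeps the initialization loop
       below readable.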
63 | */ 64 | ierr = DMDAVecGetArray(da,X,&x);CHKERRQ(ierr); 65 | 66 | /* 67 | Get local grid boundaries (for 3-dimensional DA): 68 | 69 | - xs, ys, zs: starting grid indices (no ghost points) 70 | 71 | - xm, ym, zm: widths of local grid (no ghost points) 72 | */ 73 | ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr); 74 | 75 | /* 76 | Compute initial guess over the locally owned part of the grid 77 | */ 78 | for (k=zs; klambda_; 122 | hx = 1.0/(PetscReal)(Mx-1); 123 | hy = 1.0/(PetscReal)(My-1); 124 | hz = 1.0/(PetscReal)(Mz-1); 125 | sc = hx*hy*hz*lambda; 126 | hxhzdhy = hx*hz/hy; 127 | hyhzdhx = hy*hz/hx; 128 | hxhydhz = hx*hy/hz; 129 | 130 | /* 131 | 132 | */ 133 | ierr = DMGetLocalVector(da,&localX);CHKERRQ(ierr); 134 | 135 | /* 136 | Scatter ghost points to local vector,using the 2-step process 137 | DAGlobalToLocalBegin(),DAGlobalToLocalEnd(). By placing code 138 | between these two statements, computations can be done while 139 | messages are in transition. 140 | */ 141 | ierr = DMGlobalToLocalBegin(da,X,INSERT_VALUES,localX);CHKERRQ(ierr); 142 | ierr = DMGlobalToLocalEnd(da,X,INSERT_VALUES,localX);CHKERRQ(ierr); 143 | 144 | /* 145 | Get pointers to vector data. 146 | */ 147 | ierr = DMDAVecGetArray(da,localX,&x);CHKERRQ(ierr); 148 | ierr = DMDAVecGetArray(da,F,&f);CHKERRQ(ierr); 149 | 150 | /* 151 | Get local grid boundaries. 152 | */ 153 | ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr); 154 | 155 | /* 156 | Compute function over the locally owned part of the grid. 157 | */ 158 | for (k=zs; klambda_; 215 | hx = 1.0/(PetscReal)(Mx-1); 216 | hy = 1.0/(PetscReal)(My-1); 217 | hz = 1.0/(PetscReal)(Mz-1); 218 | sc = hx*hy*hz*lambda; 219 | hxhzdhy = hx*hz/hy; 220 | hyhzdhx = hy*hz/hx; 221 | hxhydhz = hx*hy/hz; 222 | 223 | /* 224 | 225 | */ 226 | ierr = DMGetLocalVector(da,&localX);CHKERRQ(ierr); 227 | 228 | /* 229 | Scatter ghost points to local vector, using the 2-step process 230 | DAGlobalToLocalBegin(), DAGlobalToLocalEnd(). By placing code 231 | between these two statements, computations can be done while 232 | messages are in transition. 233 | */ 234 | ierr = DMGlobalToLocalBegin(da,X,INSERT_VALUES,localX);CHKERRQ(ierr); 235 | ierr = DMGlobalToLocalEnd(da,X,INSERT_VALUES,localX);CHKERRQ(ierr); 236 | 237 | /* 238 | Get pointer to vector data. 239 | */ 240 | ierr = DMDAVecGetArray(da,localX,&x);CHKERRQ(ierr); 241 | 242 | /* 243 | Get local grid boundaries. 244 | */ 245 | ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr); 246 | 247 | /* 248 | Compute entries for the locally owned part of the Jacobian. 249 | 250 | - Currently, all PETSc parallel matrix formats are partitioned by 251 | contiguous chunks of rows across the processors. 252 | 253 | - Each processor needs to insert only elements that it owns 254 | locally (but any non-local elements will be sent to the 255 | appropriate processor during matrix assembly). 256 | 257 | - Here, we set all entries for a particular row at once. 258 | 259 | - We can set matrix entries either using either 260 | MatSetValuesLocal() or MatSetValues(), as discussed above. 
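     - With the 7-point stencil used here, each interior row of the Jacobian
       therefore holds at most seven nonzeros: the diagonal entry plus its six
       neighbouring grid points.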
261 | */ 262 | for (k=zs; k 5 | 6 | #if PETSC_VERSION_(3,1,0) 7 | #include 8 | #include 9 | #include 10 | #endif 11 | 12 | typedef struct Params { 13 | double lambda_; 14 | } Params; 15 | 16 | PetscErrorCode FormInitGuess(DM da, Vec x, Params *p); 17 | PetscErrorCode FormFunction(DM da, Vec x, Vec F, Params *p); 18 | PetscErrorCode FormJacobian(DM da, Vec x, Mat J, Params *p); 19 | 20 | #endif /* !BRATU3D_H */ 21 | -------------------------------------------------------------------------------- /examples/scale/CavityFlow2D.pyx: -------------------------------------------------------------------------------- 1 | from petsc4py.PETSc cimport Vec, PetscVec 2 | from petsc4py.PETSc cimport DM, PetscDM 3 | from petsc4py.PETSc cimport SNES, PetscSNES 4 | 5 | from petsc4py.PETSc import Error 6 | 7 | cdef extern from "CavityFlow2Dimpl.h": 8 | ctypedef struct Params: 9 | double lidvelocity_, prandtl_, grashof_ 10 | int FormInitGuess(PetscDM da, PetscVec x, Params *p) 11 | int FormFunction (PetscDM da, PetscVec x, PetscVec F, Params *p) 12 | 13 | def formInitGuess(Vec x, DM da, double lidvelocity_, double prandtl_, double grashof_): 14 | cdef int ierr 15 | cdef Params p = {"lidvelocity_" : lidvelocity_, "prandtl_" : prandtl_, "grashof_" : grashof_} 16 | ierr = FormInitGuess(da.dm, x.vec, &p) 17 | if ierr != 0: raise Error(ierr) 18 | 19 | def formFunction(SNES snes, Vec x, Vec f, DM da, double lidvelocity_, double prandtl_, double grashof_): 20 | cdef int ierr 21 | cdef Params p = {"lidvelocity_" : lidvelocity_, "prandtl_" : prandtl_, "grashof_" : grashof_} 22 | 23 | ierr = FormFunction(da.dm, x.vec, f.vec, &p) 24 | if ierr != 0: raise Error(ierr) 25 | -------------------------------------------------------------------------------- /examples/scale/CavityFlow2Dimpl.c: -------------------------------------------------------------------------------- 1 | #include "CavityFlow2Dimpl.h" 2 | 3 | 4 | #undef __FUNCT__ 5 | #define __FUNCT__ "FormInitGuess" 6 | /* 7 | FormInitialGuess - Forms initial approximation. 8 | 9 | Input Parameters: 10 | X - vector 11 | p - user parameters 12 | 13 | Output Parameter: 14 | X - vector 15 | */ 16 | PetscErrorCode FormInitGuess(DM da,Vec X,Params *p) 17 | { 18 | PetscInt i,j,mx,xs,ys,xm,ym; 19 | PetscErrorCode ierr; 20 | PetscReal grashof,dx; 21 | Field **x; 22 | 23 | printf("grashof: %e\n", p->grashof_); 24 | printf("prantdl: %e\n", p->prandtl_); 25 | printf("lidvelocity: %e\n", p->lidvelocity_); 26 | 27 | grashof = p->grashof_; 28 | 29 | ierr = DMDAGetInfo(da,0,&mx,0,0,0,0,0,0,0,0,0,0,0);CHKERRQ(ierr); 30 | dx = 1.0/(mx-1); 31 | 32 | /* 33 | Get local grid boundaries (for 2-dimensional DMDA): 34 | xs, ys - starting grid indices (no ghost points) 35 | xm, ym - widths of local grid (no ghost points) 36 | */ 37 | ierr = DMDAGetCorners(da,&xs,&ys,PETSC_NULL,&xm,&ym,PETSC_NULL);CHKERRQ(ierr); 38 | 39 | /* 40 | Get a pointer to vector data. 41 | - For default PETSc vectors, VecGetArray() returns a pointer to 42 | the data array. Otherwise, the routine is implementation dependent. 43 | - You MUST call VecRestoreArray() when you no longer need access to 44 | the array. 
45 | */ 46 | ierr = DMDAVecGetArray(da,X,&x);CHKERRQ(ierr); 47 | 48 | /* 49 | Compute initial guess over the locally owned part of the grid 50 | Initial condition is motionless fluid and equilibrium temperature 51 | */ 52 | for (j=ys; j0)*i*dx; 58 | } 59 | } 60 | 61 | /* 62 | Restore vector 63 | */ 64 | ierr = DMDAVecRestoreArray(da,X,&x);CHKERRQ(ierr); 65 | return 0; 66 | } 67 | 68 | 69 | #undef __FUNCT__ 70 | #define __FUNCT__ "FormFunctionLocal" 71 | PetscErrorCode FormFunctionLocal(DMDALocalInfo *info,Field **x,Field **f,Params *p) 72 | { 73 | PetscErrorCode ierr; 74 | PetscInt xints,xinte,yints,yinte,i,j; 75 | PetscReal hx,hy,dhx,dhy,hxdhy,hydhx; 76 | PetscReal grashof,prandtl,lid; 77 | PetscScalar u,uxx,uyy,vx,vy,avx,avy,vxp,vxm,vyp,vym; 78 | 79 | PetscFunctionBegin; 80 | grashof = p->grashof_; 81 | prandtl = p->prandtl_; 82 | lid = p->lidvelocity_; 83 | 84 | /* 85 | Define mesh intervals ratios for uniform grid. 86 | 87 | Note: FD formulae below are normalized by multiplying through by 88 | local volume element (i.e. hx*hy) to obtain coefficients O(1) in two dimensions. 89 | 90 | 91 | */ 92 | dhx = (PetscReal)(info->mx-1); dhy = (PetscReal)(info->my-1); 93 | hx = 1.0/dhx; hy = 1.0/dhy; 94 | hxdhy = hx*dhy; hydhx = hy*dhx; 95 | 96 | xints = info->xs; xinte = info->xs+info->xm; yints = info->ys; yinte = info->ys+info->ym; 97 | 98 | /* Test whether we are on the bottom edge of the global array */ 99 | if (yints == 0) { 100 | j = 0; 101 | yints = yints + 1; 102 | /* bottom edge */ 103 | for (i=info->xs; ixs+info->xm; i++) { 104 | f[j][i].u = x[j][i].u; 105 | f[j][i].v = x[j][i].v; 106 | f[j][i].omega = x[j][i].omega + (x[j+1][i].u - x[j][i].u)*dhy; 107 | f[j][i].temp = x[j][i].temp-x[j+1][i].temp; 108 | } 109 | } 110 | 111 | /* Test whether we are on the top edge of the global array */ 112 | if (yinte == info->my) { 113 | j = info->my - 1; 114 | yinte = yinte - 1; 115 | /* top edge */ 116 | for (i=info->xs; ixs+info->xm; i++) { 117 | f[j][i].u = x[j][i].u - lid; 118 | f[j][i].v = x[j][i].v; 119 | f[j][i].omega = x[j][i].omega + (x[j][i].u - x[j-1][i].u)*dhy; 120 | f[j][i].temp = x[j][i].temp-x[j-1][i].temp; 121 | } 122 | } 123 | 124 | /* Test whether we are on the left edge of the global array */ 125 | if (xints == 0) { 126 | i = 0; 127 | xints = xints + 1; 128 | /* left edge */ 129 | for (j=info->ys; jys+info->ym; j++) { 130 | f[j][i].u = x[j][i].u; 131 | f[j][i].v = x[j][i].v; 132 | f[j][i].omega = x[j][i].omega - (x[j][i+1].v - x[j][i].v)*dhx; 133 | f[j][i].temp = x[j][i].temp; 134 | } 135 | } 136 | 137 | /* Test whether we are on the right edge of the global array */ 138 | if (xinte == info->mx) { 139 | i = info->mx - 1; 140 | xinte = xinte - 1; 141 | /* right edge */ 142 | for (j=info->ys; jys+info->ym; j++) { 143 | f[j][i].u = x[j][i].u; 144 | f[j][i].v = x[j][i].v; 145 | f[j][i].omega = x[j][i].omega - (x[j][i].v - x[j][i-1].v)*dhx; 146 | f[j][i].temp = x[j][i].temp - (PetscReal)(grashof>0); 147 | } 148 | } 149 | 150 | /* Compute over the interior points */ 151 | for (j=yints; jym*info->xm);CHKERRQ(ierr); 201 | PetscFunctionReturn(0); 202 | } 203 | 204 | #undef __FUNCT__ 205 | #define __FUNCT__ "FormFunction" 206 | PetscErrorCode FormFunction(DM da, Vec X, Vec F, Params *p) 207 | { 208 | DMDALocalInfo info; 209 | Field **u,**fu; 210 | PetscErrorCode ierr; 211 | Vec localX; 212 | 213 | PetscFunctionBegin; 214 | ierr = DMGetLocalVector(da,&localX);CHKERRQ(ierr); 215 | /* 216 | Scatter ghost points to local vector, using the 2-step process 217 | DMGlobalToLocalBegin(), 
DMGlobalToLocalEnd(). 218 | */ 219 | ierr = DMGlobalToLocalBegin(da,X,INSERT_VALUES,localX);CHKERRQ(ierr); 220 | ierr = DMGlobalToLocalEnd(da,X,INSERT_VALUES,localX);CHKERRQ(ierr); 221 | ierr = DMDAGetLocalInfo(da,&info);CHKERRQ(ierr); 222 | ierr = DMDAVecGetArray(da,localX,&u);CHKERRQ(ierr); 223 | ierr = DMDAVecGetArray(da,F,&fu);CHKERRQ(ierr); 224 | ierr = FormFunctionLocal(&info,u,fu,p);CHKERRQ(ierr); 225 | ierr = DMDAVecRestoreArray(da,localX,&u);CHKERRQ(ierr); 226 | ierr = DMDAVecRestoreArray(da,F,&fu);CHKERRQ(ierr); 227 | ierr = DMRestoreLocalVector(da,&localX);CHKERRQ(ierr); 228 | PetscFunctionReturn(0); 229 | } 230 | 231 | 232 | -------------------------------------------------------------------------------- /examples/scale/CavityFlow2Dimpl.h: -------------------------------------------------------------------------------- 1 | #ifndef CAVITYFLOW2D_H 2 | #define CAVITYFLOW2D_H 3 | 4 | #include 5 | 6 | #if PETSC_VERSION_(3,1,0) 7 | #include 8 | #include 9 | #include 10 | #endif 11 | 12 | typedef struct Params { 13 | double lidvelocity_, prandtl_, grashof_; 14 | } Params; 15 | 16 | typedef struct { 17 | PetscScalar u,v,omega,temp; 18 | } Field; 19 | 20 | PetscErrorCode FormInitGuess(DM da, Vec x, Params *p); 21 | PetscErrorCode FormFunction(DM da, Vec x, Vec F, Params *p); 22 | PetscErrorCode FormFunctionLocal(DMDALocalInfo *info,Field **x,Field **f,Params *p); 23 | 24 | #endif /* !CAVITYFLOW2D_H */ 25 | -------------------------------------------------------------------------------- /examples/scale/cavity_flow2d.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import sys 3 | import petsc4py 4 | from numpy import mgrid 5 | petsc4py.init(sys.argv) 6 | from petsc4py import PETSc 7 | try: 8 | from matplotlib import pyplot as plt 9 | except ImportError: 10 | PETSc.Sys.Print("matplotlib not available") 11 | plt = None 12 | import CavityFlow2D 13 | 14 | """ 15 | This problem is an adaptation of petsc example snes/ex19.c 16 | 17 | This problem is modeled by the partial differential equation system 18 | 19 | \begin{eqnarray} 20 | - \triangle U - \nabla_y \Omega & = & 0 \\ 21 | - \triangle V + \nabla_x\Omega & = & 0 \\ 22 | - \triangle \Omega + \nabla \cdot ([U*\Omega,V*\Omega]) - GR* \nabla_x T & = & 0 \\ 23 | - \triangle T + PR* \nabla \cdot ([U*T,V*T]) & = & 0 24 | \end{eqnarray} 25 | 26 | in the unit square, which is uniformly discretized in each of x and y in this simple encoding. 27 | 28 | No-slip, rigid-wall Dirichlet conditions are used for $ [U,V]$. 29 | Dirichlet conditions are used for Omega, based on the definition of 30 | vorticity: $ \Omega = - \nabla_y U + \nabla_x V$, where along each 31 | constant coordinate boundary, the tangential derivative is zero. 32 | Dirichlet conditions are used for T on the left and right walls, 33 | and insulation homogeneous Neumann conditions are used for T on 34 | the top and bottom walls. 35 | 36 | A finite difference approximation with the usual 5-point stencil 37 | is used to discretize the boundary value problem to obtain a 38 | nonlinear system of equations. Upwinding is used for the divergence 39 | (convective) terms and central for the gradient (source) terms. 40 | 41 | The Jacobian can be either 42 | * formed via finite differencing using coloring (the default), or 43 | * applied matrix-free via the option -snes_mf 44 | (for larger grid problems this variant may not converge 45 | without a preconditioner due to ill-conditioning). 
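A typical single-process run looks like the following (assuming the CavityFlow2D
extension module has already been built in place, e.g. with
`python setup.py build_ext --inplace`; any extra PETSc options such as
-snes_monitor or -snes_mf are forwarded to the solver):

    python cavity_flow2d.py -snes_monitor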
46 | 47 | ------------------------------------------------------------------------*/ 48 | 49 | The 2D driven cavity problem is solved in a velocity-vorticity formulation. 50 | The flow can be driven with the lid or with bouyancy or with both. 51 | """ 52 | 53 | 54 | def get_args(): 55 | OptDB = PETSc.Options() 56 | ignore = OptDB.getBool('--help', False) 57 | parser = argparse.ArgumentParser(description='2D Driven Cavity Flow', 58 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 59 | parser.add_argument('--nx', default=32,help='number of grid points in x') 60 | parser.add_argument('--ny', default=32,help='number of grid points in y') 61 | parser.add_argument('--lidvelocity', help='dimensionless velocity of the lid\n(1/(nx*ny) if unspecified', type=float) 62 | parser.add_argument('--grashof', default=1.0,help='dimensionless temperature gradient', type=float) 63 | parser.add_argument('--prandtl', default=1.0,help='dimensionless thermal/momentum diffusivity ratio', type=float) 64 | parser.add_argument('--plot', help='use matplotlib to visualize solution') 65 | args, petsc_opts = parser.parse_known_args() 66 | 67 | if args.lidvelocity is None: 68 | args.lidvelocity = 1./(args.nx*args.ny) 69 | return args 70 | 71 | def cavity_flow2D(nx, ny, lidvelocity, grashof, prandtl): 72 | # create application context 73 | # and PETSc nonlinear solver 74 | snes = PETSc.SNES().create() 75 | da = PETSc.DMDA().create([nx,ny],dof=4, stencil_width=1, stencil_type='star') 76 | 77 | # set up solution vector 78 | F = da.createGlobalVec() 79 | snes.setFunction(CavityFlow2D.formFunction, F, 80 | args=(da, lidvelocity, prandtl, grashof)) 81 | 82 | x = da.createGlobalVec() 83 | CavityFlow2D.formInitGuess(x, da, lidvelocity, prandtl, grashof) 84 | 85 | snes.setDM(da) 86 | snes.setFromOptions() 87 | 88 | # solve the nonlinear problem 89 | snes.solve(None, x) 90 | return x 91 | 92 | def plot(x, nx, ny): 93 | """Plot solution to screen""" 94 | plt.ioff() 95 | Z = x[...].reshape(nx,ny,4) 96 | fig, axs = plt.subplots(2,2, figsize=(10,8)) 97 | titles = ['u', 'v', 'vorticity', 'temperature'] 98 | for idx, ax in enumerate(axs.ravel()): 99 | cs = ax.contourf(Z[:,:,idx], 100) 100 | fig.colorbar(cs, ax=ax, shrink=0.9) 101 | ax.set_title(titles[idx]) 102 | plt.show() 103 | 104 | if __name__ == "__main__": 105 | args = get_args() 106 | print "running cavity flow with: %s" % args 107 | x = cavity_flow2D(args.nx, args.ny, args.lidvelocity, args.grashof, args.prandtl) 108 | plot(x, args.nx, args.ny) 109 | -------------------------------------------------------------------------------- /examples/scale/danumbering.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/examples/scale/danumbering.gif -------------------------------------------------------------------------------- /examples/scale/ghost.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/examples/scale/ghost.gif -------------------------------------------------------------------------------- /examples/scale/makefile: -------------------------------------------------------------------------------- 1 | all: slides pages 2 | 3 | slides: petsc4py-tutorial.md 4 | pandoc petsc4py-tutorial.md -t dzslides -s --mathjax -o petsc4py-tutorial-slides.html 5 | 6 | pages: petsc4py-tutorial.md 7 | pandoc -t html5 -s --mathjax 
petsc4py-tutorial.md -o petsc4py-tutorial.html 8 | 9 | deploy: slides pages 10 | rsync -v -r ./ aron@ahmadia.net:domains/aron.ahmadia.net/web/public/ 11 | 12 | clean: 13 | rm *.html -------------------------------------------------------------------------------- /examples/scale/mpi4py_mandelbrot.py: -------------------------------------------------------------------------------- 1 | def mandelbrot (x, y, maxit): 2 | c = x + y*1j 3 | z = 0 + 0j 4 | it = 0 5 | while abs(z) < 2 and it < maxit: 6 | z = z**2 + c 7 | it += 1 8 | return it 9 | 10 | x1, x2 = -2.0, 1.0 11 | y1, y2 = -1.0, 1.0 12 | w, h = 150, 100 13 | maxit = 127 14 | 15 | from mpi4py import MPI 16 | import numpy 17 | 18 | comm = MPI.COMM_WORLD 19 | size = comm.Get_size() 20 | rank = comm.Get_rank() 21 | 22 | # number of rows to compute here 23 | N = h // size + (h % size > rank) 24 | 25 | # first row to compute here 26 | start = comm.scan(N)-N 27 | 28 | # array to store local result 29 | Cl = numpy.zeros([N, w], dtype='i') 30 | 31 | # compute owned rows 32 | 33 | dx = (x2 - x1) / w 34 | dy = (y2 - y1) / h 35 | 36 | for i in range(N): 37 | y = y1 + (i + start) * dy 38 | for j in range(w): 39 | x = x1 + j * dx 40 | Cl[i, j] = mandelbrot(x, y, maxit) 41 | 42 | # gather results at root (process 0) 43 | counts = comm.gather(N, root=0) 44 | C = None 45 | if rank == 0: 46 | C = numpy.zeros([h, w], dtype='i') 47 | 48 | rowtype = MPI.INT.Create_contiguous(w) 49 | rowtype.Commit() 50 | 51 | comm.Gatherv(sendbuf=[Cl, MPI.INT], recvbuf=[C, (counts, None), rowtype],root=0) 52 | 53 | rowtype.Free() 54 | 55 | from matplotlib import pyplot 56 | # Some magic to get to MPI4PY rank 0, not necessarily engine id 0 57 | pyplot.imshow(CC[ranks.index(0)], aspect='equal') 58 | pyplot.spectral() 59 | pyplot.show() -------------------------------------------------------------------------------- /examples/scale/mpi4py_pi.py: -------------------------------------------------------------------------------- 1 | # from mpi4py demonstrations 2 | 3 | from mpi4py import MPI 4 | import math 5 | 6 | def compute_pi(n, start=0, step=1): 7 | h = 1.0 / n 8 | s = 0.0 9 | for i in range(start, n, step): 10 | x = h * (i + 0.5) 11 | s += 4.0 / (1.0 + x**2) 12 | return s * h 13 | 14 | comm = MPI.COMM_WORLD 15 | nprocs = comm.Get_size() 16 | myrank = comm.Get_rank() 17 | if myrank == 0: 18 | n = 10 19 | else: 20 | n = None 21 | 22 | n = comm.bcast(n, root=0) 23 | 24 | mypi = compute_pi(n, myrank, nprocs) 25 | 26 | pi = comm.reduce(mypi, op=MPI.SUM, root=0) 27 | 28 | if myrank == 0: 29 | error = abs(pi - math.pi) 30 | print ("pi is approximately %.16f, error is %.16f" % (pi, error)) -------------------------------------------------------------------------------- /examples/scale/quadrants.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | # From clawpack/pyclaw/examples/euler_2d/quadrants 5 | # notebook at: http://nbviewer.ipython.org/urls/raw.github.com/pyHPC/pyhpc-tutorial/master/examples/scale/Quadrants%20Example.ipynb 6 | 7 | """ 8 | Solve the Euler equations of compressible fluid dynamics. 
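The initial data below splits the unit square at x = 0.8, y = 0.8 into four
constant states (the classic 2D Riemann 'quadrants' configuration); the solution
is evolved to t = 0.8 with extrapolation boundary conditions on all sides.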
9 | """ 10 | from clawpack import pyclaw 11 | from clawpack import riemann 12 | 13 | solver = pyclaw.ClawSolver2D(riemann.euler_4wave_2D) 14 | solver.all_bcs = pyclaw.BC.extrap 15 | 16 | domain = pyclaw.Domain([0.,0.],[1.,1.],[100,100]) 17 | solution = pyclaw.Solution(solver.num_eqn,domain) 18 | gamma = 1.4 19 | solution.problem_data['gamma'] = gamma 20 | solver.dimensional_split = False 21 | solver.transverse_waves = 2 22 | 23 | # Set initial data 24 | xx,yy = domain.grid.p_centers 25 | l = xx<0.8; r = xx>=0.8; b = yy<0.8; t = yy>=0.8 26 | solution.q[0,...] = 1.5*r*t + 0.532258064516129*l*t + 0.137992831541219*l*b + 0.532258064516129*r*b 27 | u = 0.*r*t + 1.206045378311055*l*t + 1.206045378311055*l*b + 0.*r*b 28 | v = 0.*r*t + 0.*l*t + 1.206045378311055*l*b + 1.206045378311055*r*b 29 | p = 1.5*r*t + 0.3*l*t + 0.029032258064516*l*b + 0.3*r*b 30 | solution.q[1,...] = solution.q[0,...] * u 31 | solution.q[2,...] = solution.q[0,...] * v 32 | solution.q[3,...] = 0.5*solution.q[0,...]*(u**2+v**2) + p/(gamma-1.) 33 | 34 | #solver.evolve_to_time(solution,tend=0.3) 35 | claw = pyclaw.Controller() 36 | claw.tfinal = 0.8 37 | claw.solution = solution 38 | claw.solver = solver 39 | 40 | status = claw.run() 41 | 42 | #pyclaw.plot.interactive_plot() 43 | -------------------------------------------------------------------------------- /examples/scale/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #$ python setup.py build_ext --inplace 4 | 5 | from numpy.distutils.command import build_src 6 | 7 | # a bit of monkeypatching ... 8 | import Cython.Compiler.Main 9 | build_src.Pyrex = Cython 10 | build_src.have_pyrex = lambda: True 11 | 12 | def have_pyrex(): 13 | import sys 14 | try: 15 | import Cython.Compiler.Main 16 | sys.modules['Pyrex'] = Cython 17 | sys.modules['Pyrex.Compiler'] = Cython.Compiler 18 | sys.modules['Pyrex.Compiler.Main'] = Cython.Compiler.Main 19 | return True 20 | except ImportError: 21 | return False 22 | build_src.have_pyrex = have_pyrex 23 | 24 | 25 | def configuration(parent_package='',top_path=None): 26 | INCLUDE_DIRS = [] 27 | LIBRARY_DIRS = [] 28 | LIBRARIES = [] 29 | 30 | # PETSc 31 | import os 32 | PETSC_DIR = os.environ['PETSC_DIR'] 33 | PETSC_ARCH = os.environ.get('PETSC_ARCH', '') 34 | from os.path import join, isdir 35 | if PETSC_ARCH and isdir(join(PETSC_DIR, PETSC_ARCH)): 36 | INCLUDE_DIRS += [join(PETSC_DIR, PETSC_ARCH, 'include'), 37 | join(PETSC_DIR, 'include')] 38 | LIBRARY_DIRS += [join(PETSC_DIR, PETSC_ARCH, 'lib')] 39 | else: 40 | if PETSC_ARCH: pass # XXX should warn ... 
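        # PETSC_ARCH was set but $PETSC_DIR/$PETSC_ARCH does not exist, so assume a
        # prefix-style PETSc install and take headers and libraries from PETSC_DIR itself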
41 | INCLUDE_DIRS += [join(PETSC_DIR, 'include')] 42 | LIBRARY_DIRS += [join(PETSC_DIR, 'lib')] 43 | LIBRARIES += [#'petscts', 'petscsnes', 'petscksp', 44 | #'petscdm', 'petscmat', 'petscvec', 45 | 'petsc'] 46 | 47 | # PETSc for Python 48 | import petsc4py 49 | INCLUDE_DIRS += [petsc4py.get_include()] 50 | print INCLUDE_DIRS 51 | 52 | # Configuration 53 | from numpy.distutils.misc_util import Configuration 54 | config = Configuration('', parent_package, top_path) 55 | config.add_extension('CavityFlow2D', 56 | sources = ['CavityFlow2D.pyx', 57 | 'CavityFlow2Dimpl.c'], 58 | depends = ['CavityFlow2Dimpl.h'], 59 | include_dirs=INCLUDE_DIRS + [os.curdir], 60 | libraries=LIBRARIES, 61 | library_dirs=LIBRARY_DIRS, 62 | runtime_library_dirs=LIBRARY_DIRS) 63 | return config 64 | 65 | if __name__ == "__main__": 66 | from numpy.distutils.core import setup 67 | setup(**configuration(top_path='').todict()) 68 | -------------------------------------------------------------------------------- /figures/TACC_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/TACC_logo.png -------------------------------------------------------------------------------- /figures/allgather_alltoall.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/allgather_alltoall.png -------------------------------------------------------------------------------- /figures/books.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/books.png -------------------------------------------------------------------------------- /figures/broadcast_scatter_gather.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/broadcast_scatter_gather.png -------------------------------------------------------------------------------- /figures/careful.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/careful.png -------------------------------------------------------------------------------- /figures/concurrency.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/concurrency.png -------------------------------------------------------------------------------- /figures/concurrency_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/concurrency_2.png -------------------------------------------------------------------------------- /figures/continuum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/continuum.png -------------------------------------------------------------------------------- /figures/creative_commons_logo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/creative_commons_logo.png -------------------------------------------------------------------------------- /figures/does_it_scale.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/does_it_scale.png -------------------------------------------------------------------------------- /figures/dpg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/dpg.png -------------------------------------------------------------------------------- /figures/dsw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/dsw.png -------------------------------------------------------------------------------- /figures/euler_weak_scaling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/euler_weak_scaling.png -------------------------------------------------------------------------------- /figures/fem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/fem.png -------------------------------------------------------------------------------- /figures/free_lunch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/free_lunch.png -------------------------------------------------------------------------------- /figures/intro/array1D.2.lightbg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/array1D.2.lightbg.png -------------------------------------------------------------------------------- /figures/intro/ecosystem.lightbg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/ecosystem.lightbg.png -------------------------------------------------------------------------------- /figures/intro/example_surface_from_irregular_data.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/example_surface_from_irregular_data.jpg -------------------------------------------------------------------------------- /figures/intro/frequency_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/frequency_plot.png -------------------------------------------------------------------------------- /figures/intro/frequency_signal.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/frequency_signal.png -------------------------------------------------------------------------------- /figures/intro/hist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/hist.png -------------------------------------------------------------------------------- /figures/intro/hist_legend_fit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/hist_legend_fit.png -------------------------------------------------------------------------------- /figures/intro/random_c.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/random_c.jpg -------------------------------------------------------------------------------- /figures/intro/simple_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/simple_plot.png -------------------------------------------------------------------------------- /figures/intro/simple_plot_cust.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/simple_plot_cust.png -------------------------------------------------------------------------------- /figures/intro/simple_plot_cust2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/simple_plot_cust2.png -------------------------------------------------------------------------------- /figures/intro/simple_plot_legend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/simple_plot_legend.png -------------------------------------------------------------------------------- /figures/intro/snapshot_ipython.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/intro/snapshot_ipython.png -------------------------------------------------------------------------------- /figures/kaust.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/kaust.png -------------------------------------------------------------------------------- /figures/log.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/log.png -------------------------------------------------------------------------------- /figures/molt.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/molt.png -------------------------------------------------------------------------------- /figures/numpy/broadcasting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/numpy/broadcasting.png -------------------------------------------------------------------------------- /figures/numpy/threefundamental.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/numpy/threefundamental.png -------------------------------------------------------------------------------- /figures/reduce_scan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/reduce_scan.png -------------------------------------------------------------------------------- /figures/scale/danumbering.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/scale/danumbering.gif -------------------------------------------------------------------------------- /figures/scale/euler_weak_scaling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/scale/euler_weak_scaling.png -------------------------------------------------------------------------------- /figures/scale/ghost.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/scale/ghost.gif -------------------------------------------------------------------------------- /figures/scale/pyclaw_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/scale/pyclaw_architecture.png -------------------------------------------------------------------------------- /figures/semiconductors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/semiconductors.png -------------------------------------------------------------------------------- /figures/struct.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/struct.png -------------------------------------------------------------------------------- /figures/tbl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/figures/tbl.png -------------------------------------------------------------------------------- /html/intro.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 
Python for Science

Introduction to Python for Science

http://www.pyHPC.org
-------------------------------------------------------------------------------- /index.html: --------------------------------------------------------------------------------

PyHPC Tutorial

Supercomputing 2013

Presented by:

  • Andy R. Terrel, The University of Texas at Austin
  • Travis Oliphant, Continuum Analytics, Inc
  • Aron Ahmadia, Army Corps of Engineers
  • Kurt Smith, Enthought, Inc

Tutorial Materials

Other resources
59 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /markdown/intro/building_blocks.md: -------------------------------------------------------------------------------- 1 | Basic Building Blocks 2 | --------------------- 3 | 4 | Scientific Python is un-bundled, so users need some basic building blocks. 5 | 6 | 11 | 12 | 13 | 14 | - **Python**, a generic and modern computing language 15 | 16 | - Python language: data types (`string`, `int`), flow control, 17 | data collections (lists, dictionaries), patterns, etc. 18 | 19 | - Modules of the standard library. 20 | 21 | - A large number of specialized modules or applications written 22 | in Python: web protocols, web framework, etc. ... and 23 | scientific computing. 24 | 25 | - Development tools (automatic testing, documentation 26 | generation) 27 | 28 | 29 | 30 | ![image](../figures/intro/snapshot_ipython.png) 31 | - **IPython**, an advanced **Python shell** 32 | [http://ipython.org](http://ipython.org)/ 33 | 34 | 35 | 36 | - **Numpy** : provides powerful **numerical arrays** objects, and 37 | routines to manipulate them. 38 | [http://www.numpy.org](http://www.numpy.org)/ 39 | 40 | 41 | 42 | - **Scipy** : high-level data processing routines. Optimization, 43 | regression, interpolation, etc 44 | [http://www.scipy.org](http://www.scipy.org)/ 45 | 46 | 47 | 48 | ![image](../figures/intro/random_c.jpg) 49 | - **Matplotlib** : 2-D visualization, "publication-ready" plots 50 | [http://matplotlib.org/](http://matplotlib.org/) 51 | 52 | 53 | 54 | - **SymPy**: Symbolic computing inside python 55 | [http://sympy.org](http://sympy.org) 56 | 57 | 58 | 59 | - **Pandas**: Data manipulation 60 | [http://pandas.pydata.org](http://pandas.pydata.org) 61 | 62 | 63 | 64 | - **PyTables**: Very large on node files and operations 65 | [http://www.pytables.org/](http://www.pytables.org/) 66 | 67 | 68 | 69 | - **Cython**: Compile Typed Python to C and call C functions 70 | [http://cython.org/](http://cython.org/) 71 | 72 | 73 | 74 | ![image](../figures/intro/example_surface_from_irregular_data.jpg) 75 | - **Mayavi** : 3-D visualization 76 | [http://code.enthought.com/projects/mayavi](http://code.enthought.com/projects/mayavi)/ 77 | -------------------------------------------------------------------------------- /markdown/intro/hpc_building_blocks.md: -------------------------------------------------------------------------------- 1 | HPC Building Blocks 2 | ------------------- 3 | 4 | Libraries from the HPC Community using python. 
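As a taste of the parallelism entries listed below, here is a minimal mpi4py
"hello world" (a sketch assuming a working MPI installation and mpi4py; the
script name in the run line is just an example):

```python
from mpi4py import MPI

comm = MPI.COMM_WORLD                      # the default communicator spanning all ranks
print("Hello from rank %d of %d" % (comm.Get_rank(), comm.Get_size()))
```

Run it with, e.g., `mpiexec -n 4 python hello_mpi.py`.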
5 | 6 | 7 | 8 | ## Parallelism 9 | 10 | * mpi4py 11 | * Ipython.parallel 12 | * pypar 13 | * pyUPC 14 | 15 | 16 | 17 | ## Data 18 | 19 | * pyh5 20 | * Blaze 21 | * gain (numpy + global arrays) 22 | * Odin 23 | 24 | 25 | 26 | ## Linear Algebra 27 | 28 | * elemental 29 | * pestc4py 30 | * pyTrilinos 31 | 32 | 33 | 34 | ## Applications 35 | 36 | * gpaw 37 | * galaxy 38 | * FEniCS 39 | * PyClaw 40 | * Yt / Aztro 41 | * Visit 42 | * Flash 43 | 44 | 45 | 46 | ## Speed ups 47 | 48 | * Numba 49 | * Copperhead 50 | * Parakeet 51 | * Pythran 52 | -------------------------------------------------------------------------------- /markdown/intro/index.md: -------------------------------------------------------------------------------- 1 | intro.md 2 | why.md 3 | tour.md 4 | building_blocks.md 5 | hpc_building_blocks.md 6 | ipython.md 7 | matplotlib.md 8 | numpy.md 9 | scipy.md 10 | conclusion.md 11 | -------------------------------------------------------------------------------- /markdown/intro/intro.md: -------------------------------------------------------------------------------- 1 | Introduction to Python 2 | ------------ 3 | 4 | ### Objectives 5 | 6 | 1. You will understand how scripting languages fit into the toolbox of 7 | a computational scientist. 8 | 2. You will see why Python is a powerful choice 9 | 3. You will get a taste of Python for actual scientific computing 10 | 11 | 12 | 13 | ### Sources 14 | 15 | - Scipy Lectures [http://scipy-lectures.github.io](scipy-lectures.github.io) 16 | - Software Carpentry [https://github.com/swcarpentry/boot-camps](https://github.com/swcarpentry/boot-camps) 17 | 18 | ### See also 19 | 20 | - Scipy Conference Tutorials: [http://conference.scipy.org/scipy2013/tutorials_schedule.php](http://conference.scipy.org/scipy2013/tutorials_schedule.php) 21 | 22 | -------------------------------------------------------------------------------- /markdown/intro/ipython.md: -------------------------------------------------------------------------------- 1 | The interactive workflow: IPython and a text editor 2 | --------------------------------------------------- 3 | 4 | - Not a single "blessed" environment 5 | - IPython provides many interactive elements missing in base interpreter 6 | - tab completion 7 | - documentation in a pager 8 | - notebook interface for literate style 9 | - simple parallel features 10 | 11 | 22 | 23 | 24 | 25 | 26 | ### Command line interaction 27 | 28 | Start `ipython`: 29 | 30 | ```bash 31 | $ ipython 32 | Python 2.7.5 (default, Aug 2 2013, 22:27:50) 33 | Type "copyright", "credits" or "license" for more information. 34 | 35 | IPython 0.13.2 -- An enhanced Interactive Python. 36 | ? -> Introduction and overview of IPython's features. 37 | %quickref -> Quick reference. 38 | help -> Python's own help system. 39 | object? -> Details about 'object', use 'object??' for extra details. 40 | 41 | In [1]: print("Hello World") 42 | Hello World 43 | ``` 44 | 45 | 46 | 47 | ### Getting help 48 | 49 | Getting help by using the **?** operator after an object: 50 | 51 | 52 | ``` 53 | In [6]: list? 54 | Type: type 55 | String Form: 56 | Namespace: Python builtin 57 | Docstring: 58 | list() -> new empty list 59 | list(iterable) -> new list initialized from iterable's items 60 | ``` 61 | 62 | 63 | 64 | View *python* source with **??** operator: 65 | ```python 66 | In [13]: os.path.join?? 
67 | Type: function 68 | String Form: 69 | File: /usr/local/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/posixpath.py 70 | Definition: os.path.join(a, *p) 71 | Source: 72 | def join(a, *p): 73 | """Join two or more pathname components, inserting '/' as needed. 74 | If any component is an absolute path, all previous path components 75 | will be discarded. An empty last part will result in a path that 76 | ends with a separator.""" 77 | path = a 78 | for b in p: 79 | if b.startswith('/'): 80 | path = b 81 | elif path == '' or path.endswith('/'): 82 | path += b 83 | else: 84 | path += '/' + b 85 | return path 86 | ``` 87 | 88 | 89 | 90 | ### IPython Tips and Tricks 91 | 92 | Ipython also contains many *magic* functions for iterating on your algorithm: 93 | 94 | - `%run` - run file as if it were a script 95 | - `%timeit` - times a single expression 96 | - `%debug` - debug the last traceback 97 | 98 | 99 | 100 | `%run` - run file as if it were a script 101 | 102 | ```python 103 | In [14]: %run examples/intro/04_regular_expressions.py 104 | Reading from examples/intro/../data/python.bib 105 | Found a book with the tag 'Langtangen2011' 106 | The title is 'A Primer on Scientific Programming with Python' 107 | Found a book with the tag 'Langtangen2010' 108 | The title is 'Python Scripting for Computational Science' 109 | In [15]: infile 110 | Out[15]: u'examples/intro/../data/python.bib' 111 | ``` 112 | 113 | 114 | 115 | `%timeit` - times a single expression 116 | 117 | ```python 118 | In [16]: %timeit range(100) 119 | 1000000 loops, best of 3: 1.2 us per loop 120 | In [17]: %timeit sum(xrange(100)) 121 | 100000 loops, best of 3: 1.54 us per loop 122 | In [18]: %timeit sum(range(100)) 123 | 100000 loops, best of 3: 2.65 us per loop 124 | In [19]: def loop_sum(): 125 | ....: sum = 0 126 | ....: for i in xrange(100): 127 | ....: sum += i 128 | ....: return sum 129 | ....: 130 | 131 | In [20]: %timeit loop_sum() 132 | 100000 loops, best of 3: 6.72 us per loop 133 | ``` 134 | 135 | 136 | 137 | `%debug` - debug the last traceback 138 | 139 | ``` 140 | In [17]: %run examples/intro/05_debug.py 141 | IndexError Traceback (most recent call last) 142 | 143 | In [18]: %debug 144 | > /Users/aterrel/Dropbox/Documents/Teaching/pyhpc-tutorial/examples/intro/05_debug.py(3)() 145 | 1 l = range(10) 146 | 2 147 | ----> 3 l[10] = 5 148 | 149 | ipdb> print(l) 150 | [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 151 | ipdb> print(len(l)) 152 | 10 153 | ipdb> print(l[9]) 154 | 9 155 | ``` 156 | 157 | 158 | 159 | Other useful magic functions are: 160 | 161 | - `%cd` to change the current directory (see `alias` for other mapped shell commands). 162 | 163 | - `!` to use shell commands. 164 | 165 | - `%cpaste` allows you to paste code, especially code from websites 166 | which has been prefixed with the standard python prompt (e.g. `>>>`) 167 | or with an ipython prompt, (e.g. 
`in [3]`): 168 | 169 | - `%history` to see your history (or save your session) 170 | 171 | - To explore use *tab completion* 172 | 173 | -------------------------------------------------------------------------------- /markdown/intro/matplotlib.md: -------------------------------------------------------------------------------- 1 | Matplotlib - Plotting 2 | --------------------- 3 | 4 | - Publication worthy plotting 5 | - Focus on 2D with some 3D and animation support 6 | - Can hit many different backend 7 | - See large gallery at: [http://matplotlib.org/gallery.html](http://matplotlib.org/gallery.html) 8 | 9 | 10 | 11 | Simple plot 12 | 13 | ```python 14 | import pylab as pl 15 | import numpy as np 16 | 17 | X = np.linspace(-np.pi, np.pi, 256, endpoint=True) 18 | C, S = np.cos(X), np.sin(X) 19 | 20 | pl.plot(X, C) 21 | pl.plot(X, S) 22 | pl.show() 23 | ``` 24 | 25 | 26 | 27 | Customize Everything 28 | 29 | ```python 30 | # Create a figure of size 8x6 points, 80 dots per inch 31 | pl.figure(figsize=(8, 6), dpi=80) 32 | # Create a new subplot from a grid of 1x1 33 | pl.subplot(1, 1, 1) 34 | X = np.linspace(-np.pi, np.pi, 256, endpoint=True) 35 | C, S = np.cos(X), np.sin(X) 36 | # Plot cosine with a blue continuous line of width 1 (pixels) 37 | pl.plot(X, C, color="blue", linewidth=1.0, linestyle="-") 38 | # Plot sine with a green continuous line of width 1 (pixels) 39 | pl.plot(X, S, color="green", linewidth=1.0, linestyle="-") # Set x limits 40 | pl.xlim(-4.0, 4.0) # Set x ticks 41 | pl.xticks(np.linspace(-4, 4, 9, endpoint=True)) # Set y limits 42 | pl.ylim(-1.0, 1.0) # Set y ticks 43 | pl.yticks(np.linspace(-1, 1, 5, endpoint=True)) # Save figure using 72 dots per inch 44 | # savefig("exercice_2.png", dpi=72) 45 | # Show result on screen 46 | pl.show() 47 | ``` 48 | 49 | 50 | 51 | Change linestyles 52 | 53 | ```python 54 | pl.figure(figsize=(10, 6), dpi=80) 55 | pl.plot(X, C, color="blue", linewidth=2.5, linestyle="-") 56 | pl.plot(X, S, color="red", linewidth=2.5, linestyle="-") 57 | ``` 58 | 59 | 60 | 61 | Adding legend 62 | 63 | ```python 64 | pl.plot(X, C, color="blue", linewidth=2.5, linestyle="-", label="cosine") 65 | pl.plot(X, S, color="green", linewidth=2.5, linestyle="--", label="sine") # Set x limits 66 | pl.legend(loc='upper left') 67 | ``` 68 | 69 | 70 | 71 | Moving splines 72 | 73 | ```python 74 | ax = pl.gca() # gca stands for 'get current axis' 75 | ax.spines['right'].set_color('none') 76 | ax.spines['top'].set_color('none') 77 | ax.xaxis.set_ticks_position('bottom') 78 | ax.spines['bottom'].set_position(('data',0)) 79 | ax.yaxis.set_ticks_position('left') 80 | ax.spines['left'].set_position(('data',0)) 81 | ``` 82 | 83 | 84 | 85 | Histograms 86 | 87 | 88 | ```python 89 | import pylab as pl 90 | 91 | pl.plot(pylab.randn(10000), 100) 92 | pl.show() 93 | ``` 94 | 95 | 96 | 97 | Add a fit line and legend 98 | 99 | ```python 100 | n, bins, patches = pl.hist(pl.randn(1000), 40, normed=1) 101 | l, = pl.plot(bins, pl.normpdf(bins, 0.0, 1.0), 'r--', label='fit', linewidth=3) 102 | legend([l, patches[0]], ['fit', 'hist']) 103 | ``` 104 | 105 | 106 | 107 | Other types of plots include: 108 | 109 | * Scatter 110 | * Bar 111 | * Pie 112 | * Sankey 113 | * Images 114 | * Quivers 115 | * Multiplots 116 | * Polar 117 | * 3D 118 | 119 | 120 | 121 | Other elements to know about: 122 | 123 | - Ticks: control how the ticks look 124 | - Annotations: Add visual elements to your plot 125 | - Axes: draw plots on top of themselves 126 | - Backends: draw for different rendering engines 127 | 
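To make the backend point above concrete, here is a minimal sketch of rendering straight to a file with the non-interactive `Agg` backend (the backend name and `savefig` call are standard matplotlib; the output file name is just an example):

```python
import matplotlib
matplotlib.use('Agg')              # pick a non-interactive raster backend before importing pyplot
import matplotlib.pyplot as pl
import numpy as np

X = np.linspace(-np.pi, np.pi, 256, endpoint=True)
pl.plot(X, np.cos(X), color="blue", linewidth=2.5, linestyle="-", label="cosine")
pl.legend(loc='upper left')
pl.savefig("cosine.png", dpi=72)   # written to disk; no display needed (handy on clusters)
```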
-------------------------------------------------------------------------------- /markdown/intro/numpy.md: -------------------------------------------------------------------------------- 1 | NumPy 2 | ----- 3 | 4 | Python library that provides multi-dimensional arrays, tables, and matrices for Python 5 | 6 |
7 | ![image](../figures/intro/array1D.2.lightbg.png) 8 |
9 | 10 | - Contiguous or strided arrays 11 | - Homogeneous (but element types can be compound, structured records) 12 | - Arrays of records and nested records (see the sketch below) 13 | - Fast routines for array operations (C, ATLAS, MKL) 14 | 15 | 16 | 17 | ## NumPy's Many Uses 18 | 19 | - Image and signal processing 20 | - Linear algebra 21 | - Data transformation and query 22 | - Time series analysis 23 | - Statistical analysis 24 | - Many more! 25 | 26 |
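The record-array bullet above is easiest to see with a small sketch (the `particle` dtype and its field names are invented purely for illustration):

```python
import numpy as np

# a structured dtype: every element is a (name, position, mass) record
particle = np.dtype([('name', 'S10'), ('pos', float, (3,)), ('mass', float)])
p = np.zeros(4, dtype=particle)

p['mass'] = 1.0                          # assign a whole field at once
p[0] = ('proton', [0.0, 0.0, 0.0], 1.0)  # or fill a single record

print(p['pos'].shape)                    # (4, 3) -- each field is itself a regular array
```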

27 | ## NumPy is the foundation of the Python scientific stack 28 | 29 | 30 | 31 | ### NumPy Ecosystem 32 | 33 |
34 | ![image](../figures/intro/ecosystem.lightbg.png) 35 |
36 | 37 | 38 | 39 | ### Basic array tour 40 | 41 | ```python 42 | In [18]: a = np.array([0,1,2,3,4,5], dtype=int) 43 | 44 | In [19]: a 45 | Out[19]: array([0, 1, 2, 3, 4, 5]) 46 | 47 | In [20]: a[1:3] 48 | Out[20]: array([1, 2]) 49 | 50 | In [21]: a.ndim 51 | Out[21]: 1 52 | 53 | In [22]: a.shape 54 | Out[22]: (6,) 55 | ``` 56 | 57 | 58 | 59 | ### Simple 2D 60 | ```python 61 | In [24]: b = np.array([[0,1,2],[3,4,5],[6,7,8]], dtype=float) 62 | 63 | In [25]: b.ndim 64 | Out[25]: 2 65 | 66 | In [26]: b.shape 67 | Out[26]: (3, 3) 68 | 69 | In [27]: b[1:3,1:3] 70 | Out[27]: 71 | array([[4., 5.], 72 | [7., 8.]]) 73 | 74 | In [31]: b[..., 1:3] 75 | Out[31]: 76 | array([[1., 2.], 77 | [4., 5.], 78 | [7., 8.]]) 79 | ``` 80 | 81 | 82 | 83 | But I thought arrays were just a pointer? 84 | ```python 85 | In [47]: c = np.arange(9) 86 | 87 | In [48]: c.data 88 | Out[48]: 89 | 90 | In [49]: c.strides 91 | Out[49]: (8,) 92 | 93 | In [50]: c.shape 94 | Out[50]: (9,) 95 | 96 | In [51]: d = c.reshape((3,3)) 97 | 98 | In [52]: d.data 99 | Out[52]: 100 | 101 | In [53]: d.strides 102 | Out[53]: (24, 8) 103 | 104 | In [54]: d.shape 105 | Out[54]: (3, 3) 106 | ``` 107 | 108 | 109 | 110 | ### Common arrays 111 | 112 | ```python 113 | In [66]: print np.arange(10) # Like range [0, 1, ..., 9] 114 | [0 1 2 3 4 5 6 7 8 9] 115 | 116 | In [67]: print np.arange(1,9, 2) # [1, 3, 5, 7] 117 | [1 3 5 7] 118 | 119 | In [68]: print np.linspace(0, 1, 6) # A linear space of [0,1] with 6 pts 120 | [ 0. 0.2 0.4 0.6 0.8 1. ] 121 | 122 | In [69]: print np.linspace(0, 1, 6, endpoint=False) # [0,1[ 123 | [ 0. 0.16666667 0.33333333 0.5 0.66666667 0.83333333] 124 | ``` 125 | 126 | 127 | 128 | ### Common arrays 129 | ```python 130 | In [70]: print np.ones((3,3)) # 3 X 3 2D array of 1's 131 | [[ 1. 1. 1.] 132 | [ 1. 1. 1.] 133 | [ 1. 1. 1.]] 134 | 135 | In [71]: print np.eye(3) 136 | [[ 1. 0. 0.] 137 | [ 0. 1. 0.] 138 | [ 0. 0. 1.]] 139 | 140 | In [72]: print np.diag(np.arange(4)) 141 | [[0 0 0 0] 142 | [0 1 0 0] 143 | [0 0 2 0] 144 | [0 0 0 3]] 145 | 146 | In [73]: print np.random.rand(4) # Uniform distribution 147 | [ 0.39259348 0.84921539 0.70292474 0.10054081] 148 | 149 | In [74]: print np.random.randn(4) # Gaussian distribution 150 | [ 0.53405047 -3.12422252 0.19564584 0.217296 ] 151 | ``` 152 | 153 | 154 | 155 | ### Fast operations 156 | 157 | Just like matlab, vectorized operations are much faster in NumPy 158 | ```python 159 | In [9]: %timeit [x + 1 for x in xrange(100000)] 160 | 100 loops, best of 3: 8.38 ms per loop 161 | 162 | In [10]: %timeit np.arange(100000) + 1 163 | 10000 loops, best of 3: 153 us per loop 164 | 165 | In [11]: a = range(100) 166 | In [12]: b = range(100) 167 | In [13]: %timeit [a[i]*b[i] for i in xrange(len(a))] 168 | 100000 loops, best of 3: 19.8 us per loop 169 | 170 | In [15]: c = np.arange(100) 171 | In [16]: d = np.arange(100) 172 | In [17]: %timeit c*d 173 | 100000 loops, best of 3: 2.25 us per loop 174 | ``` 175 | 176 | 177 | 178 | ### Scalar and aggregate operations 179 | 180 | ```python 181 | In [90]: b = np.arange(10) 182 | 183 | In [91]: b * 2 + 1 184 | Out[91]: array([ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]) 185 | 186 | In [93]: np.max(b) 187 | Out[93]: 9 188 | 189 | In [94]: np.sin(b) 190 | Out[94]: 191 | array([ 0. 
, 0.84147098, 0.90929743, 0.14112001, -0.7568025 , 192 | -0.95892427, -0.2794155 , 0.6569866 , 0.98935825, 0.41211849]) 193 | ``` 194 | 195 | 196 | 197 | ### Dot products 198 | ```python 199 | In [79]: a = np.arange(3) 200 | 201 | In [80]: b = np.arange(9, dtype=float).reshape((3,3)) 202 | 203 | In [81]: c = np.arange(9, dtype=float).reshape((3,3)) 204 | 205 | In [82]: np.dot(b, a) 206 | Out[82]: array([ 5., 14., 23.]) 207 | 208 | In [83]: np.dot(b,c) 209 | Out[83]: 210 | array([[ 15., 18., 21.], 211 | [ 42., 54., 66.], 212 | [ 69., 90., 111.]]) 213 | ``` 214 | 215 | 216 | 217 | ### Other pieces of NumPy to be aware of: 218 | ```python 219 | In [84]: np.linalg? 220 | 221 | In [85]: np.random? 222 | 223 | In [86]: np.fft? 224 | ``` 225 | -------------------------------------------------------------------------------- /markdown/intro/tour.md: -------------------------------------------------------------------------------- 1 | ### Tour of Scripts 2 | 3 | Let's look at a few simple examples. 4 | 5 | 6 | 7 | Hello Scientific World 8 | ```python 9 | import math 10 | r = math.pi / 2.0 11 | s = math.sin(r) 12 | print "Hello world, sin(%f)=%f" % (r,s) 13 | ``` 14 | 15 | Running in the shell: 16 | ```bash 17 | $ python examples/intro/01_hello_world.py 18 | Hello world, sin(1.570796)=1.000000 19 | ``` 20 | 21 | 22 | 23 | Input / Output 24 | ```python 25 | import math 26 | import os 27 | 28 | data_dir = os.path.join(os.path.dirname(__file__), "..", "data") 29 | infile = "numbers" 30 | outfile = "f_numbers" 31 | 32 | f = open(os.path.join(data_dir, infile), 'r') 33 | g = open(os.path.join(data_dir, outfile), 'w') 34 | 35 | def func(y): 36 | if y >= 0.0: 37 | return y**5.0*math.exp(-y) 38 | else: 39 | return 0.0 40 | 41 | for line in f: 42 | line = line.split() 43 | x, y = float(line[0]), float(line[1]) 44 | g.write("%g %12.5e\n" % (x,func(y))) 45 | 46 | f.close(); g.close() 47 | ``` 48 | 49 | 50 | 51 | Input / Ouput Continued 52 | ```bash 53 | $ cat examples/data/numbers 54 | 1 2 55 | 3 4 56 | 5 6 57 | 7 8 58 | 9 10 59 | $ python examples/intro/02_write_numbers.py 60 | Read from examples/intro/../data/numbers 61 | Wrote to examples/intro/../data/f_numbers 62 | $ cat examples/data/f_numbers 63 | 1 4.33073e+00 64 | 3 1.87552e+01 65 | 5 1.92748e+01 66 | 7 1.09924e+01 67 | 9 4.53999e+00 68 | ``` 69 | 70 | 71 | 72 | System commands 73 | 74 | ```python 75 | #!/usr/bin/env python 76 | import sys,os 77 | cmd = 'date' 78 | output = os.popen(cmd) 79 | lines = output.readlines() 80 | fail = output.close() 81 | 82 | if fail: print 'You do not have the date command'; sys.exit() 83 | 84 | for line in lines: 85 | line = line.split() 86 | print "The current time is %s on %s %s, %s" % (line[3],line[2],line[1],line[-1]) 87 | ``` 88 | 89 | ```bash 90 | $ ./examples/intro/03_call_sys_commands.py 91 | The current time is 11:50:25 on 24 Aug, 2013 92 | ``` 93 | 94 | 95 | 96 | Regular Expressions 97 | 98 | ``` 99 | @Book{Langtangen2011, 100 | author = {Hans Petter Langtangen}, 101 | title = {A Primer on Scientific Programming with Python}, 102 | publisher = {Springer}, 103 | year = {2011} 104 | } 105 | @Book{Langtangen2010, 106 | author = {Hans Petter Langtangen}, 107 | title = {Python Scripting for Computational Science}, 108 | publisher = {Springer}, 109 | year = {2010} 110 | } 111 | ``` 112 | 113 | 114 | 115 | Regular Expressions Continued 116 | ```python 117 | import os 118 | import re 119 | 120 | data_dir = os.path.join(os.path.dirname(__file__), "..", "data") 121 | infile = os.path.join(data_dir, "python.bib") 122 | 123 
| pattern1 = "@Book{(.*)," 124 | pattern2 = "\s+title\s+=\s+{(.*)}," 125 | 126 | print "Reading from", infile 127 | for line in file(infile): 128 | match = re.search(pattern1, line) 129 | if match: 130 | print "Found a book with the tag '%s'" % match.group(1) 131 | 132 | match = re.search(pattern2, line) 133 | if match: 134 | print "The title is '%s'" % match.group(1) 135 | ``` 136 | 137 | ```bash 138 | $ python examples/intro/04_regular_expressions.py 139 | Reading from examples/intro/../data/python.bib 140 | Found a book with the tag 'Langtangen2011' 141 | The title is 'A Primer on Scientific Programming with Python' 142 | Found a book with the tag 'Langtangen2010' 143 | The title is 'Python Scripting for Computational Science' 144 | ``` 145 | -------------------------------------------------------------------------------- /markdown/intro/why.md: -------------------------------------------------------------------------------- 1 | Why Python? 2 | ----------- 3 | 4 | ### The scientist's needs 5 | 6 | - Get data (simulation, experiment control) 7 | 8 | - Manipulate and process data. 9 | 10 | - Visualize results... to understand what we are doing! 11 | 12 | - Communicate results: produce figures for reports or publications, 13 | write presentations. 14 | 15 | 16 | 17 | ### Specifications 18 | 19 | - Existing **building blocks** corresponding to classical numerical methods or basic actions 20 | 21 | - Easy to learn 22 | 23 | - Easy to communicate with collaborators, students, and customers, so that 24 | the code can live within a lab or a company 25 | 26 | - Efficient code that executes quickly... 27 | 28 | - A single environment/language for everything 29 | 30 | 54 | 55 | 56 | 57 | ### Existing solutions 58 | 59 | Which tools do scientists use for their work? 60 | 61 | - Compiled languages: C, C++, Fortran 62 | - Matlab 63 | - Other scripting languages: Scilab, Octave, Igor, R, IDL, etc. 64 | 65 | 66 | 67 | **Compiled languages: C, C++, Fortran, etc.** 68 | 69 | - Advantages: 70 | 71 | - Very fast. Highly optimized compilers. For heavy computations, 72 | it's difficult to outperform these languages. 73 | 74 | - Some highly optimized scientific libraries have been written for 75 | these languages. Example: BLAS (vector/matrix operations). 76 | 77 | - Drawbacks: 78 | 79 | - Painful usage: no interactivity during development, mandatory 80 | compilation steps, verbose syntax (&, ::, }}, ; etc.), manual 81 | memory management (tricky in C). These are **difficult 82 | languages** for non-computer scientists. 83 | 84 | 85 | 86 | **Scripting languages: Matlab** 87 | 88 | - Advantages: 89 | 90 | - Very rich collection of libraries with numerous algorithms, for 91 | many different domains. Fast execution because these libraries 92 | are often written in a compiled language. 93 | 94 | - Pleasant development environment: comprehensive and well-organized 95 | help, integrated editor, etc. 96 | 97 | - Commercial support is available. 98 | 99 | - Drawbacks: 100 | 101 | - The base language is quite limited and can become restrictive for 102 | advanced users. 103 | 104 | - Not free. 105 | 106 | 107 | 108 | **Other scripting languages: Scilab, Octave, Igor, R, IDL, etc.** 109 | 110 | - Advantages: 111 | 112 | - Open-source, free, or at least cheaper than Matlab. 113 | 114 | - Some features can be very advanced (statistics in R, figures in 115 | Igor, etc.) 116 | 117 | - Drawbacks: 118 | 119 | - Fewer available algorithms than in Matlab, and the language is 120 | no more advanced. 121 | 122 | - Some of these tools are dedicated to a single domain.
For example, Gnuplot or 123 | xmgrace to draw curves. These programs are very powerful, but 124 | they are restricted to a single type of usage, such as plotting. 125 | 126 | 127 | 128 | **What about Python?** 129 | 130 | - Advantages: 131 | 132 | - Very rich scientific computing libraries (somewhat fewer than 133 | Matlab, though) 134 | 135 | - Well thought out language, allowing you to write very readable and 136 | well-structured code: we "code what we think". 137 | 138 | - Many libraries for tasks other than scientific computing (web 139 | server management, serial port access, etc.) 140 | 141 | - Free and open-source software, widely used, with a vibrant 142 | community. 143 | 144 | - Drawbacks: 145 | 146 | - Less pleasant development environment than, for example, Matlab 147 | (more geek-oriented). 148 | 149 | - Not all of the algorithms found in more specialized 150 | software or toolboxes are available. 151 | -------------------------------------------------------------------------------- /markdown/scale/Makefile: -------------------------------------------------------------------------------- 1 | all: slides pages 2 | 3 | slides: petsc4py-tutorial.md pyclaw-anatomy.md 4 | pandoc petsc4py-tutorial.md -t dzslides -s --mathjax -o petsc4py-tutorial-slides.html 5 | pandoc pyclaw-anatomy.md -t slidy -s --mathjax -o pyclaw-anatomy-slides.html 6 | 7 | pages: petsc4py-tutorial.md pyclaw-anatomy.md 8 | pandoc -t html5 -s --mathjax petsc4py-tutorial.md -o petsc4py-tutorial.html 9 | pandoc -t html5 -s --mathjax pyclaw-anatomy.md -o pyclaw-anatomy.html 10 | 11 | deploy: slides pages 12 | rsync -v -r ./ aron@ahmadia.net:domains/aron.ahmadia.net/web/public/pyhpc 13 | 14 | clean: 15 | rm *.html 16 | -------------------------------------------------------------------------------- /markdown/scale/euler_weak_scaling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/markdown/scale/euler_weak_scaling.png -------------------------------------------------------------------------------- /markdown/scale/pyclaw-anatomy.md: -------------------------------------------------------------------------------- 1 | # Anatomy of PyClaw 2 | 3 | ## Motivation 4 | 5 | * Complete example of modernizing a serial Fortran code for 6 | large-scale computing 7 | * Start with Clawpack, a serial grid-based Fortran code 8 | * Wrap legacy Fortran code using f2py 9 | * Prototype, visualize, and test from Python 10 | * Scale serial grid code using petsc4py 11 | 12 | ## Prototyping Example 13 | 14 | (Live demo of PyClaw notebook) 15 | 16 | ## Clawpack Overview 17 | 18 | * **C**onservation **L**aws **Pack**age:
19 | + General hyperbolic PDEs in 1/2/3 dimensions 20 | * Developed by Randy LeVeque, Marsha Berger, and various others over the past 15 years in Fortran 77 21 | * Dozens of “Riemann solvers” by numerous additional contributors 22 | * Includes adaptive mesh refinement (AMRClaw) 23 | * Textbook and many examples available 24 | * Models tidal waves, storm surges, acoustic waves, and pyroclastic flows/surges 25 | * Available at: www.clawpack.org 26 | 27 | ## Wrapping Clawpack with f2py 28 | 29 | * We wrap at the hyperbolic solver level: the Fortran code for 30 | advancing the solution on a grid by one time step 31 | * The solver is generic over different physics; it accepts a pointer 32 | to a Fortran subroutine for computing the Riemann kernel at each 33 | interface 34 | * We use f2py to wrap both the `step` subroutine and the `rp` Riemann 35 | kernel. We don't call the Riemann kernel from Python; it is simply 36 | passed as an argument to the f2py-wrapped `step` function. 37 | 38 | ## Wrapping 1-D Wave Propagation Kernels with f2py 39 | 40 | ``` 41 | subroutine step1(num_eqn,num_waves,num_ghost,num_aux,mx,q,aux,dx, & 42 | dt,method,mthlim,cfl,f,wave,s,amdq,apdq,dtdx,use_fwave,rp1) 43 | ``` 44 | 45 | We need to give f2py a little information about how we intend to use 46 | the data to avoid making unnecessary copies. We do this by adding 47 | f2py directives after the function declaration. 48 | 49 | ``` 50 | !f2py intent(in,out) q 51 | !f2py intent(out) cfl 52 | !f2py intent(in) num_eqn 53 | !f2py intent(in) num_ghost 54 | !f2py intent(in) mx 55 | !f2py optional f, amdq, apdq, dtdx, s, wave 56 | ``` 57 | The variables `num_eqn`, `num_waves`, and `num_aux` are automatically inferred 58 | from the dimensions of the input arrays. 59 | 60 | ## Wrapping 2-D Wave Propagation Kernels with f2py 61 | 62 | The 2-D picture is only slightly more complicated: 63 | 64 | ``` 65 | subroutine step2(maxm,num_eqn,num_waves,num_aux,num_ghost,mx,my, & 66 | qold,qnew,aux,dx,dy,dt,method,mthlim,cfl, & 67 | qadd,fadd,gadd,q1d,dtdx1d,dtdy1d, & 68 | aux1,aux2,aux3,work,mwork,use_fwave,rpn2,rpt2) 69 | ``` 70 | 71 | Note that we're being slightly less verbose here, only explicitly 72 | specifying the output variable `cfl` as well as the modified array `qnew`. 73 | 74 | ``` 75 | !f2py intent(out) cfl 76 | !f2py intent(in,out) qnew 77 | !f2py optional q1d, qadd, fadd, gadd, dtdx1d, dtdy1d 78 | ``` 79 | 80 | ## Wrapping Riemann Fortran Kernels as Function Pointers with f2py 81 | 82 | ``` 83 | subroutine rp1(maxm,meqn,mwaves,mbc,mx,ql,qr,auxl,auxr, & 84 | wave,s,amdq,apdq,num_aux) 85 | ``` 86 | 87 | The function pointer is wrapped as-is! 88 | 89 | ## Calling f2py-Wrapped Wave Propagation Kernels from Python 90 | 91 | Here's the original Fortran interface: 92 | 93 | ``` 94 | subroutine step1(num_eqn,num_waves,num_ghost,num_aux,mx,q,aux,dx, & 95 | dt,method,mthlim,cfl,f,wave,s,amdq,apdq,dtdx,use_fwave,rp1) 96 | ``` 97 | 98 | Here's the function call from Python.
99 | 100 | ``` 101 | rp1 = self.rp.rp1._cpointer 102 | self.qbc,cfl = self.fmod.step1(num_ghost,mx,self.qbc,self.auxbc,dx, 103 | dt,self._method,self._mthlim,self.fwave,rp1) 104 | ``` 105 | 106 | ## Enabling Grid-Based Parallelism with PETSc DMDA 107 | 108 | * Grid-based serial solver operates on a grid augmented by "ghost 109 | cells" 110 | * Exact same concept used by PETSc DMDA 111 | + each process is responsible for one grid, exchanges boundary 112 | information with neighbors 113 | * Changes to PyClaw (Less than 100 LOC): 114 | + Store grid data in DMDA instead of NumPy array 115 | + Calculate global CFL condition by reduction 116 | + Update neighbor information after successful time steps 117 | 118 | ## PyClaw Architecture 119 | 120 | 121 | 122 | ## Scaling 123 | 124 | 125 | 126 | ## Verification and Validation 127 | 128 | * Code is prototyped and verified from Python scripts 129 | * Validated against Clawpack 130 | + which in turn has been validated against other codes and models 131 | * Verified by developers before commits 132 | * Also verified continuously by Travis CI on GitHub 133 | -------------------------------------------------------------------------------- /markdown/scale/pyclaw_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/markdown/scale/pyclaw_architecture.png -------------------------------------------------------------------------------- /notebooks/04_yt_Introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:7c68cdd34ce71c042fa3c4badc4587693f1cc1b6aa0b3c99a4a63a1db6fe57f9" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# Welcome to the yt quickstart!\n", 16 | "\n", 17 | "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n", 18 | "\n", 19 | "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook. The documentation exists at http://yt-project.org/doc/. If you encounter problems, look for help here: http://yt-project.org/doc/help/index.html.\n", 20 | "\n", 21 | "## Acquiring the datasets for this tutorial\n", 22 | "\n", 23 | "If you are executing these tutorials interactively, you need some sample datasets on which to run the code. You can download these datasets at http://yt-project.org/data/. The datasets necessary for each lesson are noted next to the corresponding tutorial.\n", 24 | "\n", 25 | "## What's Next?\n", 26 | "\n", 27 | "The Notebooks are meant to be explored in this order:\n", 28 | "\n", 29 | "1. Introduction\n", 30 | "2. Data Inspection (IsolatedGalaxy dataset)\n", 31 | "3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n", 32 | "4. Data Objects and Time Series (IsolatedGalaxy dataset)\n", 33 | "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n", 34 | "6. Volume Rendering (IsolatedGalaxy dataset)" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "metadata": {}, 40 | "source": [ 41 | "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. 
You will need to set `download_datasets` to True before using it." 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "collapsed": false, 47 | "input": [ 48 | "download_datasets = False\n", 49 | "if download_datasets:\n", 50 | " !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n", 51 | " print \"Got enzo_tiny_cosmology\"\n", 52 | " !tar xf enzo_tiny_cosmology.tar\n", 53 | " \n", 54 | " !curl -sSO http://yt-project.org/data/Enzo_64.tar\n", 55 | " print \"Got Enzo_64\"\n", 56 | " !tar xf Enzo_64.tar\n", 57 | " \n", 58 | " !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n", 59 | " print \"Got IsolatedGalaxy\"\n", 60 | " !tar xf IsolatedGalaxy.tar\n", 61 | " \n", 62 | " print \"All done!\"" 63 | ], 64 | "language": "python", 65 | "metadata": {}, 66 | "outputs": [] 67 | } 68 | ], 69 | "metadata": {} 70 | } 71 | ] 72 | } -------------------------------------------------------------------------------- /notebooks/05_Data_Inspection_with_yt.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:a8fe78715c1f3900c37c675d84320fe65f0ba8734abba60fd12e74d957e5d8ee" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# Starting Out and Loading Data\n", 16 | "\n", 17 | "We're going to get started by loading up yt. This next command brings all of the libraries into memory and sets up our environment." 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "collapsed": false, 23 | "input": [ 24 | "import yt" 25 | ], 26 | "language": "python", 27 | "metadata": {}, 28 | "outputs": [] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "Now that we've loaded yt, we can load up some data. Let's load the `IsolatedGalaxy` dataset." 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "collapsed": false, 40 | "input": [ 41 | "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")" 42 | ], 43 | "language": "python", 44 | "metadata": {}, 45 | "outputs": [] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "## Fields and Facts\n", 52 | "\n", 53 | "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation. Now, the first time you access the \"index\" it will read and load the mesh and then determine where data is placed in the physical domain and on disk. 
Once it knows that, yt can tell you some statistics about the simulation:" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "collapsed": false, 59 | "input": [ 60 | "ds.print_stats()" 61 | ], 62 | "language": "python", 63 | "metadata": {}, 64 | "outputs": [] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "yt can also tell you the fields it found on disk:" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "collapsed": false, 76 | "input": [ 77 | "ds.field_list" 78 | ], 79 | "language": "python", 80 | "metadata": {}, 81 | "outputs": [] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": {}, 86 | "source": [ 87 | "And, all of the fields it thinks it knows how to generate:" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "collapsed": false, 93 | "input": [ 94 | "ds.derived_field_list" 95 | ], 96 | "language": "python", 97 | "metadata": {}, 98 | "outputs": [] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | "yt can also transparently generate fields. However, we encourage you to examine exactly what yt is doing when it generates those fields. To see, you can ask for the source of a given field." 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "collapsed": false, 110 | "input": [ 111 | "print ds.field_info[\"gas\", \"vorticity_x\"].get_source()" 112 | ], 113 | "language": "python", 114 | "metadata": {}, 115 | "outputs": [] 116 | }, 117 | { 118 | "cell_type": "markdown", 119 | "metadata": {}, 120 | "source": [ 121 | "yt stores information about the domain of the simulation:" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "collapsed": false, 127 | "input": [ 128 | "print ds.domain_width" 129 | ], 130 | "language": "python", 131 | "metadata": {}, 132 | "outputs": [] 133 | }, 134 | { 135 | "cell_type": "markdown", 136 | "metadata": {}, 137 | "source": [ 138 | "yt can also convert this into various units:" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "collapsed": false, 144 | "input": [ 145 | "print ds.domain_width.in_units(\"kpc\")\n", 146 | "print ds.domain_width.in_units(\"au\")\n", 147 | "print ds.domain_width.in_units(\"mile\")" 148 | ], 149 | "language": "python", 150 | "metadata": {}, 151 | "outputs": [] 152 | }, 153 | { 154 | "cell_type": "markdown", 155 | "metadata": {}, 156 | "source": [ 157 | "# Mesh Structure\n", 158 | "\n", 159 | "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh. For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on." 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "collapsed": false, 165 | "input": [ 166 | "print ds.index.grid_left_edge" 167 | ], 168 | "language": "python", 169 | "metadata": {}, 170 | "outputs": [] 171 | }, 172 | { 173 | "cell_type": "markdown", 174 | "metadata": {}, 175 | "source": [ 176 | "But, you may have to access information about individual grid objects! Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it. The index (`ds.index` here) has an attribute `grids` which is all of the grid objects." 
177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "collapsed": false, 182 | "input": [ 183 | "print ds.index.grids[1]" 184 | ], 185 | "language": "python", 186 | "metadata": {}, 187 | "outputs": [] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "collapsed": false, 192 | "input": [ 193 | "g = ds.index.grids[1]\n", 194 | "print g" 195 | ], 196 | "language": "python", 197 | "metadata": {}, 198 | "outputs": [] 199 | }, 200 | { 201 | "cell_type": "markdown", 202 | "metadata": {}, 203 | "source": [ 204 | "Grids have dimensions, extents, level, and even a list of Child grids." 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "collapsed": false, 210 | "input": [ 211 | "g.ActiveDimensions" 212 | ], 213 | "language": "python", 214 | "metadata": {}, 215 | "outputs": [] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "collapsed": false, 220 | "input": [ 221 | "g.LeftEdge, g.RightEdge" 222 | ], 223 | "language": "python", 224 | "metadata": {}, 225 | "outputs": [] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "collapsed": false, 230 | "input": [ 231 | "g.Level" 232 | ], 233 | "language": "python", 234 | "metadata": {}, 235 | "outputs": [] 236 | }, 237 | { 238 | "cell_type": "code", 239 | "collapsed": false, 240 | "input": [ 241 | "g.Children" 242 | ], 243 | "language": "python", 244 | "metadata": {}, 245 | "outputs": [] 246 | }, 247 | { 248 | "cell_type": "markdown", 249 | "metadata": {}, 250 | "source": [ 251 | "## Advanced Grid Inspection\n", 252 | "\n", 253 | "If we want to examine grids only at a given level, we can! Not only that, but we can load data and take a look at various fields.\n", 254 | "\n", 255 | "*This section can be skipped!*" 256 | ] 257 | }, 258 | { 259 | "cell_type": "code", 260 | "collapsed": false, 261 | "input": [ 262 | "gs = ds.index.select_grids(ds.index.max_level)" 263 | ], 264 | "language": "python", 265 | "metadata": {}, 266 | "outputs": [] 267 | }, 268 | { 269 | "cell_type": "code", 270 | "collapsed": false, 271 | "input": [ 272 | "g2 = gs[0]\n", 273 | "print g2\n", 274 | "print g2.Parent\n", 275 | "print g2.get_global_startindex()" 276 | ], 277 | "language": "python", 278 | "metadata": {}, 279 | "outputs": [] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "collapsed": false, 284 | "input": [ 285 | "print g2[\"density\"][:,:,0]" 286 | ], 287 | "language": "python", 288 | "metadata": {}, 289 | "outputs": [] 290 | }, 291 | { 292 | "cell_type": "code", 293 | "collapsed": false, 294 | "input": [ 295 | "print (g2.Parent.child_mask == 0).sum() * 8\n", 296 | "print g2.ActiveDimensions.prod()" 297 | ], 298 | "language": "python", 299 | "metadata": {}, 300 | "outputs": [] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "collapsed": false, 305 | "input": [ 306 | "for f in ds.field_list:\n", 307 | " fv = g[f]\n", 308 | " if fv.size == 0: continue\n", 309 | " print f, fv.min(), fv.max()" 310 | ], 311 | "language": "python", 312 | "metadata": {}, 313 | "outputs": [] 314 | }, 315 | { 316 | "cell_type": "markdown", 317 | "metadata": {}, 318 | "source": [ 319 | "# Examining Data in Regions\n", 320 | "\n", 321 | "yt provides data object selectors. In subsequent notebooks we'll examine these in more detail, but we can select a sphere of data and perform a number of operations on it. yt makes it easy to operate on fluid fields in an object in *bulk*, but you can also examine individual field values.\n", 322 | "\n", 323 | "This creates a sphere selector positioned at the most dense point in the simulation that has a radius of 10 kpc." 
324 | ] 325 | }, 326 | { 327 | "cell_type": "code", 328 | "collapsed": false, 329 | "input": [ 330 | "sp = ds.sphere(\"max\", (10, 'kpc'))" 331 | ], 332 | "language": "python", 333 | "metadata": {}, 334 | "outputs": [] 335 | }, 336 | { 337 | "cell_type": "code", 338 | "collapsed": false, 339 | "input": [ 340 | "print sp" 341 | ], 342 | "language": "python", 343 | "metadata": {}, 344 | "outputs": [] 345 | }, 346 | { 347 | "cell_type": "markdown", 348 | "metadata": {}, 349 | "source": [ 350 | "We can calculate a bunch of bulk quantities. Here's that list, but there's a list in the docs, too!" 351 | ] 352 | }, 353 | { 354 | "cell_type": "code", 355 | "collapsed": false, 356 | "input": [ 357 | "print sp.quantities.keys()" 358 | ], 359 | "language": "python", 360 | "metadata": {}, 361 | "outputs": [] 362 | }, 363 | { 364 | "cell_type": "markdown", 365 | "metadata": {}, 366 | "source": [ 367 | "Let's look at the total mass. This is how you call a given quantity. yt calls these \"Derived Quantities\". We'll talk about a few in a later notebook." 368 | ] 369 | }, 370 | { 371 | "cell_type": "code", 372 | "collapsed": false, 373 | "input": [ 374 | "print sp.quantities.total_mass()" 375 | ], 376 | "language": "python", 377 | "metadata": {}, 378 | "outputs": [] 379 | } 380 | ], 381 | "metadata": {} 382 | } 383 | ] 384 | } -------------------------------------------------------------------------------- /notebooks/06_Simple_Visualization_with_yt.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:c00ba7fdbbd9ea957d06060ad70f06f629b1fd4ebf5379c1fdad2697ab0a4cd6" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# Simple Visualizations of Data\n", 16 | "\n", 17 | "Just like in our first notebook, we have to load yt and then some data." 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "collapsed": false, 23 | "input": [ 24 | "import yt" 25 | ], 26 | "language": "python", 27 | "metadata": {}, 28 | "outputs": [] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "For this notebook, we'll load up a cosmology dataset." 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "collapsed": false, 40 | "input": [ 41 | "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n", 42 | "print \"Redshift =\", ds.current_redshift" 43 | ], 44 | "language": "python", 45 | "metadata": {}, 46 | "outputs": [] 47 | }, 48 | { 49 | "cell_type": "markdown", 50 | "metadata": {}, 51 | "source": [ 52 | "In the terms that yt uses, a projection is a line integral through the domain. This can either be unweighted (in which case a column density is returned) or weighted, in which case an average value is returned. Projections are, like all other data objects in yt, full-fledged data objects that churn through data and present that to you. However, we also provide a simple method of creating Projections and plotting them in a single step. This is called a Plot Window, here specifically known as a `ProjectionPlot`. One thing to note is that in yt, we project all the way through the entire domain at a single time. This means that the first call to projecting can be somewhat time consuming, but panning, zooming and plotting are all quite fast.\n", 53 | "\n", 54 | "yt is designed to make it easy to make nice plots and straightforward to modify those plots directly. 
The cookbook in the documentation includes detailed examples of this." 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "collapsed": false, 60 | "input": [ 61 | "p = yt.ProjectionPlot(ds, \"y\", \"density\")\n", 62 | "p.show()" 63 | ], 64 | "language": "python", 65 | "metadata": {}, 66 | "outputs": [] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "The `show` command simply sends the plot to the IPython notebook. You can also call `p.save()` which will save the plot to the file system. This function accepts an argument, which will be pre-prended to the filename and can be used to name it based on the width or to supply a location.\n", 73 | "\n", 74 | "Now we'll zoom and pan a bit." 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "collapsed": false, 80 | "input": [ 81 | "p.zoom(2.0)" 82 | ], 83 | "language": "python", 84 | "metadata": {}, 85 | "outputs": [] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "collapsed": false, 90 | "input": [ 91 | "p.pan_rel((0.1, 0.0))" 92 | ], 93 | "language": "python", 94 | "metadata": {}, 95 | "outputs": [] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "collapsed": false, 100 | "input": [ 101 | "p.zoom(10.0)" 102 | ], 103 | "language": "python", 104 | "metadata": {}, 105 | "outputs": [] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "collapsed": false, 110 | "input": [ 111 | "p.pan_rel((-0.25, -0.5))" 112 | ], 113 | "language": "python", 114 | "metadata": {}, 115 | "outputs": [] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "collapsed": false, 120 | "input": [ 121 | "p.zoom(0.1)" 122 | ], 123 | "language": "python", 124 | "metadata": {}, 125 | "outputs": [] 126 | }, 127 | { 128 | "cell_type": "markdown", 129 | "metadata": {}, 130 | "source": [ 131 | "If we specify multiple fields, each time we call `show` we get multiple plots back. Same for `save`!" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "collapsed": false, 137 | "input": [ 138 | "p = yt.ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n", 139 | "p.show()" 140 | ], 141 | "language": "python", 142 | "metadata": {}, 143 | "outputs": [] 144 | }, 145 | { 146 | "cell_type": "markdown", 147 | "metadata": {}, 148 | "source": [ 149 | "We can adjust the colormap on a field-by-field basis." 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "collapsed": false, 155 | "input": [ 156 | "p.set_cmap(\"temperature\", \"hot\")" 157 | ], 158 | "language": "python", 159 | "metadata": {}, 160 | "outputs": [] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "And, we can re-center the plot on different locations. One possible use of this would be to make a single `ProjectionPlot` which you move around to look at different regions in your simulation, saving at each one." 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "collapsed": false, 172 | "input": [ 173 | "v, c = ds.find_max(\"density\")\n", 174 | "p.set_center((c[0], c[1]))\n", 175 | "p.zoom(10)" 176 | ], 177 | "language": "python", 178 | "metadata": {}, 179 | "outputs": [] 180 | }, 181 | { 182 | "cell_type": "markdown", 183 | "metadata": {}, 184 | "source": [ 185 | "Okay, let's load up a bigger simulation (from `Enzo_64` this time) and make a slice plot." 
186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "collapsed": false, 191 | "input": [ 192 | "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n", 193 | "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n", 194 | "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n", 195 | "s.zoom(10.0)" 196 | ], 197 | "language": "python", 198 | "metadata": {}, 199 | "outputs": [] 200 | }, 201 | { 202 | "cell_type": "markdown", 203 | "metadata": {}, 204 | "source": [ 205 | "We can adjust the logging of various fields:" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "collapsed": false, 211 | "input": [ 212 | "s.set_log(\"velocity_magnitude\", True)" 213 | ], 214 | "language": "python", 215 | "metadata": {}, 216 | "outputs": [] 217 | }, 218 | { 219 | "cell_type": "markdown", 220 | "metadata": {}, 221 | "source": [ 222 | "yt provides many different annotations for your plots. You can see all of these in the documentation, or if you type `s.annotate_` and press tab, a list will show up here. We'll annotate with velocity arrows." 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "collapsed": false, 228 | "input": [ 229 | "s.annotate_velocity()" 230 | ], 231 | "language": "python", 232 | "metadata": {}, 233 | "outputs": [] 234 | }, 235 | { 236 | "cell_type": "markdown", 237 | "metadata": {}, 238 | "source": [ 239 | "Contours can also be overlaid:" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "collapsed": false, 245 | "input": [ 246 | "s = yt.SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n", 247 | "s.annotate_contour(\"temperature\")\n", 248 | "s.zoom(2.5)" 249 | ], 250 | "language": "python", 251 | "metadata": {}, 252 | "outputs": [] 253 | }, 254 | { 255 | "cell_type": "markdown", 256 | "metadata": {}, 257 | "source": [ 258 | "Finally, we can save out to the file system." 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "collapsed": false, 264 | "input": [ 265 | "s.save()" 266 | ], 267 | "language": "python", 268 | "metadata": {}, 269 | "outputs": [] 270 | } 271 | ], 272 | "metadata": {} 273 | } 274 | ] 275 | } -------------------------------------------------------------------------------- /notebooks/07_Derived_Fields_in_yt.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:eca573e749829cacda0a8c07c6d5d11d07a5de657563a44b8c4ffff8f735caed" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# Derived Fields and Profiles\n", 16 | "\n", 17 | "One of the most powerful features in yt is the ability to create derived fields that act and look exactly like fields that exist on disk. This means that they will be generated on demand and can be used anywhere a field that exists on disk would be used. Additionally, you can create them by just writing python functions." 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "collapsed": false, 23 | "input": [ 24 | "%matplotlib inline\n", 25 | "import yt\n", 26 | "import numpy as np\n", 27 | "from yt import derived_field\n", 28 | "from matplotlib import pylab" 29 | ], 30 | "language": "python", 31 | "metadata": {}, 32 | "outputs": [] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "## Derived Fields\n", 39 | "\n", 40 | "This is an example of the simplest possible way to create a derived field. 
All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on. Fields can be defined in the way in the next cell. What this does is create a function which accepts two arguments and then provide the units for that field. In this case, our field is `dinosaurs` and our units are `K*cm/s`. The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`." 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "collapsed": false, 46 | "input": [ 47 | "@derived_field(name = \"dinosaurs\", units = \"K * cm/s\")\n", 48 | "def _dinos(field, data):\n", 49 | " return data[\"temperature\"] * data[\"velocity_magnitude\"]" 50 | ], 51 | "language": "python", 52 | "metadata": {}, 53 | "outputs": [] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "metadata": {}, 58 | "source": [ 59 | "One important thing to note is that derived fields must be defined *before* any datasets are loaded. Let's load up our data and take a look at some quantities." 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "collapsed": false, 65 | "input": [ 66 | "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n", 67 | "dd = ds.all_data()\n", 68 | "print dd.quantities.keys()" 69 | ], 70 | "language": "python", 71 | "metadata": {}, 72 | "outputs": [] 73 | }, 74 | { 75 | "cell_type": "markdown", 76 | "metadata": {}, 77 | "source": [ 78 | "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy? We can do that by examining the `extrema` quantity -- the exact same way that we would for density, temperature, and so on." 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "collapsed": false, 84 | "input": [ 85 | "print dd.quantities.extrema(\"dinosaurs\")" 86 | ], 87 | "language": "python", 88 | "metadata": {}, 89 | "outputs": [] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "We can do the same for the average quantities as well." 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "collapsed": false, 101 | "input": [ 102 | "print dd.quantities.weighted_average_quantity(\"dinosaurs\", weight=\"temperature\")" 103 | ], 104 | "language": "python", 105 | "metadata": {}, 106 | "outputs": [] 107 | }, 108 | { 109 | "cell_type": "markdown", 110 | "metadata": {}, 111 | "source": [ 112 | "## A Few Other Quantities\n", 113 | "\n", 114 | "We can ask other quantities of our data, as well. For instance, this sequence of operations will find the most dense point, center a sphere on it, calculate the bulk velocity of that sphere, calculate the baryonic angular momentum vector, and then the density extrema. All of this is done in a memory conservative way: if you have an absolutely enormous dataset, yt will split that dataset into pieces, apply intermediate reductions and then a final reduction to calculate your quantity." 
115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "collapsed": false, 120 | "input": [ 121 | "sp = ds.sphere(\"max\", (10.0, 'kpc'))\n", 122 | "bv = sp.quantities.bulk_velocity()\n", 123 | "L = sp.quantities.angular_momentum_vector()\n", 124 | "rho_min, rho_max = sp.quantities.extrema(\"density\")\n", 125 | "print bv\n", 126 | "print L\n", 127 | "print rho_min, rho_max" 128 | ], 129 | "language": "python", 130 | "metadata": {}, 131 | "outputs": [] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "## Profiles\n", 138 | "\n", 139 | "yt provides the ability to bin in 1, 2 and 3 dimensions. This means discretizing in one or more dimensions of phase space (density, temperature, etc) and then calculating either the total value of a field in each bin or the average value of a field in each bin.\n", 140 | "\n", 141 | "We do this using the objects `Profile1D`, `Profile2D`, and `Profile3D`. The first two are the most common since they are the easiest to visualize.\n", 142 | "\n", 143 | "This first set of commands manually creates a profile object the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the density-weighted average of the fields `temperature` and (previously-defined) `dinosaurs`. We then plot it in a loglog plot." 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "collapsed": false, 149 | "input": [ 150 | "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n", 151 | "prof.add_fields([\"temperature\",\"dinosaurs\"])\n", 152 | "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")\n", 153 | "pylab.xlabel('Density $(g/cm^3)$')\n", 154 | "pylab.ylabel('Temperature $(K)$')" 155 | ], 156 | "language": "python", 157 | "metadata": {}, 158 | "outputs": [] 159 | }, 160 | { 161 | "cell_type": "markdown", 162 | "metadata": {}, 163 | "source": [ 164 | "Now we plot the `dinosaurs` field." 165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "collapsed": false, 170 | "input": [ 171 | "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')\n", 172 | "pylab.xlabel('Density $(g/cm^3)$')\n", 173 | "pylab.ylabel('Dinosaurs $(K cm / s)$')" 174 | ], 175 | "language": "python", 176 | "metadata": {}, 177 | "outputs": [] 178 | }, 179 | { 180 | "cell_type": "markdown", 181 | "metadata": {}, 182 | "source": [ 183 | "If we want to see the total mass in every bin, we profile the `cell_mass` field with no weight. Specifying `weight=None` will simply take the total value in every bin and add that up." 184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "collapsed": false, 189 | "input": [ 190 | "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n", 191 | "prof.add_fields([\"cell_mass\"])\n", 192 | "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')\n", 193 | "pylab.xlabel('Density $(g/cm^3)$')\n", 194 | "pylab.ylabel('Cell mass $(M_\\odot)$')" 195 | ], 196 | "language": "python", 197 | "metadata": {}, 198 | "outputs": [] 199 | }, 200 | { 201 | "cell_type": "markdown", 202 | "metadata": {}, 203 | "source": [ 204 | "In addition to the low-level `ProfileND` interface, it's also quite straightforward to quickly create plots of profiles using the `ProfilePlot` class. 
Let's redo the last plot using `ProfilePlot`" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "collapsed": false, 210 | "input": [ 211 | "prof = yt.ProfilePlot(sp, 'density', 'cell_mass', weight_field=None)\n", 212 | "prof.set_unit('cell_mass', 'Msun')\n", 213 | "prof.show()" 214 | ], 215 | "language": "python", 216 | "metadata": {}, 217 | "outputs": [] 218 | }, 219 | { 220 | "cell_type": "markdown", 221 | "metadata": {}, 222 | "source": [ 223 | "## Field Parameters\n", 224 | "\n", 225 | "Field parameters are a method of passing information to derived fields. For instance, you might pass in information about a vector you want to use as a basis for a coordinate transformation. yt often uses things like `bulk_velocity` to identify velocities that should be subtracted off. Here we show how that works:" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "collapsed": false, 231 | "input": [ 232 | "sp_small = ds.sphere(\"max\", (50.0, 'kpc'))\n", 233 | "bv = sp_small.quantities.bulk_velocity()\n", 234 | "\n", 235 | "sp = ds.sphere(\"max\", (0.1, 'Mpc'))\n", 236 | "rv1 = sp.quantities.extrema(\"radial_velocity\")\n", 237 | "\n", 238 | "sp.clear_data()\n", 239 | "sp.set_field_parameter(\"bulk_velocity\", bv)\n", 240 | "rv2 = sp.quantities.extrema(\"radial_velocity\")\n", 241 | "\n", 242 | "print bv\n", 243 | "print rv1\n", 244 | "print rv2" 245 | ], 246 | "language": "python", 247 | "metadata": {}, 248 | "outputs": [] 249 | } 250 | ], 251 | "metadata": {} 252 | } 253 | ] 254 | } 255 | -------------------------------------------------------------------------------- /notebooks/08_Volume_Rendering_in_yt.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:2a24bbe82955f9d948b39cbd1b1302968ff57f62f73afb2c7a5c4953393d00ae" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# A Brief Demo of Volume Rendering\n", 16 | "\n", 17 | "This shows a small amount of volume rendering. Really, just enough to get your feet wet!" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "collapsed": false, 23 | "input": [ 24 | "import yt\n", 25 | "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")" 26 | ], 27 | "language": "python", 28 | "metadata": {}, 29 | "outputs": [] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "metadata": {}, 34 | "source": [ 35 | "To create a volume rendering, we need a camera and a transfer function. We'll use the `ColorTransferFunction`, which accepts (in log space) the minimum and maximum bounds of our transfer function. This means behavior for data outside these values is undefined.\n", 36 | "\n", 37 | "We then add on \"layers\" like an onion. This function can accept a width (here specified) in data units, and also a color map. Here we add on four layers.\n", 38 | "\n", 39 | "Finally, we create a camera. The focal point is `[0.5, 0.5, 0.5]`, the width is 20 kpc (including front-to-back integration) and we specify a transfer function. Once we've done that, we call `show` to actually cast our rays and display them inline." 
40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "collapsed": false, 45 | "input": [ 46 | "tf = yt.ColorTransferFunction((-28, -24))\n", 47 | "tf.add_layers(4, w=0.01)\n", 48 | "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n", 49 | "cam.show()" 50 | ], 51 | "language": "python", 52 | "metadata": {}, 53 | "outputs": [] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "metadata": {}, 58 | "source": [ 59 | "If we want to apply a clipping, we can specify the `clip_ratio`. This will clip the upper bounds to this value times the standard deviation of the values in the image array." 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "collapsed": false, 65 | "input": [ 66 | "cam.show(clip_ratio=4)" 67 | ], 68 | "language": "python", 69 | "metadata": {}, 70 | "outputs": [] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": {}, 75 | "source": [ 76 | "There are several other options we can specify. Note that here we have turned on the use of ghost zones, shortened the data interval for the transfer function, and widened our gaussian layers." 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "collapsed": false, 82 | "input": [ 83 | "tf = yt.ColorTransferFunction((-28, -25))\n", 84 | "tf.add_layers(4, w=0.03)\n", 85 | "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n", 86 | "cam.show(clip_ratio=4.0)" 87 | ], 88 | "language": "python", 89 | "metadata": {}, 90 | "outputs": [] 91 | } 92 | ], 93 | "metadata": {} 94 | } 95 | ] 96 | } -------------------------------------------------------------------------------- /notebooks/Appendix_01_Resources.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "Appendix_01_Resources" 4 | }, 5 | "nbformat": 3, 6 | "nbformat_minor": 0, 7 | "worksheets": [ 8 | { 9 | "cells": [ 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "\n", 23 | "# Appendix 1: Resources\n", 24 | "# Python in HPC\n", 25 | "\n", 26 | "## Saudi Arabian High Performance Computing Users' Group Conference 2012\n", 27 | "\n", 28 | "## King Abdullah University of Science and Technology\n", 29 | "\n", 30 | "Author: \n", 31 | "Andy Terrel \n", 32 | "Texas Advanced Computing Center\n", 33 | "\n", 34 | "Presenter:\n", 35 | "Aron Ahmadia, PhD \n", 36 | "Chief Technology Officer \n", 37 | "RunMyCode.org\n", 38 | "\n", 39 | "[![Creative Commons License](/files/figures/creative_commons_logo.png)](http://creativecommons.org/licenses/by/3.0/deed.en_US) \n", 40 | "\n", 41 | "![KAUST Logo](/files/figures/kaust.png)" 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | "metadata": {}, 47 | "source": [ 48 | "---\n", 49 | "\n", 50 | "## Learning Python ##\n", 51 | "\n", 52 | "There are a very large number of resources for learning the python language. The issue of course is finding a resource that is attractive to the correct mindset. Very often something that a programmer is able to learn from has little resonance with an artist. For this reason, I am going to list off a couple of places that I find particularly good, but of course I have been programming for over a decade.\n", 53 | "\n", 54 | "While learning the language is important, the real value of Python is the numerous libraries and the community around the tools. With that said finding what your particular community is doing is pretty important, but there are a core set of tools that are used by most pythonistas. 
I list off a few of these tools for your consumption.\n", 55 | "\n", 56 | "Also, because we like to see what people are doing with python, I list off a few shining examples of python in the wide world of scientific computing. I have to admit that there are a huge number of possibilities but few in the realm of HPC. At the very least, a HPC person should know Python for its scripting abilities but as the other sections of this document underscore, Python is a good candidate for HPC codes as well.\n", 57 | "\n", 58 | "### Python Tutorials ###\n", 59 | "\n", 60 | "* **[Python Doc Tutorial](http://docs.python.org/tutorial/)**: This is the tutorial written by the developers of Python, best for programmers.\n", 61 | "* **[Think Python](http://www.greenteapress.com/thinkpython/)**: A book for non-programmers.\n", 62 | "\n", 63 | "### Python Tools ###\n", 64 | "\n", 65 | "* **[Enthought Python Distribution](http://www.enthought.com/products/epd.php)**: A distribution of the most commonly used tools by the community (free for academics).\n", 66 | "* **[NumPy](http://numpy.scipy.org/)**: Fast array library for Python.\n", 67 | "* **[SciPy](http://scipy.org)**: A collection of scientific libraries.\n", 68 | "* **[MatPlotLib](http://matplotlib.sourceforge.net/)**: A highly customizable 2D plotting library\n", 69 | "* **[IPython](http://ipython.org/)**: An interactive Python shell and parallel code manager. The IPython notebook has become very popular and allows users to use an interface similar to Mathematica on a supercomputer.\n", 70 | "\n", 71 | "### Python Stories ###\n", 72 | "\n", 73 | "* **[SciPy Conferences](http://conference.scipy.org/)**: The series of conferences associated with the scientific Python community see recent videos at [Next Day Video Youtube Channel](http://www.youtube.com/user/NextDayVideo/videos?flow=grid&view=0) or [PyVideo](http://pyvideo.org).\n", 74 | "* **[Python in Astronomy](http://www.youtube.com/watch?v=mLuIB8aW2KA&feature=youtu.be)**: Joshua Bloom from UC Berkeley gave a keynote talk at SciPy 2012 on \"Python as Super Glue for the Modern Scientific Workflow\"\n", 75 | "* **[NumFocus User Stories](http://numfocus.org/user-stories/)**: A foundation for scientific computing tools with a growing number of user stories.\n", 76 | "* **[PyCLAW](http://numerics.kaust.edu.sa/papers/pyclaw-sisc/pyclaw-sisc.html)**: A petascale application written in Python" 77 | ] 78 | }, 79 | { 80 | "cell_type": "markdown", 81 | "metadata": {}, 82 | "source": [ 83 | "---\n", 84 | "\n", 85 | "## Performance ##\n", 86 | "\n", 87 | "Despite all the great features outlined above, the (mis)perception is that Python is too slow for HPC Development. While it is true that Python might not be the best language to write your tight loop and expect a high percentage of peak flop rate, it turns out that Python has a number of tools to help switch those lower-level languages.\n", 88 | "\n", 89 | "To discuss performance I outline three sets of tools: profiling, speeding up the python code via c, and speeding up python via python. It is my view that Python has some of the best tools for looking at what your code's performance is then drilling down to the actual bottle necks. 
Speeding up code without profiling is about like trying to kill a deer with an uzi.\n", 90 | "\n", 91 | "\n", 92 | "### Python tools for profiling ###\n", 93 | "\n", 94 | "\n", 95 | "* **[profile and cProfile modules](http://docs.python.org/library/profile.html)**: These modules will give you your standard run time analysis and function call stack. It is pretty nice to save their statistics and using the pstats module you can look at the data in a number of ways.\n", 96 | "\n", 97 | "* **[kernprof](http://packages.python.org/line_profiler/)**: this tool puts together many routines for doing things like line by line code timing\n", 98 | "\n", 99 | "* **[memory_profiler](http://pypi.python.org/pypi/memory_profiler)**: this tool produces line by line memory foot print of your code.\n", 100 | "\n", 101 | "* **[IPython timers](http://ipython.org/ipython-doc/dev/interactive/tutorial.html#magic-functions)**: The `timeit` function is quite nice for seeing the differences in functions in a quick interactive way.\n", 102 | "\n", 103 | "\n", 104 | "### Speeding up Python ###\n", 105 | "\n", 106 | "\n", 107 | "* **[Cython](http://cython.org/)**: cython is the quickest way to take a few functions in python and get faster code. You can decorate the function with the cython variant of python and it generates c code. This is very maintable and can also link to other hand written code in c/c++/fortran quite easily. It is by far the preferred tool today.\n", 108 | "\n", 109 | "* **[ctypes](http://docs.python.org/library/ctypes.html)**: ctypes will allow you to write your functions in c and then wrap them quickly with its simple decoration of the code. It handles all the pain of casting from PyObjects and managing the gil to call the c function.\n", 110 | "\n", 111 | "Other approaches exist for writing your code in C but they are all somewhat more for taking a C/C++ library and wrapping it in Python.\n", 112 | "\n", 113 | "\n", 114 | "### Python-only approaches ###\n", 115 | "\n", 116 | "If you want to stay inside Python mostly, my advice is to figure out what data you are using and picking correct data types for implementing your algorithms. It has been my experience that you will usually get much farther by optimizing your data structures then any low level c hack. For example:\n", 117 | "\n", 118 | "* **[numpy](http://numpy.scipy.org/)**: a contingous array very fast for strided operations of arrays\n", 119 | "\n", 120 | "* **[numexpr](http://code.google.com/p/numexpr/)**: a numpy array expression optimizer. It allows for multithreading numpy array expressions and also gets rid of the numerous temporaries numpy makes because of restrictions of the Python interpreter.\n", 121 | "\n", 122 | "* **[blist](http://pypi.python.org/pypi/blist)**: a b-tree implementation of a list, very fast for inserting, indexing, and moving the internal nodes of a list\n", 123 | "\n", 124 | "* **[pandas](http://pandas.pydata.org/)**: data frames (or tables) very fast analytics on the arrays.\n", 125 | "\n", 126 | "* **[pytables](http://www.pytables.org/moin)**: fast structured hierarchical tables (like hdf5), especially good for out of core calculations and queries to large data." 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": {}, 132 | "source": [ 133 | "---\n", 134 | "\n", 135 | "## Scaling Python ##\n", 136 | "\n", 137 | "Right now there are a few distributed Python tools but the list is growing rapidly. 
Here I just give a list the tools and some domain tools that are used in HPC that provide a Python interface.\n", 138 | "\n", 139 | "### Distibuted computing libraries ###\n", 140 | "\n", 141 | "* **[mpi4py](http://mpi4py.scipy.org/)**: Fastest most complete mpi python wrapper.\n", 142 | "* **[disco](http://discoproject.org/)**: Python Hadoop-like framework.\n", 143 | "* **[IPython Parallel](http://ipython.org/ipython-doc/dev/parallel/index.html)**: A mpi or zero-mq based parallel python.\n", 144 | "* **[pathos](http://dev.danse.us/trac/pathos)**: framework for heterogeneous computing\n", 145 | "\n", 146 | "### Domain specific libraries ###\n", 147 | "\n", 148 | "* **[petsc4py](http://code.google.com/p/petsc4py/)**: Python bindings for PETSc, the Portable, Extensible Toolkit for Scientific Computation.\n", 149 | "* **[slepc4py](http://slepc4py.googlecode.com/)**: Python bindings for SLEPc, the Scalable Library for Eigenvalue Problem Computations.\n", 150 | "* **[tao4py](http://tao4py.googlecode.com/)**: Python bindings for TAO, the Toolkit for Advanced Optimization.\n", 151 | "* **[pyTrilinos](http://trilinos.sandia.gov/packages/pytrilinos/)**: Trilinos wrappers" 152 | ] 153 | } 154 | ], 155 | "metadata": {} 156 | } 157 | ] 158 | } -------------------------------------------------------------------------------- /notebooks/Appendix_03_Launch_MPI_Engines.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "Appendix_03_Launch_MPI_Engines" 4 | }, 5 | "nbformat": 3, 6 | "nbformat_minor": 0, 7 | "worksheets": [ 8 | { 9 | "cells": [ 10 | { 11 | "cell_type": "code", 12 | "collapsed": false, 13 | "input": [ 14 | "!ipcluster start --engines=MPI --n 4" 15 | ], 16 | "language": "python", 17 | "metadata": {}, 18 | "outputs": [ 19 | { 20 | "output_type": "stream", 21 | "stream": "stdout", 22 | "text": [ 23 | "2012-11-05 20:32:02,225.225 [IPClusterStart] Using existing profile dir: u'/Users/aterrel/.ipython/profile_default'\r\n", 24 | "2012-11-05 20:32:02.229 [IPClusterStart] Starting ipcluster with [daemon=False]\r\n", 25 | "2012-11-05 20:32:02.229 [IPClusterStart] Creating pid file: /Users/aterrel/.ipython/profile_default/pid/ipcluster.pid\r\n", 26 | "2012-11-05 20:32:02.229 [IPClusterStart] Starting Controller with LocalControllerLauncher\r\n" 27 | ] 28 | }, 29 | { 30 | "output_type": "stream", 31 | "stream": "stdout", 32 | "text": [ 33 | "2012-11-05 20:32:03.230 [IPClusterStart] Starting 4 Engines with MPI\r\n" 34 | ] 35 | }, 36 | { 37 | "output_type": "stream", 38 | "stream": "stdout", 39 | "text": [ 40 | "2012-11-05 20:32:33.239 [IPClusterStart] Engines appear to have started successfully\r\n" 41 | ] 42 | } 43 | ], 44 | "prompt_number": "*" 45 | }, 46 | { 47 | "cell_type": "code", 48 | "collapsed": false, 49 | "input": [ 50 | "!ipcluster stop" 51 | ], 52 | "language": "python", 53 | "metadata": {}, 54 | "outputs": [ 55 | { 56 | "output_type": "stream", 57 | "stream": "stdout", 58 | "text": [ 59 | "2012-11-05 20:31:38,344.344 [IPClusterStop] Using existing profile dir: u'/Users/aterrel/.ipython/profile_default'\r\n" 60 | ] 61 | }, 62 | { 63 | "output_type": "stream", 64 | "stream": "stdout", 65 | "text": [ 66 | "2012-11-05 20:31:38.381 [IPClusterStop] Cluster [pid=32847] is not running.\r\n", 67 | "2012-11-05 20:31:38.382 [IPClusterStop] Removing pid file: /Users/aterrel/.ipython/profile_default/pid/ipcluster.pid\r\n" 68 | ] 69 | } 70 | ], 71 | "prompt_number": 2 72 | }, 73 | { 74 | "cell_type": "code", 75 | "collapsed": false, 
76 | "input": [], 77 | "language": "python", 78 | "metadata": {}, 79 | "outputs": [] 80 | } 81 | ], 82 | "metadata": {} 83 | } 84 | ] 85 | } -------------------------------------------------------------------------------- /notebooks/files: -------------------------------------------------------------------------------- 1 | .. -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "reveal.js", 3 | "version": "2.5.0", 4 | "description": "The HTML Presentation Framework", 5 | "homepage": "http://lab.hakim.se/reveal-js", 6 | "subdomain": "revealjs", 7 | "scripts": { 8 | "test": "grunt jshint", 9 | "start": "" 10 | }, 11 | "author": { 12 | "name": "Hakim El Hattab", 13 | "email": "hakim.elhattab@gmail.com", 14 | "web": "http://hakim.se" 15 | }, 16 | "repository": { 17 | "type": "git", 18 | "url": "git://github.com/hakimel/reveal.js.git" 19 | }, 20 | "engines": { 21 | "node": "~0.8.0" 22 | }, 23 | "dependencies": { 24 | "underscore": "~1.3.3", 25 | "express": "~2.5.9", 26 | "mustache": "~0.4.0", 27 | "socket.io": "~0.9.13" 28 | }, 29 | "devDependencies": { 30 | "grunt-contrib-jshint": "~0.2.0", 31 | "grunt-contrib-cssmin": "~0.4.1", 32 | "grunt-contrib-uglify": "~0.1.1", 33 | "grunt-contrib-watch": "~0.2.0", 34 | "grunt-contrib-sass": "~0.2.2", 35 | "grunt-contrib-connect": "~0.2.0", 36 | "grunt-zip": "~0.7.0", 37 | "grunt": "~0.4.0" 38 | }, 39 | "licenses": [ 40 | { 41 | "type": "MIT", 42 | "url": "https://github.com/hakimel/reveal.js/blob/master/LICENSE" 43 | } 44 | ] 45 | } 46 | -------------------------------------------------------------------------------- /pdf/01_Introducing_Python.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/01_Introducing_Python.pdf -------------------------------------------------------------------------------- /pdf/02_Speeding_Python.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/02_Speeding_Python.pdf -------------------------------------------------------------------------------- /pdf/03.1_Distributed_Computing.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/03.1_Distributed_Computing.pdf -------------------------------------------------------------------------------- /pdf/03_Scaling_Python.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/03_Scaling_Python.pdf -------------------------------------------------------------------------------- /pdf/04_yt_Introduction.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/04_yt_Introduction.pdf -------------------------------------------------------------------------------- /pdf/05_Data_Inspection_with_yt.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/05_Data_Inspection_with_yt.pdf -------------------------------------------------------------------------------- /pdf/06_Data_Objects_in_yt.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/06_Data_Objects_in_yt.pdf -------------------------------------------------------------------------------- /pdf/07_Derived_Fields_in_yt.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/07_Derived_Fields_in_yt.pdf -------------------------------------------------------------------------------- /pdf/08_Volume_Rendering_in_yt.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/08_Volume_Rendering_in_yt.pdf -------------------------------------------------------------------------------- /pdf/Appendix_00_Notebook_Tour.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/Appendix_00_Notebook_Tour.pdf -------------------------------------------------------------------------------- /pdf/Appendix_01_Resources.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/Appendix_01_Resources.pdf -------------------------------------------------------------------------------- /pdf/Appendix_02_PETSc4Py.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/Appendix_02_PETSc4Py.pdf -------------------------------------------------------------------------------- /pdf/Appendix_03_Launch_MPI_Engines.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/Appendix_03_Launch_MPI_Engines.pdf -------------------------------------------------------------------------------- /pdf/python-speed-cython-sc14.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyHPC/pyhpc-tutorial/ae73ddab456080d1fae92bbc81ca996c1191309d/pdf/python-speed-cython-sc14.pdf --------------------------------------------------------------------------------
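The library list earlier in this dump singles out mpi4py as the fastest and most complete MPI wrapper for Python. As a point of reference, the following minimal sketch is an editorial addition rather than a file in this repository: the file name `mpi_hello.py` is illustrative, and it assumes mpi4py is installed and the script is launched with, e.g., `mpiexec -n 4 python mpi_hello.py`.

```python
# mpi_hello.py -- minimal mpi4py sketch: each rank identifies itself, then the
# ranks are combined with a collective reduction.
from mpi4py import MPI

comm = MPI.COMM_WORLD          # communicator spanning every launched process
rank = comm.Get_rank()         # this process's id, 0 .. size-1
size = comm.Get_size()         # total number of MPI processes

print("Hello from rank %d of %d" % (rank, size))

# Sum the ranks across all processes; only the root (rank 0) receives the result.
total = comm.reduce(rank, op=MPI.SUM, root=0)
if rank == 0:
    print("Sum of ranks:", total)
```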
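Appendix_03 above only starts and stops the MPI-backed ipcluster. The sketch below, also an editorial addition, shows one way the running engines might then be used from a client session; it assumes the IPython 0.13-era `IPython.parallel` API that matches the notebook's log output (newer installations expose the same interface as the separate `ipyparallel` package) and that mpi4py is importable on every engine.

```python
# Connect to the engines launched with `ipcluster start --engines=MPI --n 4`
# and confirm that they see each other through a single MPI communicator.
from IPython.parallel import Client   # `from ipyparallel import Client` on newer installs

rc = Client()        # reads the default profile's connection file
view = rc[:]         # DirectView targeting all engines

def report_rank():
    """Return (rank, size) of this engine within MPI.COMM_WORLD."""
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    return comm.Get_rank(), comm.Get_size()

# With four MPI engines this should return ranks 0..3, each paired with size 4.
print(view.apply_sync(report_rank))
```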