├── .gitignore
├── .pre-commit-config.yaml
├── LICENSE
├── README.md
├── bilateral_filter.py
├── bilateral_grid.py
├── bilateral_grid_hdr.py
├── gaussian_filter_separate.py
├── image_bilinear_inpterpolation.py
├── image_transpose.py
└── images
    ├── bilateral_grid_hdr.jpg
    ├── cambridge.png
    ├── cambridge_smaller.png
    ├── cat.jpg
    ├── cat_96x64.jpg
    ├── happy_face.png
    ├── lenna_bw.png
    └── mountain.jpg

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.idea
.DS_Store

--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
exclude: ^build/
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
  rev: v4.1.0
  hooks:
  - id: trailing-whitespace
  - id: end-of-file-fixer

- repo: https://github.com/google/yapf
  rev: v0.32.0
  hooks:
  - id: yapf
    additional_dependencies: [toml]

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2022 Taichi Developers

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# (GPU) Image processing with Taichi

Every Python file in this repo is runnable if you `pip3 install -U taichi opencv-python`.

For example, if you run `python3 bilateral_grid_hdr.py`, you get the following UI:

![](images/bilateral_grid_hdr.jpg)

### Developer note: enforcing code format

We use the `pre-commit` Python package, which invokes `yapf` to automatically format Python code.

Usage:
1. Install `pre-commit`: `pip install pre-commit`.
2. Run code format: `pre-commit run -a`.
3. Install as pre-commit hook: `pre-commit install`.
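
### What a typical script looks like

The Python files in this repo share the same overall pattern: read an image with OpenCV, process it inside a Taichi kernel, then display or save the result. The sketch below illustrates that pattern with a hypothetical `invert` kernel; it is not one of the files listed above.

```python
import cv2
import taichi as ti

ti.init(arch=ti.gpu)

# Treat an (H, W, 3) OpenCV image as a 2D ndarray of 3-channel pixel vectors
img2d = ti.types.ndarray(element_dim=1)


@ti.kernel
def invert(img: img2d):
    # Visit every pixel in parallel and invert its color in place
    for i, j in ti.ndrange(img.shape[0], img.shape[1]):
        img[i, j] = (255 - img[i, j]).cast(ti.u8)


img = cv2.imread('images/cat.jpg')
invert(img)
cv2.imshow('inverted', img)
cv2.waitKey()
```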
--------------------------------------------------------------------------------
/bilateral_filter.py:
--------------------------------------------------------------------------------
import cv2
import taichi as ti
import taichi.math as tm

ti.init(arch=ti.gpu, debug=True)

img_filtered = ti.Vector.field(3, dtype=ti.u8, shape=(1024, 1024))

img2d = ti.types.ndarray(element_dim=1)


@ti.kernel
def bilateral_filter(img: img2d, sigma_s: ti.f32, sigma_r: ti.f32):
    n, m = img.shape[0], img.shape[1]

    blur_radius_s = ti.ceil(sigma_s * 3, int)

    for i, j in ti.ndrange(n, m):
        k_begin, k_end = ti.max(0, i - blur_radius_s), ti.min(n, i + blur_radius_s + 1)
        l_begin, l_end = ti.max(0, j - blur_radius_s), ti.min(m, j + blur_radius_s + 1)

        total_rgb = tm.vec3(0.0)
        total_weight = 0.0
        for k, l in ti.ndrange((k_begin, k_end), (l_begin, l_end)):
            # Spatial (distance) term plus range (color difference) term
            dist = ((i - k)**2 + (j - l)**2) / sigma_s**2 + (
                img[i, j].cast(ti.f32) - img[k, l]).norm_sqr() / sigma_r**2
            # No need to compute Gaussian coeffs here since we normalize in the end anyway
            w = ti.exp(-0.5 * dist)
            total_rgb += img[k, l] * w
            total_weight += w

        img_filtered[i, j] = (total_rgb / total_weight).cast(ti.u8)

    for i, j in ti.ndrange(n, m):
        img[i, j] = img_filtered[i, j]


img = cv2.imread('images/happy_face.png')
cv2.imshow('input', img)
bilateral_filter(img, 6, 30)
cv2.imshow('Bilateral filtered', img)
cv2.waitKey()

--------------------------------------------------------------------------------
/bilateral_grid.py:
--------------------------------------------------------------------------------
import cv2
import taichi as ti
import taichi.math as tm
import numpy as np

ti.init(arch=ti.gpu, debug=True)

grid = ti.Vector.field(2, dtype=ti.f32, shape=(512, 512, 128))
grid_blurred = ti.Vector.field(2, dtype=ti.f32, shape=(512, 512, 128))
weights = ti.field(dtype=ti.f32, shape=(2, 512), offset=(0, -256))


@ti.func
def compute_weights(i, radius, sigma):
    total = 0.0

    # Not much computation here - serialize the for loop to save two more GPU kernel launch costs
    ti.loop_config(serialize=True)
    for j in range(-radius, radius + 1):
        # Drop the normal distribution constant coefficients since we need to normalize later anyway
        val = ti.exp(-0.5 * (j / sigma)**2)
        weights[i, j] = val
        total += val

    ti.loop_config(serialize=True)
    for j in range(-radius, radius + 1):
        weights[i, j] /= total


@ti.func
def sample_grid_spatial(i, j, k):
    g = ti.static(grid_blurred)  # Create an alias
    mix_i_0 = tm.mix(g[int(i), int(j), k], g[int(i) + 1, int(j), k],
                     tm.fract(i))
    mix_i_1 = tm.mix(g[int(i), int(j) + 1, k], g[int(i) + 1, int(j) + 1, k],
                     tm.fract(i))
    return tm.mix(mix_i_0, mix_i_1, tm.fract(j))


@ti.func
def sample_grid(i, j, k):
    return tm.mix(sample_grid_spatial(i, j, int(k)),
                  sample_grid_spatial(i, j, int(k) + 1), tm.fract(k))


@ti.kernel
def bilateral_filter(img: ti.types.ndarray(), s_s: ti.i32, s_r: ti.i32,
                     sigma_s: ti.f32, sigma_r: ti.f32):
    # Reset the grid
    grid.fill(0)
    grid_blurred.fill(0)
    for i, j in ti.ndrange(img.shape[0], img.shape[1]):
        lum = img[i, j]
        grid[ti.round(i / s_s, ti.i32),
             ti.round(j / s_s, ti.i32),
             ti.round(lum / s_r, ti.i32)] += tm.vec2(lum, 1)

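    # Precompute 1D Gaussian weights: row 0 of `weights` is used by the two
    # spatial blur passes below, row 1 by the luminance (range) pass.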
    compute_weights(0, ti.ceil(sigma_s * 3, int), sigma_s)
    compute_weights(1, ti.ceil(sigma_r * 3, int), sigma_r)

    # Grid processing (blur)
    grid_n, grid_m = (img.shape[0] + s_s - 1) // s_s, (img.shape[1] + s_s - 1) // s_s
    grid_l = (255 + s_r - 1) // s_r
    blur_radius = ti.ceil(sigma_s * 3, int)

    # Since grids store affine attributes, no need to normalize in the following three loops (will normalize in slicing anyway)
    for i, j, k in ti.ndrange(grid_n, grid_m, grid_l):
        l_begin, l_end = ti.max(0, i - blur_radius), ti.min(grid_n, i + blur_radius + 1)
        total = tm.vec2(0, 0)
        for l in range(l_begin, l_end):
            total += grid[l, j, k] * weights[0, i - l]

        grid_blurred[i, j, k] = total

    for i, j, k in ti.ndrange(grid_n, grid_m, grid_l):
        l_begin, l_end = ti.max(0, j - blur_radius), ti.min(grid_m, j + blur_radius + 1)
        total = tm.vec2(0, 0)
        for l in range(l_begin, l_end):
            total += grid_blurred[i, l, k] * weights[0, j - l]

        grid[i, j, k] = total

    blur_radius = ti.ceil(sigma_r * 3, int)
    for i, j, k in ti.ndrange(grid_n, grid_m, grid_l):
        l_begin, l_end = ti.max(0, k - blur_radius), ti.min(grid_l, k + blur_radius + 1)
        total = tm.vec2(0, 0)
        for l in range(l_begin, l_end):
            total += grid[i, j, l] * weights[1, k - l]

        grid_blurred[i, j, k] = total

    # Slicing
    for i, j in ti.ndrange(img.shape[0], img.shape[1]):
        lum = img[i, j]
        sample = sample_grid(i / s_s, j / s_s, lum / s_r)
        img[i, j] = ti.u8(sample[0] / sample[1])


src = cv2.imread('images/mountain.jpg')[:, :].copy()

gui_res = 512
gui = ti.GUI('Bilateral Grid', gui_res)
s_s = gui.slider('s_s', 4, 50)
sigma_s = gui.slider('sigma_s', 0.1, 5)
s_r = gui.slider('s_r', 4, 32)
sigma_r = gui.slider('sigma_r', 0.1, 5)

s_s.value = 16
s_r.value = 16

sigma_s.value = 1
sigma_r.value = 1

while gui.running and not gui.get_event(gui.ESCAPE):
    img = src.copy()
    channels = [img[:, :, c].copy() for c in range(3)]
    for c in range(3):
        bilateral_filter(channels[c], int(s_s.value), int(s_r.value),
                         sigma_s.value, sigma_r.value)
        img[:, :, c] = channels[c]
    img = img.swapaxes(0, 1)[:, ::-1, ::-1]
    img_padded = np.zeros(dtype=np.uint8, shape=(gui_res, gui_res, 3))
    img_padded[:img.shape[0], :img.shape[1]] = img
    gui.set_image(img_padded)
    gui.show()

--------------------------------------------------------------------------------
/bilateral_grid_hdr.py:
--------------------------------------------------------------------------------
import cv2
import taichi as ti
import taichi.math as tm
import numpy as np

ti.init(arch=ti.gpu, debug=True)

grid = ti.Vector.field(2, dtype=ti.f32, shape=(512, 512, 128))
grid_blurred = ti.Vector.field(2, dtype=ti.f32, shape=(512, 512, 128))
weights = ti.field(dtype=ti.f32, shape=(2, 512), offset=(0, -256))


@ti.func
def compute_weights(i, radius, sigma):
    total = 0.0

    # Not much computation here - serialize the for loop to save two more GPU kernel launch costs
    ti.loop_config(serialize=True)
    for j in range(-radius, radius + 1):
        # Drop the normal distribution constant coefficients since we need to normalize later anyway
        val = ti.exp(-0.5 * (j / sigma)**2)
        weights[i, j] = val
        total += val

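    # Second serialized pass: normalize the weights so they sum to 1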
    ti.loop_config(serialize=True)
    for j in range(-radius, radius + 1):
        weights[i, j] /= total


@ti.func
def sample_grid_spatial(i, j, k):
    g = ti.static(grid_blurred)  # Create an alias
    mix_i_0 = tm.mix(g[int(i), int(j), k], g[int(i) + 1, int(j), k],
                     tm.fract(i))
    mix_i_1 = tm.mix(g[int(i), int(j) + 1, k], g[int(i) + 1, int(j) + 1, k],
                     tm.fract(i))
    return tm.mix(mix_i_0, mix_i_1, tm.fract(j))


@ti.func
def sample_grid(i, j, k):
    return tm.mix(sample_grid_spatial(i, j, int(k)),
                  sample_grid_spatial(i, j, int(k) + 1), tm.fract(k))


log_luminance_scale = 16


@ti.func
def log_luminance(c):
    lum = 0.2126 * c[0] + 0.7152 * c[1] + 0.0722 * c[2]
    return ti.max(
        ti.min((ti.log(lum) / ti.log(2) * log_luminance_scale) + 256, 256), 0)


img2d = ti.types.ndarray(dtype=ti.math.vec3, ndim=2)


@ti.kernel
def bilateral_filter(img: img2d, s_s: ti.i32, s_r: ti.i32, sigma_s: ti.f32,
                     sigma_r: ti.f32, exposure: ti.f32, blend: ti.f32,
                     gamma: ti.f32, alpha: ti.f32, beta: ti.f32):
    # Reset the grid
    grid.fill(0)
    grid_blurred.fill(0)

    for i, j in ti.ndrange(img.shape[0], img.shape[1]):
        l = log_luminance(img[i, j])
        grid[ti.round(i / s_s, ti.i32),
             ti.round(j / s_s, ti.i32),
             ti.round(l / s_r, ti.i32)] += tm.vec2(l, 1)

    compute_weights(0, ti.ceil(sigma_s * 3, int), sigma_s)
    compute_weights(1, ti.ceil(sigma_r * 3, int), sigma_r)

    # Grid processing (blur)
    grid_n, grid_m = (img.shape[0] + s_s - 1) // s_s, (img.shape[1] + s_s - 1) // s_s
    grid_l = (255 + s_r - 1) // s_r
    blur_radius = ti.ceil(sigma_s * 3, int)

    # Since grids store affine attributes, no need to normalize in the following three loops (will normalize in slicing anyway)
    for i, j, k in ti.ndrange(grid_n, grid_m, grid_l):
        l_begin, l_end = ti.max(0, i - blur_radius), ti.min(grid_n, i + blur_radius + 1)
        total = tm.vec2(0, 0)
        for l in range(l_begin, l_end):
            total += grid[l, j, k] * weights[0, i - l]

        grid_blurred[i, j, k] = total

    for i, j, k in ti.ndrange(grid_n, grid_m, grid_l):
        l_begin, l_end = ti.max(0, j - blur_radius), ti.min(grid_m, j + blur_radius + 1)
        total = tm.vec2(0, 0)
        for l in range(l_begin, l_end):
            total += grid_blurred[i, l, k] * weights[0, j - l]

        grid[i, j, k] = total

    blur_radius = ti.ceil(sigma_r * 3, int)
    for i, j, k in ti.ndrange(grid_n, grid_m, grid_l):
        l_begin, l_end = ti.max(0, k - blur_radius), ti.min(grid_l, k + blur_radius + 1)
        total = tm.vec2(0, 0)
        for l in range(l_begin, l_end):
            total += grid[i, j, l] * weights[1, k - l]

        grid_blurred[i, j, k] = total

    # Slicing
    for i, j in ti.ndrange(img.shape[0], img.shape[1]):
        l = log_luminance(img[i, j])
        sample = sample_grid(i / s_s, j / s_s, l / s_r)
        base = sample[0] / sample[1]
        detail = l - base
        final_log_lum = alpha * base + beta + detail

        linear_scale = ti.pow(2, (final_log_lum - l) / log_luminance_scale)

        ldr = tm.mix(img[i, j], img[i, j] * linear_scale, blend)
        ldr = ti.min(1.0, ldr * 2**exposure)**(1 / gamma)
        img[i, j] = ldr


src = cv2.imread('images/cambridge_smaller.png').astype(np.float32) / (2**10)
src = src.swapaxes(0, 1)[:, ::-1, ::-1].copy()

gui_res = (src.shape[0] + 200, src.shape[1])
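# The canvas is 200 pixels larger than the image along its first axis; the
# processed image is copied into a zero-filled buffer of this size before display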
gui = ti.GUI('Bilateral Grid HDR', gui_res)
s_s = gui.slider('s_s', 4, 50)
sigma_s = gui.slider('sigma_s', 0.1, 5)
s_r = gui.slider('s_r', 4, 32)
sigma_r = gui.slider('sigma_r', 0.1, 5)

exposure = gui.slider('exposure value', -8, 8)
blend = gui.slider('blend', 0, 1)
gamma = gui.slider('gamma', 0.3, 3)

alpha = gui.slider('alpha', 0.1, 2)
beta = gui.slider('beta', -200, 200)

s_s.value = 16
s_r.value = 16

sigma_s.value = 1
sigma_r.value = 1
exposure.value = 1
blend.value = 1
gamma.value = 1

alpha.value = 0.5
beta.value = 125

while gui.running and not gui.get_event(gui.ESCAPE):
    img = src.copy()
    bilateral_filter(img, int(s_s.value), int(s_r.value), sigma_s.value,
                     sigma_r.value, exposure.value, blend.value, gamma.value,
                     alpha.value, beta.value)
    img_padded = np.zeros(dtype=np.float32, shape=(gui_res[0], gui_res[1], 3))
    img_padded[:img.shape[0], :img.shape[1]] = img
    gui.set_image(img_padded)
    gui.show()

--------------------------------------------------------------------------------
/gaussian_filter_separate.py:
--------------------------------------------------------------------------------
import cv2
import taichi as ti
import taichi.math as tm

ti.init(arch=ti.gpu)

img_blurred = ti.Vector.field(3, dtype=ti.u8, shape=(1024, 1024))
weights = ti.field(dtype=ti.f32, shape=1024, offset=-512)

img2d = ti.types.ndarray(element_dim=1)


@ti.func
def compute_weights(radius, sigma):
    total = 0.0

    # Not much computation here - serialize the for loop to save two more GPU kernel launch costs
    ti.loop_config(serialize=True)
    for i in range(-radius, radius + 1):
        # Drop the normal distribution constant coefficients since we need to normalize later anyway
        val = ti.exp(-0.5 * (i / sigma)**2)
        weights[i] = val
        total += val

    ti.loop_config(serialize=True)
    for i in range(-radius, radius + 1):
        weights[i] /= total


@ti.kernel
def gaussian_blur(img: img2d, sigma: ti.f32):
    img_blurred.fill(0)
    n, m = img.shape[0], img.shape[1]

    blur_radius = ti.ceil(sigma * 3, int)
    compute_weights(blur_radius, sigma)

    for i, j in ti.ndrange(n, m):
        l_begin, l_end = ti.max(0, i - blur_radius), ti.min(n, i + blur_radius + 1)
        total_rgb = tm.vec3(0.0)
        total_weight = 0.0
        for l in range(l_begin, l_end):
            w = weights[i - l]
            total_rgb += img[l, j] * w
            total_weight += w

        img_blurred[i, j] = (total_rgb / total_weight).cast(ti.u8)

    for i, j in ti.ndrange(n, m):
        l_begin, l_end = ti.max(0, j - blur_radius), ti.min(m, j + blur_radius + 1)
        total_rgb = tm.vec3(0.0)
        total_weight = 0.0
        for l in range(l_begin, l_end):
            w = weights[j - l]
            total_rgb += img_blurred[i, l] * w
            total_weight += w

        img[i, j] = (total_rgb / total_weight).cast(ti.u8)


img = cv2.imread('images/mountain.jpg')
cv2.imshow('input', img)
gaussian_blur(img, 10)
cv2.imshow('blurred', img)
cv2.waitKey()

--------------------------------------------------------------------------------
/image_bilinear_inpterpolation.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import taichi as ti
import taichi.math as tm
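# Upscale a small image by an integer factor using bilinear interpolation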

ti.init()

src = cv2.imread("images/cat_96x64.jpg")
h, w, c = src.shape
scale = 5
dst = np.zeros((h * scale, w * scale, c), dtype=src.dtype)

img2d = ti.types.ndarray(element_dim=1)


@ti.kernel
def bilinear_interp(src: img2d, dst: img2d):
    for I in ti.grouped(dst):
        x, y = I / scale
        x1, y1 = int(x), int(y)  # Bottom-left corner
        x2, y2 = ti.min(x1 + 1, h - 1), ti.min(y1 + 1, w - 1)  # Top-right corner
        Q11 = src[x1, y1]
        Q21 = src[x2, y1]
        Q12 = src[x1, y2]
        Q22 = src[x2, y2]
        R1 = tm.mix(Q11, Q21, x - x1)
        R2 = tm.mix(Q12, Q22, x - x1)
        dst[I] = ti.round(tm.mix(R1, R2, y - y1)).cast(ti.u8)


bilinear_interp(src, dst)
cv2.imwrite("cat_bilinear_interp.jpg", dst)

--------------------------------------------------------------------------------
/image_transpose.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import taichi as ti

ti.init()

src = cv2.imread("images/cat.jpg")
h, w, c = src.shape
dst = np.zeros((w, h, c), dtype=src.dtype)

img2d = ti.types.ndarray(element_dim=1)


@ti.kernel
def transpose(src: img2d, dst: img2d):
    for i, j in ti.ndrange(h, w):
        dst[j, i] = src[i, j]


transpose(src, dst)
cv2.imwrite("cat_transpose.jpg", dst)

--------------------------------------------------------------------------------
/images/bilateral_grid_hdr.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taichi-dev/image-processing-with-taichi/c68af21ff66bd0ce9cf00eab4b9e6d2c69305417/images/bilateral_grid_hdr.jpg

--------------------------------------------------------------------------------
/images/cambridge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taichi-dev/image-processing-with-taichi/c68af21ff66bd0ce9cf00eab4b9e6d2c69305417/images/cambridge.png

--------------------------------------------------------------------------------
/images/cambridge_smaller.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taichi-dev/image-processing-with-taichi/c68af21ff66bd0ce9cf00eab4b9e6d2c69305417/images/cambridge_smaller.png

--------------------------------------------------------------------------------
/images/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taichi-dev/image-processing-with-taichi/c68af21ff66bd0ce9cf00eab4b9e6d2c69305417/images/cat.jpg

--------------------------------------------------------------------------------
/images/cat_96x64.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taichi-dev/image-processing-with-taichi/c68af21ff66bd0ce9cf00eab4b9e6d2c69305417/images/cat_96x64.jpg

--------------------------------------------------------------------------------
/images/happy_face.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taichi-dev/image-processing-with-taichi/c68af21ff66bd0ce9cf00eab4b9e6d2c69305417/images/happy_face.png

--------------------------------------------------------------------------------
/images/lenna_bw.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taichi-dev/image-processing-with-taichi/c68af21ff66bd0ce9cf00eab4b9e6d2c69305417/images/lenna_bw.png

--------------------------------------------------------------------------------
/images/mountain.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/taichi-dev/image-processing-with-taichi/c68af21ff66bd0ce9cf00eab4b9e6d2c69305417/images/mountain.jpg
--------------------------------------------------------------------------------