├── images ├── img ├── LoG.jpg ├── Canny2.jpg ├── Edges.jpg ├── Sobel.jpg ├── bench.png ├── kirsch.png ├── nonmax.png ├── CannyRGB.jpg ├── CannyThr.jpg ├── Laplace.jpg ├── LoG_9Ker.gif ├── all_gpu.png ├── filters.png ├── gradient.jpg ├── kernels.jpg ├── kernels2.jpg ├── pipeline.png ├── prewitt.jpg ├── CannySigma.jpg ├── Gaussian_2.PNG ├── LoG_formula.jpg ├── LoG_graph.jpeg ├── Log_Filter.jpg ├── all_graph.jpeg ├── all_memory.jpeg ├── bench_time.png ├── derivatives.png ├── laplacian.jpg ├── zero_cross.jpg ├── BikesgrayFig3.jpg ├── CannyRGBPlot.jpeg ├── Laplace_sigma.jpg ├── all_graph_16.jpeg ├── all_graph_32.jpeg ├── alltypes_GPU.jpeg ├── bench_memory.png ├── canny_graph.jpeg ├── gradient_thr.jpg ├── robertsCross.png ├── sobel_graph.jpeg ├── Deriche_equation.png ├── Log_FilterSigma.jpg ├── bench_time_GPU.jpeg ├── canny_comparison.jpg ├── initial_signal.png ├── kernel_laplacian.jpg ├── log_comparison.jpg ├── prewitt_robert.jpg ├── sobel-kernels1.jpg ├── sobel_comparison.jpg ├── Canny_optimisation.PNG ├── EdgeDetectionUnit8.PNG ├── Lena-mexican5-us2.jpg ├── discrete_laplacian.jpg ├── schema_hysteresis.png ├── uint8EdgeDetection.PNG ├── EdgeDetectionFloat32.png ├── Lena-featureJ3-5-us2.jpg ├── canny_comparison_GPU.jpg ├── EdgeDetection_JS_WebGL2.PNG ├── Gaussian_First_Derivate.png ├── Canny_equation_maximization.png ├── prewitt_horizontal_kernel_in_tupple.PNG ├── Lena8bit_Lena-mexicanHat-5_Lena-FeatureJ2_Lena-FeatureJ3_Lena-myalgo.jpg └── Lena8bit_Lena-mexicanHat-5_Lena-FeatureJ2_Lena-FeatureJ3_Lena-myalgoCPU-Lena-myalgoGPU.png ├── .gitignore ├── edge.pdf ├── edge2.pdf ├── edge_thierry.pdf ├── samples ├── gel.gif ├── leaf.jpg ├── embryos.jpg ├── AuPbSn40.jpg ├── Dot_Blot.jpg ├── Cell_Colony.jpg └── for_benchmark │ ├── coins_1024.jpg │ ├── coins_128.jpg │ ├── coins_2048.jpg │ ├── coins_300.jpg │ └── coins_512.jpg ├── css ├── background.png └── times.css ├── .gitmodules ├── usefulRefs.md ├── usefulLinksForCoding.md ├── LoGBenchmarkRawWebGl.csv ├── How to convert to PDF ├── EdgeBenchmarkRawWebGL.csv ├── EdgeBenchmarkRawTiji.csv ├── examples ├── testLoG.js ├── testCanny.js ├── benchmark_Canny_GPU.html ├── benchmark_LoG_GPU.html ├── testLoG.html ├── testCanny.html ├── benchmarkForLOGGPU.js ├── benchmarkForTijiGPU.js ├── testEdge.js └── testEdge.html ├── src ├── process │ ├── fft.js │ ├── morphology.js │ ├── rankFilters.js │ ├── type.js │ ├── threshold.js │ ├── filters.js │ ├── hough.js │ ├── analyze.js │ ├── noise.js │ ├── statistics.js │ ├── utils.js │ ├── geometry.js │ ├── math.js │ └── color.js └── gpu │ ├── display.js │ ├── gpu_preprocess.js │ ├── index.js │ ├── gpu_color.js │ ├── gpu_statistics.js │ ├── gpu_utils.js │ ├── edge.js │ ├── gpu_constants.js │ ├── testLaplace.js │ ├── gpu_math.js │ ├── laplace.js │ └── Processor.js ├── benchmarkForTiji.js ├── benchmark.js ├── README.md ├── benchRGB.csv ├── edge3_bock.md ├── edge3_ostertag.md ├── edge3_thierry.md └── benchmark_results.csv /images/img: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | css/* 2 | javascripts/* 3 | *.*~ 4 | -------------------------------------------------------------------------------- /edge.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/edge.pdf 
-------------------------------------------------------------------------------- /edge2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/edge2.pdf -------------------------------------------------------------------------------- /images/LoG.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/LoG.jpg -------------------------------------------------------------------------------- /edge_thierry.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/edge_thierry.pdf -------------------------------------------------------------------------------- /images/Canny2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Canny2.jpg -------------------------------------------------------------------------------- /images/Edges.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Edges.jpg -------------------------------------------------------------------------------- /images/Sobel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Sobel.jpg -------------------------------------------------------------------------------- /images/bench.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/bench.png -------------------------------------------------------------------------------- /images/kirsch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/kirsch.png -------------------------------------------------------------------------------- /images/nonmax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/nonmax.png -------------------------------------------------------------------------------- /samples/gel.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/gel.gif -------------------------------------------------------------------------------- /samples/leaf.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/leaf.jpg -------------------------------------------------------------------------------- /css/background.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/css/background.png -------------------------------------------------------------------------------- /images/CannyRGB.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/CannyRGB.jpg -------------------------------------------------------------------------------- 
/images/CannyThr.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/CannyThr.jpg -------------------------------------------------------------------------------- /images/Laplace.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Laplace.jpg -------------------------------------------------------------------------------- /images/LoG_9Ker.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/LoG_9Ker.gif -------------------------------------------------------------------------------- /images/all_gpu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/all_gpu.png -------------------------------------------------------------------------------- /images/filters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/filters.png -------------------------------------------------------------------------------- /images/gradient.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/gradient.jpg -------------------------------------------------------------------------------- /images/kernels.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/kernels.jpg -------------------------------------------------------------------------------- /images/kernels2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/kernels2.jpg -------------------------------------------------------------------------------- /images/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/pipeline.png -------------------------------------------------------------------------------- /images/prewitt.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/prewitt.jpg -------------------------------------------------------------------------------- /samples/embryos.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/embryos.jpg -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "times"] 2 | path = times 3 | url = https://github.com/crazybiocomputing/times 4 | -------------------------------------------------------------------------------- /images/CannySigma.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/CannySigma.jpg -------------------------------------------------------------------------------- 
/images/Gaussian_2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Gaussian_2.PNG -------------------------------------------------------------------------------- /images/LoG_formula.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/LoG_formula.jpg -------------------------------------------------------------------------------- /images/LoG_graph.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/LoG_graph.jpeg -------------------------------------------------------------------------------- /images/Log_Filter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Log_Filter.jpg -------------------------------------------------------------------------------- /images/all_graph.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/all_graph.jpeg -------------------------------------------------------------------------------- /images/all_memory.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/all_memory.jpeg -------------------------------------------------------------------------------- /images/bench_time.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/bench_time.png -------------------------------------------------------------------------------- /images/derivatives.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/derivatives.png -------------------------------------------------------------------------------- /images/laplacian.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/laplacian.jpg -------------------------------------------------------------------------------- /images/zero_cross.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/zero_cross.jpg -------------------------------------------------------------------------------- /samples/AuPbSn40.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/AuPbSn40.jpg -------------------------------------------------------------------------------- /samples/Dot_Blot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/Dot_Blot.jpg -------------------------------------------------------------------------------- /images/BikesgrayFig3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/BikesgrayFig3.jpg 
-------------------------------------------------------------------------------- /images/CannyRGBPlot.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/CannyRGBPlot.jpeg -------------------------------------------------------------------------------- /images/Laplace_sigma.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Laplace_sigma.jpg -------------------------------------------------------------------------------- /images/all_graph_16.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/all_graph_16.jpeg -------------------------------------------------------------------------------- /images/all_graph_32.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/all_graph_32.jpeg -------------------------------------------------------------------------------- /images/alltypes_GPU.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/alltypes_GPU.jpeg -------------------------------------------------------------------------------- /images/bench_memory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/bench_memory.png -------------------------------------------------------------------------------- /images/canny_graph.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/canny_graph.jpeg -------------------------------------------------------------------------------- /images/gradient_thr.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/gradient_thr.jpg -------------------------------------------------------------------------------- /images/robertsCross.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/robertsCross.png -------------------------------------------------------------------------------- /images/sobel_graph.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/sobel_graph.jpeg -------------------------------------------------------------------------------- /samples/Cell_Colony.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/Cell_Colony.jpg -------------------------------------------------------------------------------- /images/Deriche_equation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Deriche_equation.png -------------------------------------------------------------------------------- /images/Log_FilterSigma.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Log_FilterSigma.jpg -------------------------------------------------------------------------------- /images/bench_time_GPU.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/bench_time_GPU.jpeg -------------------------------------------------------------------------------- /images/canny_comparison.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/canny_comparison.jpg -------------------------------------------------------------------------------- /images/initial_signal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/initial_signal.png -------------------------------------------------------------------------------- /images/kernel_laplacian.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/kernel_laplacian.jpg -------------------------------------------------------------------------------- /images/log_comparison.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/log_comparison.jpg -------------------------------------------------------------------------------- /images/prewitt_robert.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/prewitt_robert.jpg -------------------------------------------------------------------------------- /images/sobel-kernels1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/sobel-kernels1.jpg -------------------------------------------------------------------------------- /images/sobel_comparison.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/sobel_comparison.jpg -------------------------------------------------------------------------------- /images/Canny_optimisation.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Canny_optimisation.PNG -------------------------------------------------------------------------------- /images/EdgeDetectionUnit8.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/EdgeDetectionUnit8.PNG -------------------------------------------------------------------------------- /images/Lena-mexican5-us2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Lena-mexican5-us2.jpg -------------------------------------------------------------------------------- /images/discrete_laplacian.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/discrete_laplacian.jpg -------------------------------------------------------------------------------- /images/schema_hysteresis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/schema_hysteresis.png -------------------------------------------------------------------------------- /images/uint8EdgeDetection.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/uint8EdgeDetection.PNG -------------------------------------------------------------------------------- /images/EdgeDetectionFloat32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/EdgeDetectionFloat32.png -------------------------------------------------------------------------------- /images/Lena-featureJ3-5-us2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Lena-featureJ3-5-us2.jpg -------------------------------------------------------------------------------- /images/canny_comparison_GPU.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/canny_comparison_GPU.jpg -------------------------------------------------------------------------------- /images/EdgeDetection_JS_WebGL2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/EdgeDetection_JS_WebGL2.PNG -------------------------------------------------------------------------------- /images/Gaussian_First_Derivate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Gaussian_First_Derivate.png -------------------------------------------------------------------------------- /samples/for_benchmark/coins_1024.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/for_benchmark/coins_1024.jpg -------------------------------------------------------------------------------- /samples/for_benchmark/coins_128.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/for_benchmark/coins_128.jpg -------------------------------------------------------------------------------- /samples/for_benchmark/coins_2048.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/for_benchmark/coins_2048.jpg -------------------------------------------------------------------------------- /samples/for_benchmark/coins_300.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/for_benchmark/coins_300.jpg -------------------------------------------------------------------------------- /samples/for_benchmark/coins_512.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/samples/for_benchmark/coins_512.jpg -------------------------------------------------------------------------------- /images/Canny_equation_maximization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Canny_equation_maximization.png -------------------------------------------------------------------------------- /images/prewitt_horizontal_kernel_in_tupple.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/prewitt_horizontal_kernel_in_tupple.PNG -------------------------------------------------------------------------------- /images/Lena8bit_Lena-mexicanHat-5_Lena-FeatureJ2_Lena-FeatureJ3_Lena-myalgo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Lena8bit_Lena-mexicanHat-5_Lena-FeatureJ2_Lena-FeatureJ3_Lena-myalgo.jpg -------------------------------------------------------------------------------- /images/Lena8bit_Lena-mexicanHat-5_Lena-FeatureJ2_Lena-FeatureJ3_Lena-myalgoCPU-Lena-myalgoGPU.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bockp/Edge-Detection-project/HEAD/images/Lena8bit_Lena-mexicanHat-5_Lena-FeatureJ2_Lena-FeatureJ3_Lena-myalgoCPU-Lena-myalgoGPU.png -------------------------------------------------------------------------------- /usefulRefs.md: -------------------------------------------------------------------------------- 1 | not used in part1, but for next parts : 2 | 3 | [ecma2011] : ECMAScript EC. European Computer Manufacturers Association and others. ECMAScript language specification. 2011. 4 | 5 | [MAR2011] : Marrin C. Webgl specification. Khronos WebGL Working Group. 2011. 
6 | -------------------------------------------------------------------------------- /usefulLinksForCoding.md: -------------------------------------------------------------------------------- 1 | [style guide](https://github.com/airbnb/javascript) 2 | 3 | [Gaussian JS convolution kernel for inspiration](https://github.com/sidorares/gaussian-convolution-kernel/blob/master/index.js) 4 | 5 | [Wikipedia Kernel page, explaining how to design them and treat borders.](https://en.wikipedia.org/wiki/Kernel_(image_processing)) 6 | -------------------------------------------------------------------------------- /LoGBenchmarkRawWebGl.csv: -------------------------------------------------------------------------------- 1 | LoG_300 267 2 | LoG_300 215 3 | LoG_300 253 4 | LoG_300 229 5 | LoG_300 317 6 | LoG_300 1013 7 | LoG_300 199 8 | LoG_300 270 9 | LoG_300 196 10 | LoG_300 743 11 | LoG_512 1835 12 | LoG_512 241 13 | LoG_512 235 14 | LoG_512 197 15 | LoG_512 1140 16 | LoG_512 264 17 | LoG_512 334 18 | LoG_512 246 19 | LoG_512 253 20 | LoG_512 262 21 | LoG_1024 344 22 | LoG_1024 338 23 | LoG_1024 333 24 | LoG_1024 312 25 | LoG_1024 364 26 | LoG_1024 3974 27 | LoG_1024 443 28 | LoG_1024 512 29 | LoG_1024 616 30 | LoG_1024 2817 31 | LoG_2048 1136 32 | LoG_2048 18427 33 | LoG_2048 1126 34 | LoG_2048 18119 35 | LoG_2048 1542 36 | LoG_2048 8090 37 | LoG_2048 1130 38 | LoG_2048 18352 39 | LoG_2048 1171 40 | LoG_2048 18353 41 | -------------------------------------------------------------------------------- /How to convert to PDF: -------------------------------------------------------------------------------- 1 | 1. copy edge.md and the images folder to a local folder, 2 | 2. navigate to that folder in a terminal 3 | 3. > pandoc edge.md --latex-engine=xelatex -V margin-left=2.5cm -V margin-right=2.5cm -f markdown-implicit_figures -o edge.pdf 4 | 5 | * The --latex-engine=xelatex option changes the default LaTeX engine used to generate the PDF, as the default one can't handle accents. 6 | * The -V margin options allow us to set the margins to 2.5 cm (the default is closer to 5 or 6 cm). 7 | * The -f markdown-implicit_figures option suppresses XeLaTeX's default behaviour of automatically numbering the figures. 8 | 9 | 10 | This can be done on any computer in the CREMI. 11 | To use it on your own computer, you must first install Pandoc and the XeLaTeX engine. 12 | 13 | This procedure respects the automatic enumeration of references given in Taveau's email.
14 | -------------------------------------------------------------------------------- /EdgeBenchmarkRawWebGL.csv: -------------------------------------------------------------------------------- 1 | EdgeDetectionMethod 128px(ms) 300px(ms) 512px(ms) 1024px(ms) 2048px(ms) 2 | prewitt 50 43 49 62 166 3 | prewitt 48 36 51 57 257 4 | prewitt 48 55 49 73 214 5 | prewitt 49 43 51 62 154 6 | prewitt 52 55 62 56 241 7 | prewitt 65 45 47 79 141 8 | prewitt 40 81 54 55 293 9 | prewitt 48 67 44 88 253 10 | prewitt 47 45 62 49 133 11 | prewitt 56 38 43 69 192 12 | sobel 87 42 52 115 141 13 | sobel 51 58 57 66 142 14 | sobel 45 52 45 57 259 15 | sobel 40 50 51 72 140 16 | sobel 48 50 57 63 165 17 | sobel 49 47 60 73 306 18 | sobel 56 45 51 83 173 19 | sobel 45 50 50 56 272 20 | sobel 50 50 60 90 153 21 | sobel 49 51 72 59 125 22 | robert 60 32 37 90 90 23 | robert 67 31 47 41 118 24 | robert 41 34 48 47 145 25 | robert 41 41 42 63 74 26 | robert 34 39 68 51 97 27 | robert 37 61 46 52 195 28 | robert 42 35 38 86 80 29 | robert 35 33 40 53 143 30 | robert 41 37 36 73 143 31 | robert 35 51 41 48 96 32 | -------------------------------------------------------------------------------- /EdgeBenchmarkRawTiji.csv: -------------------------------------------------------------------------------- 1 | EdgeDetectionMethod 128px(ms) 300px(ms) 512px(ms) 1024px(ms) 2048px(ms) 2 | prewitt 6 19 69 254 1083 3 | prewitt 4 25 71 291 1113 4 | prewitt 5 20 97 249 1260 5 | prewitt 3 27 67 325 1121 6 | prewitt 2 17 116 262 1152 7 | prewitt 3 24 107 248 1301 8 | prewitt 6 24 71 233 1267 9 | prewitt 2 19 78 245 1215 10 | prewitt 2 38 77 275 1258 11 | prewitt 3 43 80 231 1167 12 | sobel 5 35 81 242 1107 13 | sobel 5 28 85 222 1147 14 | sobel 5 23 82 256 1323 15 | sobel 4 18 82 262 1330 16 | sobel 3 24 86 288 1351 17 | sobel 3 18 78 256 1430 18 | sobel 3 24 84 253 1225 19 | sobel 4 21 58 226 1593 20 | sobel 3 27 59 211 1114 21 | sobel 2 22 62 309 1062 22 | robert 9 30 59 273 1083 23 | robert 5 20 81 264 1084 24 | robert 4 22 79 272 1217 25 | robert 4 27 67 224 1091 26 | robert 4 23 63 295 1037 27 | robert 8 21 94 263 1083 28 | robert 5 16 104 238 1079 29 | robert 4 21 84 226 1185 30 | robert 4 30 65 222 1025 31 | robert 6 24 61 221 1089 32 | -------------------------------------------------------------------------------- /examples/testLoG.js: -------------------------------------------------------------------------------- 1 | let uint8_lena = lenna_256x256; 2 | let uint16_lena = lenna_256x256.map ( (px) => px * 256); 3 | let float_lena = lenna_256x256.map( (px) => px/256 ); 4 | 5 | //let type = 'uint16'; 6 | //let pix = uint16_lena; 7 | let W = 256; 8 | let H = 256; 9 | //128-105 , 300-246 , 512-420 , 1024-840 , 2048-1679, 4096-3360 10 | let img = new T.Image('uint8',W,H); 11 | img.setPixels(new Uint8Array(uint8_lena)); 12 | let gpuEnv = gpu.getGraphicsContext("preview"); 13 | gpuDisplay(img.getRaster(),gpuEnv); 14 | 15 | let img1 = new T.Image('uint8',W,H); 16 | img1.setPixels(new Uint8Array(uint8_lena)); 17 | let gpuEnv1 = gpu.getGraphicsContext("preview1"); 18 | gpuEdgeLaplace()(img1.getRaster(),gpuEnv1); 19 | 20 | let img2 = new T.Image('uint16',W,H); 21 | img2.setPixels(new Uint16Array(uint16_lena)); 22 | let gpuEnv2 = gpu.getGraphicsContext("preview2"); 23 | gpuEdgeLaplace()(img2.getRaster(),gpuEnv2); 24 | 25 | let img3 = new T.Image('float32',W,H); 26 | img3.setPixels(new Float32Array(float_lena)); 27 | let gpuEnv3 = gpu.getGraphicsContext("preview3"); 28 | gpuEdgeLaplace()(img3.getRaster(),gpuEnv3); 29 | 
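The width/height pairs in the comment above (128-105, 300-246, 512-420, 1024-840, 2048-1679) are the native dimensions of the samples/for_benchmark/coins_*.jpg rasters used by examples/benchmarkForTijiGPU.js. A minimal sketch, not part of the repository, of running the same Laplace pipeline on one of those samples, assuming the host page loads a coins_300 pixel array the same way it loads lenna_256x256:

// Sketch: apply gpuEdgeLaplace to the 300 x 246 coins sample, mirroring examples/benchmarkForTijiGPU.js.
// coins_300 and the "preview" container id are assumed to be provided by the host page.
let imgCoins = new T.Image('uint8', 300, 246);
imgCoins.setPixels(coins_300);
let gpuEnvCoins = gpu.getGraphicsContext("preview");
gpuEdgeLaplace()(imgCoins.getRaster(), gpuEnvCoins);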
-------------------------------------------------------------------------------- /examples/testCanny.js: -------------------------------------------------------------------------------- 1 | let uint8_lena = lenna_256x256; 2 | let uint16_lena = lenna_256x256.map ( (px) => px * 256); 3 | let float_lena = lenna_256x256.map( (px) => px/256 ); 4 | 5 | //let type = 'uint16'; 6 | //let pix = uint16_lena; 7 | let W = 256; 8 | let H = 256; 9 | //128-105 , 300-246 , 512-420 , 1024-840 , 2048-1679, 4096-3360 10 | let img = new T.Image('uint8',W,H); 11 | img.setPixels(new Uint8Array(uint8_lena)); 12 | let gpuEnv = gpu.getGraphicsContext("preview"); 13 | gpuDisplay(img.getRaster(),gpuEnv); 14 | 15 | let img1 = new T.Image('uint8',W,H); 16 | img1.setPixels(new Uint8Array(uint8_lena)); 17 | let gpuEnv1 = gpu.getGraphicsContext("preview1"); 18 | gpuEdgeCanny(50.0,100.0)(img1.getRaster(),gpuEnv1); 19 | 20 | let img2 = new T.Image('uint16',W,H); 21 | img2.setPixels(new Uint16Array(uint16_lena)); 22 | let gpuEnv2 = gpu.getGraphicsContext("preview2"); 23 | gpuEdgeCanny(50.0,100.0)(img2.getRaster(),gpuEnv2); 24 | 25 | let img3 = new T.Image('float32',W,H); 26 | img3.setPixels(new Float32Array(float_lena)); 27 | let gpuEnv3 = gpu.getGraphicsContext("preview3"); 28 | gpuEdgeCanny(50.0,100.0)(img3.getRaster(),gpuEnv3); 29 | 30 | -------------------------------------------------------------------------------- /src/process/fft.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use script'; 26 | 27 | /** 28 | * Hartley Fourier Transform 29 | * 30 | * @param {type} - 31 | * @return {type} - 32 | * 33 | * @author TODO 34 | */ 35 | const hft = function (img,copy=true) { 36 | let ouput = TRaster.from(img,copy); 37 | // TODO 38 | return output; 39 | } 40 | -------------------------------------------------------------------------------- /src/process/morphology.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 
16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use strict'; 26 | 27 | /** 28 | * Description: TODO 29 | * 30 | * @param {type} - 31 | * @return {type} - 32 | * 33 | * @author TODO 34 | */ 35 | const erode = function (img,copy=true) { 36 | let output = TRaster.from(img,copy); 37 | // TODO 38 | return output; 39 | } 40 | 41 | -------------------------------------------------------------------------------- /examples/benchmark_Canny_GPU.html: 1 | 2 | 3 | 4 | 5 | Benchmarks 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 |
33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /examples/benchmark_LoG_GPU.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Benchmarks 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 |
33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /examples/testLoG.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Test 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 |
35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /examples/testCanny.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Test 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 |
35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /src/process/rankFilters.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 26 | /** 27 | * @module rankFilters 28 | */ 29 | 30 | /** 31 | * Minimum filter 32 | * 33 | * @param {TRaster} kernel - Convolution mask 34 | * @param {TRaster} img - Input image to process 35 | * @param {boolean} copy - Copy mode to manage memory usage 36 | * @return {TRaster} - Filtered Image 37 | * 38 | * @author TODO 39 | */ 40 | const minimum = (kernel) => (img,copy=true) => { 41 | let ouput = TRaster.from(img,copy); 42 | // TODO 43 | return output; 44 | } 45 | -------------------------------------------------------------------------------- /src/process/type.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 26 | /** 27 | * @module type 28 | */ 29 | 30 | /** 31 | * Convert a RGBA to a uint8 image 32 | * 33 | * @example Conversion of a color image to a luminance gray image. 34 | * let gray8_img = toUint8(luminance)(img); 35 | * 36 | * @param {function} func - A converter function 37 | * @param {TRaster} img - Input image to process 38 | * @param {boolean} copy - Copy mode to manage memory usage 39 | * @returns {TRaster} - Uint8 Image (aka 8-bit image) 40 | * 41 | * @author TODO 42 | */ 43 | const toUint8 = (func) => (img,copy=true) => { 44 | let ouput = TRaster.from(img,copy); 45 | // TODO 46 | return output; 47 | } 48 | -------------------------------------------------------------------------------- /src/process/threshold.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 
4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | /** 26 | * Manual thresholding 27 | * 28 | * @param {number} value - Threshold value 29 | * @param {TRaster} img - Input gray-level image 30 | * @param {boolean} copy_mode - Boolean used to control the deep copy or not of the pixels data 31 | * @return {TRaster} - Binary output image with True = 0 (black) and False = 255 (white) pixels 32 | * 33 | * @author Jean-Christophe Taveau 34 | */ 35 | const threshold = (value) => (img,copy_mode = true) => { 36 | let output = TRaster.from(img,copy_mode); 37 | output.pixelData.forEach(px =>(px > value) ? 0 : 255); 38 | return output; 39 | }; 40 | 41 | /** 42 | * 43 | * 44 | * @param {type} - 45 | * @return {type} - 46 | * @author TODO 47 | */ 48 | const otsu = function (img,copy=true) { 49 | // TODO 50 | console.log(`otsu`); 51 | return TRaster.from(img,copy); 52 | } 53 | -------------------------------------------------------------------------------- /css/times.css: -------------------------------------------------------------------------------- 1 | .twindow,.gpu_window { 2 | background-color: #efefef; 3 | border: 2px solid black; 4 | border-radius: 10px 10px 0 10px; 5 | margin: 10px; 6 | padding: 2px; 7 | display:inline-block; 8 | text-align: left; 9 | font-family: 'Lato', sans-serif; 10 | position: absolute; 11 | left: 0px; 12 | top: 0px; 13 | z-index: 1; 14 | } 15 | 16 | .twindow header { 17 | background-color:#666666 ; 18 | margin: 0px; 19 | padding: 5px 10px 5px 10px ; 20 | border-top-left-radius: 10px; 21 | border-top-right-radius: 10px; 22 | -moz-border-top-left-radius: 10px; 23 | -moz-border-top-right-radius: 10px; 24 | color: #FFFFFF; 25 | /*font-family: 'Walter Turncoat', cursive;*/ 26 | } 27 | 28 | .gpu_window header { 29 | background-color:#ff2222 ; 30 | margin: 0px; 31 | padding: 5px 10px 5px 10px ; 32 | border-top-left-radius: 10px; 33 | border-top-right-radius: 10px; 34 | -moz-border-top-left-radius: 10px; 35 | -moz-border-top-right-radius: 10px; 36 | color: #FFFFFF; 37 | /*font-family: 'Walter Turncoat', cursive;*/ 38 | } 39 | 40 | .twindow header:hover { 41 | background-color: #333334; 42 | cursor: move; 43 | } 44 | 45 | .twindow header label { 46 | -webkit-user-select: none; /* Chrome all / Safari all */ 47 | -moz-user-select: none; /* Firefox all */ 48 | -ms-user-select: none; /* IE 10+ */ 49 | user-select: none; /* Likely future */ 50 | } 51 | 52 | .twindow canvas { 53 | background-image: url("../css/background.png"); 54 | } 55 | .gpu_window canvas { 56 | background-image: url("../css/background.png"); 57 | } 58 | 59 | .twindow footer { 60 | font-size : 0.6em; 61 | margin: 0px; 62 | padding: 0px; 63 | /* Required for text-overflow */ 64 | white-space: nowrap; 65 | overflow: hidden; 66 | text-overflow: ellipsis; 67 | 68 | } 69 | 70 | 71 | 
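A note on src/process/threshold.js above: the forEach callback returns a value that forEach discards, so the returned raster comes back unmodified. A minimal corrected sketch, assuming pixelData is a typed array as set via img.setPixels() in the examples; the name thresholdFixed is ours, not the library's:

// Sketch only: same curried signature as threshold() in threshold.js, but writing the result back in place.
const thresholdFixed = (value) => (img, copy_mode = true) => {
  let output = TRaster.from(img, copy_mode);
  output.pixelData.forEach((px, i, arr) => {
    arr[i] = (px > value) ? 0 : 255; // True = 0 (black), False = 255 (white), as documented
  });
  return output;
};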
-------------------------------------------------------------------------------- /src/process/filters.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | /** 26 | * @module filters 27 | */ 28 | 29 | /** 30 | * Convolve operation 31 | * 32 | * @param {TRaster} kernel - Convolution mask 33 | * @param {TRaster} img - Input image to process 34 | * @param {boolean} copy - Copy mode to manage memory usage 35 | * @return {TRaster} - Filtered Image 36 | * 37 | * @author TODO 38 | */ 39 | const convolve = (kernel) => (img,copy=true) => { 40 | let ouput = TRaster.from(img,copy); 41 | // TODO 42 | return output; 43 | } 44 | 45 | /** 46 | * Gaussian Blur Filter 47 | * 48 | * @param {TRaster} kernel - Convolution mask 49 | * @param {TRaster} img - Input image to process 50 | * @param {boolean} copy - Copy mode to manage memory usage 51 | * @return {TRaster} - Filtered Image 52 | * 53 | * @author TODO 54 | */ 55 | const gaussBlur = (kernel) => (img,copy=true) => { 56 | let ouput = TRaster.from(img,copy); 57 | // TODO 58 | return output; 59 | } 60 | -------------------------------------------------------------------------------- /benchmarkForTiji.js: -------------------------------------------------------------------------------- 1 | //Author : Cecilia Ostertag 2 | 3 | function benchmark(img,timeList) 4 | { 5 | var startTime,endTime,time,memory; 6 | 7 | startTime = Date.now(); 8 | func = prewitt(); 9 | func(img.getRaster()); 10 | endTime = Date.now(); 11 | time = endTime - startTime; 12 | //memory currently being used by ImageJ 13 | timeList.push(time); 14 | //memoryList.push(memory/1048576);//convert bytes to MB 15 | } 16 | 17 | function runBenchmark(pixels,dim) 18 | { 19 | console.log("Front loading...\n"); 20 | //------------------------------------FRONT-LOADING------------------------------ 21 | 22 | //front-load 5 times without saving the returned timing 23 | var timeList=[],memoryList=[]; 24 | for(var i = 0; i<5; i++){ 25 | 26 | let img = new T.Image('uint8',dim,dim); 27 | img.setPixels(pixels); 28 | benchmark(img,timeList); 29 | 30 | if (i===0) 31 | { 32 | console.log("first results :\n"); 33 | console.log("time: "+timeList[i]); 34 | console.log("memory: "+memoryList[i]); 35 | } 36 | 37 | } 38 | 39 | //------------------------------------BENCHMARKING------------------------------ 40 | 41 | 42 | timeList = []; 43 | memoryLsit = []; 44 | var loops = 10; 45 | for(var j = 0; j < loops; j++){ 46 | console.log("j=",j); 47 | let img = new T.Image('uint8',dim,dim); 48 | img.setPixels(pixels); 49 | benchmark(img,timeList); 50 | } 51 | 52 | console.log("End of benchmark\n"); 53 | 
console.log("prewitt_"+dim+"\t"+timeList.join("\nprewitt_"+dim+"\t")); 54 | } 55 | 56 | // Main program 57 | 58 | imgList=[Lenna_128,Lenna_256,Lenna_512,Lenna_1024,Lenna_2048]; 59 | dimsList=[128,256,512,1024,2048]; 60 | 61 | //LoG(9,1.4) , canny(10.0,40.0,2.0) 62 | 63 | 64 | for (var i=0; i. 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | /** 26 | * @module hough 27 | */ 28 | 29 | 30 | /** 31 | * Linear Hough Transform 32 | * 33 | * @param {type} params - Parameters 34 | * @param {TRaster} img - Input image to process 35 | * @param {boolean} copy - Copy mode to manage memory usage 36 | * @return {TRaster} - Image corresponding to the accumulator 37 | * 38 | * @author TODO 39 | */ 40 | const houghLinear = (params) => (img,copy=true) => { 41 | let ouput = TRaster.from(img,copy); 42 | // TODO 43 | return output; 44 | } 45 | 46 | /** 47 | * Circular Hough Transform 48 | * 49 | * @param {type} params - Parameters 50 | * @param {TRaster} img - Input image to process 51 | * @param {boolean} copy - Copy mode to manage memory usage 52 | * @return {TRaster} - Image corresponding to the accumulator 53 | * 54 | * @author TODO 55 | */ 56 | const houghCircle = (params) => (img,copy=true) => { 57 | let ouput = TRaster.from(img,copy); 58 | // TODO 59 | return output; 60 | } 61 | -------------------------------------------------------------------------------- /examples/benchmarkForTijiGPU.js: -------------------------------------------------------------------------------- 1 | //Author : Cecilia Ostertag 2 | 3 | function benchmark(img,timeList,gpuEnv) 4 | { 5 | var startTime,endTime,time; 6 | 7 | startTime = Date.now(); 8 | func = gpuEdgeCanny(50,100); 9 | func(img.getRaster(),gpuEnv); 10 | endTime = Date.now(); 11 | time = endTime - startTime; 12 | timeList.push(time); 13 | } 14 | 15 | function runBenchmark(pixels,W,H,gpuEnv) 16 | { 17 | console.log("Front loading...\n"); 18 | //------------------------------------FRONT-LOADING------------------------------ 19 | 20 | //front-load 5 times without saving the returned timing 21 | var timeList=[],memoryList=[]; 22 | for(var i = 0; i<5; i++){ 23 | 24 | let img = new T.Image('uint8',W,H); 25 | img.setPixels(pixels); 26 | benchmark(img,timeList,gpuEnv); 27 | 28 | if (i===0) 29 | { 30 | console.log("first results :\n"); 31 | console.log("time: "+timeList[i]); 32 | console.log("memory: "+memoryList[i]); 33 | } 34 | 35 | } 36 | 37 | //------------------------------------BENCHMARKING------------------------------ 38 | 39 | 40 | timeList = []; 41 | memoryLsit = []; 42 | var loops = 10; 43 | for(var j = 0; j < loops; j++){ 44 | console.log("j=",j); 45 | let img = new T.Image('uint8',W,H); 46 | img.setPixels(pixels); 47 | benchmark(img,timeList,gpuEnv); 48 | } 49 | 50 | console.log("End of benchmark\n"); 51 | console.log("canny_"+W+"\t"+timeList.join("\ncanny_"+W+"\t")); 52 | } 53 | 54 | // Main program 55 | 56 | let imgList=[coins_128,coins_128,coins_300,coins_300,coins_512,coins_512,coins_1024,coins_1024,coins_2048,coins_2048]; 57 | let dimsList=[128,105,300,246,512,420,1024,840,2048,1679]; 58 | let gpuEnv = gpu.getGraphicsContext("preview"); 59 | 60 | for (var i=0; i. 
19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | /** 26 | * @module analyze 27 | */ 28 | 29 | /** 30 | * Label an image as a set of Regions Of Interest (ROI) 31 | * 32 | * @param {TRaster} img - Input image 33 | * @param {boolean} copy - Copy mode 34 | * @return {type} A set of Regions Of Interest (ROI) 35 | * @author TODO 36 | */ 37 | const labelling = function (img,copy=true) { 38 | // TODO 39 | console.log(`labelling`); 40 | return TRaster.from(img,copy); 41 | } 42 | 43 | /** 44 | * Measure a set of Regions Of Interest (ROI) 45 | * 46 | * @param {type} params - Measurements Parameters (eg. Area, Centroid) 47 | * @param {type} roiset - A set of ROIs 48 | * @param {boolean} copy - Useless. Just here for compatibility 49 | * @return {type} Measurements and/or result image 50 | * @author TODO 51 | */ 52 | const measure = function (params) { 53 | return function (roiset,copy=true) { 54 | // TODO 55 | console.log(`measure ${params}`); 56 | return new TMeasurements(); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /examples/testEdge.js: -------------------------------------------------------------------------------- 1 | let uint8_lena = lenna_256x256; 2 | let uint16_lena = lenna_256x256.map ( (px) => px * 256); 3 | let float_lena = lenna_256x256.map( (px) => px/256 ); 4 | 5 | let W = 256; 6 | let H = 256; 7 | let img = new T.Image('uint8',W,H); 8 | img.setPixels(new Uint8Array(uint8_lena)); 9 | let gpuEnv = gpu.getGraphicsContext("preview"); 10 | gpuDisplay(img.getRaster(),gpuEnv); 11 | 12 | 13 | 14 | let img1S = new T.Image('uint8',W,H); 15 | img1S.setPixels(new Uint8Array(uint8_lena)); 16 | let gpuEnv1S = gpu.getGraphicsContext("preview1S"); 17 | gpuEdgeSobel()(img1S.getRaster(),gpuEnv1S); 18 | 19 | let img2S = new T.Image('uint16',W,H); 20 | img2S.setPixels(new Uint16Array(uint16_lena)); 21 | let gpuEnv2S = gpu.getGraphicsContext("preview2S"); 22 | gpuEdgeSobel()(img2S.getRaster(),gpuEnv2S); 23 | 24 | let img3S = new T.Image('float32',W,H); 25 | img3S.setPixels(new Float32Array(float_lena)); 26 | let gpuEnv3S = gpu.getGraphicsContext("preview3S"); 27 | gpuEdgeSobel()(img3S.getRaster(),gpuEnv3S); 28 | 29 | 30 | 31 | let img1P = new T.Image('uint8',W,H); 32 | img1P.setPixels(new Uint8Array(uint8_lena)); 33 | let gpuEnv1P = gpu.getGraphicsContext("preview1P"); 34 | gpuEdgePrewitt()(img1P.getRaster(),gpuEnv1P); 35 | 36 | let img2P = new T.Image('uint16',W,H); 37 | img2P.setPixels(new Uint16Array(uint16_lena)); 38 | let gpuEnv2P = gpu.getGraphicsContext("preview2P"); 39 | gpuEdgePrewitt()(img2P.getRaster(),gpuEnv2P); 40 | 41 | let img3P = new T.Image('float32',W,H); 42 | img3P.setPixels(new Float32Array(float_lena)); 43 | let gpuEnv3P = gpu.getGraphicsContext("preview3P"); 44 | gpuEdgePrewitt()(img3P.getRaster(),gpuEnv3P); 45 | 46 | 47 | 48 | let img1R = new T.Image('uint8',W,H); 49 | img1R.setPixels(new Uint8Array(uint8_lena)); 50 | let gpuEnv1R = gpu.getGraphicsContext("preview1R"); 51 | gpuEdgeRobert()(img1R.getRaster(),gpuEnv1R); 52 | 53 | let img2R = new T.Image('uint16',W,H); 54 | img2R.setPixels(new Uint16Array(uint16_lena)); 55 | let gpuEnv2R = gpu.getGraphicsContext("preview2R"); 56 | gpuEdgeRobert()(img2R.getRaster(),gpuEnv2R); 57 | 58 | let img3R = new T.Image('float32',W,H); 59 | img3R.setPixels(new Float32Array(float_lena)); 60 | let gpuEnv3R = gpu.getGraphicsContext("preview3R"); 61 | gpuEdgeRobert()(img3R.getRaster(),gpuEnv3R); 62 | 
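The example scripts testLoG.js, testCanny.js and testEdge.js above all repeat one pattern per pixel type: build a T.Image, upload the pixels, fetch a graphics context by container id, and apply a gpuEdge* operator. A condensed sketch of that shared pattern; the helper name runEdgeOn is hypothetical, and the preview* container ids are assumed to exist in the host page, as in testEdge.html:

// Sketch of the pattern shared by the example scripts; not part of the repository.
const runEdgeOn = (type, pixels, containerId, edgeOp) => {
  let image = new T.Image(type, 256, 256); // the examples use 256 x 256 Lena data
  image.setPixels(pixels);
  let env = gpu.getGraphicsContext(containerId);
  edgeOp(image.getRaster(), env); // e.g. gpuEdgeSobel(), gpuEdgePrewitt(), gpuEdgeRobert()
};

runEdgeOn('uint8', new Uint8Array(uint8_lena), "preview1S", gpuEdgeSobel());
runEdgeOn('float32', new Float32Array(float_lena), "preview3P", gpuEdgePrewitt());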
-------------------------------------------------------------------------------- /examples/testEdge.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Edge Test 6 | 7 | 8 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 |
56 | 57 | 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /src/gpu/display.js: -------------------------------------------------------------------------------- 1 | 2 | 3 | const gpuDisplay = (raster, graphContext, copy_mode = true) => 4 | { 5 | let id='display' 6 | let src_vs = `#version 300 es 7 | 8 | in vec2 a_vertex; 9 | in vec2 a_texCoord; 10 | 11 | uniform vec2 u_resolution; 12 | 13 | out vec2 v_texCoord; 14 | 15 | void main() { 16 | v_texCoord = a_texCoord; 17 | vec2 clipSpace = a_vertex * u_resolution * 2.0 - 1.0; 18 | gl_Position = vec4( clipSpace * vec2(1,-1), 0.0, 1.0); 19 | }`; 20 | 21 | const getFragmentSource = (samplerType,outVec) => { 22 | return `#version 300 es 23 | #pragma debug(on) 24 | 25 | precision mediump usampler2D; 26 | precision mediump float; 27 | 28 | in vec2 v_texCoord; 29 | const float maxUint16 = 65535.0; 30 | uniform ${samplerType} u_image; 31 | out vec4 outColor; 32 | 33 | void main() { 34 | outColor = vec4(${outVec}, 1.0); 35 | }`; 36 | } 37 | 38 | 39 | // Step #1: Create - compile + link - shader program 40 | // Set up fragment shader source depending of raster type (uint8, uint16, float32,rgba) 41 | let samplerType = (raster.type === 'uint16') ? 'usampler2D' : 'sampler2D'; 42 | let outColor; 43 | switch (raster.type) { 44 | case 'uint8': 45 | case 'rgba' : outColor = `texture(u_image, v_texCoord).rgb`; break; 46 | case 'uint16': outColor = `vec3(float(texture(u_image, v_texCoord).r) / maxUint16 )`; break; 47 | case 'float32': outColor = `vec3(texture(u_image, v_texCoord).r)`; break; 48 | } 49 | 50 | let the_shader = gpu.createProgram(graphContext,src_vs,getFragmentSource(samplerType,outColor)); 51 | 52 | console.log('programs done...'); 53 | 54 | // Step #2: Create a gpu.Processor, and define geometry, attributes, texture, VAO, .., and run 55 | let gproc = gpu.createGPU(graphContext) 56 | .size(raster.width,raster.height) 57 | .geometry(gpu.rectangle(raster.width,raster.height)) 58 | .attribute('a_vertex',2,'float', 16,0) // X, Y 59 | .attribute('a_texCoord',2, 'float', 16, 8) // S, T 60 | .texture(raster,0) 61 | .packWith(the_shader) // VAO 62 | .clearCanvas([0.0,1.0,1.0,1.0]) 63 | .preprocess() 64 | .uniform('u_resolution',new Float32Array([1.0/raster.width,1.0/raster.height])) 65 | .uniform('u_image',0) 66 | .run(); 67 | 68 | return raster; 69 | 70 | } 71 | -------------------------------------------------------------------------------- /src/gpu/gpu_preprocess.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful, 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 
19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use strict'; 26 | 27 | /** 28 | * https://www.khronos.org/opengl/wiki/Blending 29 | * 30 | * @example 31 | * 32 | * 33 | * @author Jean-Christophe Taveau 34 | */ 35 | const blend = (funcRGBA) => { 36 | // TODO 37 | const parse = (txt) => { 38 | // Get Arguments 39 | // parse body 40 | // equations: add(s+d), subtract(s-d) reverse_subtract(d-s) ,min(s,d) max(s,d) 41 | //factors: constants (0 and 1), alpha, src, dst, constant_color via blendColor?, constant_alpha? 42 | // get Blend values 43 | }; 44 | 45 | // func(src,dst) => src + dst - blendEquation(FUNC_ADD); blendFunc(ONE,ONE)) 46 | // func(src,dst) => src - dst - blendEquation(FUNC_SUBTRACT); blendFunc(ONE,ONE)) 47 | // func(src,dst) => dst - src - blendEquation(FUNC_REVERSE_SUBTRACT); blendFunc(ONE,ONE)) 48 | // func(src,dst) => src * dst [+ 0 * dst] - blendEquation(FUNC_ADD); blendFunc(DST_COLOR,ZERO)) 49 | // func(src,dst) => src * src.a + (1 - src.a) * dst] - blendEquation(FUNC_ADD); blendFunc(SRC_ALPHA,ONE_MINUS_SRC_ALPHA)) 50 | 51 | // Parse one-liner function 52 | parse(funcRGBA.toString()); 53 | 54 | return { 55 | name: 'blend', 56 | params: [gpu.FUNC_ADD,gpu.ONE,gpu.ONE] 57 | } 58 | } 59 | 60 | /** 61 | * 62 | * @author Jean-Christophe Taveau 63 | */ 64 | const viewport = (x,y,w,h) => ({name: 'viewport', params: [x,y,w,h]}); 65 | 66 | 67 | export {blend,viewport}; 68 | 69 | 70 | -------------------------------------------------------------------------------- /src/gpu/index.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 
19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 26 | /* gpu/gpu_constants */ 27 | import { 28 | POINTS,LINES,LINE_LOOP,LINE_STRIP,TRIANGLES,TRIANGLE_STRIP,TRIANGLE_FAN, 29 | NEAREST,LINEAR,REPEAT,CLAMP_TO_EDGE,MIRRORED_REPEAT,CLAMP,MIRROR, 30 | FUNC_ADD,FUNC_SUBSTRACT,FUNC_REVERSE_SUBTRACT,MIN,MAX, 31 | ZERO,ONE,SRC_COLOR,ONE_MINUS_SRC_COLOR,SRC_ALPHA,ONE_MINUS_SRC_ALPHA, 32 | DST_ALPHA,ONE_MINUS_DST_ALPHA,DST_COLOR,ONE_MINUS_DST_COLOR, 33 | SRC_ALPHA_SATURATE,CONSTANT_COLOR,ONE_MINUS_CONSTANT_COLOR,CONSTANT_ALPHA, 34 | ONE_MINUS_CONSTANT_ALPHA} from './gpu_constants'; 35 | 36 | /* gpu/gpu_utils */ 37 | import {createGPU,createProgram,getGraphicsContext,rectangle} from './gpu_utils'; 38 | 39 | /* gpu/Processor*/ 40 | import {Processor} from './Processor'; 41 | 42 | /* gpu/color*/ 43 | import {invert} from './gpu_color'; 44 | 45 | /* gpu/math*/ 46 | import {fill} from './gpu_math'; 47 | 48 | /* gpu/preprocess*/ 49 | import {blend} from './gpu_preprocess'; 50 | 51 | /* gpu/statistics*/ 52 | import {histogram} from './gpu_statistics'; 53 | 54 | 55 | export { 56 | POINTS,LINES,LINE_LOOP,LINE_STRIP,TRIANGLES,TRIANGLE_STRIP,TRIANGLE_FAN, 57 | NEAREST,LINEAR,REPEAT,CLAMP_TO_EDGE,MIRRORED_REPEAT,CLAMP,MIRROR, 58 | FUNC_ADD,FUNC_SUBSTRACT,FUNC_REVERSE_SUBTRACT,MIN,MAX, 59 | ZERO,ONE,SRC_COLOR,ONE_MINUS_SRC_COLOR,SRC_ALPHA,ONE_MINUS_SRC_ALPHA, 60 | DST_ALPHA,ONE_MINUS_DST_ALPHA,DST_COLOR,ONE_MINUS_DST_COLOR, 61 | SRC_ALPHA_SATURATE,CONSTANT_COLOR,ONE_MINUS_CONSTANT_COLOR,CONSTANT_ALPHA, 62 | ONE_MINUS_CONSTANT_ALPHA, 63 | createGPU,createProgram,getGraphicsContext,rectangle, 64 | Processor, 65 | invert, 66 | fill, 67 | blend,viewport, 68 | histogram 69 | }; 70 | 71 | 72 | -------------------------------------------------------------------------------- /src/gpu/gpu_color.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 
19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use script'; 26 | 27 | /** 28 | * Invert colors 29 | * 30 | * @author Jean-Christophe Taveau 31 | */ 32 | 33 | const invert = (raster,graphContext, copy_mode = true) => { 34 | 35 | let id='invert'; 36 | 37 | let src_vs = `#version 300 es 38 | 39 | in vec2 a_vertex; 40 | in vec2 a_texCoord; 41 | 42 | uniform vec2 u_resolution; 43 | 44 | out vec2 v_texCoord; 45 | 46 | void main() { 47 | v_texCoord = a_texCoord; 48 | vec2 clipSpace = a_vertex * u_resolution * 2.0 - 1.0; 49 | gl_Position = vec4( clipSpace * vec2(1,-1), 0.0, 1.0); 50 | }`; 51 | 52 | let src_fs = `#version 300 es 53 | precision mediump float; 54 | 55 | in vec2 v_texCoord; 56 | uniform sampler2D u_image; 57 | 58 | out vec4 outColor; 59 | 60 | void main() { 61 | outColor = vec4(1.0 - texture(u_image, v_texCoord).rgb, 1.0); 62 | }`; 63 | 64 | 65 | // Step #1: Create - compile + link - shader program 66 | let the_shader = gpu.createProgram(graphContext,src_vs,src_fs); 67 | 68 | console.log('programs done...'); 69 | 70 | // Step #2: Create a gpu.Processor, and define geometry, attributes, texture, VAO, .., and run 71 | let gproc = gpu.createGPU(graphContext) 72 | .size(raster.width,raster.height) 73 | .geometry(gpu.rectangle(raster.width,raster.height)) 74 | .attribute('a_vertex',2,'float', 16,0) // X, Y 75 | .attribute('a_texCoord',2, 'float', 16, 8) // S, T 76 | .texture(raster,0) 77 | .packWith(the_shader) // VAO 78 | .clearCanvas([0.0,1.0,1.0,1.0]) 79 | .preprocess() 80 | .uniform('u_resolution',new Float32Array([1.0/raster.width,1.0/raster.height])) 81 | .uniform('u_image',0) 82 | .run(); 83 | 84 | return raster; 85 | } 86 | 87 | export {invert}; 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /src/process/noise.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | /** 26 | * Add Salt and Pepper Noise 27 | * 28 | * @param {number} percent - percentage of noise added to the image 29 | * @param {Raster} raster - Input image 30 | * @param {boolean} copy_mode - Used to control the copy (or not) of the image pixels 31 | * @usage saltAndPepper(0.05)(my_image) 32 | * 33 | * @author Jean-Christophe Taveau 34 | */ 35 | const saltAndPepper = (percent=0.05) => (raster,copy_mode = true) => { 36 | let output = T.Raster.from(raster,copy_mode); 37 | let pixels = output.pixelData; 38 | Array.from({length: Math.floor(raster.length * percent)}, x => Math.floor(Math.random() * raster.length) ) 39 | .forEach( (x,i) => pixels[x] = (i%2==0) ? 
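// alternate between salt (255) and pepper (0) for the randomly picked pixel positions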
255 : 0 ); 40 | return output; 41 | }; 42 | 43 | 44 | /** 45 | * Adds pseudorandom, Gaussian ("normally") distributed values, with 46 | * mean 0.0 and the specified standard deviation, to this image or ROI. 47 | * Adapted from ImageJ code (Wayne Rasband) 48 | */ 49 | const noise = (standardDeviation = 25.0) => (raster,copy_mode = true) => { 50 | // Private functions 51 | const inRange = (x,a_min,a_max) => (x >= a_min && x <= a_max); 52 | 53 | // Standard Normal variate using Box-Muller transform. 54 | const rand_bm = (variance) => { 55 | let u = 0, v = 0; 56 | while(u === 0) u = Math.random(); //Converting [0,1) to (0,1) 57 | while(v === 0) v = Math.random(); 58 | return Math.sqrt( -2.0 * Math.log( u ) * variance) * Math.cos( 2.0 * Math.PI * v ); 59 | }; 60 | 61 | let dummy = T.statistics(raster); 62 | let output = T.Raster.from(raster,copy_mode); 63 | let pixels = output.pixelData; 64 | let variance = standardDeviation**2; 65 | raster.pixelData.forEach( (px,i) => { 66 | do { 67 | pixels[i] = Math.floor(px + rand_bm(variance)); 68 | } while (!inRange(pixels[i],0,255)) 69 | } ); 70 | return output; 71 | }; 72 | 73 | 74 | 75 | 76 | export {noise,saltAndPepper}; 77 | -------------------------------------------------------------------------------- /src/process/statistics.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | /** 26 | * @module statistics 27 | */ 28 | 29 | /** 30 | * Computes basic stats: min, max, mean/average and standard deviation of the image. 
31 | * Algorithm for variance found in Wikipedia 32 | * 33 | * @param {Raster} img - Input raster 34 | * @param {boolean} copy_mode - Useless here, only for compatibility with the other processing functions 35 | * @return {object} - Returns an object containing min, max, mean, variance 36 | * 37 | * @author Jean-Christophe Taveau 38 | */ 39 | const statistics = (img, copy_mode = true) => { 40 | let tmp = img.pixelData.reduce ( (accu,px,i) => { 41 | accu.min = Math.min(accu.min,px); 42 | accu.max = Math.max(accu.max,px); 43 | accu.mean += px; 44 | accu.n++; 45 | let delta = px - accu.mean2; 46 | accu.mean2 += delta/accu.n; 47 | accu.variance += delta * delta; 48 | return accu; 49 | }, 50 | {min: Number.MAX_SAFE_INTEGER, max: 0, mean: 0.0, mean2 : 0.0, n: 0, variance: 0.0} 51 | ); 52 | 53 | // Update stats in this TRaster 54 | img.statistics = { 55 | min: tmp.min, 56 | max: tmp.max, 57 | count : img.pixelData.length, 58 | mean : tmp.mean / img.pixelData.length, 59 | stddev : Math.sqrt(tmp.variance / img.pixelData.length) 60 | }; 61 | return img; 62 | }; 63 | 64 | const histogram = (binNumber) => (raster, copy_mode = true) => { 65 | // Update statistics 66 | let stats = T.statistics(raster); 67 | let delta = (raster.statistics.max - raster.statistics.min); 68 | raster.statistics.histogram = raster.pixelData.reduce ((bins,px,i) => { 69 | let index = T.clamp(0,binNumber)( Math.floor( (binNumber - 1) * (px - raster.statistics.min)/ delta)); 70 | bins[index]++; 71 | return bins; 72 | }, 73 | new Array(binNumber).fill(0) 74 | ); 75 | return raster; 76 | }; 77 | 78 | // Exports 79 | export {histogram,statistics}; 80 | 81 | 82 | -------------------------------------------------------------------------------- /benchmark.js: -------------------------------------------------------------------------------- 1 | //Authors : Peter Bock, Cecilia Ostertag 2 | 3 | function benchmark(img, functionNameString, parameters, timeList, memoryList) 4 | { 5 | var startTime,endTime,time,memory; 6 | 7 | IJ.freeMemory(); //run garbage collector 8 | 9 | startTime = System.currentTimeMillis(); 10 | IJ.run(img, functionNameString, parameters); 11 | endTime = System.currentTimeMillis(); 12 | time = endTime - startTime; 13 | memory = IJ.currentMemory(); //memory currently being used by ImageJ 14 | timeList.push(time); 15 | memoryList.push(memory/1048576);//convert bytes to MB 16 | } 17 | 18 | function runBenchmark(imp, functionNameString, parameters) 19 | { 20 | IJ.log("Front loading...\n"); 21 | //------------------------------------FRONT-LOADING------------------------------ 22 | 23 | //front-load 5 times without saving the returned timing 24 | var timeList=[],memoryList=[]; 25 | for(var i = 0; i<5; i++){ 26 | 27 | impDupl = imp.duplicate(); 28 | benchmark(impDupl,functionNameString,parameters,timeList, memoryList); 29 | 30 | if(i==0) 31 | { 32 | IJ.log("first results :\n"); 33 | IJ.log("time: "+timeList[i]); 34 | IJ.log("memory: "+memoryList[i]); 35 | } 36 | 37 | } 38 | //IJ.run("Close All Windows", ""); //close all image windows 39 | //------------------------------------BENCHMARKING------------------------------ 40 | 41 | IJ.log("Benchmarking the function "+functionNameString+"\n"); 42 | 43 | timeList = []; 44 | memoryLsit = []; 45 | var loops = 20; 46 | for(var j = 0; j < loops; j++){ 47 | impDupl = imp.duplicate(); 48 | benchmark(impDupl,functionNameString,parameters,timeList, memoryList); 49 | 50 | } 51 | IJ.run("Close All Windows", ""); 52 | IJ.log("End of benchmark\n"); 53 | for(var i=0;i. 
19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use strict'; 26 | 27 | import {TIMES} from '../TIMES'; 28 | 29 | 30 | /** 31 | * Extract alpha (transparency) component of RGBA pixel value 32 | */ 33 | const append = function (obj) { 34 | TIMES.storage.push(obj); 35 | return TIMES.storage; 36 | } 37 | 38 | /** 39 | * Clamp value between min and max 40 | * 41 | * @author Jean-Christophe Taveau 42 | */ 43 | const clamp = (min_value,max_value) => (value) => Math.max(min_value,Math.min(value,max_value)); 44 | 45 | 46 | /** 47 | * Clamp value between 0 and 255 (2^8 -1) 48 | * 49 | * @author Jean-Christophe Taveau 50 | */ 51 | const clampUint8 = clamp(0,255); 52 | 53 | 54 | /** 55 | * Clamp value between 0 and 65535 (2^16 -1) 56 | * 57 | * @author Jean-Christophe Taveau 58 | */ 59 | const clampUint16 = clamp(0,65535); 60 | 61 | /** 62 | * Check Endianness 63 | * 64 | * @author Jean-Christophe Taveau 65 | */ 66 | const isLittleEndian = () => { 67 | const checkEndianness = () => { 68 | // https://hacks.mozilla.org/2011/12/faster-canvas-pixel-manipulation-with-typed-arrays/ 69 | let buf = new ArrayBuffer(4); 70 | let buf8 = new Uint8ClampedArray(buf); 71 | let data = new Uint32Array(buf); 72 | 73 | // Determine whether Uint32 is little- or big-endian. 74 | data[0] = 0x0a0b0c0d; 75 | TIMES.cache.littleEndian = (buf8[0] === 0x0d); 76 | return TIMES.cache.littleEndian; 77 | }; 78 | 79 | return (TIMES.cache.littleEndian !== undefined) ? TIMES.cache.littleEndian : checkEndianness(); 80 | 81 | }; 82 | 83 | /** 84 | * pipe(func1, func2, func3, ..., funcn) 85 | * From https://medium.com/javascript-scene/reduce-composing-software-fe22f0c39a1d 86 | * 87 | * @example pipe(func1,func2) returns func2(func1(x)) 88 | * 89 | * @author Eric Elliott 90 | */ 91 | const pipe = (...fns) => (x,copy_mode=false) => fns.reduce((v, f,i) => { 92 | return f(v,copy_mode); 93 | }, x); 94 | 95 | 96 | 97 | // Exports 98 | export {clamp,clampUint8,isLittleEndian,pipe}; 99 | 100 | -------------------------------------------------------------------------------- /src/process/geometry.js: -------------------------------------------------------------------------------- 1 | /* 2 | * times: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of times 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with times. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use script'; 26 | 27 | /** 28 | * @module geometry 29 | */ 30 | 31 | /** 32 | * Crop raster 33 | * @param {number} topleft_x - X-coordinate of the top left corner 34 | * @param {number} topleft_y - Y-coordinate of the top left corner 35 | * @param {number} new_width - Width of the cropped raster 36 | * @param {number} new_height - Height of the cropped raster 37 | * @param {Raster} raster - Input Raster 38 | * @param {boolean} copy_mode - Useless, here. 
Only for compatibility with other process functions 39 | * 40 | * @author Jean-Christophe Taveau 41 | */ 42 | const crop = (top_left_x, top_left_y,new_width,new_height) => (raster,copy_mode=true) => { 43 | let output = T.Raster.from(raster,false); 44 | let pixels = Array.from({length: new_height}, (v,i) => top_left_x + (top_left_y + i) * raster.width) 45 | .reduce( (accu,xy) => { 46 | let chunk = raster.pixelData.slice( xy, xy + new_width); 47 | return [...accu, ...chunk]; 48 | },[]); 49 | output.width = new_width; 50 | output.height = new_height; 51 | output.pixelData = [...pixels]; 52 | return output; 53 | } 54 | 55 | 56 | /** 57 | * Flip vertically 58 | */ 59 | const flipV = (angle) => (raster,copy_mode=true) => console.log('TODO: flipV'); 60 | 61 | /** 62 | * Flip horizontally 63 | */ 64 | const flipH = (angle) => (raster,copy_mode=true) => console.log('TODO: flipH'); 65 | 66 | /** 67 | * Pad - TODO 68 | * 69 | */ 70 | const pad = (topleft_x,topleft_y,new_width, new_height,value) => (img,copy_mode=true) => { 71 | let output = new T.Raster(img.type,new_width,new_height); 72 | output.pixelData = T.Raster.createPixels(output.type,output.length); 73 | for (let y = 0; y < img.height; y++) { 74 | let chunk = img.pixelData.slice(y * img.width, (y+1) * img.width); 75 | chunk.forEach ( (px, index) => ouput.pixelData[topleft_x + index + topleft_y * img.width] = px); 76 | } 77 | return output; 78 | }; 79 | 80 | /** 81 | * Rotate 82 | */ 83 | const rotate = (angle) => (raster,copy_mode=true) => console.log('TODO: rotate'); 84 | 85 | /** 86 | * Scale 87 | */ 88 | const scale = (scalex, scaley) => (raster,copy_mode=true) => console.log('TODO: scale'); 89 | 90 | /** 91 | * Translate 92 | */ 93 | const translate = (angle) => (raster,copy_mode=true) => console.log('TODO: translate'); 94 | 95 | 96 | // Exports 97 | export {crop}; 98 | -------------------------------------------------------------------------------- /src/gpu/gpu_statistics.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful, 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use strict'; 26 | 27 | /** 28 | * @module statistics 29 | */ 30 | 31 | /** 32 | * Computes basic stats: min, max, mean/average and standard deviation of the image. 
33 | * Algorithm for variance found in Wikipedia 34 | * 35 | * @param {Raster} img - Input raster 36 | * @param {boolean} copy_mode - Useless here, only for compatibility with the other processing functions 37 | * @return {object} - Returns an object containing min, max, mean, variance 38 | * 39 | * @author Jean-Christophe Taveau 40 | */ 41 | const statistics = (img, copy_mode = true) => { 42 | // TODO 43 | 44 | return img; 45 | }; 46 | 47 | const histogram = (binNumber) => (raster, copy_mode = true) => { 48 | // https://stackoverflow.com/questions/10316708/ios-glsl-is-there-a-way-to-create-an-image-histogram-using-a-glsl-shader 49 | // https://www.opengl.org/discussion_boards/showthread.php/151073-copying-from-texture-to-vertex-buffer 50 | // texture to vertex buffer ? see http://nullprogram.com/blog/2014/06/29/ 51 | 52 | let src_vs_texture = `#version 300 es 53 | 54 | in int a_index; 55 | 56 | uniform int maxbin; 57 | 58 | // Vertex Texture 59 | uniform sampler2D u_raster; 60 | 61 | out float bin; 62 | 63 | void main() { 64 | ivec2 u_size = textureSize(u_raster,0); 65 | int x = (a_index % u_size.x); 66 | int y = int(a_index / u_size.x); 67 | vec2 coords = vec2(x / u_size.x, y / u_size.y); 68 | // bin = texture(u_raster, coords) ; 69 | 70 | gl_Position = vec4(coords * 2.0 - 1.0, 0.0, 1.0); 71 | } 72 | `; 73 | // Vertex Shader 74 | let src_vs = `#version 300 es 75 | 76 | in float a_pixel; 77 | 78 | uniform float u_maxbin; 79 | 80 | out float bin; 81 | 82 | void main() { 83 | bin = (a_pixel + 0.5) / u_maxbin; 84 | gl_Position = vec4(bin * 2.0 - 1.0, 0.0, 0.0, 1.0); 85 | } 86 | `; 87 | // Fragment Shader 88 | let src_fs = `#version 300 es 89 | precision mediump float; 90 | 91 | const vec3 step = vec3(1.0,0.0,0.0); 92 | 93 | in float bin; 94 | out vec4 outColor; 95 | 96 | void main(){ 97 | outColor = vec4(step, 1.0) ; 98 | }`; 99 | 100 | // Step #2: Create - compile + link - shader program 101 | let the_shader = gpu.createProgram(gpuEnv,src_vs,src_fs); 102 | 103 | // Step #3: Create the rectangle WITHOUT texture 104 | let maxBin = binNumber; 105 | 106 | let gproc = gpu.createGPU(gpuEnv,maxBin,1) 107 | .geometry({ 108 | type: gpu.POINTS, 109 | num: raster.width * raster.height, 110 | vertices: new Float32Array(raster.pixelData) 111 | } ) 112 | .attribute('a_pixel',1,'float', 4,0) 113 | .packWith(the_shader) 114 | .clearCanvas([0.0,0.0,0.0,1.0]) 115 | .redirectTo('fbo','float32') 116 | .preprocess([ 117 | gpu.blend((src,dst) => src + dst) 118 | ]) 119 | .uniform('u_maxbin',maxBin) 120 | .run(); 121 | 122 | let histogramFloat = gproc.readPixels('fbo').filter( (v,i) => ( (i % 4) === 0) ); 123 | 124 | if (raster.statistics === undefined) { 125 | raster.statistics = { 126 | min: -1, 127 | max: -1, 128 | count : raster.pixelData.length, 129 | mean : -1, 130 | stddev : -1, 131 | histogram: histogramFloat 132 | }; 133 | } 134 | else { 135 | raster.statistics.histogram = histogramFloat; 136 | } 137 | 138 | return raster; 139 | }; 140 | 141 | // Exports 142 | export {histogram}; 143 | 144 | 145 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | To use the complete set of Image Processing functions in your browser, head on over to [our teacher's TIMES project](https://github.com/crazybiocomputing/times), which integrates the different algorithms developed by us and the rest of our colleagues from the [Bioinformatics Master degree of 
Bordeaux](http://master-bioinfo-bordeaux.github.io/). 3 | 4 | --------------- 5 | 6 | To use, clone first crazybiocomputing/times repository on your computer, then in the same directory clone this repository. 7 | 8 | # Edge detection functions with WEBGL 2: 9 | In src/gpu : display.js : basic display of an image , canny.js : gpuEdgeCanny function (Canny edge detector) 10 | 11 | In examples : testCanny.js and testCanny.html : example of Canny edge detector, benchmarkForTijiGPU.js : benchmark function to test Canny GPU implementation, STILL BUG WITH 16bit IMAGES 12 | 13 | 14 | [Example of Canny, 8bit, 16bit, float32](https://rawgit.com/bockp/Edge-Detection-project/master/examples/testCanny.html) 15 | 16 | [Example of Sobel, Prewitt, Robert's cross](https://rawgit.com/bockp/Edge-Detection-project/master/examples/testEdge.html) 17 | 18 | Benchmark files for CPU : branch cpu, main folder 19 | 20 | Benchmark files for GPU : branch master, examples folder 21 | 22 | Benchmark file for ImageJ : branch master, main folder 23 | 24 | # Edge detection functions with ES6 (see cpu branch): 25 | 26 | All of our functions are in src/process/edgeDetect.js . 27 | 28 | To test our results, you can use the file examples/testEdge.js, using a 256x256 uint8 version of Lena as input for our five functions : *sobel()*, *prewitt()*, *robertscross()*, *LoG()*, and *canny()*. 29 | 30 | [Examples](https://rawgit.com/bockp/Edge-Detection-project/cpu/examples/testEdge.html) 31 | 32 | # Tiny Image in Javascript (TIJ) project 33 | 34 | Each group of three students must choose a subject among this list: 35 | 36 | * Analyze: Analyze Particles (as in ImageJ) 37 | * Edge: Edge Detection: Sobel, Laplace, Canny, etc. 38 | * FilterLin: Filters 2D (linear filter): Convolve, Mean, Gaussian Blur, etc. 39 | * FilterRank: Filters 2D (rank filters): Median, Min, Max, Variance 40 | * FFT: Fourier Transform + Correlation 41 | * Hough: Hough Transform : Line and Circle detection 42 | * Threshold: Thresholding methods: by hand, automatic: Otsu, etc. (as in ImageJ). 43 | * Morpho: Mathematical Morphology: dilate, erode, open, close, Hit or Miss, skeletonize, EDM, UEP with structuring element (plugin ImageJ). 44 | * Contrast: 'Adjust > Brighntess and Contrast' and 'Enhance Contrast' operations of ImageJ with Normalize and Equalize. 45 | 46 | 47 | # Our Subject -> Edge Detection: Sobel, Laplace, Canny, etc. 48 | 49 | The aim of this project is to develop one or more image processing features in a classical image toolbox. 50 | The project is separated into 3 segments: 51 | 52 | ## Literature search on the different algorithms and their implementation in ImageJ + benchmarking in ImageJ 53 | 54 | First month is focused on literature research and presentating the different algorithms, along with the various optimizations available. 55 | 56 | For us it would be presenting all *Edge Detection algorithms available in ImageJ*, be it by default or plugins. 57 | 58 | Benchmarking is needed to compare the efficiency and effectiveness of these algorithms. 59 | We will use this benchmarking to study how efficient each algorithm seems at detecting edges (both real and false positives), and what is sacrificed to achieve better results. 60 | 61 | ## Development of one or more of the Edge Detectors in JS/ECMA 6 + benchmarking and comparison with previous benchmark. 
62 | 63 | Second month consists of the development of one or more functions in Javascript — the latest version will be used, ECMAScript 2015 or ECMAScript 6 — using the **functional programming paradigm**. 64 | 65 | 66 | Only, Mozilla Firefox and Google Chrome will be used as testing platforms for the JS code. 67 | 68 | node.js will *not* be used in this project because the program architecture is not the same. Moreover, we would need the latest version — version 8 — to support ECMAScript 2015. 69 | 70 | 71 | The JS code must follow — as much as possible — the style guidelines of AirBnB available in [github.io](https://github.com/airbnb/javascript). 72 | 73 | Workload distribution: 74 | 75 | Cecilia Ostertag : Canny operator 76 | 77 | Ophelie Thierry : Sobel/Prewitt/Cross operators 78 | 79 | Peter Bock : Laplacian of Gaussian operator 80 | 81 | ## Implementation of the *same functions as above* using the *WebGL library* + benchmarking and comparison with previous benchmarks. 82 | 83 | Third month is dedicated to the implementation of the same functions using WebGL library, and benchmarking to study if such an implementation improves the performance of the algorithms compared to the previous months implementations. 84 | -------------------------------------------------------------------------------- /src/gpu/gpu_utils.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 
19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use strict'; 26 | 27 | /** 28 | * Toolbox for GPU 29 | * 30 | */ 31 | 32 | /** 33 | * Create a GPU Processor 34 | * 35 | * @author Jean-Christophe Taveau 36 | */ 37 | const createGPU = (graphics,width=-1,height=-1) => new gpu.Processor(graphics.context,graphics.canvas,width,height); 38 | 39 | 40 | /** 41 | * 42 | * Init WebGL2 43 | * 44 | */ 45 | const getGraphicsContext = (elementID='preview') => { 46 | // http://webglreport.com/ 47 | let _canvas = document.getElementById(elementID); 48 | let gl2; 49 | let _params = {}; 50 | 51 | try { 52 | gl2 = _canvas.getContext("webgl2"); 53 | // Need extension(s) 54 | const ext = gl2.getExtension("EXT_color_buffer_float"); 55 | if (!ext) { 56 | alert("need EXT_color_buffer_float"); 57 | } 58 | // Various useful configuration parameters 59 | _params.maxTextures = gl2.getParameter(gl2.MAX_TEXTURE_IMAGE_UNITS); 60 | _params.maxTextureSize = gl2.getParameter(gl2.MAX_TEXTURE_SIZE); 61 | 62 | } catch (e) { 63 | } 64 | if (!gl2) { 65 | alert("Could not initialise WebGL2, sorry :-("); 66 | } 67 | return {canvas: _canvas, context: gl2, parameters: _params}; 68 | }; 69 | 70 | 71 | /** 72 | * 73 | * Create Shader Program 74 | * 75 | */ 76 | const createProgram = (gpuEnv,src_vs,src_fs) => { 77 | 78 | // Compile shader 79 | const compileShader = (gl, source,type) => { 80 | let str = source; 81 | 82 | let shader; 83 | if (type == "fragment") { 84 | shader = gl.createShader(gl.FRAGMENT_SHADER); 85 | } else if (type == "vertex") { 86 | shader = gl.createShader(gl.VERTEX_SHADER); 87 | } else { 88 | return null; 89 | } 90 | 91 | gl.shaderSource(shader, str); 92 | gl.compileShader(shader); 93 | 94 | if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) { 95 | alert(` ${type}: ${gl.getShaderInfoLog(shader)}`); 96 | return null; 97 | } 98 | 99 | return shader; 100 | }; 101 | 102 | /**** MAIN ****/ 103 | 104 | let shader = { 105 | program: null, 106 | attributes: {}, 107 | uniforms: {} 108 | }; 109 | 110 | let gl = gpuEnv.context; 111 | 112 | //1- Check in source(s) where are the attributes (keyword`in `) 113 | let re = /in\s*(\w+)\s(\w+)/gm; 114 | let result; 115 | while ((result = re.exec(src_vs)) !== null) { 116 | // console.log(re.exec(src_vs)); 117 | shader.attributes[result[2]] = {type: result[1],name: result[2],location: null}; 118 | } 119 | // Check in source(s) where are the uniforms (keyword: `uniform`) 120 | re = /uniform\s*(\w+)\s+(\w+)\s*(\[)*/gm; 121 | while ((result = re.exec(src_vs)) !== null) { 122 | // console.log(re.exec(src_vs)); 123 | shader.uniforms[result[2]] = {type: result[1]+(result[3]?'[]':''),name: result[2],location: null}; 124 | } 125 | while ((result = re.exec(src_fs)) !== null) { 126 | // console.log(re.exec(src_vs)); 127 | shader.uniforms[result[2]] = {type: result[1]+(result[3]?'[]':''),name: result[2],location: null}; 128 | } 129 | console.log(shader); 130 | 131 | // 2- Create Shader Program with link step. 
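// (compile both shaders, attach them and link; the attribute/uniform names collected by the regex scan above are resolved to GPU locations in step 3, which must happen after linking)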
132 | shader.program = gl.createProgram(); 133 | 134 | gl.attachShader(shader.program, compileShader(gl,src_vs,'vertex')); 135 | gl.attachShader(shader.program, compileShader(gl,src_fs,'fragment')); 136 | gl.linkProgram(shader.program); 137 | 138 | if (!gl.getProgramParameter(shader.program, gl.LINK_STATUS)) { 139 | alert("Could not initialise shaders"); 140 | } 141 | 142 | // 3- Get Attribute and Uniform locations 143 | Object.values(shader.attributes).forEach( (attr) => attr.location = gl.getAttribLocation(shader.program, attr.name) ); 144 | Object.values(shader.uniforms).forEach( (uniform) => uniform.location = gl.getUniformLocation(shader.program, uniform.name) ); 145 | return shader; 146 | } 147 | 148 | /** 149 | * Create a rectangle for gpu.Processor.geometry(..) function 150 | * 151 | * @author Jean-Christophe Taveau 152 | */ 153 | 154 | const rectangle = (w,h) => ({ 155 | type: gpu.TRIANGLE_STRIP, 156 | num: 4, 157 | vertices: new Float32Array( 158 | [ 159 | 0.0,0.0,0.0,0.0, 160 | 0.0,h ,0.0,1.0, 161 | w ,0.0,1.0,0.0, 162 | w ,h ,1.0,1.0 163 | ] 164 | ) 165 | }); 166 | 167 | // Export 168 | export {createGPU,getGraphicsContext,createProgram,rectangle}; 169 | 170 | 171 | -------------------------------------------------------------------------------- /src/gpu/edge.js: -------------------------------------------------------------------------------- 1 | const SOBEL_H = [1, 0, -1, 2, 0, -2, 1, 0, -1]; 2 | const SOBEL_V = [-1, -2, -1, 0, 0, 0, 1, 2, 1]; 3 | const PREWITT_H = [1, 0, -1, 1, 0, -1, 1, 0, -1]; 4 | const PREWITT_V = [-1, -1, -1, 0, 0, 0, 1, 1, 1]; 5 | const ROBERT_H = [0, -1, 0, 1, 0, 0, 0, 0, 0]; 6 | const ROBERT_V = [-1, 0, 0, 0, 1, 0, 0, 0, 0]; 7 | 8 | 9 | /** 10 | * gpuEdgePrewitt, gpuEdgeSobel and gpuEdgeRobert : Find the edges in an image using respectively Prewitt, Sobel and Robert operators. 11 | * 12 | * 13 | * @param {raster} raster - Input raster 14 | * @param {} gpuEnv - Graphical environment 15 | * 16 | * @author Ophelie Thierry 17 | */ 18 | 19 | const gpuEdgePrewitt = () => (raster, gpuEnv, copy = true) => 20 | {gpuEdge(raster, gpuEnv, PREWITT_H, PREWITT_V); 21 | let id = "prewitt"; 22 | console.log(id); 23 | return raster; 24 | }; 25 | 26 | const gpuEdgeSobel = () => (raster, gpuEnv, copy = true) => 27 | {gpuEdge(raster, gpuEnv, SOBEL_H, SOBEL_V); 28 | let id = "sobel"; 29 | console.log(id); 30 | return raster; 31 | }; 32 | 33 | const gpuEdgeRobert = () => (raster, gpuEnv, copy = true) => 34 | {gpuEdge(raster, gpuEnv, ROBERT_H, ROBERT_V); 35 | let id = "robert"; 36 | console.log(id); 37 | return raster; 38 | }; 39 | 40 | 41 | 42 | /** 43 | * Find the edges in an image according to a specific kernel given in argument. 
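* The horizontal and vertical 3x3 kernels are convolved with the 8-neighbourhood of each pixel in the fragment shader, and the gradient magnitude sqrt(Gx*Gx + Gy*Gy) is written to the R, G and B channels of the output.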
44 | * 45 | * 46 | * @param {raster} raster - Input raster 47 | * @param {} gpuEnv - Graphical environment 48 | * @param {array} kernelH - Horizontal kernel 49 | * @param {array} kernelV - Vertical kernel 50 | * 51 | * @author Ophelie Thierry 52 | */ 53 | 54 | const gpuEdge = (raster, gpuEnv, kernelH, kernelV, copy = true) => 55 | { 56 | //let gpuEnv = gpu.getGraphicsContext('preview'); 57 | //console.log(gpuEnv); 58 | 59 | // Vertex Shader 60 | const src_vs = `#version 300 es 61 | in vec2 a_vertex; 62 | in vec2 a_texCoord; 63 | uniform vec2 u_resolution; 64 | out vec2 v_texCoord; 65 | 66 | void main() { 67 | v_texCoord = a_texCoord; 68 | vec2 clipSpace = a_vertex * u_resolution * 2.0 - 1.0; 69 | gl_Position = vec4(clipSpace * vec2(1.0,-1.0), 0.0, 1.0); 70 | }`; 71 | 72 | console.log("vertex shader done"); 73 | 74 | console.log(raster.type); 75 | 76 | // other types : "uint8" "float32" "rgba" 77 | 78 | let ajustRasterTypePrefix = (raster.type == "uint16" ) ? `float(` : `` ; 79 | let ajustRasterTypeSuffix = (raster.type == "uint16" ) ? `)` : `` ; 80 | 81 | //console.log(ajustTypeRasterSuffix); 82 | 83 | // Fragment Shader 84 | const src_fs = `#version 300 es 85 | precision mediump float; 86 | 87 | in vec2 v_texCoord; 88 | uniform sampler2D u_image; 89 | uniform float KERNEL_H[9]; 90 | uniform float KERNEL_V[9]; 91 | out vec4 outputColor; 92 | 93 | void main() { 94 | float stepSizeX = 1.0 / float(textureSize(u_image,0).x); 95 | float stepSizeY = 1.0 / float(textureSize(u_image,0).y); 96 | 97 | // get surrounding pixels used with the kernel 98 | float a11 = `+ajustRasterTypePrefix+`texture(u_image, v_texCoord - vec2(stepSizeX,stepSizeY)).r`+ajustRasterTypeSuffix+`; 99 | float a12 = `+ajustRasterTypePrefix+`texture(u_image, vec2(v_texCoord.s, v_texCoord.t - stepSizeY)).r`+ajustRasterTypeSuffix+`; 100 | float a13 = `+ajustRasterTypePrefix+`texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t - stepSizeY)).r`+ajustRasterTypeSuffix+`; 101 | 102 | float a21 = `+ajustRasterTypePrefix+`texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t)).r`+ajustRasterTypeSuffix+`; 103 | float a22 = `+ajustRasterTypePrefix+`texture(u_image, v_texCoord).r`+ajustRasterTypeSuffix+`; 104 | float a23 = `+ajustRasterTypePrefix+`texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t)).r`+ajustRasterTypeSuffix+`; 105 | 106 | float a31 = `+ajustRasterTypePrefix+`texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t + stepSizeY)).r`+ajustRasterTypeSuffix+`; 107 | float a32 = `+ajustRasterTypePrefix+`texture(u_image, vec2(v_texCoord.s, v_texCoord.t + stepSizeX)).r`+ajustRasterTypeSuffix+`; 108 | float a33 = `+ajustRasterTypePrefix+`texture(u_image, v_texCoord + vec2(stepSizeX,stepSizeY)).r`+ajustRasterTypeSuffix+`; 109 | 110 | vec2 convolution = vec2 (KERNEL_H[0] * a11 + KERNEL_H[1] * a12 + KERNEL_H[2] * a13 + KERNEL_H[3] * a21 + KERNEL_H[4] * a22 + KERNEL_H[5] * a23 + KERNEL_H[6] * a31 + KERNEL_H[7] * a32 + KERNEL_H[8] * a33, 111 | KERNEL_V[0] * a11 + KERNEL_V[1] * a12 + KERNEL_V[2] * a13 + KERNEL_V[3] * a21 + KERNEL_V[4] * a22 + KERNEL_V[5] * a23 + KERNEL_V[6] * a31 + KERNEL_V[7] * a32 + KERNEL_V[8] * a33); 112 | 113 | outputColor.r = sqrt((convolution.x*convolution.x) + (convolution.y*convolution.y)) ; 114 | outputColor.g = outputColor.r; 115 | outputColor.b = outputColor.r; 116 | outputColor.a = 1.0; 117 | }`; 118 | 119 | //console.log(src_fs); 120 | 121 | let edge = gpu.createProgram(gpuEnv,src_vs,src_fs); 122 | 123 | let gprocEdge = gpu.createGPU(gpuEnv) 124 | 
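// fluent configuration: geometry, vertex attributes and the input texture are packed into a VAO with the shader program, then the resolution, image and kernel uniforms are set before run() issues the draw call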
.size(raster.width,raster.height) 125 | .geometry(gpu.rectangle(raster.width,raster.height)) // double triangle already done by gpu_utils 126 | .attribute('a_vertex', 2, 'float', 16, 0) // X, Y 127 | .attribute('a_texCoord', 2, 'float', 16, 8) // S, T 128 | .texture(raster, 0) 129 | .packWith(edge) // VAO 130 | .clearCanvas([0.0,1.0,0.0,1.0]) 131 | .preprocess() 132 | .uniform('u_resolution',new Float32Array([1.0/raster.width,1.0/raster.height])) 133 | .uniform('u_image', 0) 134 | .uniform('KERNEL_H',new Float32Array(kernelH)) 135 | .uniform('KERNEL_V',new Float32Array(kernelV)) 136 | .run(); 137 | 138 | return raster; 139 | 140 | }; 141 | -------------------------------------------------------------------------------- /src/gpu/gpu_constants.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful, 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use strict'; 26 | 27 | // From MDN 28 | // https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API/Constants 29 | 30 | /** 31 | * Passed to drawElements or drawArrays to draw single points. 32 | */ 33 | export const POINTS = 0x0000; 34 | 35 | /** 36 | * Passed to drawElements or drawArrays to draw lines. Each vertex connects to the one after it. 37 | */ 38 | export const LINES = 0x0001; 39 | 40 | /** 41 | * Passed to drawElements or drawArrays to draw lines. Each set of two vertices is treated as a separate line segment. 42 | */ 43 | export const LINE_LOOP = 0x0002; 44 | 45 | /** 46 | * Passed to drawElements or drawArrays to draw a connected group of line segments from the first vertex to the last. 47 | */ 48 | export const LINE_STRIP = 0x0003; 49 | 50 | /** 51 | * Passed to drawElements or drawArrays to draw triangles. Each set of three vertices creates a separate triangle. 52 | */ 53 | export const TRIANGLES = 0x0004; 54 | 55 | /** 56 | * Passed to drawElements or drawArrays to draw a connected group of triangles. 57 | */ 58 | export const TRIANGLE_STRIP = 0x0005; 59 | 60 | /** 61 | * Passed to drawElements or drawArrays to draw a connected group of triangles. 62 | * Each vertex connects to the previous and the first vertex in the fan. 
63 | */ 64 | export const TRIANGLE_FAN = 0x0006; 65 | 66 | /** 67 | * Texture constant 68 | */ 69 | export const NEAREST = 0x2600; 70 | 71 | /** 72 | * Texture constant 73 | */ 74 | export const LINEAR = 0x2601; 75 | 76 | /** 77 | * Texture constant 78 | */ 79 | export const REPEAT = 0x2901; 80 | 81 | /** 82 | * Texture constant 83 | */ 84 | export const CLAMP_TO_EDGE = 0x812F; 85 | 86 | /** 87 | * Texture constant 88 | */ 89 | export const MIRRORED_REPEAT = 0x8370; 90 | 91 | /** 92 | * Shorter non official texture constant 93 | */ 94 | export const CLAMP = CLAMP_TO_EDGE; 95 | 96 | /** 97 | * Shorter non official texture constant 98 | */ 99 | export const MIRROR = MIRRORED_REPEAT; 100 | 101 | /** 102 | * Passed to blendEquation or blendEquationSeparate to set an addition blend function. 103 | * Op = source + destination, 104 | */ 105 | export const FUNC_ADD = 0x8006; 106 | 107 | /** 108 | * Passed to blendEquation or blendEquationSeparate to specify a subtraction blend function 109 | * Op = (source - destination). 110 | */ 111 | export const FUNC_SUBSTRACT = 0x800A; 112 | 113 | /** 114 | * Passed to blendEquation or blendEquationSeparate to specify a reverse subtraction blend function 115 | * Op =(destination - source). 116 | */ 117 | export const FUNC_REVERSE_SUBTRACT = 0x800B; 118 | 119 | /** 120 | * Produces the minimum color components of the source and destination colors. 121 | */ 122 | export const MIN = 0x8007; 123 | 124 | /** 125 | * Produces the maximum color components of the source and destination colors. 126 | */ 127 | export const MAX = 0x8008; 128 | 129 | /** 130 | * Passed to blendFunc or blendFuncSeparate to turn off a component. 131 | */ 132 | export const ZERO = 0; 133 | 134 | /** 135 | * Passed to blendFunc or blendFuncSeparate to turn on a component. 136 | */ 137 | export const ONE = 1; 138 | 139 | /** 140 | * Passed to blendFunc or blendFuncSeparate to multiply a component by the source elements color. 141 | */ 142 | export const SRC_COLOR = 0x0300; 143 | 144 | /** 145 | * Passed to blendFunc or blendFuncSeparate to multiply a component by one minus the source elements color. 146 | */ 147 | export const ONE_MINUS_SRC_COLOR = 0x0301; 148 | 149 | /** 150 | * Passed to blendFunc or blendFuncSeparate to multiply a component by the source's alpha. 151 | */ 152 | export const SRC_ALPHA = 0x0302; 153 | 154 | /** 155 | * Passed to blendFunc or blendFuncSeparate to multiply a component by one minus the source's alpha. 156 | */ 157 | export const ONE_MINUS_SRC_ALPHA = 0x0303; 158 | 159 | /** 160 | * Passed to blendFunc or blendFuncSeparate to multiply a component by the destination's alpha. 161 | */ 162 | export const DST_ALPHA = 0x0304; 163 | 164 | /** 165 | * Passed to blendFunc or blendFuncSeparate to multiply a component by one minus the destination's alpha. 166 | */ 167 | export const ONE_MINUS_DST_ALPHA = 0x0305; 168 | 169 | /** 170 | * Passed to blendFunc or blendFuncSeparate to multiply a component by the destination's color. 171 | */ 172 | export const DST_COLOR = 0x0306; 173 | 174 | /** 175 | * Passed to blendFunc or blendFuncSeparate to multiply a component by one minus the destination's color. 176 | */ 177 | export const ONE_MINUS_DST_COLOR = 0x0307; 178 | 179 | /** 180 | * Passed to blendFunc or blendFuncSeparate to multiply a component by the minimum of source's alpha or one minus the destination's alpha. 
181 | */ 182 | export const SRC_ALPHA_SATURATE = 0x0308 183 | 184 | /** 185 | * Passed to blendFunc or blendFuncSeparate to specify a constant color blend function. 186 | */ 187 | export const CONSTANT_COLOR = 0x8001; 188 | 189 | /** 190 | * Passed to blendFunc or blendFuncSeparate to specify one minus a constant color blend function. 191 | */ 192 | export const ONE_MINUS_CONSTANT_COLOR = 0x8002; 193 | 194 | /** 195 | * Passed to blendFunc or blendFuncSeparate to specify a constant alpha blend function. 196 | */ 197 | export const CONSTANT_ALPHA = 0x8003; 198 | 199 | /** 200 | * Passed to blendFunc or blendFuncSeparate to specify one minus a constant alpha blend function. 201 | */ 202 | export const ONE_MINUS_CONSTANT_ALPHA = 0x8004; 203 | 204 | 205 | -------------------------------------------------------------------------------- /benchRGB.csv: -------------------------------------------------------------------------------- 1 | Type,Time(ms),Memory(MB) 2 | 8-bit,210,31.158111572265625 3 | 8-bit,202,33.08526611328125 4 | 8-bit,209,35.012420654296875 5 | 8-bit,207,36.9395751953125 6 | 8-bit,218,38.866729736328125 7 | 8-bit,202,40.932334899902344 8 | 8-bit,203,42.85948944091797 9 | 8-bit,203,44.786643981933594 10 | 8-bit,211,46.71379852294922 11 | 8-bit,210,48.640953063964844 12 | 8-bit,214,50.56810760498047 13 | 8-bit,203,52.495262145996094 14 | 8-bit,219,54.42241668701172 15 | 8-bit,202,56.349571228027344 16 | 8-bit,203,58.27672576904297 17 | 8-bit,204,60.203880310058594 18 | 8-bit,203,62.13103485107422 19 | 8-bit,204,64.05818939208984 20 | 8-bit,206,65.98534393310547 21 | 8-bit,203,67.9124984741211 22 | 8-bit,204,69.83965301513672 23 | 8-bit,205,71.76680755615234 24 | 8-bit,204,73.69396209716797 25 | 8-bit,202,75.6211166381836 26 | 8-bit,204,77.54827117919922 27 | 8-bit,206,79.47542572021484 28 | 8-bit,203,81.40258026123047 29 | 8-bit,218,18.791709899902344 30 | 8-bit,203,20.12529754638672 31 | 8-bit,207,22.14575958251953 32 | 8-bit,205,23.95838165283203 33 | 8-bit,203,25.978843688964844 34 | 8-bit,203,27.791465759277344 35 | 8-bit,203,29.811927795410156 36 | 8-bit,203,31.624549865722656 37 | 8-bit,203,33.64501190185547 38 | 8-bit,204,35.45763397216797 39 | 8-bit,206,37.47809600830078 40 | 8-bit,205,39.29071807861328 41 | 8-bit,205,41.311180114746094 42 | 8-bit,204,43.123802185058594 43 | 8-bit,203,45.144264221191406 44 | 8-bit,206,46.956886291503906 45 | 8-bit,203,49.070220947265625 46 | 8-bit,227,51.510292053222656 47 | 8-bit,203,53.53075408935547 48 | 8-bit,203,55.34337615966797 49 | 8-bit,204,57.36383819580078 50 | 8-bit,203,59.17646026611328 51 | 8-bit,202,61.196922302246094 52 | 8-bit,203,63.009544372558594 53 | 8-bit,203,65.0300064086914 54 | 8-bit,204,66.8426284790039 55 | 8-bit,203,68.86309051513672 56 | 8-bit,204,70.67571258544922 57 | 8-bit,205,72.69617462158203 58 | 8-bit,204,74.50879669189453 59 | 8-bit,204,76.52925872802734 60 | 8-bit,203,78.43570709228516 61 | 8-bit,204,80.45616912841797 62 | 8-bit,205,82.26879119873047 63 | 8-bit,225,17.649642944335938 64 | 8-bit,206,19.006813049316406 65 | 8-bit,208,21.073165893554688 66 | 8-bit,209,23.062049865722656 67 | 8-bit,203,24.874671936035156 68 | 8-bit,203,26.74980926513672 69 | 8-bit,203,28.801170349121094 70 | 8-bit,203,30.613792419433594 71 | 8-bit,203,32.66515350341797 72 | 8-bit,203,34.415260314941406 73 | 8-bit,204,36.40412902832031 74 | 8-bit,203,38.4547119140625 75 | 8-bit,204,40.267333984375 76 | 8-bit,203,42.14247131347656 77 | 8-bit,203,44.19383239746094 78 | 8-bit,203,46.00645446777344 79 | 
8-bit,203,48.05781555175781 80 | 8-bit,203,49.80792236328125 81 | 8-bit,203,51.796791076660156 82 | 8-bit,203,53.785675048828125 83 | 8-bit,203,55.598297119140625 84 | 8-bit,203,57.535301208496094 85 | 8-bit,202,59.58666229248047 86 | 8-bit,203,61.39928436279297 87 | 8-bit,203,63.450645446777344 88 | 8-bit,202,65.20075225830078 89 | 8-bit,202,67.18962097167969 90 | 8-bit,204,69.17945861816406 91 | 8-bit,203,70.99208068847656 92 | 8-bit,203,72.86721801757812 93 | 8-bit,202,74.9185791015625 94 | 8-bit,202,76.79289245605469 95 | 8-bit,203,78.84425354003906 96 | 8-bit,203,80.5943603515625 97 | 8-bit,203,82.5832290649414 98 | 8-bit,233,17.961898803710938 99 | 8-bit,209,19.012283325195312 100 | 8-bit,203,20.93891143798828 101 | 8-bit,204,23.795509338378906 102 | RGB,237,33.75529479980469 103 | RGB,203,35.81793212890625 104 | RGB,202,37.988365173339844 105 | RGB,202,40.15879821777344 106 | RGB,203,42.32923126220703 107 | RGB,202,44.687217712402344 108 | RGB,203,46.81237030029297 109 | RGB,202,48.937522888183594 110 | RGB,201,51.10795593261719 111 | RGB,207,53.27838897705078 112 | RGB,205,55.448822021484375 113 | RGB,203,57.61925506591797 114 | RGB,206,59.78968811035156 115 | RGB,201,61.960121154785156 116 | RGB,202,65.46432495117188 117 | RGB,243,67.63475799560547 118 | RGB,204,69.80519104003906 119 | RGB,203,71.97562408447266 120 | RGB,202,74.14605712890625 121 | RGB,203,76.31649017333984 122 | RGB,202,21.024459838867188 123 | RGB,204,25.283058166503906 124 | RGB,202,28.31916046142578 125 | RGB,203,29.604896545410156 126 | RGB,202,32.64099884033203 127 | RGB,204,33.926734924316406 128 | RGB,210,37.39635467529297 129 | RGB,213,38.682090759277344 130 | RGB,219,41.71819305419922 131 | RGB,213,43.003929138183594 132 | RGB,213,46.04003143310547 133 | RGB,208,47.325767517089844 134 | RGB,202,50.36186981201172 135 | RGB,202,51.647605895996094 136 | RGB,202,54.68370819091797 137 | RGB,203,55.969444274902344 138 | RGB,203,58.032081604003906 139 | RGB,202,60.15723419189453 140 | RGB,204,62.98493957519531 141 | RGB,203,65.145751953125 142 | RGB,201,67.30656433105469 143 | RGB,202,69.46737670898438 144 | RGB,202,71.62818908691406 145 | RGB,201,73.78900146484375 146 | RGB,219,75.94981384277344 147 | RGB,203,78.11062622070312 148 | RGB,202,80.27143859863281 149 | RGB,202,82.4322509765625 150 | RGB,207,84.59306335449219 151 | RGB,201,23.001808166503906 152 | RGB,201,26.58544158935547 153 | RGB,216,27.64801788330078 154 | RGB,202,29.821304321289062 155 | RGB,202,31.994590759277344 156 | RGB,218,34.355377197265625 157 | RGB,202,36.355499267578125 158 | RGB,203,38.48065185546875 159 | RGB,206,41.65399932861328 160 | RGB,234,43.889801025390625 161 | RGB,209,44.95237731933594 162 | RGB,203,47.12566375732422 163 | RGB,202,49.2989501953125 164 | RGB,202,51.65973663330078 165 | RGB,201,50.41639709472656 166 | RGB,202,52.54154968261719 167 | RGB,202,54.71483612060547 168 | RGB,204,57.07562255859375 169 | RGB,202,59.07574462890625 170 | RGB,202,61.200897216796875 171 | RGB,204,64.3742446899414 172 | RGB,202,65.73497009277344 173 | RGB,201,67.73509216308594 174 | RGB,203,69.86024475097656 175 | RGB,203,73.0335922241211 176 | RGB,203,75.26939392089844 177 | RGB,202,76.33197021484375 178 | RGB,202,78.50525665283203 179 | RGB,203,80.67854309082031 180 | RGB,203,83.0393295288086 181 | RGB,203,85.0394515991211 182 | RGB,202,19.73560333251953 183 | RGB,202,21.91272735595703 184 | RGB,209,24.037879943847656 185 | RGB,202,26.215003967285156 186 | RGB,204,28.392127990722656 187 | RGB,202,30.51728057861328 188 | 
RGB,209,32.69440460205078 189 | RGB,201,34.87152862548828 190 | RGB,202,36.996681213378906 191 | RGB,203,39.173805236816406 192 | RGB,204,41.53411102294922 193 | RGB,201,43.659263610839844 194 | RGB,201,45.836387634277344 195 | RGB,200,48.013511657714844 196 | RGB,204,50.13866424560547 197 | RGB,204,52.31578826904297 198 | RGB,202,54.49291229248047 199 | RGB,202,56.618064880371094 200 | RGB,203,58.795188903808594 201 | RGB,203,60.972312927246094 202 | -------------------------------------------------------------------------------- /src/process/math.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | /** 26 | * @module math 27 | */ 28 | 29 | 30 | /** 31 | * Fill with the pixel gray or color value depending of the raster type 32 | * @param {number} value - A gray or color value 33 | * @param {Raster} raster - Input Raster 34 | * @param {boolean} copy_mode - Useless, here. Only for compatibility with other process functions 35 | * 36 | * @author Jean-Christophe Taveau 37 | */ 38 | const fillColor = (value) => (raster,copy_mode=true) => { 39 | raster.pixelData.fill(value); 40 | return raster; 41 | } 42 | 43 | /** 44 | * Pattern `chessboard`. Must be used with T.fill(..) 45 | * 46 | * @example 47 | * let output = T.fill(T.chessboard)(raster) 48 | * 49 | * @author Jean-Christophe Taveau 50 | */ 51 | const chessboard = (px,i,x,y) => ( (Math.floor(x/16) + Math.floor(y/16)) % 2 === 0) ? 255 : 0; 52 | 53 | /** 54 | * Pattern `ramp`. Must be used with T.fill(..) 55 | * 56 | * @example 57 | * let output = T.fill(T.ramp)(raster) 58 | * 59 | * @author Jean-Christophe Taveau 60 | */ 61 | const ramp = (px,i,x,y,z,w,h) => x / w * 255; 62 | 63 | /** 64 | * Pattern `spiral`. Must be used with T.fill(..) 65 | * 66 | * @example 67 | * let output = T.fill(T.spiral)(raster) 68 | * 69 | * @author Jean-Christophe Taveau 70 | */ 71 | const spiral = (px,i,x,y,z,w,h,a,d) => 128 * (Math.sin(d / 10 + a * DEG)+1); 72 | 73 | /** 74 | * Pattern `black`. Must be used with T.fill(..) 75 | * 76 | * @example 77 | * let output = T.fill(T.black)(raster) 78 | * 79 | * @author Jean-Christophe Taveau 80 | */ 81 | const black = (px) => 0; 82 | 83 | /** 84 | * Pattern `white`. Must be used with T.fill(..) 85 | * 86 | * @example 87 | * let output = T.fill(T.white)(raster) 88 | * 89 | * @author Jean-Christophe Taveau 90 | */ 91 | const white = (px) => 255; 92 | 93 | 94 | /** 95 | * Fill with values calculated from a function 96 | * 97 | * @param {function} func - A function 98 | *

The function may take a maximum of nine arguments: 99 | * 100 | * • pix - Pixel value 101 | * • index - Index corresponding to pix. A raster is a 1D pixels array 102 | * • x - X-coordinate of pix 103 | * • y - Y-coordinate of pix 104 | * • z - Z-coordinate of pix if raster is in 3D 105 | * • w - Width of raster 106 | * • h - Height of raster 107 | * • a - Angle calculated as atan2(y/x) 108 | * • d - Distance to the center 109 | *
110 | * @example Fill with a spiral 111 | * const DEG = Math.PI / 180; 112 | * const spiral = (pix,i,x,y,z,w,h,a,d) => 128 * (Math.sin(d / 10+ a * DEG)+1); 113 | * let raster = T.fill(spiral)(img.getRaster() ); 114 | * @param {Raster} raster - Input Raster 115 | * @param {boolean} copy_mode - Useless, here. Only for compatibility with other process functions 116 | * 117 | * @author Jean-Christophe Taveau 118 | */ 119 | const fill = (func) => (raster,copy_mode=true) => { 120 | let w = raster.width; 121 | let h = raster.height; 122 | let nz = raster.depth || raster.nslices || 0; 123 | let cx = w / 2.0; 124 | let cy = h / 2.0; 125 | let cz = nz / 2.0; 126 | 127 | raster.pixelData.forEach ( (px,i,arr) => { 128 | let x = i % w; 129 | let y = Math.floor(i / w); 130 | let z = Math.floor( i / w / h); 131 | let d = Math.sqrt ((x - cx)**2 + (y - cy)**2 + (z - cz)**2); 132 | let a = Math.atan2(y,x); 133 | raster.pixelData[i] = func(px,i,x,y,z,w,h,a,d); 134 | }); 135 | return raster; 136 | }; 137 | 138 | 139 | /** 140 | * Fill with values calculated from a function 141 | * 142 | * @param {function} func - A function 143 | *

<br>The function may take a maximum of nine arguments:<br> 144 | * <ul> 145 | * <li>pix - Pixel value</li> 146 | * <li>index - Index corresponding to pix. A raster is a 1D pixels array</li> 147 | * <li>x - X-coordinate of pix</li> 148 | * <li>y - Y-coordinate of pix</li> 149 | * <li>z - Z-coordinate of pix if raster is in 3D</li> 150 | * <li>w - Width of raster</li> 151 | * <li>h - Height of raster</li> 152 | * <li>a - Angle calculated as atan2(y/x)</li> 153 | * <li>d - Distance to the center</li> 154 | * </ul>
155 | * @example Fill with a spiral 156 | * const DEG = Math.PI / 180; 157 | * const spiral = (pix,i,x,y,z,w,h,a,d) => 128 * (Math.sin(d / 10+ a * DEG)+1); 158 | * let raster = T.fill(spiral)(img.getRaster() ); 159 | * @param {Raster} raster - Input Raster 160 | * @param {boolean} copy_mode - Useless, here. Only for compatibility with other process functions 161 | * 162 | * @author Jean-Christophe Taveau 163 | */ 164 | const math = (func) => (raster,copy_mode=true) => T.fill(func)(T.Raster.from(raster,copy_mode),copy_mode); 165 | 166 | /** 167 | * Image Calculator. Combine two images by operation 168 | * 169 | * @param {Raster} raster - Input Raster 170 | * @param {function} func - Function for computation 171 | * @param {Raster} raster - Input Raster 172 | * @param {boolean} copy_mode - Copy mode to manage memory usage. 173 | * @example Addition of two uint8 rasters with clamping 174 | * let raster3 = T.calc(raster1, (px1,px2) => T.clampUint8(px1 + px2) )(raster2) 175 | * 176 | * @author Jean-Christophe Taveau 177 | */ 178 | const calc = (other,func) => (raster,copy_mode=true) => { 179 | // TODO Assume two raster have same dimension 180 | let output = T.Raster.from(raster, copy_mode); 181 | output.pixelData.forEach( (px2,i,arr) => { 182 | let w = raster.width; 183 | let x = i % w; 184 | let y = Math.floor(i / w); 185 | let px1 = other.pixelData[i]; 186 | output.pixelData[i] = func(px1,px2,i,x,y); 187 | }); 188 | return output; 189 | }; 190 | 191 | // Export 192 | export {black, calc, chessboard, fill, fillColor, math, ramp, spiral, white}; 193 | 194 | -------------------------------------------------------------------------------- /src/gpu/testLaplace.js: -------------------------------------------------------------------------------- 1 | const gpuLoG = () => (raster, graphContext, copy_mode = true) => 2 | { 3 | let id='LoG'; 4 | console.log(id); 5 | 6 | // Vertex Shader 7 | let src_vs = `#version 300 es 8 | 9 | in vec2 a_vertex; 10 | in vec2 a_texCoord; 11 | 12 | uniform vec2 u_resolution; 13 | 14 | out vec2 v_texCoord; 15 | 16 | void main() { 17 | v_texCoord = a_texCoord ; 18 | vec2 clipSpace = a_vertex * u_resolution * 2.0 - 1.0; 19 | gl_Position = vec4(clipSpace * vec2(1,-1), 0.0, 1.0); 20 | } 21 | `; 22 | 23 | // Fragment Shader 24 | let src_fs_log = `#version 300 es 25 | 26 | precision mediump float; 27 | 28 | in vec2 v_texCoord; 29 | uniform sampler2D u_image; 30 | 31 | out vec4 outColor; 32 | 33 | void main(){ 34 | 35 | float stepSizeX = 1.0 / float(textureSize(u_image,0).x); 36 | float stepSizeY = 1.0 / float(textureSize(u_image,0).y); 37 | 38 | float a01 = texture(u_image, vec2(v_texCoord.s, v_texCoord.t - 2.0*stepSizeY)).r; 39 | float a02 = texture(u_image, vec2(v_texCoord.s - 2.0*stepSizeX, v_texCoord.t)).r; 40 | float a03 = texture(u_image, vec2(v_texCoord.s + 2.0*stepSizeX, v_texCoord.t)).r; 41 | float a04 = texture(u_image, vec2(v_texCoord.s, v_texCoord.t + 2.0*stepSizeX)).r; 42 | 43 | float a11 = texture(u_image, v_texCoord - vec2(stepSizeX,stepSizeY)).r; 44 | float a12 = texture(u_image, vec2(v_texCoord.s, v_texCoord.t - stepSizeY)).r; 45 | float a13 = texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t - stepSizeY)).r; 46 | 47 | float a21 = texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t)).r; 48 | float a22 = texture(u_image, v_texCoord).r; 49 | float a23 = texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t)).r; 50 | 51 | float a31 = texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t + stepSizeY)).r; 52 | float a32 = 
texture(u_image, vec2(v_texCoord.s, v_texCoord.t + stepSizeX)).r; 53 | float a33 = texture(u_image, v_texCoord + vec2(stepSizeX,stepSizeY)).r; 54 | 55 | // convolution with 5x5 LoG kernel from http://fourier.eng.hmc.edu/e161/lectures/gradient/node8.html + threshold at 0 with step() function 56 | outColor.r = step(0.0, 1.0 * a01 + 1.0 * a11 + 2.0 * a12 + 1.0 * a13 + 1.0 * a02 + 2.0 * a21 - 16.0 * a22 + 2.0 * a23 + 1.0 * a03 + 1.0 * a31 + 2.0 * a32 + 1.0 * a33 + 1.0 * a04); 57 | 58 | //outColor.r = LoG; 59 | outColor.g = outColor.r; 60 | outColor.b = outColor.r; 61 | outColor.a = 1.0; 62 | 63 | }`; 64 | 65 | let shader_log = gpu.createProgram(graphContext,src_vs,src_fs_log); 66 | 67 | 68 | let gproc_log = gpu.createGPU(graphContext,raster.width,raster.height) 69 | .size(raster.width,raster.height) 70 | .geometry(gpu.rectangle(raster.width,raster.height)) 71 | .attribute('a_vertex',2,'float', 16,0) // X, Y 72 | .attribute('a_texCoord',2, 'float', 16, 8) // S, T 73 | .texture(raster) 74 | .redirectTo('fbo1','float32',0) 75 | .packWith(shader_log) // VAO 76 | .clearCanvas([0.0,1.0,1.0,1.0]) 77 | .preprocess() 78 | .uniform('u_resolution',new Float32Array([1.0/raster.width,1.0/raster.height])) 79 | .uniform('u_image',0) 80 | .run(); 81 | 82 | console.log('LoG filter done...'); 83 | 84 | let src_fs_threshold = `#version 300 es 85 | 86 | precision mediump float; 87 | 88 | in vec2 v_texCoord; 89 | uniform sampler2D u_image; 90 | 91 | out vec4 outColor; 92 | 93 | void main(){ 94 | 95 | float stepSizeX = 1.0 / float(textureSize(u_image,0).x); 96 | float stepSizeY = 1.0 / float(textureSize(u_image,0).y); 97 | 98 | //get the 9 neighboring pixels values 99 | float a11 = texture(u_image, v_texCoord - vec2(stepSizeX,stepSizeY)).r; 100 | float a12 = texture(u_image, vec2(v_texCoord.s, v_texCoord.t - stepSizeY)).r; 101 | float a13 = texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t - stepSizeY)).r; 102 | 103 | float a21 = texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t)).r; 104 | float a22 = texture(u_image, v_texCoord).r; 105 | float a23 = texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t)).r; 106 | 107 | float a31 = texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t + stepSizeY)).r; 108 | float a32 = texture(u_image, vec2(v_texCoord.s, v_texCoord.t + stepSizeX)).r; 109 | float a33 = texture(u_image, v_texCoord + vec2(stepSizeX,stepSizeY)).r; 110 | 111 | if ((a22 == 0.0) && ( (a11 == 1.0) || (a12 == 1.0) || (a13 == 1.0) || (a21 == 1.0) || (a23 == 1.0) || (a31 == 1.0) || (a32 == 1.0) || (a33 == 1.0) )) 112 | { 113 | outColor.r = 1.0; 114 | } 115 | else 116 | { 117 | outColor.r = 0.0; 118 | } 119 | outColor.g = outColor.r; 120 | outColor.b = outColor.r; 121 | outColor.a = 1.0; 122 | 123 | }`; 124 | 125 | let shader_threshold = gpu.createProgram(graphContext,src_vs,src_fs_threshold); 126 | 127 | let gproc_threshold = gpu.createGPU(graphContext,raster.width,raster.height) 128 | .size(raster.width,raster.height) 129 | .geometry(gpu.rectangle(raster.width,raster.height)) 130 | .attribute('a_vertex',2,'float', 16,0) // X, Y 131 | .attribute('a_texCoord',2, 'float', 16, 8) // S, T 132 | .texture(gproc_log.framebuffers['fbo1']) 133 | .redirectTo('fbo2','float32',0) 134 | .packWith(shader_threshold) // VAO 135 | .clearCanvas([0.0,1.0,1.0,1.0]) 136 | .preprocess() 137 | .uniform('u_resolution',new Float32Array([1.0/raster.width,1.0/raster.height])) 138 | .uniform('u_image',0) 139 | .run(); 140 | 141 | console.log('threshold done...'); 142 | 143 | let src_fs = `#version 300 
es 144 | 145 | precision mediump float; 146 | 147 | in vec2 v_texCoord; 148 | uniform sampler2D u_image; 149 | 150 | out vec4 outColor; 151 | 152 | void main(){ 153 | outColor = vec4(texture(u_image, v_texCoord).rgb, 1.0); 154 | }`; 155 | 156 | 157 | /////////// 158 | 159 | let the_shader = gpu.createProgram(graphContext,src_vs,src_fs); 160 | 161 | // Step #2: Create a gpu.Processor, and define geometry, attributes, texture, VAO, .., and run 162 | let gproc = gpu.createGPU(graphContext) 163 | .size(raster.width,raster.height) 164 | .geometry(gpu.rectangle(raster.width,raster.height)) 165 | .attribute('a_vertex',2,'float', 16,0) // X, Y 166 | .attribute('a_texCoord',2, 'float', 16, 8) // S, T 167 | .texture(gproc_threshold.framebuffers['fbo2']) 168 | .packWith(the_shader) // VAO 169 | .clearCanvas([0.0,1.0,1.0,1.0]) 170 | .preprocess() 171 | .uniform('u_resolution',new Float32Array([1.0/raster.width,1.0/raster.height])) 172 | .uniform('u_image',0) 173 | .run(); 174 | 175 | return raster; 176 | } 177 | -------------------------------------------------------------------------------- /src/gpu/gpu_math.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use strict'; 26 | 27 | 28 | /** 29 | * @module math 30 | */ 31 | 32 | 33 | 34 | /** 35 | * Fill with values calculated from a function 36 | * 37 | * @param {function} func - A function 38 | *

<br>The function may take a maximum of nine arguments:<br> 39 | * <ul> 40 | * <li>pix - Pixel value</li> 41 | * <li>index - Index corresponding to pix. A raster is a 1D pixels array</li> 42 | * <li>x - X-coordinate of pix</li> 43 | * <li>y - Y-coordinate of pix</li> 44 | * <li>z - Z-coordinate of pix if raster is in 3D</li> 45 | * <li>w - Width of raster</li> 46 | * <li>h - Height of raster</li> 47 | * <li>a - Angle calculated as atan2(y/x)</li> 48 | * <li>d - Distance to the center</li> 49 | * </ul>
50 | * @example Fill with a spiral 51 | * const DEG = Math.PI / 180; 52 | * const spiral = (pix,i,x,y,z,w,h,a,d) => 128 * (Math.sin(d / 10+ a * DEG)+1); 53 | * let raster = T.fill(spiral)(img.getRaster() ); 54 | * @param {Raster} raster - Input Raster 55 | * @param {boolean} copy_mode - Useless, here. Only for compatibility with other process functions 56 | * 57 | * @author Jean-Christophe Taveau 58 | */ 59 | const fill = (func) => (raster,copy_mode=true) => { 60 | let src_vs = `#version 300 es 61 | 62 | in vec2 a_vertex; 63 | 64 | void main() { 65 | gl_Position = vec4(a_vertex * 2.0 - 1.0, 0.0, 1.0); 66 | }`; 67 | 68 | /* 69 | Private function 70 | */ 71 | function parse(src) { 72 | 73 | // Parse 74 | // 1- Search the arguments between parentheses `(..)` 75 | let re = /\(([^)]+)\)/g; 76 | let args = re.exec(src)[1].split(','); 77 | if (args.length !== 9) { 78 | const default_args = ['pix','index','x','y','z','rasterWidth','rasterHeight','angle','distance']; 79 | args = [...args,...default_args.slice(args.length,default_args.length) ]; 80 | } 81 | // 2- Search the core between curly brackets {..}` or between arrow `=>` and semi-column `;` or EOL 82 | //=> { } 83 | let func_body = ''; 84 | if (/\{(.*)\}/g.exec(src) === undefined) { 85 | func_body = /\{(.*)\}/g.exec(src)[1]; 86 | } 87 | else { 88 | func_body = /=>(.*)$/g.exec(src)[1]; 89 | } 90 | // 2- Clean code like removing `Math.`, changed variable names, etc. 91 | // Python replace(/(?The function may take a maximum of nine arguments:

165 | * <ul> 166 | * <li>pix - Pixel value</li> 167 | * <li>index - Index corresponding to pix. A raster is a 1D pixels array</li> 168 | * <li>x - X-coordinate of pix</li> 169 | * <li>y - Y-coordinate of pix</li> 170 | * <li>z - Z-coordinate of pix if raster is in 3D</li> 171 | * <li>w - Width of raster</li> 172 | * <li>h - Height of raster</li> 173 | * <li>a - Angle calculated as atan2(y/x)</li> 174 | * <li>d - Distance to the center</li> 175 | * </ul>
176 | * @example Fill with a spiral 177 | * const DEG = Math.PI / 180; 178 | * const spiral = (pix,i,x,y,z,w,h,a,d) => 128 * (Math.sin(d / 10+ a * DEG)+1); 179 | * let raster = T.fill(spiral)(img.getRaster() ); 180 | * @param {Raster} raster - Input Raster 181 | * @param {boolean} copy_mode - Useless, here. Only for compatibility with other process functions 182 | * 183 | * @author Jean-Christophe Taveau 184 | */ 185 | const math = (func) => (raster,copy_mode=true) => { 186 | // TODO 187 | // cpu.fill(func)(T.Raster.from(raster,copy_mode),copy_mode); 188 | }; 189 | 190 | /** 191 | * Image Calculator. Combine two images by operation 192 | * 193 | * @param {Raster} raster - Input Raster 194 | * @param {function} func - Function for computation 195 | * @param {Raster} raster - Input Raster 196 | * @param {boolean} copy_mode - Copy mode to manage memory usage. 197 | * @example Addition of two uint8 rasters with clamping 198 | * let raster3 = T.calc(raster1, (px1,px2) => T.clampUint8(px1 + px2) )(raster2) 199 | * 200 | * @author Jean-Christophe Taveau 201 | */ 202 | const calc = (other,func) => (raster,copy_mode=true) => { 203 | // TODO Assume two raster have same dimension 204 | 205 | return output; 206 | }; 207 | 208 | // Export 209 | export {fill}; 210 | 211 | -------------------------------------------------------------------------------- /src/gpu/laplace.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Find edges in an image using the Laplacian of Gaussian Algorithm. This implementation follows the algorithm presented in Seth George Hall's thesis "GPU Accelerated Feature Algorithms for Mobile Devices", and was adapted for 8bit, 16 bit and float32 images. 3 | * It is based principally on Cecilia Ostertag's Canny impplementation, with changes where necessary to work for LoG. 4 | * 5 | * @author Peter Bock 6 | */ 7 | 8 | 9 | const gpuEdgeLaplace = () => (raster, graphContext, copy_mode = true) => 10 | { 11 | 12 | let id='laplace' 13 | 14 | //console.log(id) 15 | 16 | // Vertex Shader 17 | let src_vs = `#version 300 es 18 | 19 | in vec2 a_vertex; 20 | in vec2 a_texCoord; 21 | 22 | uniform vec2 u_resolution; 23 | 24 | out vec2 v_texCoord; 25 | 26 | void main() { 27 | v_texCoord = a_texCoord; 28 | vec2 clipSpace = a_vertex * u_resolution * 2.0 - 1.0; 29 | gl_Position = vec4(clipSpace * vec2(1,-1), 0.0, 1.0); 30 | } 31 | `; 32 | 33 | 34 | // 1. Laplacian of Gaussian application 35 | 36 | // Fragment Shader 37 | let src_fs_log = `#version 300 es 38 | // idem to Cecilia's work 39 | // I (peter bock) have no idea how to make heads or tails of this. so I just studied Cecilia's work till I could make out which part should be different for the LoG algorithm. 40 | // pretty much none of this is my work. 
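// In outline, this fragment shader: (1) samples the centre pixel and its eight neighbours,
// (2) convolves them with the 3x3 kernel passed in u_kernel_H, and (3) thresholds the
// response at zero with step(), writing the resulting binary value to the R, G and B channels.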
41 | precision mediump usampler2D; 42 | 43 | precision mediump float; 44 | 45 | in vec2 v_texCoord; 46 | uniform sampler2D u_image; 47 | uniform float u_kernel_H[9]; 48 | 49 | out vec4 outColor; 50 | 51 | void main(){ 52 | 53 | float stepSizeX = 1.0 / float(textureSize(u_image,0).x); 54 | float stepSizeY = 1.0 / float(textureSize(u_image,0).y); 55 | 56 | //get the 9 neighboring pixel intensities 57 | float a11 = texture(u_image, v_texCoord - vec2(stepSizeX,stepSizeY)).r; 58 | float a12 = texture(u_image, vec2(v_texCoord.s, v_texCoord.t - stepSizeY)).r; 59 | float a13 = texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t - stepSizeY)).r; 60 | 61 | float a21 = texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t)).r; 62 | float a22 = texture(u_image, v_texCoord).r; 63 | float a23 = texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t)).r; 64 | 65 | float a31 = texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t + stepSizeY)).r; 66 | float a32 = texture(u_image, vec2(v_texCoord.s, v_texCoord.t + stepSizeX)).r; 67 | float a33 = texture(u_image, v_texCoord + vec2(stepSizeX,stepSizeY)).r; 68 | 69 | //gradient vector 70 | 71 | // this is where the results differ from Cecilia's work. 72 | // Given that the Laplacian kernel CANNOT be separated into Horizontal and Vertical aspects, it is necessary to calculate an entire kernel using the unitary LoG JS transposed into glsl. 73 | // This also makes the entire Blurring/Gaussian step in Cecilia's algorithm unnecessary, as LoG includes the Gaussian blur in it's kernel already. 74 | 75 | // not sure how to do this without the vec2, but since I'll only be using the X component I can just leave it as a vec2 anyway. 76 | 77 | float laplace = step(0.0,u_kernel_H[0] * a11 + u_kernel_H[1] * a12 + u_kernel_H[2] * a13 + u_kernel_H[3] * a21 + u_kernel_H[4] * a22 + u_kernel_H[5] * a23 + u_kernel_H[6] * a31 + u_kernel_H[7] * a32 + u_kernel_H[8] * a33); 78 | 79 | 80 | 81 | 82 | // I need to do a filtering step, and step(a,b) seems like it fits the bill. 83 | // I should be able to adjust the value in x according to the result step gives applied to it. 
84 | 85 | 86 | outColor.r = laplace; 87 | outColor.g = outColor.r; 88 | outColor.b = outColor.r; // utiliser les 3 canaux rend presque tout blanc X/ 89 | outColor.a = 1.0; 90 | 91 | }`; 92 | 93 | let shader_log = gpu.createProgram(graphContext,src_vs,src_fs_log); 94 | //console.log('log filter done...'); 95 | 96 | let gproc_log = gpu.createGPU(graphContext,raster.width,raster.height) 97 | .size(raster.width,raster.height) 98 | .geometry(gpu.rectangle(raster.width,raster.height)) 99 | .attribute('a_vertex',2,'float', 16,0) // X, Y 100 | .attribute('a_texCoord',2, 'float', 16, 8) // S, T 101 | .texture(raster) 102 | .redirectTo('fbo1','float32',0) 103 | .packWith(shader_log) // VAO 104 | .clearCanvas([0.0,1.0,1.0,1.0]) 105 | .preprocess() 106 | .uniform('u_resolution',new Float32Array([1.0/raster.width,1.0/raster.height])) 107 | .uniform('u_image',0) 108 | .uniform('u_kernel_H', new Float32Array([-1,-1,-1,-1,8,-1,-1,-1,-1]))//logKernel(3,1) 109 | .run(); // ne plus rediriger, et eliminer les gprocs apres celui ci, rend un lena juste legerement floutée X/ 110 | 111 | let src_fs_threshold = `#version 300 es 112 | 113 | precision mediump float; 114 | 115 | in vec2 v_texCoord; 116 | uniform sampler2D u_image; 117 | 118 | out vec4 outColor; 119 | 120 | void main(){ 121 | 122 | float stepSizeX = 1.0 / float(textureSize(u_image,0).x); 123 | float stepSizeY = 1.0 / float(textureSize(u_image,0).y); 124 | 125 | //get the 9 neighboring pixels values 126 | float a11 = texture(u_image, v_texCoord - vec2(stepSizeX,stepSizeY)).r; 127 | float a12 = texture(u_image, vec2(v_texCoord.s, v_texCoord.t - stepSizeY)).r; 128 | float a13 = texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t - stepSizeY)).r; 129 | 130 | float a21 = texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t)).r; 131 | float a22 = texture(u_image, v_texCoord).r; 132 | float a23 = texture(u_image, vec2(v_texCoord.s + stepSizeX, v_texCoord.t)).r; 133 | 134 | float a31 = texture(u_image, vec2(v_texCoord.s - stepSizeX, v_texCoord.t + stepSizeY)).r; 135 | float a32 = texture(u_image, vec2(v_texCoord.s, v_texCoord.t + stepSizeX)).r; 136 | float a33 = texture(u_image, v_texCoord + vec2(stepSizeX,stepSizeY)).r; 137 | 138 | if ((a22 == 0.0) && ( (a11 == 1.0) || (a12 == 1.0) || (a13 == 1.0) || (a21 == 1.0) || (a23 == 1.0) || (a31 == 1.0) || (a32 == 1.0) || (a33 == 1.0) )) 139 | { 140 | outColor.r = 1.0; 141 | } 142 | else 143 | { 144 | outColor.r = 0.0; 145 | } 146 | outColor.g = outColor.r; 147 | outColor.b = outColor.r; 148 | outColor.a = 1.0; 149 | 150 | }`; 151 | 152 | let shader_threshold = gpu.createProgram(graphContext,src_vs,src_fs_threshold); 153 | 154 | let gproc_threshold = gpu.createGPU(graphContext,raster.width,raster.height) 155 | .size(raster.width,raster.height) 156 | .geometry(gpu.rectangle(raster.width,raster.height)) 157 | .attribute('a_vertex',2,'float', 16,0) // X, Y 158 | .attribute('a_texCoord',2, 'float', 16, 8) // S, T 159 | .texture(gproc_log.framebuffers['fbo1']) 160 | .redirectTo('fbo2','float32',0) 161 | .packWith(shader_threshold) // VAO 162 | .clearCanvas([0.0,1.0,1.0,1.0]) 163 | .preprocess() 164 | .uniform('u_resolution',new Float32Array([1.0/raster.width,1.0/raster.height])) 165 | .uniform('u_image',0) 166 | .run(); 167 | 168 | //console.log('threshold done...'); 169 | 170 | let src_fs = `#version 300 es 171 | 172 | precision mediump float; 173 | 174 | in vec2 v_texCoord; 175 | uniform sampler2D u_image; 176 | 177 | out vec4 outColor; 178 | 179 | void main(){ 180 | outColor = vec4(texture(u_image, 
v_texCoord).rgb, 1.0); 181 | }`; 182 | 183 | 184 | /////////// 185 | 186 | let the_shader = gpu.createProgram(graphContext,src_vs,src_fs); 187 | 188 | // Step #2: Create a gpu.Processor, and define geometry, attributes, texture, VAO, .., and run 189 | let gproc = gpu.createGPU(graphContext) 190 | .size(raster.width,raster.height) 191 | .geometry(gpu.rectangle(raster.width,raster.height)) 192 | .attribute('a_vertex',2,'float', 16,0) // X, Y 193 | .attribute('a_texCoord',2, 'float', 16, 8) // S, T 194 | .texture(gproc_threshold.framebuffers['fbo2']) 195 | .packWith(the_shader) // VAO 196 | .clearCanvas([0.0,1.0,1.0,1.0]) 197 | .preprocess() 198 | .uniform('u_resolution',new Float32Array([1.0/raster.width,1.0/raster.height])) 199 | .uniform('u_image',0) 200 | .run(); 201 | 202 | 203 | return raster; 204 | 205 | } 206 | -------------------------------------------------------------------------------- /src/process/color.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use script'; 26 | 27 | import {clamp,isLittleEndian} from './utils'; 28 | 29 | /** 30 | * @module color 31 | */ 32 | 33 | 34 | /* 35 | * Clamp value between 0 and 255 (2^8 -1) 36 | * 37 | * @author Jean-Christophe Taveau 38 | */ 39 | const clampUint8 = clamp(0,255); 40 | 41 | /** 42 | * Compute color pixel value from gray uint8 value 43 | * @param {number} gray8 - uint8 gray value 44 | * @return {number} color Pixel value 45 | */ 46 | const fromGray8 = (gray8) => isLittleEndian() ? 0xff000000 | gray8 << 16 | gray8 << 8 | gray8 & 0xff : gray8 << 24 | gray8 << 16 | gray8 << 8 | 0xff; 47 | 48 | /** 49 | * Convert color pixel value to an array with red, green, blue, and alpha uint8 values 50 | * @param {number} color - color Pixel value 51 | * @return {array} An array of red, green, blue, and alpha uint8 components 52 | */ 53 | const fromcolor = (color) => [(color >> 24) & 0xff, (color >> 16) & 0xff, (color >> 8) & 0xff, color & 0xff]; 54 | 55 | const tocolor = (r,g,b,a) => ( r << 24) | (g << 16) | (b << 8) | a; 56 | 57 | // TODO 58 | const fromABGR = (abgr) => ( abgr << 24) | (abgr << 16) | (abgr << 8) | abgr; 59 | 60 | /** 61 | * Compute RGBA pixel value from four uint8 red, green, blue, and alpha components 62 | * @param {number} red - uint8 red component 63 | * @param {number} green - uint8 green component 64 | * @param {number} blue - uint8 blue component 65 | * @param {number} alpha - uint8 alpha component 66 | * @return {number} ABGR Pixel value 67 | */ 68 | const toRGBA = (r,g,b,a) => (isLittleEndian() ? 
(( a << 24) | (b << 16) | (g << 8) | r) : (( r << 24) | (g << 16) | b << 8) | a); 69 | 70 | // TODO 71 | const toabgr = (color) => ( (color & 0xff) << 24) | ( (color & 0x00ff00) << 8) | ( (color & 0xff0000) >> 8) | ( (color & 0xff000000) >> 24); 72 | 73 | /** 74 | * Extract red component of color pixel value 75 | * @param {number} color - color Pixel value 76 | * @return {number} uint8 value 77 | */ 78 | const alpha = (color) => isLittleEndian() ? color >> 24 & 0xff : color & 0xff; 79 | 80 | /** 81 | * Extract green component of color pixel value 82 | * @param {number} color - color Pixel value 83 | * @return {number} uint8 value 84 | */ 85 | const blue = (color) => isLittleEndian() ? color >> 16 & 0xff : color >> 8 & 0xff; 86 | 87 | /** 88 | * Extract blue component of color pixel value 89 | * @param {number} color - color Pixel value 90 | * @return {number} uint8 value 91 | */ 92 | const green = (color) => isLittleEndian() ? color >> 8 & 0xff : color >> 16 & 0xff; 93 | 94 | /** 95 | * Extract alpha (transparency) component of color pixel value 96 | * @param {number} color - color Pixel value 97 | * @return {number} - uint8 value 98 | */ 99 | const red = (color) => isLittleEndian() ? color & 0xff : color >> 24 & 0xff; 100 | 101 | /** 102 | * Compute Luminance gray value from color pixel value 103 | * @param {number} color - color Pixel value 104 | * @return {number} Luminance uint8 value 105 | */ 106 | const luminance = (color) => { 107 | /* 108 | Franci Penov and Glenn Slayden 109 | From https://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color 110 | Photometric/digital ITU BT.709: Y = 0.2126 R + 0.7152 G + 0.0722 B 111 | Digital ITU BT.601 (gives more weight to the R and B components): Y = 0.299 R + 0.587 G + 0.114 B 112 | Approximation #1: Y = 0.33 R + 0.5 G + 0.16 B 113 | Approximation #2: Y = 0.375 R + 0.5 G + 0.125 B 114 | Fast: Y = (R+R+B+G+G+G)/6 115 | Fast: Y = (R+R+R+B+G+G+G+G)>>3 116 | */ 117 | 118 | let r = red(color); 119 | let g = green(color); 120 | let b = blue(color); 121 | return (r+r+r+b+g+g+g+g)>>3; 122 | }; 123 | 124 | /** 125 | * Extract chrominance red component of color pixel value according to the YUV colorspace 126 | * @param {number} color - Color Pixel value 127 | * @return {number} - uint8 value 128 | */ 129 | const chrominanceRed = (color) => -0.168736 * red(color) - 0.331264 * green(color) + 0.500000 * blue(color) + 128; 130 | 131 | /** 132 | * Extract chrominance blue component of color pixel value according to the YUV colorspace 133 | * @param {number} color - color Pixel value 134 | * @return {number} - uint8 value 135 | */ 136 | const chrominanceBlue = (color) => 0.500000 * red(color) - 0.418688 * green(color) - 0.081312 * blue(color) + 128; 137 | 138 | /** 139 | * Convert color pixel value to Average gray value 140 | * @param {number} color - color Pixel value 141 | * @return {number} uint8 value 142 | */ 143 | const average = (color) => Math.floor(red(color) + green(color) + blue(color) / 3.0); 144 | 145 | /** 146 | * Extract hue component of color pixel value according to HSV colorspace 147 | * @param {number} color - color Pixel value 148 | * @return {number} - uint8 value 149 | */ 150 | const hue = (color) => { 151 | const ratio = (a,b,delta) => (a-b)/delta; 152 | 153 | let r = T.red(color) / 255.0, g = T.green(color) / 255.0, b = T.blue(color) / 255.0; 154 | let maxi = Math.max(r,Math.max(g,b)); 155 | let mini = Math.min(r,Math.min(g,b)); 156 | let delta = maxi - mini; 157 | let out = (maxi === 0 || mini === maxi) ? 
0 : 158 | ( (maxi === r) ? (60 * ratio(g,b,delta) + 0) % 360 : 159 | ( (maxi === g) ? 60 * ratio(b,r,delta) + 120 : 60 * ratio(r,g,delta) + 240 ) ); 160 | return clampUint8(Math.floor(out / 360.0 * 255)); 161 | }; 162 | 163 | const hue2 = (color) => { 164 | let r = T.red(color), g = T.green(color), b = T.blue(color); 165 | let maxi = Math.max(r,Math.max(g,b)); 166 | let mini = Math.min(r,Math.min(g,b)); 167 | 168 | if (maxi === 0 || maxi === mini) { 169 | return 0; 170 | } 171 | 172 | if (maxi === r) { 173 | return Math.max(0,Math.min(Math.floor(0 + 43 * (g - b) / (maxi - mini),255))); 174 | } 175 | else if (maxi === g) { 176 | return Math.max(0,Math.min(85 + 43 * (b - r) / (maxi - mini))); 177 | } 178 | else { 179 | return Math.max(0,Math.min(171 + 43 * (r - g) / (maxi - mini))); 180 | } 181 | } 182 | 183 | /** 184 | * Extract saturation component of color pixel value according to HSV colorspace 185 | * @param {number} color - color Pixel value 186 | * @return {number} - uint8 value 187 | */ 188 | const saturation = (color) => { 189 | let r = T.red(color), b = T.blue(color), g = T.green(color); 190 | let maxi = Math.max(r,Math.max(g,b)); 191 | let mini = Math.min(r,Math.min(g,b)); 192 | return (maxi === 0) ? 0 : (1.0 - mini/maxi) * 255; 193 | }; 194 | 195 | /** 196 | * Extract `value` component of color pixel value according to HSV colorspace 197 | * @param {number} color - color Pixel value 198 | * @return {number} - uint8 value 199 | */ 200 | const value = (color) => Math.max(T.red(color),Math.max(T.green(color), T.blue(color))); 201 | 202 | 203 | /** 204 | * Split channels of color Raster according to various colorspaces 205 | * 206 | * @param {function} fns - A series of functions among: 207 | *
<ul> 208 | * <li>red(px), green(px), blue(px), alpha(px),</li> 209 | * <li>hue(px), saturation(px), value(px),</li> 210 | * <li>cyan(px), magenta(px), yellow(px),</li> 211 | * <li>luminance(px), chrominance(px)</li> 212 | * </ul>
213 | * @param {Raster} color_img - A color image 214 | * @param {boolean} copy - Useless here, only for compatibility with the other process functions 215 | * @return {Stack} Return a stack containing the channels of various colorspaces 216 | * @see color.js 217 | */ 218 | const splitChannels = (...fns) => (color_img,copy = true) => { 219 | let stack = new T.Stack("Split Channels","uint8",color_img.width,color_img.height,fns.length); 220 | stack.slices.forEach( (sli) => sli.pixelData = T.Raster.createPixels('uint8',color_img.length) ); 221 | stack.slices.forEach( (sli,i) => { 222 | sli.label = fns[i].name; 223 | sli.pixelData.forEach( (px,j,pixels) => pixels[j] = fns[i](color_img.pixelData[j])); 224 | }); 225 | return stack; 226 | }; 227 | 228 | 229 | // Exports 230 | export {red, green, blue, alpha, luminance, chrominanceRed, chrominanceBlue, average, hue, saturation, value}; 231 | export {toRGBA,tocolor}; 232 | export {splitChannels}; 233 | 234 | -------------------------------------------------------------------------------- /edge3_bock.md: -------------------------------------------------------------------------------- 1 | # Edge detection part 3: WebGL implementation 2 | 3 | Author : Peter Bock 4 | 5 | ## Introduction 6 | 7 | Image processing is one of the most important fields in the domain of computer vision[^BOV2009]. Most scientific domains use information extracted from images in one way or another. For a computer to make sense of these images, and be able to extract meaningful data from them, it needs to be able to interpret and understand them. 8 | That is where Image Processing comes in, allowing a computer to process an image and detect its major features, and to perform higher-level vision tasks like face recognition. 9 | In our project, we will examine one specific field of image processing called edge detection. 10 | 11 | The physical notion of edge comes from the shape of three dimensional objects or from their material properties. But, seeing as the acquisition process translates 3D scenes to 2D representations, this definition does not apply to image processing. In this report we will use the following definition by Bovik[^BOV2009]: "An edge can generally be defined as a boundary or contour that separates adjacent image regions having relatively distinct characteristics according to some features of interest. Most often this feature is gray level or luminance”. According to this definition, the pixels of an image belonging to an edge are the pixels located in regions of abrupt gray level changes. Moreover, to avoid counting noise pixels as edges, the pixels have to be part of a contour-like structure. 12 | Edge detection is the process of finding the pixels belonging to the edges in an image, and producing a binary image showing the locations of the edge pixels. 13 | 14 | In our project, we began by documenting the main linear edge detection approaches and algorithms, and their implementation in the image processing software ImageJ. We then performed a benchmark on the ImageJ plugins, in order to compare their execution time and the memory load for the Java Virtual Machine (JVM). 15 | 16 | For the second part of our project, we made our own implementations of the Sobel, Prewitt, Robert's cros, Canny and Laplacian of Gaussian operators using ECMAscript6[^ECMA2011]. 17 | 18 | In this report, I will present the last part of our project : the implementation of the Laplacian of Gaussian algorithm[^MAR1980] using the GPU via the WebGL 2 API. 
19 | Finally, I will be able to compare the CPU and GPU implementations qualitatively and quantitatively, and to compare the GPU implementation with the plugin implemented in ImageJ. 20 | 21 | 22 | **This report (written using the markdown format), and associated files, is stored in a repository on github:** 23 | https://github.com/bockp/Edge-Detection-project 24 | 25 | 26 | ## Material & Methods 27 | 28 | 29 | ### Principle of the Laplacian of Gaussian algorithm : 30 | 31 | The Laplacian of Gaussian algorithm follows these steps: 32 | - Convolving the image with a Laplacian of Gaussian filter of a given standard deviation, which includes a noise reduction step given the presence of the Gaussian. 33 | - Thresholding the output by 0 to get a binary image. 34 | - Detecting zero-crossings in the convolved image, and keeping only these for the final image, so as to improve edge detection. 35 | ### Fragment shaders : 36 | 37 | All of the fragment shaders used for this implementation are adapted from Seth George Hall's thesis [^HAL2014] showing an implementation of the Canny edge detection algorithm using OpenGL ES 2.0. It uses a pipeline of five fragment shaders, linked together via framebuffers ([Fig.1]) : 38 | 39 | ![Fig.1](images/pipeline.png) 40 | 41 | **Fig.1: Canny edge detection pipeline using OpenGL ES 2.0[^HAL2014]** 42 | 43 | This thesis was found by Cecilia Ostertag, and I've adapted it to work with the Laplacian of Gaussian edge detection filter. 44 | 45 | 46 | The *gpuEdgeLaplace()* function is in *src/gpu/laplace.js*, and the example files are *examples/testLoG.js* and *examples/testLoG.html*. 47 | 48 | The first shader, in *src_fs_log()*, corresponds to a Laplacian of Gaussian using a 3x3 kernel (implementing the 9x9 kernel that would have given the same kind of results as our JS implementation did not work out, as I could not figure out how to do it without manually writing the 9x9 = 81 lines needed to get the raster values for one iteration). Here Cecilia Ostertag modified the original code because the author omitted to flip the kernels before the convolution, so it was giving poor results. 49 | The kernel is defined as a *uniform* parameter and, given difficulties discovered in my Laplacian of Gaussian kernel generation code (certain values, notably for 3x3 kernels, gave back only positive numbers, which made the thresholding step impossible), was simply set as a 3x3 kernel with the following values: 1,1,1,1,-8,1,1,1,1. The output of this function is a pixel value, which is immediately thresholded inside the shader so as to give a binary output. 50 | 51 | The second shader, in *src_fs_threshold()*, performs the zero-crossing detection by comparing each pixel to its neighbours' values, with the definition that only the foreground pixels with at least one background pixel neighbour are zero-crossings. 52 | 53 | ### Benchmarking process 54 | 55 | Given that this time around we were supposed to write our reports in separate parts, I had to run the benchmark on a different computer than the one used for the previous two benchmarks[^OST2017] of the LoG algorithm (the ImageJ and CPU versions). 56 | The previous benchmarks were done using a computer with an Intel Core i7-6700K @ 4.0 GHz, on Linux Ubuntu 16.04 64 bits with a 4.10 kernel. The GPU is an NVIDIA GTX 1070. The version of ImageJ is 1.51q, using Java 1.8.0\_112 (64 bits). The choice of processor was fixed using the taskset command to avoid sharing of the processor load, and the processes were given a high priority to avoid preemption.
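The actual measurements were produced with the benchmark pages in *examples/*; purely as an illustration of the timing procedure, here is a minimal sketch, assuming a TIMES raster and a WebGL 2 graph context are already set up. Only `gpuEdgeLaplace` comes from the repository; the `raster` and `gpuEnv` variables, the helper name and the ten-iteration loop are hypothetical.

```javascript
// Hypothetical timing helper: wraps a single call to the WebGL 2 LoG filter
// with performance.now() and returns the elapsed time in milliseconds.
const timeLoG = (raster, gpuEnv) => {
  const t0 = performance.now();
  gpuEdgeLaplace()(raster, gpuEnv); // run the LoG -> zero-crossing pipeline once
  return performance.now() - t0;
};

// Repeat the measurement a few times and print CSV-like lines.
for (let i = 0; i < 10; i++) {
  console.log(`LoG_${raster.width},${timeLoG(raster, gpuEnv).toFixed(0)}`);
}
```

Note that `performance.now()` only brackets the JavaScript call: WebGL commands are queued and executed asynchronously by the driver, so this kind of measurement is an approximation rather than an exact GPU time.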
57 | 58 | For this benchmark, We used the same picture ("coins", in uint8), in five different sizes : 128x105 px, 300x246 px, 512x420 px, 1024x840px, and 2048x1679 px, to show how the performance of the functions vary when increasing the complexity of the input image. 59 | 60 | ## Results 61 | 62 | ### Edge detection on a test image : 63 | 64 | The following figure represents the result of the *gpuEdgeLaplace()* function, using a 3*3 kernel, compared with the result of the CPU *laplace()* function which uses a 9*9, the Mexican Hat ImageJ plugin[^PRO2015], and the FeatureJ ImageJ LoG plugin ([Fig.2]). We can see that a simple 3*3 kernel is in no way sufficient, as a lot of noise stays evident in the image, but the GPU script still finds the edges we expect from a functional LoG implementation. 65 | 66 | 67 | ![Fig.2](images/Lena8bit_Lena-mexicanHat-5_Lena-FeatureJ2_Lena-FeatureJ3_Lena-myalgoCPU-Lena-myalgoGPU) 68 | 69 | **Fig.2: Result of LoG edge detection. 1:original 8bit image, 2:output of Mexican Hat function, 3:output of ImageJ FeatureJ plugin 3X3 kernel function, 4: ImageJ FeatureJ plugin 9X9 kernel function 5: my JS CPU implementation with 9X9 kernel, 6: my WebGL GPU implementation** 70 | 71 | 72 | ### Benchmark results 73 | 74 | Given that I have no experience with benchmarking, am using a computer completely different in both build and OS, and did not manage to properly implement a script comparable in output to the previous implementations, I believe it is best not to make comparisons between the speed of this implementation and that of the previous one's. 75 | 76 | I'll add the raw CSV data in a table here: 77 | 78 | | image size (px) | time (ms) | 79 | |-----------------|-----------| 80 | | LoG_300 | 267 | 81 | | LoG_300 | 215 | 82 | | LoG_300 | 253 | 83 | | LoG_300 | 229 | 84 | | LoG_300 | 317 | 85 | | LoG_300 | 1013 | 86 | | LoG_300 | 199 | 87 | | LoG_300 | 270 | 88 | | LoG_300 | 196 | 89 | | LoG_300 | 743 | 90 | | LoG_512 | 1835 | 91 | | LoG_512 | 241 | 92 | | LoG_512 | 235 | 93 | | LoG_512 | 197 | 94 | | LoG_512 | 1140 | 95 | | LoG_512 | 264 | 96 | | LoG_512 | 334 | 97 | | LoG_512 | 246 | 98 | | LoG_512 | 253 | 99 | | LoG_512 | 262 | 100 | | LoG_1024 | 344 | 101 | | LoG_1024 | 338 | 102 | | LoG_1024 | 333 | 103 | | LoG_1024 | 312 | 104 | | LoG_1024 | 364 | 105 | | LoG_1024 | 3974 | 106 | | LoG_1024 | 443 | 107 | | LoG_1024 | 512 | 108 | | LoG_1024 | 616 | 109 | | LoG_1024 | 2817 | 110 | | LoG_2048 | 1136 | 111 | | LoG_2048 | 18427 | 112 | | LoG_2048 | 1126 | 113 | | LoG_2048 | 18119 | 114 | | LoG_2048 | 1542 | 115 | | LoG_2048 | 8090 | 116 | | LoG_2048 | 1130 | 117 | | LoG_2048 | 18352 | 118 | | LoG_2048 | 1171 | 119 | | LoG_2048 | 18353 | 120 | 121 | 122 | ## Discussion and Conclusion 123 | 124 | The LoG Detector implemented with WebGL 2 can handle the processing of pictures up to 2048x1679 pixels before a crash . 125 | 126 | Given that I wasn't able to adapt my function to use the same size of kernel as the CPU function, and that I couldn't run my benchmark on the same kind of computer as the previous benchmarks, the only things I can really add in terms of conclusion is that the script needs to be improved to run using a 9X9 kernel. 127 | 128 | [^ECMA2011]: ECMAScript EC. European Computer Manufacturers Association and others. ECMAScript language specification. 2011. 129 | 130 | [^OST2017]: Cecilia Ostertag, Peter Bock, Ophelie Thierry. Edge Detection Report 1. 2017. 131 | 132 | [^BOV2009]: Bovik AC, editor. The essential guide to image processing. 
Academic Press; 2009 Jul 8. 133 | 134 | [^PRO2015]: Dimiter Prodanov, Mexican Hat LoG filter plugin for ImageJ 135 | 136 | [^HAL2014]: Hall SG. GPU Accelerated Feature Algorithms for Mobile Devices. March 2014. 137 | -------------------------------------------------------------------------------- /edge3_ostertag.md: -------------------------------------------------------------------------------- 1 | # Edge detection 2 | 3 | Author : Cécilia Ostertag 4 | 5 | ## Introduction 6 | 7 | Image processing is one of the most important fields in the domain of computer vision[^BOV2009]. Most scientific domains use information extracted from images in one way or another. For a computer to make sense of these images, and be able to extract meaningful data from them, it needs to be able to interprete and understand them. 8 | That is where Image Processing comes in, allowing a computer to process an image and detect its major features, and to perform higher-level vision tasks like face recognition. 9 | In our project, we will examine one specific field of image processing called edge detection. 10 | 11 | The physical notion of edge comes from the shape of three dimensional objects or from their material properties. But, seeing as the acquisition process translates 3D scenes to 2D representations, this definition does not apply to image processing. In this report we will use the following definition by Bovik[^BOV2009]: "An edge can generally be defined as a boundary or contour that separates adjacent image regions having relatively distinct characteristics according to some features of interest. Most often this feature is gray level or luminance”. According to this definition, the pixels of an image belonging to an edge are the pixels located in regions of abrupt gray level changes. Moreover, to avoid counting noise pixels as edges, the pixels have to be part of a contour-like structure. 12 | Edge detection is the process of finding the pixels belonging to the edges in an image, and producing a binary image showing the locations of the edge pixels. 13 | 14 | In our project, we began by documenting the main linear edge detection approaches and algorithms, and their implementation in the image processing software ImageJ. We then performed a benchmark on the ImageJ plugins, in order to compare their execution time and the memory load for the Java Virtual Machine (JVM). 15 | 16 | For the second part of our project, we made our own implementations of the Sobel, Prewitt, Robert's cros, Canny and Laplacian of Gaussian operators using ECMAscript6[^ECMA2011]. 17 | 18 | In this report, I will present the last part of our project : the implementation of Canny's algorithm[^CAN1986] using the GPU via the API WebGL 2. Finally, I will be able to compare qualitatively and quantitatively the CPU and GPU implementations, and to compare the GPU implementation with ImageJ plugin's implemenation. 
19 | 20 | 21 | **This report (written using the markdown format), and associated files, is stored in a repository on github:** 22 | https://github.com/bockp/Edge-Detection-project 23 | 24 | 25 | ## Material & Methods 26 | 27 | 28 | ### Principle of Canny’s algorithm : 29 | 30 | Canny’s algorithm uses the following steps: 31 | - Noise reduction by convolving the image with a Gaussian filter of a given standard deviation 32 | - Computation of the intensity gradient magnitude and orientation 33 | - Dividing orientation values (theta) into 4 directions : horizontal (0°), north-east /south-west(45°), vertical (90°), and north-west/south-east direction (135°) 34 | - Non-maximum suppression by only keeping pixels which value is the maximum compared to the values of the two surrounding pixels according to the gradient orientation 35 | - Finding strong edge pixels and weak edge pixels using a low and a high threshold values 36 | - Tracing edges with hysteresis, by keeping weak edge pixels next to strong edge pixels and then extending the edges in several passes 37 | 38 | ### Fragment shaders : 39 | 40 | All of the fragment shaders used for this implementation are adapted from Seth George Hall's thesis [^HAL2014] showing an implementation of Canny edge detection using OpenGL ES 2.0. It uses a pipeline of five fragment shaders, linked together via framebuffers ([Fig.1]) : 41 | 42 | ![Fig.1](images/pipeline.png) 43 | 44 | **Fig.1: Canny edge detection pipeline using OpenGL ES 2.0[^HAL2014]** 45 | 46 | The *gpuEdgeCanny()* function is in *src/gpu/canny.js*, and the example files are *examples/testCanny*.js and *example/testCanny.html*. Like the CPU function, this function takes two parameters : the low and high threshold for the hysteresis. 47 | 48 | The first two shaders, in *getFragmentSource_blurH()* and *getFragmentSource_blurV()*, correspond to a gaussian blur performed in two passes, horizontal and vertical, using a 5x5 kernel. The kernel is passed to the shader as a *uniform* float array, containing the following values : 0.0625, 0.25, 0.375, 0.25, 0.0625. The output is the resulting pixel value. 49 | 50 | The third shader, in *getFragmentSource_sobel()*, is the computation of the gradient magnitude and orientation using Sobel's horizontal and vertical kernels. Here I modified the original code because the author ommited to flip the kernels before the convolution so it was giving poor results. The kernels are defined as *uniform* parameters. To compute the gradient direction, the author's method is to multiply the gradient with a rotation matrix and measure it's angle, and then classifying this angle into eight directions. The result of this step is stored into the output : **outColor.r** is the gradient magnitude and **outColor.gb** is the (X,Y) orientation of the gradient. 51 | 52 | The fourth shader, in *getFragmentSource_nonmax()*, performs the non-maximum suppressions and the double threshold. For the non-maximum suppression, the gradient magnitude of the neighboring pixel in the gradient direction are computed and then compared to the gradient magnitude of the current pixel, using the *step* function. If it is not the maximum, the gradient magnitude value is multiplied by 0. The *smoothstep* function is then used to compare the gradient magnitude to the low and high threshold. This way if the gradient magnitude of the current pixel is either not a maximum or inferior to the low threshold the pixel stength is set to 0. 
If it is superior to the high threshold, meaning that it is a strong pixel, its stength is set to 1, and if it is a weak pixel its strength will be between 0 and 1. In the output, **outColor.r** is the strength value, and the three other components are equal to zero. 53 | 54 | Finally, the last shader, in *getFragmentSource_hysteresis()*, performs the edge tracing with the hysteresis. The edge strength of the eight neighboring pixels is retrieved, and the pixel is accepted as an edge pixel if the sum of the eight neighbors' strenght is superior to 2. The last line is modified so that the output value is equal to 1 if the pixel is an edge, and to 0 if the pixel is not an edge. 55 | 56 | ### Benchmarking process 57 | 58 | The benchmark was done using a computer with an Intel core I7-6700K @4.0 Ghz, on Linux Ubuntu 16.04 64 bits with a kernel 4.10. The GPU is a nvidia gtx1070. The version of ImageJ is the 1.51q, using Java 1.8.0\_112 (64 bits). I fixed the choice of processor with the taskset command to avoid a sharing of the processor load, and fixed the processes with a high priority to avoid preemption. 59 | 60 | For this benchmark, I used the same picture ("coins", in uint8), in five different sizes : 128x105 px, 300x246 px, 512x420 px, 1024x840px, and 2048x1679 px, to show how the performance of the functions vary when increasing the complexity of the input image. I tried to use a 4096x3360 image but that raised an "out of memory" error with the CPU function and a crash of the navigator with the GPU function. 61 | 62 | ## Results 63 | 64 | ### Edge detection on a test image : 65 | 66 | The following figure represents the result of the *gpuEdgeCanny()* function with parameters low threshold = 50.0, high threshold = 100.0, compared with the result of the CPU *canny()* function with parameters low threshold = 15.0, high threshold = 30.0, and sigma = 2.0 , and the plugin Canny Edge Detector[^GIB2011] with parameters low threshold = 2.5, high threshold = 5.0, and sigma = 2.0 ([Fig.2]). We can see that the edges of the face, hat and shoulder are well detected, as well as some details on the hat feathers. The GPU function is more effective at detecting the small details like the eyes, lipes and feathers. Also, contrarly to the CPU function there are no false edges created by the borders of the image. 67 | 68 | ![Fig.2](images/canny_comparison_GPU.jpg) 69 | 70 | **Fig.2: Result of Canny edge detection. 1:original image, 2:output of GPU function, 3:output of CPU function, 4:output of ImageJ Canny Edge Detector plugin (uint8 image)** 71 | 72 | The function works well with float32 images, but there is a problem for uint16 images : the values of the gradient are too high, which results in very thick edges in the output ([Fig.3]). The problem seems to be when the result of the first fragment shader is passed to the second via the framebuffer, but I did not manage to indentify and solve it. 73 | 74 | ![Fig.3](images/all_gpu.png) 75 | 76 | **Fig.3: Result of gpuEdgeCanny() function with low threshold = 50.0, low threshold = 100.0. 1:original image, 2:output with uint8 image, 3:output with uint16 image, 4:output with float32 image** 77 | 78 | ### Benchmark results 79 | 80 | I compared my function with the plugins Canny Edge Detector by Tom Gibara [^GIB2011] (thresholds = 2.5 and 5.0, sigma=2), FeatureJ Edges by Erik Meijering [^MEI2007] (thresholds = 2.5 and 5.0, sigma=2), and my previous JavaScript implementation, using uint8 images ([Fig.4]). 
This function outperforms the Canny Edge Detector function, which we showed in our previous report was unexpectedly time consuming, so I did not display it on the graph. Contrarly to the CPU implementation, where the execution time was drastically increasing with the dimension of the input image, the GPU implementation has an exetuction time similar to FeatureJ plugin. We can see that for 2024x1679 images, the GPU implementation is 4 times faster than the CPU javascript implementation. This result confirms that using the GPU for graphic computation is faster than using the CPU. Also the GPU implementation does not use any loops or IF statements, which also reduces the execution time. 81 | 82 | ![Fig.4](images/bench_time_GPU.jpeg){width=80% height=80%} 83 | 84 | **Fig.4: Execution time of FeatureJ Edges plugin, CPU implementation, and GPU implemenation, with five increasing image sizes (uint8 images)** 85 | 86 | With uint16 and float32 images ([Fig.5]), we can see that the execution times are almost the same. It seems to be a bit higher with uint16 images, but it is difficult to interpret this result given that the processing of uint16 images is not completely functionnal. 87 | 88 | ![Fig.4](images/alltypes_GPU.jpeg){width=60% height=60%} 89 | 90 | **Fig.4: Execution time of gpuEdgeCanny() function, with five increasing image sizes, for uint8, uint16, and float32 images** 91 | 92 | ## Discussion and Conclusion 93 | 94 | Canny Edge Detector implemented with WebGL 2 can handle the processing of pictures up to 2048x1679 pixels before a crash of the navigator. My implementation handles uint8 and float32 images. Visually, it is more sensible to details than the CPU and ImageJ functions, even with high threshold values, which results in the detection of high quality contours. 95 | 96 | The execution time is smaller than the one for the CPU implementation, and roughly the same as ImageJ's functions, meaning that it is a successfull adaptation from ImageJ's java implementation to WebGL. It allows a user to process images as efficiently but directly using the navigator. However, these functions were benchmarked using a powerfull CPU and GPU, so the performance will be lower using for example a laptop or a smartphone. 97 | 98 | Moreover, with the WebGL implementation, the CPU sends data to the GPU, which adds some time. 99 | 100 | WebGL2.0 is based on OpenGLES3.0 which supports vertex and fragment shader, better suited for 3D graphic applications. CUDA, OpenCL, WebCL and OpenGLES3.1 Compute Shader might be better optimized for our application (source : khronos.org, nvidia.com). 101 | 102 | [^GIB2011]: Gibara T. Canny Edge Detector plugin for ImageJ image processor. 103 | 104 | [^MEI2007]: Meijering E. FeatureJ: A Java Package for Image Feature Extraction. 105 | 106 | [^ECMA2011]: ECMAScript EC. European Computer Manufacturers Association and others. ECMAScript language specification. 2011. 107 | 108 | [^BOV2009]: Bovik AC, editor. The essential guide to image processing. Academic Press; 2009 Jul 8. 109 | 110 | [^CAN1986]: Canny J. A computational approach to edge detection. IEEE Transactions on pattern analysis and machine intelligence. 1986 Nov(6):679-98. 111 | 112 | [^HAL2014]: Hall SG. GPU Accelerated Feature Algorithms for Mobile Devices. March 2014. 
113 | 114 | -------------------------------------------------------------------------------- /src/gpu/Processor.js: -------------------------------------------------------------------------------- 1 | /* 2 | * TIMES: Tiny Image ECMAScript Application 3 | * Copyright (C) 2017 Jean-Christophe Taveau. 4 | * 5 | * This file is part of TIMES 6 | * 7 | * This program is free software: you can redistribute it and/or modify it 8 | * under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation, either version 3 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful,Image 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with TIMES. If not, see . 19 | * 20 | * 21 | * Authors: 22 | * Jean-Christophe Taveau 23 | */ 24 | 25 | 'use strict'; 26 | 27 | 28 | export class Processor { 29 | constructor(context,canvas,width=-1,height=-1) { 30 | this.context = context; 31 | this.canvas = canvas; 32 | this.canvas.width = width; 33 | this.canvas.height = height; 34 | this.width = width; 35 | this.height = height; 36 | this.geometries = {}; 37 | this.textures = []; 38 | this.attributes= {}; 39 | this.uniforms = {}; 40 | this.framebuffers = {}; 41 | } 42 | 43 | attribute(a_name,a_num,a_type,a_stride,a_offset) { 44 | this.attributes[a_name] = { 45 | name: a_name, 46 | num : a_num, 47 | type: a_type, 48 | stride: a_stride, 49 | offset: a_offset, 50 | location: null 51 | }; 52 | return this; 53 | } 54 | 55 | clearCanvas(color = [0.1,0.1,0.1,1.0]) { 56 | let gl = this.context; 57 | 58 | // clear color 59 | gl.clearColor(color[0],color[1],color[2],color[3]); 60 | gl.clear(gl.COLOR_BUFFER_BIT); 61 | 62 | return this; 63 | } 64 | 65 | /** 66 | * 67 | * primitives: 68 | * - type: TRIANGLE_STRIP, POINTS, TRIANGLES, LINES 69 | * - vertices: Float32Array([]) 70 | */ 71 | geometry(obj) { 72 | let gl = this.context; 73 | 74 | // Create vertices for rectangle 75 | this.geometries.type = obj.type; 76 | this.geometries.glType = obj.type; 77 | this.geometries.buffer = gl.createBuffer(); 78 | this.geometries.numVertices = obj.num; 79 | gl.bindBuffer(gl.ARRAY_BUFFER,this.geometries.buffer); 80 | gl.bufferData(gl.ARRAY_BUFFER,obj.vertices,gl.STATIC_DRAW); 81 | 82 | // Unbind buffer(s) 83 | gl.bindBuffer(gl.ARRAY_BUFFER, null); 84 | 85 | return this; 86 | } 87 | 88 | /** 89 | * 90 | */ 91 | packWith(shaderProgram) { 92 | let gl = this.context; 93 | 94 | this.shader = shaderProgram; 95 | 96 | // 1- Get Attribute and Uniform locations 97 | Object.values(this.attributes).forEach( (attr) => { 98 | attr.location = shaderProgram.attributes[attr.name].location; 99 | attr.component = shaderProgram.attributes[attr.name].type; 100 | }); 101 | 102 | console.log(this.attributes); 103 | 104 | // 2- Create a VAO 105 | this.vao = gl.createVertexArray(); 106 | gl.bindVertexArray(this.vao); 107 | // 3- Bind the position buffer containing the vertices 108 | gl.bindBuffer(gl.ARRAY_BUFFER, this.geometries.buffer); 109 | // 4- Attributes 110 | Object.values(this.attributes).forEach( (a) => { 111 | console.log(a); 112 | gl.enableVertexAttribArray(this.attributes[a.name].location); 113 | gl.vertexAttribPointer( 114 | this.attributes[a.name].location, 115 | this.attributes[a.name].num, 116 | gl.FLOAT, 117 | 
false, 118 | this.attributes[a.name].stride, 119 | this.attributes[a.name].offset 120 | ); 121 | } 122 | ); 123 | // Unbind buffers 124 | gl.bindBuffer(gl.ARRAY_BUFFER, null); 125 | gl.bindVertexArray(null); 126 | return this; 127 | } 128 | 129 | /** 130 | * Render in a Frame Buffer 131 | */ 132 | redirectTo(fbo_name,type='uint8',attachment=0) { 133 | let gl = this.context; 134 | 135 | // Be sure no active framebuffer somewhere 136 | // gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER,null); 137 | 138 | if (this.framebuffers[fbo_name] === undefined) { 139 | console.log('CREATE FBO'); 140 | let fbo = gl.createFramebuffer(); 141 | 142 | let [internalFormat, srcType, data] = (type == 'uint16' || type === 'float32') ? 143 | [gl.RGBA32F,gl.FLOAT, new Float32Array(this.width * this.height * 4)] : 144 | [gl.RGBA,gl.UNSIGNED_BYTE, new Uint8ClampedArray(this.width * this.height * 4)]; 145 | 146 | 147 | let texture = this._createTexture( 148 | gl, 149 | data, 150 | this.width, 151 | this.height, 152 | internalFormat, 153 | gl.RGBA, 154 | srcType, 155 | gl.CLAMP_TO_EDGE, 156 | gl.NEAREST, 157 | gl.NEAREST 158 | ); 159 | 160 | gl.bindFramebuffer(gl.FRAMEBUFFER,fbo); 161 | gl.framebufferTexture2D( 162 | gl.FRAMEBUFFER, 163 | gl.COLOR_ATTACHMENT0 + attachment, 164 | gl.TEXTURE_2D, 165 | texture, 166 | 0 167 | ); 168 | 169 | this.framebuffers[fbo_name] = { 170 | id: 'framebuffer', 171 | name: fbo_name, 172 | buffer: fbo, 173 | texture: texture, 174 | format: internalFormat, 175 | srcType: srcType 176 | }; 177 | } 178 | else { 179 | gl.bindFramebuffer(gl.FRAMEBUFFER,this.framebuffers[fbo_name].buffer); 180 | } 181 | 182 | gl.bindTexture(gl.TEXTURE_2D, this.framebuffers[fbo_name].texture); 183 | 184 | 185 | return this; 186 | } 187 | 188 | /** 189 | * Update canvas size 190 | */ 191 | size(w,h) { 192 | this.canvas.width = w; 193 | this.canvas.height = h; 194 | this.width = w; 195 | this.height = h; 196 | return this; 197 | } 198 | 199 | /* 200 | * Pseudo-private - for Internal use, only 201 | */ 202 | _createTexture (context,data,w,h,iformat,format,type, wrap,mini, mag) { 203 | let gl = context; 204 | 205 | // Define a PBO for texture data? 206 | // https://stackoverflow.com/questions/43530082/how-can-i-upload-a-texture-in-webgl2-using-a-pixel-buffer-objecthttps://www.khronos.org/webgl/public-mailing-list/public_webgl/1701/msg00036.php 207 | // https://www.khronos.org/webgl/public-mailing-list/public_webgl/1701/msg00036.php 208 | 209 | // const tex = gl.createTexture(); 210 | // gl.bindTexture(gl.TEXTURE_2D, tex); 211 | // take data from PBO 212 | // gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 300, 150, 0, gl.RGBA, gl.UNSIGNED_BYTE, 0); 213 | 214 | // Create a Pixel Buffer Object (PBO) for fast access to pixel data 215 | const pbo = gl.createBuffer(); 216 | gl.bindBuffer(gl.PIXEL_UNPACK_BUFFER, pbo); 217 | gl.bufferData(gl.PIXEL_UNPACK_BUFFER, data, gl.STATIC_DRAW); 218 | // data is now in PBO 219 | 220 | // Create a texture 221 | let texture = gl.createTexture(); 222 | 223 | // Bind it to texture unit 0' 2D bind point 224 | gl.bindTexture(gl.TEXTURE_2D, texture); 225 | 226 | // Set the parameters so we don't need mips and so we're not filtering 227 | // and we don't repeat 228 | gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, wrap); 229 | gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, wrap); 230 | gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, mini); 231 | gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, mag); 232 | // Upload the image into the texture. 
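    // Because a buffer is bound to gl.PIXEL_UNPACK_BUFFER at this point, WebGL 2 interprets
    // the last argument of the gl.texImage2D() call below as a byte offset into that PBO
    // (hence the 0): the pixel data is pulled from the buffer filled above, not from a TypedArray.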
233 | let mipLevel = 0; // the largest mip 234 | let internalFormat = iformat; // format we want in the texture 235 | let srcFormat = format; // format of data we are supplying 236 | let srcType = type; // type of data we are supplying 237 | 238 | gl.texImage2D( 239 | gl.TEXTURE_2D, 240 | mipLevel, 241 | internalFormat, 242 | w, 243 | h, 244 | 0, // Border 245 | srcFormat, 246 | srcType, 247 | 0 // data already in PBO 248 | ); 249 | 250 | // Job finished: Unbind texture 251 | gl.bindTexture(gl.TEXTURE_2D, null); 252 | 253 | return texture; 254 | }; 255 | 256 | /** 257 | * Create and set up several textures 258 | * 259 | * @param {[Object]} array - Array of texture object. The pixel data must be stored as a Raster 260 | */ 261 | textures(array) { 262 | // TODO 263 | array.forEach( (tex) => this.texture(tex.raster,tex.unit,tex.wrap,tex.mini,tex.mag)); 264 | return this; 265 | } 266 | 267 | /** 268 | * Create and set up a texture 269 | * 270 | */ 271 | texture(raster,unit=0, wrap=gpu.CLAMP,mini=gpu.NEAREST, mag= gpu.NEAREST) { 272 | 273 | if (raster.id !== undefined && raster.id === 'framebuffer') { 274 | console.log('This is a FBO...'); 275 | this.textures[0] = {texture: raster.texture, unit: unit}; 276 | return this; 277 | } 278 | 279 | let gl = this.context; 280 | 281 | const glConstants = { 282 | 'clamp' : gl.CLAMP_TO_EDGE, 283 | 'repeat' : gl.REPEAT, 284 | 'mirror' : gl.MIRRORED_REPEAT, 285 | 'nearest' : gl.NEAREST, 286 | 'uint8': gl.UNSIGNED_BYTE, 287 | 'uint16': gl.UNSIGNED_SHORT, 288 | 'float32': gl.FLOAT, 289 | 'rgba' : gl.UNSIGNED_BYTE, 290 | 'gray' : gl.LUMINANCE, 291 | 'color': gl.RGBA 292 | } 293 | 294 | 295 | this.width = raster.width; 296 | this.height = raster.height; 297 | 298 | let texture = this._createTexture( 299 | gl, 300 | raster.pixelData, 301 | raster.width, 302 | raster.height, 303 | (raster.type === 'uint8' || raster.type === 'uint16' || raster.type === 'float32') ? gl.LUMINANCE : gl.RGBA, 304 | (raster.type === 'uint8' || raster.type === 'uint16' || raster.type === 'float32') ? gl.LUMINANCE : gl.RGBA, 305 | glConstants[raster.type], 306 | wrap, 307 | mini, 308 | mag 309 | ); 310 | 311 | 312 | this.textures.push({texture: texture, unit: unit}); 313 | 314 | return this; 315 | } 316 | 317 | /** 318 | * Configure the rendering/computing engine before run(..) 319 | * 320 | * @author Jean-Christophe Taveau 321 | */ 322 | preprocess(settings=[]) { 323 | let gl = this.context; 324 | 325 | // console.log(settings); 326 | // Add Default viewport 327 | if (settings.find( (elt) => (elt.name === 'viewport')) === undefined) { 328 | settings.push({name:'viewport', params: [0.0,0.0, this.width, this.height]}); 329 | } 330 | 331 | // Add various rendering parameters 332 | 333 | // Blending operations 334 | // gl.enable(gl.BLEND); 335 | // viewport operations 336 | // TODO 337 | 338 | settings.forEach( (s) => { 339 | switch (s.name) { 340 | case 'blend': 341 | gl.blendEquation(s.params[0]); 342 | gl.blendFunc(s.params[1], s.params[2]); 343 | gl.enable(gl.BLEND); 344 | break; 345 | case 'viewport': 346 | gl.viewport(s.params[0],s.params[1],s.params[2],s.params[3]); 347 | break; 348 | } 349 | }); 350 | 351 | 352 | // Activate shader program 353 | gl.useProgram(this.shader.program); 354 | 355 | return this; 356 | } 357 | 358 | /** 359 | * 360 | */ 361 | postprocess() { 362 | // Clean ? 363 | // gl.disable(settings)? 
364 | return this; 365 | } 366 | 367 | 368 | /** 369 | * 370 | */ 371 | readPixels(fbo_name) { 372 | let gl = this.context; 373 | 374 | // http://roxlu.com/2014/048/fast-pixel-transfers-with-pixel-buffer-objects 375 | // https://github.com/KhronosGroup/WebGL/blob/master/sdk/tests/conformance2/reading/read-pixels-from-rgb8-into-pbo-bug.html 376 | let fbo = this.framebuffers[fbo_name]; 377 | gl.bindFramebuffer(gl.FRAMEBUFFER,fbo.buffer); 378 | gl.bindTexture(gl.TEXTURE_2D, fbo.texture); 379 | 380 | // console.log(this.width, this.height); 381 | let data = (fbo.srcType === gl.FLOAT) ? new Float32Array(this.width * this.height * 4) : new Uint8Array(this.width * this.height * 4); // RGBA, typed to match the FBO srcType 382 | gl.readPixels(0, 0, this.width, this.height, gl.RGBA, fbo.srcType, data); 383 | 384 | return data; 385 | 386 | } 387 | 388 | 389 | /** 390 | * 391 | */ 392 | run() { 393 | let gl = this.context; 394 | 395 | // Bind the position buffer containing the vertices (ie rectangle) 396 | gl.bindVertexArray(this.vao); 397 | 398 | this.textures.forEach( (tex) => { 399 | gl.activeTexture(gl.TEXTURE0 + tex.unit); 400 | gl.bindTexture(gl.TEXTURE_2D, tex.texture); 401 | }); 402 | 403 | gl.drawArrays(this.geometries.glType,0,this.geometries.numVertices); 404 | 405 | // Clean up 406 | // TODO 407 | gl.bindTexture(gl.TEXTURE_2D,null); 408 | gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER,null); 409 | 410 | return this; 411 | } 412 | 413 | 414 | /** 415 | * 416 | */ 417 | uniform(u_name,u_value) { 418 | let gl = this.context; 419 | 420 | let u = this.shader.uniforms[u_name]; 421 | 422 | switch (u.type) { 423 | case 'float': gl.uniform1f(u.location, u_value);break; 424 | case 'int': gl.uniform1i(u.location, u_value);break; 425 | case 'uint': gl.uniform1ui(u.location, u_value);break; 426 | case 'int[]': gl.uniform1iv(u.location, u_value);break; 427 | case 'float[]': gl.uniform1fv(u.location, u_value);break; 428 | case 'mat2': gl.uniformMatrix2fv(u.location, false, u_value);break; // uniformMatrix*fv requires the transpose flag (false in WebGL) 429 | case 'mat3': gl.uniformMatrix3fv(u.location, false, u_value);break; 430 | case 'mat4': gl.uniformMatrix4fv(u.location, false, u_value);break; 431 | case 'sampler2D': gl.uniform1i(u.location, u_value);break; 432 | case 'vec2': gl.uniform2fv(u.location, u_value);break; 433 | case 'vec3': gl.uniform3fv(u.location, u_value);break; 434 | case 'vec4': gl.uniform4fv(u.location, u_value);break; 435 | }; 436 | return this; 437 | } 438 | 439 | } 440 | 441 | 442 | 443 | -------------------------------------------------------------------------------- /edge3_thierry.md: -------------------------------------------------------------------------------- 1 | # Edge detection 2 | 3 | Author: Ophélie Thierry 4 | 5 | ## Introduction 6 | 7 | ### General notions about edge detection 8 | 9 | Image processing is one of the most important fields in the domain of computer vision [^BOV2009]. Most scientific domains use information extracted from images in one way or another. For a computer to make sense of these images, and be able to extract meaningful data from them, it needs to be able to interpret and understand them. 10 | That is where image processing comes in: it allows a computer to process an image, detect its major features, reduce mistakes and experimenter-related variance (leading to less biased conclusions), and perform higher-level vision tasks such as face recognition [^DOH2012] [^SCH2015]. In our project, we will examine one specific field of image processing called edge detection. 11 | 12 | The physical notion of edge comes from the shape of three-dimensional objects or from their material properties.
However, since the acquisition process translates 3D scenes into 2D representations, this definition does not apply directly to image processing. In this report we will use the following definition by Bovik (2009): "An edge can generally be defined as a boundary or contour that separates adjacent image regions having relatively distinct characteristics according to some features of interest. Most often this feature is gray level or luminance" [^BOV2009]. According to this definition, the pixels of an image belonging to an edge are the pixels located in regions of abrupt gray-level changes. Moreover, to avoid counting noise pixels as edges, the pixels have to be part of a contour-like structure. 13 | Edge detection is the process of finding the pixels belonging to the edges of an image, and producing a binary image showing the locations of these edge pixels. 14 | 15 | Among image analysis software, ImageJ is one of the most widely used, being one of the oldest, free, open source, easy to use, and built on an extensible architecture implemented in Java. It is a generalist software, but the different plugins written by the community allow it to handle a wide range of images and make it configurable enough to become very efficient and specific [^SCH2012] [^ELI2015] [^RUE2017] [^KAI2015]. 16 | 17 | ### Interest of changing the implementation language 18 | 19 | Currently, ImageJ is mainly distributed as a downloadable desktop application, although an applet can be used in Java-enabled browsers [^SCH2012]. Java and JavaScript share little more than some surface syntax: Java is a compiled, strictly typed language, whereas JavaScript is an interpreted, loosely typed language with better portability, and a major component in the development of current applications running within a web browser [^BIE2013]. Implementing the ImageJ functions in JavaScript could improve the efficiency of such applications and reduce their computation time [^WOL2013]; for edge detection, this was demonstrated in our second report. 20 | 21 | An additional way to improve the efficiency of these applications is to use WebGL, an API that allows scripts written in JavaScript to run computations on the Graphics Processing Unit (GPU), parallelizing the operations, reducing computation time, and improving rendering performance and quality [^SHA2013] [^ROS2015]. WebGL is also well supported by the main web browsers [^SHI2017]. Several improvements came with WebGL 2 and the surrounding ECMAScript 6 environment, such as multiline template literals (convenient for writing shader sources), the standardisation of Vertex Array Objects, and the possibility to create Uniform Buffer Objects. 22 | 23 | ### Aim of the project 24 | 25 | In our project, we began by documenting the main linear edge detection approaches and algorithms, and their implementation in the image processing software ImageJ [^SCH2015]: convolution with edge templates (Prewitt [^PRE1970], Sobel [^SOB1968], Kirsch [^KIR1971], Robert's cross [^ROB1963]), zero-crossings of the Laplacian of Gaussian convolution [^MAR1980], and zero-crossings of directional derivatives of smoothed images (Canny [^CAN1986]). 26 | 27 | We then performed a benchmark of the ImageJ plugins, in order to compare their execution time and the memory load of the Java Virtual Machine (JVM). 
For the second and third parts of our project, we wrote our own implementations of the Sobel, Prewitt, Robert's cross, Canny, and Laplacian of Gaussian operators, using ECMAScript 6 [^ECMA2011] and WebGL 2 [^HAL2014] respectively. 28 | 29 | This report focuses on the three convolutions with edge templates: Prewitt, Sobel, and Robert's cross. Information about the other edge detection algorithms can be found in the previous reports. 30 | 31 | The link to our GitHub repository containing our reports in markdown format, the images, and the code for the benchmark and algorithms is: https://github.com/bockp/Edge-Detection-project. 32 | 33 | 34 | 35 | ## Material & Methods 36 | 37 | ### Implementation of the functions 38 | 39 | ![Fig.1](images/kernels2.jpg) 40 | 41 | **Fig.1: Horizontal and vertical 2D kernels: 1: Sobel operator, 2: Prewitt operator, 3: Robert's cross operator** 42 | 43 | ![Fig.2](images/prewitt_horizontal_kernel_in_tupple.PNG) 44 | 45 | **Fig.2: Implementation of the Prewitt horizontal kernel in a list.** 46 | 47 | The corresponding script is in the */src/gpu/edge.js* file, and the related examples are in the *examples/* directory, in the files *testEdge.html* and *testEdge.js*. 48 | 49 | As the three convolutions share the same process, a general function has been implemented, taking as arguments the raster, the graphical environment, and a specific pair of kernels. This function is called by three functions, each one specialized in a specific convolution (a minimal sketch of this dispatch pattern is given at the end of this section). All the vertical and horizontal kernels for each convolution are implemented as lists stored in constant global variables outside the main functions. To simplify the implementation, the six lists have the same size, and zeros are added to the Robert's cross kernels. The result is an image in which the edges have high pixel values compared to the rest of the image. 50 | 51 | To make it easier to locate the result in the web page, the gpuEnv where the picture has to be displayed is taken as an argument by each function. 52 | 53 | #### *gpuEdgeSobel()*, *gpuEdgePrewitt()*, *gpuEdgeRobert()* 54 | 55 | These three functions are built in the same way. Each of them takes the image and the graphical environment as arguments, and calls the *gpuEdge()* function with the lists corresponding to its specific kernels. 56 | 57 | #### *gpuEdge()* 58 | 59 | This is the general function called by the three others. It takes four arguments: the picture, the graphical environment where the picture has to be displayed, and the vertical and horizontal kernels (*kernelV* and *kernelH*) used to process the picture. This function uses only one shader program. 60 | 61 | The vertex shader, defined as the constant *src_vs*, transforms the image coordinates to make them compatible with display in the canvas. The fragment shader, defined as the constant *src_fs*, fetches for each coordinate the values of the corresponding neighboring elements and uses them to perform the convolution with the kernel values passed as arguments. The final value is stored in the vec4 *outputColor* for each coordinate. 62 | 63 | 64 | The pictures used for the benchmark are the *"coins"* pictures, in uint8, at five different sizes in pixels: 128x105, 300x246, 512x420, 1024x840, and 2048x1679. The benchmark procedure was coded by Cecilia Ostertag. 
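The following sketch illustrates the dispatch pattern described above. It is not the actual code of *src/gpu/edge.js*: the kernel values follow the usual Prewitt, Sobel, and Robert's cross definitions, the exact placement of the 2x2 Robert's cross coefficients inside the padded 3x3 lists and the argument order of *gpuEdge()* are assumptions, and *gpuEdge()* is reduced to a stub that only shows how the specialized functions forward their kernel pair.

```javascript
// Illustrative sketch only (not the code of src/gpu/edge.js).
// Horizontal and vertical kernels stored as flat lists in constant globals; all six lists
// share the same 3x3 size, with zeros padding the 2x2 Robert's cross kernels.
const PREWITT_H = [-1, 0, 1, -1, 0, 1, -1, 0, 1];
const PREWITT_V = [-1, -1, -1, 0, 0, 0, 1, 1, 1];
const SOBEL_H   = [-1, 0, 1, -2, 0, 2, -1, 0, 1];
const SOBEL_V   = [-1, -2, -1, 0, 0, 0, 1, 2, 1];
const ROBERT_H  = [ 0, 0, 0, 0, 1, 0, 0, 0, -1];
const ROBERT_V  = [ 0, 0, 0, 0, 0, 1, 0, -1, 0];

// General function: in the real implementation it compiles src_vs / src_fs, uploads the
// raster as a texture, runs the convolution on the GPU, and displays the result in gpuEnv.
const gpuEdge = (raster, gpuEnv, kernelH, kernelV) => {
  // ... WebGL 2 pipeline (textures, shader program, draw call) goes here ...
};

// Specialized functions simply forward their kernel pair to gpuEdge().
const gpuEdgeSobel   = (raster, gpuEnv) => gpuEdge(raster, gpuEnv, SOBEL_H, SOBEL_V);
const gpuEdgePrewitt = (raster, gpuEnv) => gpuEdge(raster, gpuEnv, PREWITT_H, PREWITT_V);
const gpuEdgeRobert  = (raster, gpuEnv) => gpuEdge(raster, gpuEnv, ROBERT_H, ROBERT_V);
```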
65 | 66 | 67 | 68 | ## Results 69 | 70 | ![Fig.3](images/EdgeDetectionUnit8.PNG) 71 | 72 | **Fig.3: Result of the WebGL edge detection process (from left to right: without processing, and with the Prewitt, Sobel, and Robert operators) on uint8 pictures.** 73 | 74 | 75 | ![Fig.4](images/EdgeDetectionFloat32.png) 76 | 77 | **Fig.4: Result of the WebGL edge detection process (from left to right: without processing, and with the Prewitt, Sobel, and Robert operators) on float32 pictures.** 78 | 79 | Figures [Fig.3] and [Fig.4] show the result of the process on uint8 and float32 pictures. When the process is applied to uint16 pictures, it reports an error and refuses to display the result. For each format, the Prewitt filter gives the brightest result. 80 | 81 | 82 | | Edge detection method | 128 px | 300 px | 512 px | 1024 px | 2048 px | 83 | | ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | 84 | | Prewitt (WGL2) | 3.6 | 25.6 | 83.3 | 261.3 | 1193.7 | 85 | | Sobel (WGL2) | 3.7 | 24 | 75.7 | 252.5 | 1268.2 | 86 | | Robert (WGL2) | 5.3 | 23.4 | 75.7 | 249.8 | 1097.3 | 87 | | Prewitt (JS) | 50.3 | 50.8 | 51.2 | 65 | 204.4 | 88 | | Sobel (JS) | 52 | 49.5 | 55.5 | 73.4 | 187.6 | 89 | | Robert (JS) | 43.3 | 39.4 | 44.3 | 60.4 | 118.1 | 90 | 91 | **Tab.1: Average execution times (in ms) of the processes implemented in JavaScript (JS) and WebGL2 (WGL2). Ten repetitions were performed on pictures of each size.** 92 | 93 | ![Fig.5](images/EdgeDetection_JS_WebGL2.PNG) 94 | 95 | **Fig.5: Comparison of the execution time (in ms) of the processes implemented in JavaScript (JS) and WebGL2 (WGL2) as a function of picture size (px).** 96 | 97 | Leaving aside the 2048x1679 px picture, the calculated correlation coefficient remains above 0.97 with a slope of nearly 0.30 for each implementation in JavaScript, versus respectively 0.80 and 0.02 for the WebGL implementation (a sketch of this computation is given after the references). 98 | 99 | ## Discussion and Conclusion 100 | 101 | For small pictures (less than 500x500 px), the JavaScript implementation seems to consume less computation time than the WebGL2 one. However, the computation time increases rapidly with image size for the JavaScript implementation, unlike the WebGL2 implementation, for which the computation time remains generally constant. 102 | 103 | For small pictures, the computation time seems correlated with the picture size for the JavaScript implementation (up to at least 1000x1000 px), unlike the WebGL2 implementation. It would be interesting to assess this correlation further, to increase the number of repetitions used for the benchmark, and to use a wider range of small pictures in order to improve the correlation estimate for the curves. 104 | 105 | For each implementation language, the Robert's cross seems to be the most advantageous in terms of computation time, but not in terms of the displayed result, where the other operators seem to highlight more details. 106 | 107 | Several improvements are still needed to make the process work on uint16 pictures. 108 | 109 | 110 | 111 | ## References 112 | 113 | ### Used in the present report 114 | 115 | [^BIE2013]: Bienfait B, Ertl P. JSME: a free molecule editor in JavaScript. Journal of Cheminformatics. 2013;5:24. doi:10.1186/1758-2946-5-24. 116 | 117 | [^BOV2009]: Bovik AC, editor. The essential guide to image processing. 
Academic Press; 2009 Jul 8. 118 | 119 | [^CAN1986]: Canny J. A computational approach to edge detection. IEEE Transactions on pattern analysis and machine intelligence. 1986 Nov(6):679-98. 120 | 121 | [^DOH2012]: Doherty G, Mettrick K, Grainge I, Lewis PJ. Harwood C, Wipat A (Ed). Chapter 4 - Imaging fluorescent protein fusions in live bacteria. In Methods in Microbiology, Systems Biology of Bacteria. Academic Press. 2012; 39(4):107-126 122 | 123 | [^ECMA2011]: ECMAScript EC. European Computer Manufacturers Association and others. ECMAScript language specification. 2011. 124 | 125 | [^ELI2015]: Eliceiri KW, Berthold MR, Goldberg IG, et al. Biological Imaging Software Tools. Nature methods. 2012;9(7):697-710. doi:10.1038/nmeth.2084. 126 | 127 | [^SCH2012]: Schneider CA, Rasband WS, Eliceiri KW. NIH Image to ImageJ: 25 years of Image Analysis. Nature methods. 2012;9(7):671-675. 128 | 129 | [^SCH2015]: Schindelin J, Rueden CT, Hiner MC, Eliceiri KW. The ImageJ ecosystem: An open platform for biomedical image analysis. Molecular reproduction and development. 2015 Jul 1;82(7-8):518-29. 130 | 131 | [^SHA2013]: Shamonin DP, Bron EE, Lelieveldt BPF, et al. Fast parallel image registration on CPU and GPU for diagnostic classification of Alzheimer’s disease. Frontiers in Neuroinformatics. 2013;7:50. doi:10.3389/fninf.2013.00050. 132 | 133 | [^SHI2017]: Shi M, Gao J, Zhang MQ. Web3DMol: interactive protein structure visualization based on WebGL. Nucleic Acids Research. 2017;45(Web Server issue):W523-W527. doi:10.1093/nar/gkx383. 134 | 135 | [^SOB1968]: Sobel I. An isotropic 3× 3 image gradient operator, presentation at Stanford Artificial Intelligence Project (SAIL). 136 | 137 | [^WOL2013]: Wollny G, Kellman P, Ledesma-Carbayo MJ, Skinner MM, Hublin JJ, Hierl T. MIA - A free and open source software for gray scale medical image analysis. Source Code Biol Med. 2013; 8:20. doi:10.1186/1751-0473-8-20. 138 | 139 | 140 | ### Used in previous repports 141 | 142 | [^ABD2015]: Abdelsamea MM, Gnecco G, Gaber MM, Elyan E. On the relationship between variational level set-based and som-based active contours. Computational intelligence and neuroscience. 2015 Jan 1;2015:34. 143 | 144 | [^CHAA2014]: Chaabane SB, Fnaiech F. Color edges extraction using statistical features and automatic threshold technique: application to the breast cancer cells. Biomedical engineering online. 2014 Jan 23;13(1):4. 145 | 146 | [^CHO2016]: Choudhry P. High-Throughput Method for Automated Colony and Cell Counting by Digital Image Analysis Based on Edge Detection. PLoS One. 2016; 11(2): e0148469. 147 | 148 | [^DAV1975]: Davis LS. A survey of edge detection techniques. Computer graphics and image processing. 1975 Sep 1;4(3):248-70. 149 | 150 | [^DER1987]: Deriche R. Using Canny's criteria to derive a recursively implemented optimal edge detector. International journal of computer vision. 1987 Jun 1;1(2):167-87. 151 | 152 | [^DIN2001]: Ding L, Goshtasby A. On the Canny edge detector. Pattern Recognition. 2001 Mar 31;34(3):721-5. 153 | 154 | [^GRE2016]: Grega M, Matiolanski A, Leszczuk M. Automated Detection of Firearms and Knives in a CCTV Image. Sensors 2016, 16, 47; doi:10.3390/s16010047. 155 | 156 | [^HAQ2015]: Haq I, Anwar S, Shah K, Khan MT, Shah SA. Fuzzy Logic Based Edge Detection in Smooth and Noisy Clinical Images. PLoS One. 10(9):e0138712, 2015. 157 | 158 | [^JAC2018]: Jackson D, Gilbert J. WebGL 2.0 Specification. 2018. 
https://www.khronos.org/registry/webgl/specs/latest/2.0/ 159 | 160 | [^JAL2017]: Jalalian A, Mashohor S, Mahmud R, Karasfi B, Saripan MIB, Ramli ARB. Foundation and Methodologies in computer-aided diagnosis systems for breast cancer detection. EXCLI Journal, 16:113-137, 2017. 161 | 162 | [^KEK2010]: Kekre HB, Gharge SM. Image segmentation using extended edge operator for mammographic images. International journal on computer science and Engineering. 2010;2(4):1086-91. 163 | 164 | [^MAI2009]: Maini R, Aggarwal H. Study and comparison of various image edge detection techniques. International journal of image processing (IJIP). 2009 Jan;3(1):1-1. 165 | 166 | [^LUO2017]: Luo S, Yang J, Gao Q, Zhou S, Zhan CA. The Edge Detectors Suitable for Retinal OCT Image Segmentation. Journal of Healthcare Engineering 2017; 2017: 3978410. 167 | 168 | [^RIC1945]: Rice SO. Mathematical analysis of random noise. The Bell System Technical Journal. 1945 Jan;24(1):46-156. 169 | 170 | [^SCH1997]: Scharcanski J and Venetsanopoulos A.N. Edge detection of color images using directional operators. IEEE Trans. Circuits Syst. Video Technol., 7(2):397–401, 1997. 171 | 172 | [^KAI2015]: Kainz P, Mayrhofer-Reinhartshuber M, Ahammer H. IQM: An Extensible and Portable Open Source Application for Image and Signal Analysis in Java. Martens L, ed. PLoS ONE. 2015;10(1):e0116329. doi:10.1371/journal.pone.0116329. 173 | 174 | [^KIR1971]: Kirsch RA. Computer determination of the constituent structure of biological images. Computers and biomedical research. 1971 Jun 1;4(3):315-28. 175 | 176 | [^MAR1980]: Marr D, Hildreth E. Theory of edge detection. Proceedings of the Royal Society of London B: Biological Sciences. 1980 Feb 29;207(1167):187-217. 177 | 178 | [^PRE1970]: Prewitt JM. Object enhancement and extraction. Picture processing and Psychopictorics. 1970 Jan 1;10(1):15-9. 179 | 180 | [^ROS2015]: Rose AS, Hildebrand PW. NGL Viewer: a web application for molecular visualization. Nucleic Acids Research. 2015;43(Web Server issue):W576-W579. doi:10.1093/nar/gkv402. 181 | 182 | [^RUE2017]: Rueden CT, Schindelin J, Hiner MC, DeZonia BE, Walter AE, Arena ET, Eliceiri KW. ImageJ2: ImageJ for the next generation of scientific image data. BMC Bioinformatics. 2017; 18:529. 183 | 184 | [^TRA1993]: Trahanias P.E and Venetsanopoulos A.N. Color edge detection using vector order statistics. IEEE Trans. Image Process., 2(2):259–264, 1993. 185 | 186 | [^TRE2013]: Treloas KK, Simpson MJ, Kabla AJ. Sensitivity of Edge Detection Methods for Quantifying Cell Migration Assays. PLoS One, 8(6):e67389, 2013. 187 | 188 | [^VIN2009]: Vincent OR, Folorunso O. A descriptive algorithm for sobel image edge detection. InProceedings of Informing Science & IT Education Conference (InSITE) 2009 Jun 12 (Vol. 40, pp. 97-107). 189 | 190 | [^ZHA2012]: Zhao J, Zheng W, Zhang L, Tian H. Segmentation of ultrasound images of thyroid nodule for assisting fine needle aspiration cytology. Health information science and systems. 2013 Dec 1;1(1):5. 191 | 192 | [^ZHU2014]: Zhu F, Liu Q, Fu Y, Shen B. Segmentation of Neuronal Structures Using SARSA (lambda)-Based Boundary Amendment with Reinforced Gradient-Descent Curve Shape Fitting. PLoS One, 9(3):1–19, 2014. 
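As mentioned in the Results section, the correlation coefficients and slopes relate execution time to picture size. The sketch below shows one way such values could be computed from a row of Tab.1 using a simple least-squares fit; it is only an illustration, the regression of time against image width in pixels (rather than total pixel count) is an assumption, and `linearFit` and `timesForOneRow` are hypothetical names.

```javascript
// Illustrative sketch: least-squares slope and Pearson correlation coefficient
// for one row of Tab.1 (execution time in ms against image width in px).
function linearFit(xs, ys) {
  const n = xs.length;
  const mean = (a) => a.reduce((s, v) => s + v, 0) / n;
  const mx = mean(xs), my = mean(ys);
  let sxy = 0, sxx = 0, syy = 0;
  for (let i = 0; i < n; i++) {
    sxy += (xs[i] - mx) * (ys[i] - my);
    sxx += (xs[i] - mx) ** 2;
    syy += (ys[i] - my) ** 2;
  }
  return { slope: sxy / sxx, r: sxy / Math.sqrt(sxx * syy) };
}

// Example call, omitting the 2048 px picture as in the Results section;
// timesForOneRow would hold the four matching values of one row of Tab.1.
// const { slope, r } = linearFit([128, 300, 512, 1024], timesForOneRow);
```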
193 | -------------------------------------------------------------------------------- /benchmark_results.csv: -------------------------------------------------------------------------------- 1 | Function,Time(ms),Memory(MB) 2 | Find Edges,5,64.21635437011719 3 | Find Edges,1,20.387245178222656 4 | Find Edges,1,20.387245178222656 5 | Find Edges,1,20.387245178222656 6 | Find Edges,1,20.387245178222656 7 | Find Edges,1,20.387245178222656 8 | Find Edges,1,20.387245178222656 9 | Find Edges,1,20.387245178222656 10 | Find Edges,0,20.387245178222656 11 | Find Edges,1,21.559219360351562 12 | Find Edges,1,21.559219360351562 13 | Find Edges,1,21.559219360351562 14 | Find Edges,1,21.559219360351562 15 | Find Edges,1,21.559219360351562 16 | Find Edges,1,21.559219360351562 17 | Find Edges,1,21.559219360351562 18 | Find Edges,1,21.559219360351562 19 | Find Edges,1,21.559219360351562 20 | Find Edges,0,22.66918182373047 21 | Find Edges,1,22.66918182373047 22 | Find Edges,1,22.66918182373047 23 | Find Edges,1,22.66918182373047 24 | Find Edges,1,22.66918182373047 25 | Find Edges,1,22.66918182373047 26 | Find Edges,0,22.66918182373047 27 | Find Edges,1,22.66918182373047 28 | Find Edges,1,23.782470703125 29 | Find Edges,1,23.782470703125 30 | Find Edges,1,23.782470703125 31 | Find Edges,1,23.782470703125 32 | Find Edges,1,23.782470703125 33 | Find Edges,1,23.782470703125 34 | Find Edges,1,23.782470703125 35 | Find Edges,1,23.844985961914062 36 | Find Edges,1,23.970016479492188 37 | Find Edges,1,24.095046997070312 38 | Find Edges,1,25.32953643798828 39 | Find Edges,1,25.32953643798828 40 | Find Edges,1,25.32953643798828 41 | Find Edges,1,25.32953643798828 42 | Find Edges,1,25.32953643798828 43 | Find Edges,1,25.32953643798828 44 | Find Edges,1,25.32953643798828 45 | Find Edges,1,25.32953643798828 46 | Find Edges,1,25.32953643798828 47 | Find Edges,1,26.501510620117188 48 | Find Edges,1,26.501510620117188 49 | Find Edges,1,26.501510620117188 50 | Find Edges,1,26.501510620117188 51 | Find Edges,1,26.501510620117188 52 | Find Edges,0,26.501510620117188 53 | Find Edges,1,26.501510620117188 54 | Find Edges,1,26.501510620117188 55 | Find Edges,1,27.673484802246094 56 | Find Edges,1,27.673484802246094 57 | Find Edges,1,27.673484802246094 58 | Find Edges,1,27.673484802246094 59 | Find Edges,1,27.673484802246094 60 | Find Edges,1,27.673484802246094 61 | Find Edges,1,27.673484802246094 62 | Find Edges,1,27.673484802246094 63 | Find Edges,1,27.673484802246094 64 | Find Edges,1,28.845458984375 65 | Find Edges,1,28.845458984375 66 | Find Edges,1,28.845458984375 67 | Find Edges,1,28.845458984375 68 | Find Edges,1,28.845458984375 69 | Find Edges,1,28.845458984375 70 | Find Edges,1,28.845458984375 71 | Find Edges,1,28.845458984375 72 | Find Edges,1,30.017433166503906 73 | Find Edges,1,30.017433166503906 74 | Find Edges,1,30.017433166503906 75 | Find Edges,1,30.017433166503906 76 | Find Edges,1,30.017433166503906 77 | Find Edges,0,30.017433166503906 78 | Find Edges,1,30.017433166503906 79 | Find Edges,1,30.017433166503906 80 | Find Edges,1,30.017433166503906 81 | Find Edges,1,31.189407348632812 82 | Find Edges,2,31.189407348632812 83 | Find Edges,1,31.189407348632812 84 | Find Edges,1,31.189407348632812 85 | Find Edges,1,31.189407348632812 86 | Find Edges,1,31.189407348632812 87 | Find Edges,1,31.189407348632812 88 | Find Edges,1,31.189407348632812 89 | Find Edges,1,35.02892303466797 90 | Find Edges,0,35.02892303466797 91 | Find Edges,0,35.02892303466797 92 | Find Edges,0,35.02892303466797 93 | Find Edges,0,35.02892303466797 
94 | Find Edges,0,35.02892303466797 95 | Find Edges,0,35.02892303466797 96 | Find Edges,0,35.02892303466797 97 | Find Edges,0,35.02892303466797 98 | Find Edges,0,36.200897216796875 99 | Find Edges,0,36.200897216796875 100 | Find Edges,0,36.200897216796875 101 | Find Edges,0,36.200897216796875 102 | Log Filter,162,64.33345031738281 103 | Log Filter,11,65.0953369140625 104 | Log Filter,19,67.9511947631836 105 | Log Filter,18,69.82257843017578 106 | Log Filter,15,70.58446502685547 107 | Log Filter,18,73.4403305053711 108 | Log Filter,12,75.31170654296875 109 | Log Filter,37,76.07359313964844 110 | Log Filter,56,20.394325256347656 111 | Log Filter,41,22.32312774658203 112 | Log Filter,24,24.630523681640625 113 | Log Filter,12,26.453750610351562 114 | Log Filter,11,28.2154541015625 115 | Log Filter,15,30.038681030273438 116 | Log Filter,13,32.80303955078125 117 | Log Filter,13,33.803070068359375 118 | Log Filter,13,35.614471435546875 119 | Log Filter,13,37.42584228515625 120 | Log Filter,13,39.18659210205078 121 | Log Filter,12,41.00981903076172 122 | Log Filter,15,43.77417755126953 123 | Log Filter,15,44.774208068847656 124 | Log Filter,11,46.585609436035156 125 | Log Filter,20,48.39698028564453 126 | Log Filter,17,50.22020721435547 127 | Log Filter,17,51.981910705566406 128 | Log Filter,24,53.805137634277344 129 | Log Filter,13,55.62836456298828 130 | Log Filter,15,57.45159149169922 131 | Log Filter,15,59.274818420410156 132 | Log Filter,17,61.098045349121094 133 | Log Filter,12,62.92127227783203 134 | Log Filter,13,64.74449920654297 135 | Log Filter,11,66.5677261352539 136 | Log Filter,13,68.39095306396484 137 | Log Filter,11,70.80863189697266 138 | Log Filter,11,72.6318588256836 139 | Log Filter,12,74.45508575439453 140 | Log Filter,13,76.27831268310547 141 | Log Filter,10,78.1015396118164 142 | Log Filter,11,80.92362213134766 143 | Log Filter,11,82.68441772460938 144 | Log Filter,11,83.6844482421875 145 | Log Filter,28,21.32537841796875 146 | Log Filter,10,24.138946533203125 147 | Log Filter,12,26.000022888183594 148 | Log Filter,11,27.860252380371094 149 | Log Filter,10,28.62213897705078 150 | Log Filter,12,31.44457244873047 151 | Log Filter,11,33.58869171142578 152 | Log Filter,40,34.35057830810547 153 | Log Filter,11,36.27317810058594 154 | Log Filter,21,39.04499816894531 155 | Log Filter,61,39.806884765625 156 | Log Filter,13,41.72948455810547 157 | Log Filter,10,44.501304626464844 158 | Log Filter,10,45.26319122314453 159 | Log Filter,10,47.185791015625 160 | Log Filter,10,49.957611083984375 161 | Log Filter,10,50.71949768066406 162 | Log Filter,10,52.64209747314453 163 | Log Filter,10,55.413917541503906 164 | Log Filter,37,56.567237854003906 165 | Log Filter,10,58.489837646484375 166 | Log Filter,11,61.26165771484375 167 | Log Filter,10,62.02354431152344 168 | Log Filter,46,63.946144104003906 169 | Log Filter,10,66.71796417236328 170 | Log Filter,16,67.47985076904297 171 | Log Filter,10,69.40245056152344 172 | Log Filter,12,72.17427062988281 173 | Log Filter,11,72.9361572265625 174 | Log Filter,10,74.85875701904297 175 | Log Filter,11,77.63057708740234 176 | Log Filter,9,78.39246368408203 177 | Log Filter,10,80.3150634765625 178 | Log Filter,11,83.08688354492188 179 | Log Filter,21,83.84877014160156 180 | Log Filter,10,85.77136993408203 181 | Log Filter,26,22.022239685058594 182 | Log Filter,22,24.662216186523438 183 | Log Filter,15,25.424102783203125 184 | Log Filter,14,27.173294067382812 185 | Log Filter,10,28.973129272460938 186 | Log Filter,10,30.910308837890625 187 | Log 
Filter,10,32.659584045410156 188 | Log Filter,14,34.47132110595703 189 | Log Filter,10,36.28302764892578 190 | Log Filter,9,38.082862854003906 191 | Log Filter,10,40.020042419433594 192 | Log Filter,12,41.76940155029297 193 | Log Filter,10,43.58113098144531 194 | Log Filter,9,45.652313232421875 195 | Log Filter,36,47.46405792236328 196 | Log Filter,10,49.27576446533203 197 | Log Filter,41,47.56578063964844 198 | Log Filter,10,49.502960205078125 199 | Log Filter,10,51.252357482910156 200 | Log Filter,10,53.06410217285156 201 | Log Filter,54,54.87583923339844 202 | FeatureJ Laplacian,39,62.32911682128906 203 | FeatureJ Laplacian,27,63.56642150878906 204 | FeatureJ Laplacian,24,64.06294250488281 205 | FeatureJ Laplacian,20,65.80876159667969 206 | FeatureJ Laplacian,49,66.98359680175781 207 | FeatureJ Laplacian,37,66.98359680175781 208 | FeatureJ Laplacian,21,68.59252166748047 209 | FeatureJ Laplacian,38,68.59252166748047 210 | FeatureJ Laplacian,38,69.0925521850586 211 | FeatureJ Laplacian,28,71.27452850341797 212 | FeatureJ Laplacian,52,71.27452850341797 213 | FeatureJ Laplacian,18,71.7745590209961 214 | FeatureJ Laplacian,24,73.0118637084961 215 | FeatureJ Laplacian,37,75.95448303222656 216 | FeatureJ Laplacian,28,75.95448303222656 217 | FeatureJ Laplacian,34,77.38125610351562 218 | FeatureJ Laplacian,35,77.63975524902344 219 | FeatureJ Laplacian,28,78.32379150390625 220 | FeatureJ Laplacian,32,79.81111145019531 221 | FeatureJ Laplacian,43,80.9859619140625 222 | FeatureJ Laplacian,28,81.16995239257812 223 | FeatureJ Laplacian,21,82.59481048583984 224 | FeatureJ Laplacian,53,83.26170349121094 225 | FeatureJ Laplacian,40,18.79090118408203 226 | FeatureJ Laplacian,44,18.79090118408203 227 | FeatureJ Laplacian,38,20.104469299316406 228 | FeatureJ Laplacian,38,21.598106384277344 229 | FeatureJ Laplacian,41,21.779754638671875 230 | FeatureJ Laplacian,30,23.339637756347656 231 | FeatureJ Laplacian,57,23.339637756347656 232 | FeatureJ Laplacian,49,23.968299865722656 233 | FeatureJ Laplacian,38,25.711952209472656 234 | FeatureJ Laplacian,30,25.893585205078125 235 | FeatureJ Laplacian,21,27.703514099121094 236 | FeatureJ Laplacian,23,27.703514099121094 237 | FeatureJ Laplacian,31,29.134750366210938 238 | FeatureJ Laplacian,31,29.44501495361328 239 | FeatureJ Laplacian,22,29.945045471191406 240 | FeatureJ Laplacian,17,31.126190185546875 241 | FeatureJ Laplacian,43,32.686004638671875 242 | FeatureJ Laplacian,36,32.686004638671875 243 | FeatureJ Laplacian,28,34.42755126953125 244 | FeatureJ Laplacian,37,34.42755126953125 245 | FeatureJ Laplacian,34,34.927581787109375 246 | FeatureJ Laplacian,23,36.799903869628906 247 | FeatureJ Laplacian,36,36.981536865234375 248 | FeatureJ Laplacian,103,38.148460388183594 249 | FeatureJ Laplacian,22,39.520713806152344 250 | FeatureJ Laplacian,27,40.951942443847656 251 | FeatureJ Laplacian,48,41.133583068847656 252 | FeatureJ Laplacian,40,42.564781188964844 253 | FeatureJ Laplacian,56,42.69340515136719 254 | FeatureJ Laplacian,25,43.19343566894531 255 | FeatureJ Laplacian,39,44.939239501953125 256 | FeatureJ Laplacian,22,45.24950408935547 257 | FeatureJ Laplacian,36,46.183021545410156 258 | FeatureJ Laplacian,34,47.42664337158203 259 | FeatureJ Laplacian,46,48.98643493652344 260 | FeatureJ Laplacian,32,49.168121337890625 261 | FeatureJ Laplacian,24,50.599388122558594 262 | FeatureJ Laplacian,30,50.72801208496094 263 | FeatureJ Laplacian,34,51.22804260253906 264 | FeatureJ Laplacian,31,52.973846435546875 265 | FeatureJ Laplacian,18,53.155479431152344 266 | FeatureJ 
Laplacian,29,53.784141540527344 267 | FeatureJ Laplacian,45,55.02776336669922 268 | FeatureJ Laplacian,26,56.45906066894531 269 | FeatureJ Laplacian,34,56.769325256347656 270 | FeatureJ Laplacian,41,58.200599670410156 271 | FeatureJ Laplacian,25,58.200599670410156 272 | FeatureJ Laplacian,37,58.829254150390625 273 | FeatureJ Laplacian,42,60.57292938232422 274 | FeatureJ Laplacian,22,60.75457763671875 275 | FeatureJ Laplacian,54,61.254608154296875 276 | FeatureJ Laplacian,40,62.626861572265625 277 | FeatureJ Laplacian,40,64.05801391601562 278 | FeatureJ Laplacian,53,64.23966217041016 279 | FeatureJ Laplacian,33,65.79953002929688 280 | FeatureJ Laplacian,27,65.79953002929688 281 | FeatureJ Laplacian,46,66.299560546875 282 | FeatureJ Laplacian,31,68.17394256591797 283 | FeatureJ Laplacian,45,68.35557556152344 284 | FeatureJ Laplacian,24,68.85560607910156 285 | FeatureJ Laplacian,32,70.09922790527344 286 | FeatureJ Laplacian,38,71.68242645263672 287 | FeatureJ Laplacian,39,71.68242645263672 288 | FeatureJ Laplacian,41,73.29618835449219 289 | FeatureJ Laplacian,36,73.42481231689453 290 | FeatureJ Laplacian,30,73.92484283447266 291 | FeatureJ Laplacian,26,75.67046356201172 292 | FeatureJ Laplacian,102,75.85210418701172 293 | FeatureJ Laplacian,37,76.48075866699219 294 | FeatureJ Laplacian,22,77.72438049316406 295 | FeatureJ Laplacian,25,79.15560150146484 296 | FeatureJ Laplacian,26,79.28423309326172 297 | FeatureJ Laplacian,57,80.89701843261719 298 | FeatureJ Laplacian,21,80.89701843261719 299 | FeatureJ Laplacian,36,81.39704895019531 300 | FeatureJ Laplacian,42,18.615676879882812 301 | FeatureJ Laplacian,25,18.615676879882812 302 | Canny Edge Detector,210,31.158111572265625 303 | Canny Edge Detector,202,33.08526611328125 304 | Canny Edge Detector,209,35.012420654296875 305 | Canny Edge Detector,207,36.9395751953125 306 | Canny Edge Detector,218,38.866729736328125 307 | Canny Edge Detector,202,40.932334899902344 308 | Canny Edge Detector,203,42.85948944091797 309 | Canny Edge Detector,203,44.786643981933594 310 | Canny Edge Detector,211,46.71379852294922 311 | Canny Edge Detector,210,48.640953063964844 312 | Canny Edge Detector,214,50.56810760498047 313 | Canny Edge Detector,203,52.495262145996094 314 | Canny Edge Detector,219,54.42241668701172 315 | Canny Edge Detector,202,56.349571228027344 316 | Canny Edge Detector,203,58.27672576904297 317 | Canny Edge Detector,204,60.203880310058594 318 | Canny Edge Detector,203,62.13103485107422 319 | Canny Edge Detector,204,64.05818939208984 320 | Canny Edge Detector,206,65.98534393310547 321 | Canny Edge Detector,203,67.9124984741211 322 | Canny Edge Detector,204,69.83965301513672 323 | Canny Edge Detector,205,71.76680755615234 324 | Canny Edge Detector,204,73.69396209716797 325 | Canny Edge Detector,202,75.6211166381836 326 | Canny Edge Detector,204,77.54827117919922 327 | Canny Edge Detector,206,79.47542572021484 328 | Canny Edge Detector,203,81.40258026123047 329 | Canny Edge Detector,218,18.791709899902344 330 | Canny Edge Detector,203,20.12529754638672 331 | Canny Edge Detector,207,22.14575958251953 332 | Canny Edge Detector,205,23.95838165283203 333 | Canny Edge Detector,203,25.978843688964844 334 | Canny Edge Detector,203,27.791465759277344 335 | Canny Edge Detector,203,29.811927795410156 336 | Canny Edge Detector,203,31.624549865722656 337 | Canny Edge Detector,203,33.64501190185547 338 | Canny Edge Detector,204,35.45763397216797 339 | Canny Edge Detector,206,37.47809600830078 340 | Canny Edge Detector,205,39.29071807861328 341 | Canny Edge 
Detector,205,41.311180114746094 342 | Canny Edge Detector,204,43.123802185058594 343 | Canny Edge Detector,203,45.144264221191406 344 | Canny Edge Detector,206,46.956886291503906 345 | Canny Edge Detector,203,49.070220947265625 346 | Canny Edge Detector,227,51.510292053222656 347 | Canny Edge Detector,203,53.53075408935547 348 | Canny Edge Detector,203,55.34337615966797 349 | Canny Edge Detector,204,57.36383819580078 350 | Canny Edge Detector,203,59.17646026611328 351 | Canny Edge Detector,202,61.196922302246094 352 | Canny Edge Detector,203,63.009544372558594 353 | Canny Edge Detector,203,65.0300064086914 354 | Canny Edge Detector,204,66.8426284790039 355 | Canny Edge Detector,203,68.86309051513672 356 | Canny Edge Detector,204,70.67571258544922 357 | Canny Edge Detector,205,72.69617462158203 358 | Canny Edge Detector,204,74.50879669189453 359 | Canny Edge Detector,204,76.52925872802734 360 | Canny Edge Detector,203,78.43570709228516 361 | Canny Edge Detector,204,80.45616912841797 362 | Canny Edge Detector,205,82.26879119873047 363 | Canny Edge Detector,225,17.649642944335938 364 | Canny Edge Detector,206,19.006813049316406 365 | Canny Edge Detector,208,21.073165893554688 366 | Canny Edge Detector,209,23.062049865722656 367 | Canny Edge Detector,203,24.874671936035156 368 | Canny Edge Detector,203,26.74980926513672 369 | Canny Edge Detector,203,28.801170349121094 370 | Canny Edge Detector,203,30.613792419433594 371 | Canny Edge Detector,203,32.66515350341797 372 | Canny Edge Detector,203,34.415260314941406 373 | Canny Edge Detector,204,36.40412902832031 374 | Canny Edge Detector,203,38.4547119140625 375 | Canny Edge Detector,204,40.267333984375 376 | Canny Edge Detector,203,42.14247131347656 377 | Canny Edge Detector,203,44.19383239746094 378 | Canny Edge Detector,203,46.00645446777344 379 | Canny Edge Detector,203,48.05781555175781 380 | Canny Edge Detector,203,49.80792236328125 381 | Canny Edge Detector,203,51.796791076660156 382 | Canny Edge Detector,203,53.785675048828125 383 | Canny Edge Detector,203,55.598297119140625 384 | Canny Edge Detector,203,57.535301208496094 385 | Canny Edge Detector,202,59.58666229248047 386 | Canny Edge Detector,203,61.39928436279297 387 | Canny Edge Detector,203,63.450645446777344 388 | Canny Edge Detector,202,65.20075225830078 389 | Canny Edge Detector,202,67.18962097167969 390 | Canny Edge Detector,204,69.17945861816406 391 | Canny Edge Detector,203,70.99208068847656 392 | Canny Edge Detector,203,72.86721801757812 393 | Canny Edge Detector,202,74.9185791015625 394 | Canny Edge Detector,202,76.79289245605469 395 | Canny Edge Detector,203,78.84425354003906 396 | Canny Edge Detector,203,80.5943603515625 397 | Canny Edge Detector,203,82.5832290649414 398 | Canny Edge Detector,233,17.961898803710938 399 | Canny Edge Detector,209,19.012283325195312 400 | Canny Edge Detector,203,20.93891143798828 401 | Canny Edge Detector,204,23.795509338378906 402 | FeatureJ Edges,50,33.07456970214844 403 | FeatureJ Edges,40,34.376197814941406 404 | FeatureJ Edges,67,35.760963439941406 405 | FeatureJ Edges,64,37.104156494140625 406 | FeatureJ Edges,66,38.47699737548828 407 | FeatureJ Edges,38,39.84107971191406 408 | FeatureJ Edges,36,41.225982666015625 409 | FeatureJ Edges,90,42.527732849121094 410 | FeatureJ Edges,37,43.90045166015625 411 | FeatureJ Edges,43,45.24360656738281 412 | FeatureJ Edges,51,46.65831756591797 413 | FeatureJ Edges,50,47.95990753173828 414 | FeatureJ Edges,43,49.30315399169922 415 | FeatureJ Edges,47,50.67609405517578 416 | FeatureJ 
Edges,32,52.02032470703125 417 | FeatureJ Edges,44,53.36450958251953 418 | FeatureJ Edges,41,54.73719787597656 419 | FeatureJ Edges,181,56.080352783203125 420 | FeatureJ Edges,22,57.42350769042969 421 | FeatureJ Edges,25,58.796363830566406 422 | FeatureJ Edges,44,60.1396484375 423 | FeatureJ Edges,52,61.44126892089844 424 | FeatureJ Edges,22,62.78441619873047 425 | FeatureJ Edges,21,62.855499267578125 426 | FeatureJ Edges,21,64.23204803466797 427 | FeatureJ Edges,54,65.63768768310547 428 | FeatureJ Edges,39,66.94024658203125 429 | FeatureJ Edges,23,68.28341674804688 430 | FeatureJ Edges,28,69.62754821777344 431 | FeatureJ Edges,45,70.97126770019531 432 | FeatureJ Edges,29,72.30245971679688 433 | FeatureJ Edges,39,73.6456527709961 434 | FeatureJ Edges,22,74.98904418945312 435 | FeatureJ Edges,33,76.33221435546875 436 | FeatureJ Edges,21,77.67546844482422 437 | FeatureJ Edges,63,79.04814147949219 438 | FeatureJ Edges,53,80.3497543334961 439 | FeatureJ Edges,18,81.94290161132812 440 | FeatureJ Edges,46,83.28901672363281 441 | FeatureJ Edges,49,17.533187866210938 442 | FeatureJ Edges,53,18.88030242919922 443 | FeatureJ Edges,40,20.225570678710938 444 | FeatureJ Edges,57,21.532508850097656 445 | FeatureJ Edges,40,23.127700805664062 446 | FeatureJ Edges,38,24.474868774414062 447 | FeatureJ Edges,26,25.822952270507812 448 | FeatureJ Edges,48,27.206253051757812 449 | FeatureJ Edges,40,28.589569091796875 450 | FeatureJ Edges,20,29.972885131835938 451 | FeatureJ Edges,50,31.356201171875 452 | FeatureJ Edges,54,32.70111846923828 453 | FeatureJ Edges,49,34.140113830566406 454 | FeatureJ Edges,23,35.487213134765625 455 | FeatureJ Edges,73,36.833404541015625 456 | FeatureJ Edges,54,38.14842987060547 457 | FeatureJ Edges,23,39.52398681640625 458 | FeatureJ Edges,47,40.838829040527344 459 | FeatureJ Edges,56,42.208106994628906 460 | FeatureJ Edges,54,43.583709716796875 461 | FeatureJ Edges,24,44.928871154785156 462 | FeatureJ Edges,58,46.2740478515625 463 | FeatureJ Edges,37,46.34278106689453 464 | FeatureJ Edges,43,47.69178771972656 465 | FeatureJ Edges,34,49.099449157714844 466 | FeatureJ Edges,23,50.44557189941406 467 | FeatureJ Edges,38,51.75236511230469 468 | FeatureJ Edges,37,53.097564697265625 469 | FeatureJ Edges,48,54.692718505859375 470 | FeatureJ Edges,24,56.287879943847656 471 | FeatureJ Edges,44,57.634971618652344 472 | FeatureJ Edges,57,58.98027038574219 473 | FeatureJ Edges,48,60.327674865722656 474 | FeatureJ Edges,40,61.923072814941406 475 | FeatureJ Edges,19,63.23181915283203 476 | FeatureJ Edges,24,64.57994079589844 477 | FeatureJ Edges,49,65.96341705322266 478 | FeatureJ Edges,36,67.34672546386719 479 | FeatureJ Edges,22,68.69164276123047 480 | FeatureJ Edges,39,70.07495880126953 481 | FeatureJ Edges,47,71.4886245727539 482 | FeatureJ Edges,40,72.89727020263672 483 | FeatureJ Edges,39,74.21208953857422 484 | FeatureJ Edges,39,75.58784484863281 485 | FeatureJ Edges,43,76.93307495117188 486 | FeatureJ Edges,37,78.20956420898438 487 | FeatureJ Edges,48,79.58509826660156 488 | FeatureJ Edges,39,80.99288177490234 489 | FeatureJ Edges,22,82.33909606933594 490 | FeatureJ Edges,36,17.24675750732422 491 | FeatureJ Edges,51,18.585350036621094 492 | FeatureJ Edges,18,19.923973083496094 493 | FeatureJ Edges,22,21.262550354003906 494 | FeatureJ Edges,46,22.601272583007812 495 | FeatureJ Edges,49,23.970375061035156 496 | FeatureJ Edges,32,23.970375061035156 497 | FeatureJ Edges,56,25.37982177734375 498 | FeatureJ Edges,37,26.744400024414062 499 | FeatureJ Edges,24,28.084083557128906 500 | FeatureJ 
Edges,39,29.42359161376953 501 | FeatureJ Edges,42,30.763107299804688 502 | --------------------------------------------------------------------------------