├── .gitignore
├── LICENSE
├── README.md
├── images
│   ├── 1. Source Image With Four Areas.png
│   ├── 2. Preprocess Image.png
│   ├── 3. Four mask images.png
│   ├── 4. Fully processed image.png
│   ├── 5. Pixel values.png
│   ├── 6. User click mapping.png
│   ├── 7. Final image.png
│   └── airplane.png
├── index.css
├── index.html
└── src
    ├── browser.js
    └── worker.js
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .last_modified_timestamp
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2023 Shane O'Sullivan
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Example Canvas Instant Fill Demo
2 |
3 | This simple project demonstrates how to use a web worker to pre-calculate the distinct fillable areas in an image, so that they can be filled instantly when the user clicks on the canvas.
4 |
5 | A full description of how this works is written up on
6 | [My Blog](https://shaneosullivan.wordpress.com/2023/05/23/instant-colour-fill-with-html-canvas/)
7 |
--------------------------------------------------------------------------------
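
For orientation, here is a condensed sketch of the message protocol that src/browser.js and src/worker.js (both included below) implement. The field names are taken from those files; the helper name requestSlowFill and the inline handlers are illustrative only.

// Sketch: how the main thread talks to the worker (see src/browser.js below).
const canvas = document.getElementById("canvas");
const context = canvas.getContext("2d");
const worker = new Worker("./src/worker.js");
const dimensions = { width: canvas.width, height: canvas.height };

// 1. Once the source image is drawn, transfer its pixels to the worker so it
//    can pre-compute a mask for each discrete fillable area ("process").
const source = context.getImageData(0, 0, canvas.width, canvas.height);
worker.postMessage(
  { action: "process", dimensions, buffer: source.data.buffer },
  [source.data.buffer]
);

// 2. When a click cannot be served from a cached mask, ask the worker to run
//    the slow flood fill ("fill") from the clicked coordinates.
function requestSlowFill(x, y, colour, currentImageData, sourceImageData) {
  worker.postMessage(
    {
      action: "fill",
      dimensions,
      sourceImageData: sourceImageData.data.buffer,
      currentImageData: currentImageData.data.buffer,
      x,
      y,
      colour,
    },
    [currentImageData.data.buffer]
  );
}

// 3. The worker replies with "process" (mask data for instant fills) or
//    "fill" (possibly partial pixel buffers to paint onto the visible canvas).
worker.addEventListener("message", (evt) => {
  const data = evt.data;
  if (data.response === "process") {
    // cache data.allPixels and data.pixelMaskInfo for instant fills
  } else if (data.response === "fill") {
    // draw data.pixels (width x height RGBA values) onto the canvas
  }
});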
/images/1. Source Image With Four Areas.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaneosullivan/example-canvas-fill/5a6bfd3f0f773f52577395ad880b6ec7f06345c9/images/1. Source Image With Four Areas.png
--------------------------------------------------------------------------------
/images/2. Preprocess Image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaneosullivan/example-canvas-fill/5a6bfd3f0f773f52577395ad880b6ec7f06345c9/images/2. Preprocess Image.png
--------------------------------------------------------------------------------
/images/3. Four mask images.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaneosullivan/example-canvas-fill/5a6bfd3f0f773f52577395ad880b6ec7f06345c9/images/3. Four mask images.png
--------------------------------------------------------------------------------
/images/4. Fully processed image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaneosullivan/example-canvas-fill/5a6bfd3f0f773f52577395ad880b6ec7f06345c9/images/4. Fully processed image.png
--------------------------------------------------------------------------------
/images/5. Pixel values.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaneosullivan/example-canvas-fill/5a6bfd3f0f773f52577395ad880b6ec7f06345c9/images/5. Pixel values.png
--------------------------------------------------------------------------------
/images/6. User click mapping.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaneosullivan/example-canvas-fill/5a6bfd3f0f773f52577395ad880b6ec7f06345c9/images/6. User click mapping.png
--------------------------------------------------------------------------------
/images/7. Final image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaneosullivan/example-canvas-fill/5a6bfd3f0f773f52577395ad880b6ec7f06345c9/images/7. Final image.png
--------------------------------------------------------------------------------
/images/airplane.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaneosullivan/example-canvas-fill/5a6bfd3f0f773f52577395ad880b6ec7f06345c9/images/airplane.png
--------------------------------------------------------------------------------
/index.css:
--------------------------------------------------------------------------------
1 | #colourForm label {
2 | color: #ffffff;
3 | cursor: pointer;
4 | padding: 4px;
5 | }
6 |
7 | .github-corner:hover .octo-arm {
8 | animation: octocat-wave 560ms ease-in-out;
9 | }
10 | @keyframes octocat-wave {
11 | 0%,
12 | 100% {
13 | transform: rotate(0);
14 | }
15 | 20%,
16 | 60% {
17 | transform: rotate(-25deg);
18 | }
19 | 40%,
20 | 80% {
21 | transform: rotate(10deg);
22 | }
23 | }
24 | @media (max-width: 500px) {
25 | .github-corner:hover .octo-arm {
26 | animation: none;
27 | }
28 | .github-corner .octo-arm {
29 | animation: octocat-wave 560ms ease-in-out;
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Instant Canvas Fill Example
4 |
5 |
6 |
7 |
8 | This is an example of how to fill an area of an image, displayed in an HTML
9 | Canvas, instantly.
10 |
11 |
12 | The code for this is available on
13 | Github,
14 | or just view the page source. You can also read a description of how it
15 | works in
16 | my blog post
17 | about it.
18 |
19 |
(lines 20-78: remaining page markup was stripped from this dump)
--------------------------------------------------------------------------------
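
The tags of index.html were lost in this dump, so only its text content appears above. Judging from src/browser.js, the page needs a canvas plus the two option forms; the following check is a hypothetical sketch of that contract, not part of the repository.

// Hypothetical sketch: the element ids src/browser.js looks up in index.html.
["canvas", "speedForm", "colourForm"].forEach((id) => {
  if (!document.getElementById(id)) {
    console.warn(`index.html is missing an element with id "${id}"`);
  }
});
// The page presumably also links index.css and loads src/browser.js, which
// in turn starts the worker from ./src/worker.js.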
/src/browser.js:
--------------------------------------------------------------------------------
1 | (function () {
2 | const IMAGE_PATH = "./images/airplane.png";
3 | let fillSpeed = "slow";
4 | let selectedColour = "#FF0000";
5 | let maskInfo = null;
6 |
7 | function runExample() {
8 | const canvas = document.getElementById("canvas");
9 | const context = canvas.getContext("2d");
10 |
11 | addFormListener();
12 |
13 | const { context: unchangingContext } = makeCanvas({
14 | height: canvas.height,
15 | width: canvas.width,
16 | });
17 |
18 | // Load the image into the canvas
19 | const img = new Image();
20 |
21 | img.onload = () => {
22 | context.drawImage(img, 0, 0);
23 | unchangingContext.drawImage(img, 0, 0);
24 |
25 | const dimensions = { height: canvas.height, width: canvas.width };
26 | const sourceImageData = getSrcImageData();
27 | worker.postMessage(
28 | {
29 | action: "process",
30 | dimensions,
31 | buffer: sourceImageData.data.buffer,
32 | },
33 | [sourceImageData.data.buffer]
34 | );
35 | };
36 | img.src = IMAGE_PATH;
37 |
38 | function getSrcImageData() {
39 | return unchangingContext.getImageData(0, 0, canvas.width, canvas.height);
40 | }
41 |
42 | // Listen to a click on the canvas, and try to fill in the selected
43 | // colour based on the x,y coordinates chosen.
44 | canvas.addEventListener("click", (evt) => {
45 | const { x, y } = getEventCoords(evt, canvas.getBoundingClientRect());
46 |
47 | console.log("User clicked the point x", x, "y", y);
48 | fillColour(x, y, selectedColour, context, unchangingContext, worker);
49 | });
50 |
51 | // Set up the worker
52 | const workerUrl = "./src/worker.js";
53 | let worker = new Worker(workerUrl);
54 |
55 | // The worker script communicates with this main thread script by passing
56 | // "message" events to the Worker object. We can listen to those messages
57 | // like this.
58 | worker.addEventListener("message", (evt) => {
59 | const { data } = evt;
60 |
61 | console.log("Main thread got worker data", data);
62 |
63 | switch (data.response) {
64 | case "fill":
65 | // The worker has filled in some pixels, either all the
66 | // possible pixels, or a partial set of pixels as it works
67 | // its way through the search/fill algorithm.
68 | handleFillMessageFromWorker(data, context);
69 | break;
70 | case "process":
71 | // The worker has finished pre-processing the image, and
72 | // sent back a version of the image where each pixel is
73 | // assigned an alpha value from 1 to 254. These alpha values
74 | // are used to determine what discrete fillable area any given
75 | // pixel is in. This means that this algorithm can support up to
76 | // 254 individual discrete fillable spaces. If a pixel is not
77 | // in any of these areas, and therefore has an alpha of 0, we
78 | // fall back to using the slow method of filling.
79 | handleProcessMessageFromWorker(data);
80 | break;
81 | default:
82 | console.error("Unknown response from worker", data);
83 | }
84 | });
85 | }
86 |
87 | function handleFillMessageFromWorker(data, context) {
88 | const { height, width, pixels } = data;
89 |
90 | if (!pixels) {
91 | // No change was made
92 | return;
93 | }
94 | const imageData = new ImageData(width, height);
95 | imageData.data.set(new Uint8ClampedArray(pixels));
96 |
97 | const { canvas: tempCanvas, context: tempContext } = makeCanvas({
98 | height,
99 | width,
100 | });
101 | tempContext.putImageData(imageData, 0, 0);
102 |
103 | // Draw the full image
104 | context.drawImage(tempCanvas, 0, 0);
105 | }
106 |
107 | // We got data back from the Worker with the pixel data for the
108 | // full image. Each pixel has an alpha value of between 0 and 255.
109 | // If the value is not 0, then that pixel is part of a space that
110 | // can be instantly filled.
111 | // The pixelMaskInfo is an array where each item looks like
112 | // {
113 | // dataUrl?: string,
114 | // pixels?: Array,
115 | // x: number, // The leftmost pixel
116 | // y: number, // The topmost pixel
117 | // height: number, // The height of the bounding box
118 | // width: number, // The width of the bounding box
119 | // }
120 | //
121 | // When the user clicks an (x,y) coordinate and is doing an
122 | // instant fill, we check the alpha value of that pixel and use
123 | // that integer to key into the pixelMaskInfo to select the
124 | // the "pixels" to fill.
125 | function handleProcessMessageFromWorker(data) {
126 | const { height, width, allPixels: pixels } = data;
127 | const pixelMaskInfo = data.pixelMaskInfo;
128 |
129 | if (width !== canvas.width || height !== canvas.height) {
130 | // Outdated data, the screen has changed size, so
131 | // ignore it
132 | return;
133 | }
134 |
135 | const { canvas: tempCanvas, context: tempContext } = makeCanvas({
136 | height,
137 | width,
138 | });
139 |
140 | const imageData = new ImageData(width, height);
141 | imageData.data.set(new Uint8ClampedArray(pixels));
142 |
143 | tempContext.putImageData(imageData, 0, 0);
144 |
145 | // Store the mask info for use when the user clicks a pixel
146 | maskInfo = {
147 | node: tempCanvas,
148 | data: tempContext.getImageData(0, 0, width, height),
149 | pixelMaskInfo,
150 | };
151 |
152 | // https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API/Tutorial/Compositing#globalcompositeoperation
153 | // This is the magic incantation that gets all this canvas sorcery to work!!
154 | // It makes it so that the fillRect() call later only fills in the non-transparent
155 | // pixels and leaves the others transparent. This way, only the background of the image is
156 | // coloured in and the main subject is left as an empty 'mask' in this canvas.
157 | // We can then easily use drawImage to place that masked image on top of the
158 | // canvas the user is drawing
159 | tempContext.globalCompositeOperation = "source-in";
160 | }
161 |
162 | function fillColour(x, y, colour, context, sourceContext, worker) {
163 | // Fill all the transparent pixels in the source image that is being
164 | // coloured in
165 |
166 | let contextForData = context;
167 |
168 | // Only allow the background to be filled quickly if
169 | // the user is colouring in a preselected image, as this
170 | // will not change. If they are sketching, then
171 | // the image is always changing and we cannot take the
172 | // shortcut
173 | const enableFastFill = fillSpeed === "instant";
174 |
175 | x = Math.floor(x);
176 | y = Math.floor(y);
177 |
178 | // First check if this pixel is non-transparent in our cached
179 | // image data we got from the worker's pre-processing.
180 | // If it has a non zero alpha value, instead of using the worker to perform
181 | // a slow algorithmic fill, simply draw a rectangle filled with the right colour
182 | // over the entire cached image data and draw that onto the
183 | // main canvas. This draws only the right pixels, rather than
184 | // a full rectangle, because we use the `globalCompositeOperation = "source-in"`
185 | // on the canvas pixelMaskContext
186 | if (maskInfo && enableFastFill) {
187 | const firstIdx = getColorIndexForCoord(x, y, maskInfo.node.width);
188 | const alphaValue = maskInfo.data.data[firstIdx + 3];
189 |
190 | if (alphaValue > 0) {
191 | // Yay, we can use the fast approach
192 |
193 | // The alpha value in the maskInfo is an index into the pixelMaskInfo array.
194 | // We subtract 1 from it as the number 0 tells us to NOT fill the pixel
195 | const pixelMaskInfo = maskInfo.pixelMaskInfo[alphaValue - 1];
196 |
197 | let maskDataUrl = pixelMaskInfo.dataUrl;
198 |
199 | const { canvas: pixelMaskCanvasNode, context: pixelMaskContext } =
200 | makeCanvas({
201 | height: pixelMaskInfo.height,
202 | width: pixelMaskInfo.width,
203 | });
204 |
205 | function performDraw() {
206 | // Here's the canvas magic that makes it just draw the non
207 | // transparent pixels onto our main canvas
208 | pixelMaskContext.globalCompositeOperation = "source-in";
209 |
210 | pixelMaskContext.fillStyle = colour;
211 |
212 | pixelMaskContext.fillRect(
213 | 0,
214 | 0,
215 | pixelMaskInfo.width,
216 | pixelMaskInfo.height
217 | );
218 |
219 | context.drawImage(
220 | pixelMaskCanvasNode,
221 | pixelMaskInfo.x,
222 | pixelMaskInfo.y
223 | );
224 | }
225 |
226 | if (!maskDataUrl) {
227 | // Offscreen canvas is not available, so use the array of pixels
228 | // to call putImageData on the canvas. This is a slower operation,
229 | // which is why when OffscreenCanvas is supported by the browser
230 | // we want to use that instead. It turns out that setting a data URI
231 | // source on an Image is about 10x faster than calling
232 | // putImageData on a Canvas.
233 |
234 | const pixelMaskImageData = new ImageData(
235 | pixelMaskInfo.width,
236 | pixelMaskInfo.height
237 | );
238 |
239 | pixelMaskImageData.data.set(
240 | new Uint8ClampedArray(pixelMaskInfo.pixels)
241 | );
242 | pixelMaskContext.putImageData(pixelMaskImageData, 0, 0);
243 |
244 | performDraw();
245 | } else {
246 | // OffscreenCanvas is available, so we have a dataUri to set as the
247 | // src of a simple Image. This is 10x faster than calling
248 | // putImageData on the Canvas context.
249 | const img = new Image();
250 | img.onload = () => {
251 | pixelMaskContext.drawImage(img, 0, 0);
252 | performDraw();
253 | };
254 | img.src = maskDataUrl;
255 | }
256 |
257 | return;
258 | }
259 | }
260 |
261 | const dimensions = {
262 | height: canvas.height,
263 | width: canvas.width,
264 | };
265 |
266 | // You have to get these image data objects fresh every time, because
267 | // transferring their data buffers to a Worker detaches the buffers,
268 | // leaving them unusable afterwards.
269 | const currentImageData = contextForData.getImageData(
270 | 0,
271 | 0,
272 | dimensions.width,
273 | dimensions.height
274 | );
275 |
276 | const sourceImageData = sourceContext.getImageData(
277 | 0,
278 | 0,
279 | dimensions.width,
280 | dimensions.height
281 | );
282 |
283 | // Delegate the work of filling the image to the web worker.
284 | // This puts it on another thread so large fills don't block the UI thread.
285 | worker.postMessage(
286 | {
287 | action: "fill",
288 | dimensions,
289 | sourceImageData: sourceImageData.data.buffer,
290 | currentImageData: currentImageData.data.buffer,
291 | x,
292 | y,
293 | colour,
294 | },
295 | [currentImageData.data.buffer]
296 | );
297 | }
298 |
299 | function makeCanvas(size) {
300 | const tempCanvas = document.createElement("canvas");
301 | if (size) {
302 | tempCanvas.width = size.width;
303 | tempCanvas.height = size.height;
304 | }
305 | const tempContext = tempCanvas.getContext("2d");
306 |
307 | return { canvas: tempCanvas, context: tempContext };
308 | }
309 |
310 | function getEventCoords(evt, nodeRect) {
311 | let x, y;
312 | if (evt.touches && evt.touches.length > 0) {
313 | x = evt.touches[0].clientX;
314 | y = evt.touches[0].clientY;
315 | } else {
316 | x = evt.clientX;
317 | y = evt.clientY;
318 | }
319 | return { x: Math.round(x - nodeRect.x), y: Math.round(y - nodeRect.y) };
320 | }
321 |
322 | // https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API/Tutorial/Pixel_manipulation_with_canvas
323 | function getColorIndexForCoord(x, y, width) {
324 | return y * (width * 4) + x * 4;
325 | }
326 |
327 | function addFormListener() {
328 | document.getElementById("speedForm").addEventListener("change", (evt) => {
329 | fillSpeed = evt.target.value;
330 | });
331 | document.getElementById("colourForm").addEventListener("change", (evt) => {
332 | selectedColour = evt.target.value;
333 | });
334 | }
335 |
336 | window.addEventListener("load", runExample);
337 | })();
338 |
--------------------------------------------------------------------------------
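
The comments in src/browser.js lean heavily on globalCompositeOperation = "source-in". As a standalone illustration (a minimal sketch, not code from the repository): once "source-in" is set, a fillRect only keeps colour where the canvas already had non-transparent pixels, which is what lets a plain rectangle fill recolour exactly one pre-computed mask.

// Sketch of the "source-in" masking trick. `maskPixels` is assumed to be an
// ImageData whose opaque pixels mark the area to recolour, and
// `destinationContext` the visible canvas context.
function fillMaskedRegion(maskPixels, destinationContext, colour, x, y) {
  const maskCanvas = document.createElement("canvas");
  maskCanvas.width = maskPixels.width;
  maskCanvas.height = maskPixels.height;
  const maskContext = maskCanvas.getContext("2d");
  maskContext.putImageData(maskPixels, 0, 0);

  // With "source-in", new drawing survives only where the existing content is
  // non-transparent, so this rectangle recolours just the mask's pixels.
  maskContext.globalCompositeOperation = "source-in";
  maskContext.fillStyle = colour;
  maskContext.fillRect(0, 0, maskCanvas.width, maskCanvas.height);

  // Stamp the recoloured mask onto the visible canvas at its bounding box.
  destinationContext.drawImage(maskCanvas, x, y);
}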
/src/worker.js:
--------------------------------------------------------------------------------
1 | /*
2 | This web worker is where all the filling and algorithmic stuff happens.
3 | */
4 |
5 | // This onmessage function is how a web worker receives messages
6 | // from the main UI thread.
7 | onmessage = function (evt) {
8 | const workerData = evt.data;
9 |
10 | console.log("worker got message", workerData);
11 | switch (workerData.action) {
12 | case "fill":
13 | // The user has clicked a pixel and we should start a fill
14 | // from that point using their selected colour
15 | fillAction(workerData, this);
16 | break;
17 | case "process":
18 | // When the image loads in the UI thread we pre-process it to identify
19 | // up to 254 individual sections that can be filled in.
20 | // This allows us to instantly fill them later when the user clicks.
21 | processImageAction(workerData, this);
22 | break;
23 | default:
24 | console.error("Unknown action in paint worker", workerData);
25 | }
26 | };
27 |
28 | // A new image for colouring has been loaded, so cache it and
29 | // pre-process it for quicker fills later.
30 | // We go through it pixel by pixel, and find most of the areas that
31 | // can be coloured in. Each of these is identified, its size and
32 | // pixels are stored, and at the end everything is sent back to the main thread.
33 | function processImageAction(workerData, self) {
34 | const dimensions = workerData.dimensions;
35 | const buffer = workerData.buffer;
36 | const { height, width } = dimensions;
37 |
38 | const bufferArray = new Uint8ClampedArray(buffer);
39 |
40 | // This array contains the pixels for the full image.
41 | // We use this to keep track of which pixels we have already
42 | // filled in and which we have not, so as to avoid extra work.
43 | let intermediateBuffer = new Array(height * width * 4);
44 | for (let i = 0; i < intermediateBuffer.length; i++) {
45 | intermediateBuffer[i] = 0;
46 | }
47 |
48 | let currentX = 0,
49 | currentY = 0;
50 |
51 | const pixelInfoToPost = [];
52 | const dataUrlPromises = [];
53 |
54 | function processNextPixel() {
55 | // Because we are using the Alpha pixel value to tell the UI thread
56 | // which of the multiple data buffers to use, we can only support
57 | // 254 of them (the number 0 means don't fast fill)
58 | if (pixelInfoToPost.length < 254) {
59 | let initX = currentX;
60 |
61 | for (let y = currentY; y < height; y += 20) {
62 | // Reset the initial X position so that we don't skip
63 | // most of the image when the next Y loop starts
64 | initX = 0;
65 |
66 | for (let x = initX; x < width; x += 20) {
67 | const firstIdx = getColorIndexForCoord(x, y, width);
68 | const alphaValue = intermediateBuffer[firstIdx + 3];
69 | const sourceAlphaValue = bufferArray[firstIdx + 3];
70 |
71 | // If the pixel is still transparent, we have found a pixel that could be
72 | // filled by the user, but which has not yet been processed by this function
73 | if (alphaValue === 0 && sourceAlphaValue === 0) {
74 | currentX = x;
75 | currentY = y;
76 |
77 | const alphaValueToSet = pixelInfoToPost.length + 1;
78 |
79 | // Fill all the pixels we can from this source pixel.
80 | // Set the filled colour to be black, but with the alpha
81 | // value to be the next available index in the
82 | // pixelInfoToPost array. This ensures that later on
83 | // we can easily map from a pixel in the canvas to the
84 | // correct mask to apply for an instant fill by just
85 | // accessing the corresponding index in the array.
86 | fillImage(
87 | dimensions,
88 | `rgba(0,0,0,${alphaValueToSet})`,
89 | x,
90 | y,
91 | buffer,
92 | null,
93 | // We don't care about intermediate progress, so this is null
94 | null,
95 | // When the fill operation is completed for this part of the
96 | // image
97 | (fillBuffer, _processedPointsCount, fillDimensions) => {
98 | const { minX, maxX, maxY, minY } = fillDimensions;
99 | const fillWidth = maxX - minX + 1;
100 | const fillHeight = maxY - minY + 1;
101 | const fillBufferArray = new Uint8ClampedArray(fillBuffer);
102 |
103 | const partialBuffer = [];
104 |
105 | // Copy over the RGBA values to the intermediateBuffer
106 | for (let fillY = minY; fillY <= maxY; fillY++) {
107 | // It's necessary to process the pixels in this order,
108 | // row by row rather than column by column, as that is how
109 | // the ImageData array is interpreted
110 | for (let fillX = minX; fillX <= maxX; fillX++) {
111 | const fillFirstIndex = getColorIndexForCoord(
112 | fillX,
113 | fillY,
114 | dimensions.width
115 | );
116 | const fillA = fillBufferArray[fillFirstIndex + 3];
117 |
118 | const red = fillBufferArray[fillFirstIndex];
119 | const green = fillBufferArray[fillFirstIndex + 1];
120 | const blue = fillBufferArray[fillFirstIndex + 2];
121 |
122 | partialBuffer.push(0);
123 | partialBuffer.push(0);
124 | partialBuffer.push(0);
125 |
126 | if (alphaValueToSet === fillA) {
127 | intermediateBuffer[fillFirstIndex] = red;
128 | intermediateBuffer[fillFirstIndex + 1] = green;
129 | intermediateBuffer[fillFirstIndex + 2] = blue;
130 | intermediateBuffer[fillFirstIndex + 3] = fillA;
131 |
132 | // Store the non-transparent pixel in the subset of the canvas
133 | // so that, when a fill action is triggered, this pixel will
134 | // be coloured in
135 | partialBuffer.push(255);
136 | } else {
137 | // Store a transparent pixel, so when a fill action is taken, this
138 | // pixel will not be coloured in
139 | partialBuffer.push(0);
140 | }
141 | }
142 | }
143 |
144 | const pixelInfo = {
145 | dataUrl: undefined,
146 | pixels: partialBuffer,
147 | x: minX,
148 | y: minY,
149 | height: fillHeight,
150 | width: fillWidth,
151 | };
152 |
153 | // If OffscreenCanvas is supported, send back a dataUrl rather than an
154 | // array of pixels. This is much faster when the user clicks.
155 | const dataUrlPromise =
156 | getDataUrlFromPixels(partialBuffer, fillWidth, fillHeight) ||
157 | undefined;
158 |
159 | // Store the mask information for later sending back to the UI thread.
160 | pixelInfoToPost.push(pixelInfo);
161 |
162 | if (dataUrlPromise) {
163 | dataUrlPromises.push(dataUrlPromise);
164 | dataUrlPromise.then((dataUrl) => {
165 | // Replace the pixel array with the dataUrl
166 | pixelInfo.dataUrl = dataUrl;
167 | delete pixelInfo.pixels;
168 | });
169 | }
170 |
171 | // Use a setTimeout call before moving on to the next pixel.
172 | // This frees up the thread so that if the user clicks again
173 | // and another message is received, we can receive it rather
174 | // than locking up this thread for potentially a few seconds
175 | setTimeout(processNextPixel, 0);
176 | },
177 | alphaValueToSet
178 | );
179 | return;
180 | }
181 | }
182 | }
183 | }
184 |
185 | // If we're converting all canvases to dataUrls, wait for that to complete.
186 | Promise.all(dataUrlPromises).then(() => {
187 | // Here we've made it through the entire canvas, so send all the pixel
188 | // information back to the UI thread.
189 | self.postMessage(
190 | {
191 | response: "process",
192 | height,
193 | width,
194 | allPixels: intermediateBuffer,
195 | pixelMaskInfo: pixelInfoToPost,
196 | },
197 | // Transfer the buffer
198 | [buffer]
199 | );
200 | });
201 | }
202 |
203 | // Start off the processing.
204 | processNextPixel();
205 | }
206 |
207 | function getDataUrlFromPixels(pixels, width, height) {
208 | if (typeof OffscreenCanvas === "undefined") {
209 | return null;
210 | }
211 | const canvas = new OffscreenCanvas(width, height);
212 | const ctx = canvas.getContext("2d");
213 |
214 | const imageData = new ImageData(width, height);
215 | imageData.data.set(new Uint8ClampedArray(pixels));
216 | ctx.putImageData(imageData, 0, 0);
217 |
218 | // Some older browsers use the 'toBlob' function rather than the
219 | // function in the spec 'convertToBlob', so support that
220 | return canvas[canvas.convertToBlob ? "convertToBlob" : "toBlob"]().then(
221 | (blob) => {
222 | const dataURL = new FileReaderSync().readAsDataURL(blob);
223 | return dataURL;
224 | }
225 | );
226 | }
227 |
228 | function fillAction(workerData, self) {
229 | const { colour, dimensions, sourceImageData, currentImageData, x, y } =
230 | workerData;
231 | const { height, width } = dimensions;
232 |
233 | fillImage(
234 | dimensions,
235 | colour,
236 | x,
237 | y,
238 | currentImageData,
239 | sourceImageData,
240 | // Callback for partial fill progress. This is used to show
241 | // gradual fills to the user in the main thread
242 | (buffer) => {
243 | console.log("fill progressing ...");
244 | // Send the partially complete fill data back to the UI thread
245 | self.postMessage(
246 | {
247 | response: "fill",
248 | colour,
249 | isFinal: false,
250 | height,
251 | width,
252 | pixels: buffer,
253 | },
254 | [buffer]
255 | );
256 | return true;
257 | },
258 | // Callback for the fill being complete
259 | (buffer, processedPointsCount) => {
260 | // complete
261 | console.log("fill is complete");
262 |
263 | // Send the completed fill data back to the UI thread
264 | self.postMessage(
265 | {
266 | response: "fill",
267 | colour,
268 | isFinal: true,
269 | height,
270 | width,
271 | pixels: processedPointsCount > 0 ? buffer : null,
272 | },
273 | [buffer]
274 | );
275 | }
276 | );
277 | }
278 |
279 | function fillImage(
280 | dimensions,
281 | colour,
282 | x,
283 | y,
284 | currentImageBuffer,
285 | sourceImageBuffer,
286 | onProgress,
287 | onComplete,
288 | forceSetAlphaValue
289 | ) {
290 | // https://gist.github.com/krhoyt/2c3514f20a05e4916a1caade0782953f
291 | let destImageData = new ImageData(dimensions.width, dimensions.height);
292 | let destData = destImageData.data;
293 |
294 | const currentImageData = new Uint8ClampedArray(currentImageBuffer);
295 | const sourceImageData = sourceImageBuffer
296 | ? new Uint8ClampedArray(sourceImageBuffer)
297 | : currentImageData;
298 |
299 | let point = null;
300 | const { width, height } = dimensions;
301 |
302 | // Use a custom stack that preallocates the entire possible
303 | // required array size in memory, and then manages the push() and
304 | // shift() calls so that no large array operations are required.
305 | const candidatePoints = createStaticStack(
306 | dimensions.width * dimensions.height
307 | );
308 | candidatePoints.push({ x, y });
309 |
310 | let [r, g, b] = colourStringToRgb(colour);
311 |
312 | const visited = {};
313 | const added = {};
314 |
315 | let processedPointsCount = 0;
316 |
317 | function addCandidate(xCoord, yCoord) {
318 | if (xCoord < 0 || xCoord > width - 1 || yCoord < 0 || yCoord > height - 1) {
319 | return;
320 | }
321 | const key = xCoord + "," + yCoord;
322 | if (!added[key] && !visited[key]) {
323 | candidatePoints.push({
324 | x: xCoord,
325 | y: yCoord,
326 | });
327 | added[key] = true;
328 | }
329 | }
330 |
331 | function getPointIdx(x, y) {
332 | return y * (width * 4) + x * 4;
333 | }
334 |
335 | const whiteSum = 255 * 3;
336 | function isWhite(startIdx) {
337 | const sum =
338 | sourceImageData[startIdx] +
339 | sourceImageData[startIdx + 1] +
340 | sourceImageData[startIdx + 2];
341 |
342 | // Either it's black with full transparency (the default background)
343 | // or it's white drawn by the user
344 | return (
345 | (sum === 0 && sourceImageData[startIdx + 3] === 0) || sum === whiteSum
346 | );
347 | }
348 |
349 | // If the user is sketching, we can't depend on the fillable area
350 | // always having a low alpha value. When they do a fill, it modifies
351 | // srcImageData for the next fill action to be that colour. So, in this
352 | // case we only fill where the colour matches the pixel they clicked.
353 | let selectedColourIsWhite = false;
354 |
355 | const selPointIdx = getPointIdx(x, y);
356 | selectedColourIsWhite = isWhite(selPointIdx);
357 |
358 | let minX = x,
359 | maxX = x,
360 | minY = y,
361 | maxY = y;
362 |
363 | while ((point = candidatePoints.shift())) {
364 | const visitedKey = `${point.x},${point.y}`;
365 |
366 | if (!visited[visitedKey]) {
367 | const pointIdx = getPointIdx(point.x, point.y);
368 |
369 | const alphaIdx = pointIdx + 3;
370 | visited[visitedKey] = true;
371 | delete added[visitedKey];
372 |
373 | if (currentImageData.length < alphaIdx) {
374 | continue;
375 | }
376 |
377 | const currentPointIsWhite = isWhite(pointIdx);
378 | let canFill = sourceImageData[alphaIdx] < 255 || currentPointIsWhite;
379 |
380 | // There can be semi-transparent pixels right next to fully opaque pixels.
381 | // Fill these in, but do not let the pixels next to them be filled, unless those
382 | // pixels are also touched by fully transparent pixels.
383 | // This fixes an issue where a seemingly opaque line would let the fill algorithm
384 | // pass through it.
385 | let canPropagateFromPoint = sourceImageData[alphaIdx] < 100;
386 |
387 | if (canFill) {
388 | minX = Math.min(point.x, minX);
389 | minY = Math.min(point.y, minY);
390 | maxX = Math.max(point.x, maxX);
391 | maxY = Math.max(point.y, maxY);
392 |
393 | if (canPropagateFromPoint) {
394 | addCandidate(point.x, point.y - 1);
395 | addCandidate(point.x, point.y + 1);
396 | addCandidate(point.x - 1, point.y);
397 | addCandidate(point.x + 1, point.y);
398 | }
399 |
400 | destData[pointIdx] = r;
401 | destData[pointIdx + 1] = g;
402 | destData[pointIdx + 2] = b;
403 | destData[alphaIdx] = forceSetAlphaValue ? forceSetAlphaValue : 255;
404 |
405 | processedPointsCount++;
406 |
407 | if (onProgress && processedPointsCount % 5000 === 0) {
408 | // Send intermediate data if we're processing a large area,
409 | // so that the user knows that something is happening
410 | if (onProgress(destData.buffer)) {
411 | destImageData = new ImageData(dimensions.width, dimensions.height);
412 | destData = destImageData.data;
413 | }
414 | }
415 | }
416 | }
417 | }
418 |
419 | if (onComplete) {
420 | onComplete(destData.buffer, processedPointsCount, {
421 | minX,
422 | minY,
423 | maxX,
424 | maxY,
425 | });
426 | }
427 | }
428 |
429 | // A very simple queue-like structure that preallocates an array size and only
430 | // supports push and shift operations
431 | function createStaticStack(size) {
432 | const arr = new Array(size);
433 | let shiftNextIdx = 0;
434 | let pushNextIdx = 0;
435 |
436 | return {
437 | push: (item) => {
438 | if (pushNextIdx >= arr.length) {
439 | arr.push(item);
440 | pushNextIdx = arr.length;
441 | } else {
442 | arr[pushNextIdx] = item;
443 | pushNextIdx++;
444 | }
445 | },
446 | shift: () => {
447 | if (shiftNextIdx < pushNextIdx) {
448 | const item = arr[shiftNextIdx];
449 | shiftNextIdx++;
450 | return item;
451 | }
452 | return null;
453 | },
454 | };
455 | }
456 |
457 | function colourStringToRgb(colour) {
458 | if (colour.indexOf("rgba(") === 0) {
459 | return colour
460 | .slice(5)
461 | .split(")")[0]
462 | .split(",")
463 | .map((numStr) => {
464 | return strToNum(numStr.trim());
465 | })
466 | .slice(0, 3);
467 | } else if (colour.indexOf("rgb(") === 0) {
468 | return colour
469 | .slice(4)
470 | .split(")")[0]
471 | .split(",")
472 | .map((numStr) => {
473 | return strToNum(numStr.trim());
474 | })
475 | .slice(0, 3);
476 | } else if (colour.indexOf("#") === 0) {
477 | return hexToRgb(colour);
478 | }
479 | return null;
480 | }
481 |
482 | function hexToRgb(hex) {
483 | const normal = hex.match(/^#([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})$/i);
484 | if (normal) {
485 | return normal.slice(1).map((e) => parseInt(e, 16));
486 | }
487 |
488 | const shorthand = hex.match(/^#([0-9a-f])([0-9a-f])([0-9a-f])$/i);
489 | if (shorthand) {
490 | return shorthand.slice(1).map((e) => 0x11 * parseInt(e, 16));
491 | }
492 |
493 | return null;
494 | }
495 |
496 | function strToNum(str) {
497 | if (str === null || str === undefined) {
498 | return str;
499 | }
500 | let strVal = str;
501 | if (Array.isArray(str)) {
502 | strVal = str[0];
503 | }
504 | if (typeof strVal === "string") {
505 | if (strVal.trim().length === 0) {
506 | return 0;
507 | }
508 | return parseFloat(strVal);
509 | }
510 | return strVal;
511 | }
512 |
513 | // https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API/Tutorial/Pixel_manipulation_with_canvas
514 | function getColorIndexForCoord(x, y, width) {
515 | return y * (width * 4) + x * 4;
516 | }
517 |
--------------------------------------------------------------------------------
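
One detail of src/worker.js worth calling out: createStaticStack preallocates its backing array and tracks separate read and write cursors, so shift() is O(1) rather than re-indexing the whole array the way Array.prototype.shift does on a large queue of candidate points. A small usage sketch (the variable names are illustrative):

// Despite the name, createStaticStack behaves as a FIFO queue: push appends,
// shift reads from the front, and shift returns null once drained.
const queue = createStaticStack(4);
queue.push({ x: 10, y: 20 });
queue.push({ x: 11, y: 20 });

let point;
while ((point = queue.shift())) {
  console.log(point.x, point.y); // logs "10 20", then "11 20"
}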