├── .gitignore
├── LICENSE.md
├── README.md
├── images
│   ├── BR-FIG-mid-point.jpg
│   ├── conic-gradient.png
│   ├── conicCurveTo.png
│   ├── cursor-position.png
│   ├── enhanced-textmetrics-output.png
│   ├── filtered-canvas.png
│   ├── googlecursor.gif
│   ├── kerning.png
│   ├── line-info.png
│   ├── mesh2d-circular.gif
│   ├── mesh2d-colors.png
│   ├── mesh2d-cylindrical.gif
│   ├── mesh2d-deformation.png
│   ├── mesh2d-twirl.gif
│   ├── perspective.png
│   ├── text-clusters-circle.png
│   ├── text-clusters-output.png
│   └── variable-width-paragraph.png
├── mdn-drafts
│   ├── 2dcontextlostevent.md
│   ├── 2dcontextrestoreevent.md
│   ├── CanvasRenderingContext2D.fontKerning.md
│   ├── CanvasRenderingContext2D.fontStretch.md
│   ├── CanvasRenderingContext2D.reset.md
│   ├── CanvasRenderingContext2D.roundRect.md
│   ├── CanvasRenderingContext2D.textLetterSpacing.md
│   ├── CanvasRenderingContext2D.textRendering.md
│   ├── CanvasRenderingContext2D.textWordSpacing.md
│   ├── ConvasRenderingContext2D.fontVariantCaps.md
│   └── QUICK-REFERENCE.md
├── rationale.md
├── spec
│   ├── batch-drawimage.md
│   ├── color-input.md
│   ├── conic-curve-to.md
│   ├── conic-gradient.md
│   ├── context-loss.md
│   ├── display-list-object.md
│   ├── enhanced-textmetrics.md
│   ├── filters-usage.md
│   ├── filters.md
│   ├── layers-with-filters.md
│   ├── layers.md
│   ├── mesh2d.md
│   ├── perspective-transforms.md
│   ├── recording.md
│   ├── reset.md
│   ├── roundrect.md
│   ├── shaders.md
│   ├── text-modifiers.md
│   ├── webText-background.md
│   ├── webText-data-model.md
│   ├── webText-metrics.md
│   ├── webgpu.md
│   └── will-read-frequently.md
├── template.md
├── va
│   ├── f1.svg
│   ├── f2.svg
│   ├── hand1.svg
│   ├── hand2.svg
│   ├── highlight.min.js
│   ├── index.html
│   └── theme.css
└── webgpu
    ├── Di-3d.png
    ├── interop-demo-2.html
    ├── interop-demo.html
    ├── shader-demo.html
    ├── shader-polyfill.js
    ├── webgpu-polyfill.js
    └── wgpu-matrix.module.js
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
3 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | All Reports in this Repository are licensed by Contributors
2 | under the
3 | [W3C Software and Document License](http://www.w3.org/Consortium/Legal/2015/copyright-software-and-document).
4 |
5 | Contributions to Specifications are made under the
6 | [W3C CLA](https://www.w3.org/community/about/agreements/cla/).
7 |
8 | Contributions to Test Suites are made under the
9 | [W3C 3-clause BSD License](https://www.w3.org/Consortium/Legal/2008/03-bsd-license.html)
10 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Update Canvas 2D API
2 | ====================
3 |
4 | This repo contains new, current and old proposals for the [Canvas 2D API](https://html.spec.whatwg.org/multipage/canvas.html), following a set of [rationales](rationale.md).
5 |
6 | [Explainer video](https://www.youtube.com/watch?v=dfOKFSDG7IM)
7 |
8 | In active development
9 | ---------------------
10 |
11 | - [**Layers**](spec/layers.md). Support layers in canvas that are drawn as a single unit, allowing effects currently only possible with auxiliary canvases.
12 |
13 | - [**WebGPU Access**](spec/webgpu.md). Switch contexts between Canvas2D and WebGPU.
14 |
15 | - [**Enhanced Text Metrics**](spec/enhanced-textmetrics.md). Extend measureText to support DOM-provided APIs, like selection rect and more.
16 |
17 | - [**Mesh2D**](spec/mesh2d.md). Draw a large number of texture-mapped triangles efficiently.
18 |
19 |
20 |
21 | Launched
22 | --------
23 |
24 | These proposals have already been incorporated into the [WHATWG spec](https://html.spec.whatwg.org/multipage/canvas.html) and may be at different stages of implementation in browsers.
25 |
26 | - [**Canvas context loss**](spec/context-loss.md). Allow canvas to be discarded and re-drawn on demand.
27 |
28 | - [**willReadFrequently**](spec/will-read-frequently.md). Context creation attribute.
29 |
30 | - [**Text modifiers**](spec/text-modifiers.md). CSS text/font properties on Canvas.
31 |
32 | - [**Reset function**](spec/reset.md). Resets the context to its default state.
33 |
34 | - [**RoundRect**](spec/roundrect.md). Draw primitive.
35 |
36 | - [**Conic Gradient**](spec/conic-gradient.md). Draw primitive.
37 |
38 |
39 |
40 | Parked / Future ideas
41 | ---------------------
42 |
43 | - [**Perspective transforms**](spec/perspective-transforms.md). Allow for perspective transforms in Canvas 2D rendering. Support 4x4 transform matrices.
44 |
45 | - [**Recorded pictures**](spec/recording.md). Create a record object that receives all the commands from a Canvas2D and can be replayed multiple times.
46 |
47 | - [**Conic curves**](spec/conic-curve-to.md). Draw primitive.
48 |
49 | - **Batch text rendering**.
50 |
51 | - **Text blob**.
52 |
53 | - **Path2D Inspection**. Allow inspection of Path2D objects, which are currently opaque.
54 |
55 | - **Element as a source for drawImage**.
56 |
57 | - [**Display list object**](spec/display-list-object.md). Format and data structure for retained mode drawings, making Canvas apps faster, more accessible and indexable.
58 |
59 | - [**WebGPU Shaders**](spec/shaders.md). Allow for WebGPU shaders to be used as Canvas2D layers filters.
60 |
61 |
62 |
63 | Dropped ideas
64 | -------------
65 |
66 | - [**Color input**](spec/color-input.md). Support for new color input on Canvas.
67 |
68 | - [**Batch drawImage**](spec/batch-drawimage.md). Support for multiple images being drawn within a single API call.
69 |
70 | - [**Modern filters**](spec/filters.md). Support composited filters, create a filter object that can be updated, and support more SVG-like filters. Superseded by [layers](spec/layers.md).
71 |
--------------------------------------------------------------------------------
/images/BR-FIG-mid-point.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/BR-FIG-mid-point.jpg
--------------------------------------------------------------------------------
/images/conic-gradient.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/conic-gradient.png
--------------------------------------------------------------------------------
/images/conicCurveTo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/conicCurveTo.png
--------------------------------------------------------------------------------
/images/cursor-position.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/cursor-position.png
--------------------------------------------------------------------------------
/images/enhanced-textmetrics-output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/enhanced-textmetrics-output.png
--------------------------------------------------------------------------------
/images/filtered-canvas.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/filtered-canvas.png
--------------------------------------------------------------------------------
/images/googlecursor.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/googlecursor.gif
--------------------------------------------------------------------------------
/images/kerning.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/kerning.png
--------------------------------------------------------------------------------
/images/line-info.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/line-info.png
--------------------------------------------------------------------------------
/images/mesh2d-circular.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/mesh2d-circular.gif
--------------------------------------------------------------------------------
/images/mesh2d-colors.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/mesh2d-colors.png
--------------------------------------------------------------------------------
/images/mesh2d-cylindrical.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/mesh2d-cylindrical.gif
--------------------------------------------------------------------------------
/images/mesh2d-deformation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/mesh2d-deformation.png
--------------------------------------------------------------------------------
/images/mesh2d-twirl.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/mesh2d-twirl.gif
--------------------------------------------------------------------------------
/images/perspective.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/perspective.png
--------------------------------------------------------------------------------
/images/text-clusters-circle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/text-clusters-circle.png
--------------------------------------------------------------------------------
/images/text-clusters-output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/text-clusters-output.png
--------------------------------------------------------------------------------
/images/variable-width-paragraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/images/variable-width-paragraph.png
--------------------------------------------------------------------------------
/mdn-drafts/2dcontextlostevent.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: api-interface
3 | title: '2dcontextlost'
4 | mdn_url: /en-US/docs/Web/API/2dcontextlost_event
5 | specifications: [[specURL]]#[[HeadingAnchor]]
6 | browser_compatibility: api.2dcontextlost
7 | ---
8 |
9 | **When this feature ships, the content below will live on MDN under
10 | [developer.mozilla.org/en-US/docs/Web/API](https://developer.mozilla.org/en-US/docs/Web/API).**
11 |
12 | ## Description
13 |
14 | The `contextlost` [`EventHandler`](https://developer.mozilla.org/en-US/docs/Web/Events/Event_handlers) of the [Canvas API](https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API)
15 | is triggered when the user agent detects
16 | that the backing storage associated with `CanvasRenderingContext2D` on the page is "lost". Contexts
17 | can be lost for several reasons, such as a driver crash, the application running out of memory, etc.
18 |
19 | ## Examples
20 |
21 | ```js
22 | const canvas = document.getElementById('canvas');
23 | const ctx = canvas.getContext('2d');
24 |
25 | canvas.addEventListener('contextlost', (event) => {
26 | console.log(event);
27 | });
28 | ```
29 |
30 | If the context `ctx` is lost, the `contextlost` event will be logged in the console.
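
A typical handler pauses any rendering work until the context comes back. A minimal sketch, assuming a hypothetical `animationHandle` holding the id returned by `requestAnimationFrame`:

```js
let animationHandle; // assumed: id returned by requestAnimationFrame

canvas.addEventListener('contextlost', () => {
  // Drawing is pointless while the context is lost, so pause the render loop.
  cancelAnimationFrame(animationHandle);
});
```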
31 |
32 | ## See also
33 | [webglcontextlost event](https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/webglcontextlost_event)
34 |
--------------------------------------------------------------------------------
/mdn-drafts/2dcontextrestoreevent.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: api-interface
3 | title: '2dcontextrestored'
4 | mdn_url: /en-US/docs/Web/API/2dcontextrestored_event
5 | specifications: [[specURL]]#[[HeadingAnchor]]
6 | browser_compatibility: api.2dcontextlost
7 | ---
8 |
9 | **When this feature ships, the content below will live on MDN under
10 | [developer.mozilla.org/en-US/docs/Web/API](https://developer.mozilla.org/en-US/docs/Web/API).**
11 |
12 | ## Description
13 |
14 | The `contextrestored` [`EventHandler`](https://developer.mozilla.org/en-US/docs/Web/Events/Event_handlers) of the [Canvas API](https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API)
15 | is triggered when the user agent "restores" the backing storage associated with
16 | `CanvasRenderingContext2D` on the page after being "lost".
17 |
18 | Once the context is restored, resources such as drawings that were created before
19 | the context was lost are no longer valid. You need to reinitialize the state of your
20 | context and recreate any resources.
21 |
22 | ## Examples
23 |
24 | ```js
25 | const canvas = document.getElementById('canvas');
26 | const ctx = canvas.getContext('2d');
27 |
28 | canvas.addEventListener('contextrestored', (event) => {
29 | console.log(event);
30 | });
31 | ```
32 |
33 | If the context `ctx` is lost, it will then be restored automatically and the `contextrestored` event will be logged in the console.
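
Because all previous state and drawings are discarded, a handler will typically reapply settings and redraw from scratch. A minimal sketch; `redrawScene` is a hypothetical application-specific helper, not part of the API:

```js
canvas.addEventListener('contextrestored', () => {
  // The restored context is back in its default state, so reapply settings
  // and redraw everything from application data.
  ctx.fillStyle = 'cyan';
  redrawScene(ctx); // hypothetical helper that re-issues all drawing commands
});
```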
34 |
35 | ## See also
36 | [webglcontextrestored event](https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/webglcontextrestored_event)
37 |
--------------------------------------------------------------------------------
/mdn-drafts/CanvasRenderingContext2D.fontKerning.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: api-interface
3 | title: 'CanvasRenderingContext2D.fontKerning'
4 | mdn_url: /en-US/docs/Web/API/CanvasRenderingContext2D/fontKerning
5 | specifications: https://html.spec.whatwg.org/#dom-context-2d-fontkerning
6 | browser_compatibility: api.CanvasRenderingContext2D.fontKerning
7 | ---
8 |
9 | **When this feature ships, the content below will live on MDN under
10 | [developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/fontKerning](https://developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/fontKerning).**
11 |
12 | ## Description
13 |
14 | The `fontKerning` property of the `CanvasRenderingContext2D` interface
15 | allows developers to set whether to use the kerning information stored in a
16 | font. Kerning defines how letters are spaced. In well-kerned fonts, this
17 | feature makes character spacing appear to be more uniform and pleasant to
18 | read than it would otherwise be.
19 |
20 | This is illustrated in the code below, which alternates between turning
21 | kerning on and off. The default value is `"auto"`.
22 |
23 |
24 | ## Syntax
25 |
26 | `CanvasRenderingContext2D.fontKerning = fontKerning;`
27 | `var fontKerning = CanvasRenderingContext2D.fontKerning;`
28 |
29 | ### Value
30 |
31 | A `string` representing the current `fontKerning` value. Possible values are:
32 |
33 | `"auto"`
34 | The browser determines whether font kerning should be used or not. For example,
35 | some browsers will disable kerning on small fonts, since applying it could harm the
36 | readability of text.
37 |
38 | `"normal"`
39 | Applies font kerning information stored in the font.
40 |
41 | `"none"`
42 | Disables font kerning information stored in the font.
43 |
44 | ## Example
45 |
46 | This example demonstrates the various `fontKerning` property values.
47 |
48 | ```js
49 | const canvas = document.createElement('canvas');
50 | canvas.width = 1000;
51 | canvas.height = 500;
52 | const ctx = canvas.getContext('2d');
53 | document.body.appendChild(canvas);
54 | ctx.font = '20px serif';
55 | ctx.fontKerning = "auto";
56 | ctx.fillText('auto: VAVATATA WAWA', 20, 50);
57 | ctx.fontKerning = "normal";
58 | ctx.fillText('normal: VAVATATA WAWA', 20, 100);
59 | ctx.fontKerning = "none";
60 | ctx.fillText('none: VAVATATA WAWA', 20, 150);
61 | ```
62 |
63 | ## See also
64 | [CSS property font-kerning](https://developer.mozilla.org/en-US/docs/Web/CSS/font-kerning)
65 |
66 |
--------------------------------------------------------------------------------
/mdn-drafts/CanvasRenderingContext2D.fontStretch.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: api-interface
3 | title: 'CanvasRenderingContext2D.fontStretch'
4 | mdn_url: /en-US/docs/Web/API/CanvasRenderingContext2D/fontStretch
5 | specifications: https://html.spec.whatwg.org/#dom-context-2d-fontstretch
6 | browser_compatibility: api.CanvasRenderingContext2D.fontStretch
7 | ---
8 |
9 | **When this feature ships, the content below will live on MDN under
10 | [developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/fontStretch](https://developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/fontStretch).**
11 |
12 | ## Description
13 |
14 | The `fontStretch` property of the `CanvasRenderingContext2D` interface
15 | allows developers to select a specific font-face from a font. The default
16 | value is `normal`.
17 |
18 | ## Syntax
19 |
20 | `CanvasRenderingContext2D.fontStretch = fontStretch;`
21 | `var fontStretch = CanvasRenderingContext2D.fontStretch;`
22 |
23 | ### Value
24 |
25 | A `string` representing the current `fontStretch` value. The possible font-face values
26 | are `"ultra-condensed"`, `"extra-condensed"`, `"condensed"`, `"semi-condensed"`,
27 | `"normal"`, `"semi-expanded"`, `"expanded"`, `"extra-expanded"` and
28 | `"ultra-expanded"`.
29 |
30 | ## Example
31 |
32 | This example demonstrates the various `fontStretch` property values.
33 |
34 | ```js
35 | const canvas = document.createElement('canvas');
36 | canvas.width = 500;
37 | canvas.height = 500;
38 | const ctx = canvas.getContext('2d');
39 | document.body.appendChild(canvas);
40 |
41 | // Set f1 to be the condensed font-face of font 'test'.
42 | var f1 = new FontFace('test', 'url(/path/to/font1)');
43 | f1.stretch = "condensed";
44 | document.fonts.add(f1);
45 | // Set f2 to be the normal font-face of font 'test'.
46 | var f2 = new FontFace('test', 'url(/path/to/font2)');
47 | document.fonts.add(f2);
48 |
49 | // Since fontStretch is set to condensed, the text should be drawn with the font-face f1.
50 | document.fonts.ready.then(() => {
51 |   ctx.font = '25px test';
52 |   ctx.fontStretch = "condensed";
53 |   ctx.fillText("text", 10, 40);
54 | });
55 |
56 |
57 | ```
58 |
59 | ## See also
60 | [CSS property font-stretch](https://developer.mozilla.org/en-US/docs/Web/CSS/font-stretch)
61 |
--------------------------------------------------------------------------------
/mdn-drafts/CanvasRenderingContext2D.reset.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: api-interface
3 | title: 'CanvasRenderingContext2D.reset'
4 | mdn_url: /en-US/docs/Web/API/CanvasRenderingContext2D/reset
5 | specifications: https://html.spec.whatwg.org/multipage/canvas.html#dom-context-2d-reset
6 | browser_compatibility: api.CanvasRenderingContext2D.reset
7 | ---
8 |
9 |
10 | **When this feature ships, the content below will live on MDN under
11 | [developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/reset](https://developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/reset).**
12 |
13 | ## Description
14 |
15 | The `reset()` method of the `CanvasRenderingContext2D` interface
16 | resets the rendering context to its default state. This includes setting all pixels in the canvas to transparent black, clearing any saved [states](https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D#the_canvas_state), clearing any stored [path operations](https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D#paths) and resetting the drawing state to its initial values.
17 |
18 | Drawing state consists of:
19 | - The current transformation matrix.
20 | - The current clipping region.
21 | - The current values of the following attributes:
22 | - `strokeStyle`
23 | - `fillStyle`
24 | - `globalAlpha`
25 | - `lineWidth`
26 | - `lineCap`
27 | - `lineJoin`
28 | - `miterLimit`
29 | - `lineDashOffset`
30 | - `shadowOffsetX`
31 | - `shadowOffsetY`
32 | - `shadowBlur`
33 | - `shadowColor`
34 | - `filter`
35 | - `globalCompositeOperation`
36 | - `font`
37 | - `textAlign`
38 | - `textBaseline`
39 | - `direction`
40 | - `textLetterSpacing`
41 | - `textWordSpacing`
42 | - `fontKerning`
43 | - `fontStretch`
44 | - `fontVariantCaps`
45 | - `textRendering`
46 | - `imageSmoothingEnabled`
47 | - `imageSmoothingQuality`
48 | - The current dash list.
49 |
50 | ## Syntax
51 |
52 | `CanvasRenderingContext2D.reset();`
53 |
54 | ## Example
55 |
56 | ```js
57 | const defaultFillStyle = ctx.fillStyle; // "#000000"
58 | const defaultStrokeStyle = ctx.strokeStyle; // "#000000"
59 | const defaultFont = ctx.font; // "10px sans-serif"
60 | const defaultLineWidth = ctx.lineWidth; // 1
61 |
62 | ctx.strokeRect(0, 0, 300, 150); // Outline everything.
63 | ctx.fillStyle = "cyan";
64 | ctx.strokeStyle = "yellow";
65 | ctx.font = "30px monospace";
66 | ctx.lineWidth = 5;
67 |
68 | ctx.translate(20, 0);
69 | ctx.rotate(Math.PI/16);
70 | ctx.scale(1.5, 1);
71 | ctx.save();
72 |
73 | ctx.fillRect(25, -5, 150, 100);
74 | ctx.beginPath();
75 | ctx.moveTo(100, 0);
76 | ctx.lineTo(150, 80);
77 | ctx.lineTo(50, 80);
78 | ctx.closePath();
79 | ctx.stroke();
80 |
81 | ctx.fillStyle = "magenta";
82 | ctx.fillText("Reset me!", 10, 40);
83 | ```
84 |
85 | This results in the following canvas:
86 |
87 | 
88 |
89 | If we then follow up with:
90 | ```js
91 | ctx.reset(); // All the above work is undone, canvas is now transparent black
92 |
93 | ctx.getTransform().isIdentity; // true
94 | ctx.restore(); // Does nothing, state stack has been cleared
95 | ctx.getTransform().isIdentity; // true
96 |
97 | ctx.fillStyle == defaultFillStyle; // true
98 | ctx.strokeStyle == defaultStrokeStyle; // true
99 | ctx.font == defaultFont; // true
100 | ctx.lineWidth == defaultLineWidth; // true
101 |
102 | ctx.stroke(); // Does not redraw the triangle, the path has been cleared.
103 |
104 | ctx.strokeRect(0, 0, 300, 150); // Outline everything.
105 | ctx.fillText("I have been reset.", 10, 40); // Uses the default font.
106 | ```
107 |
108 | This will then give us the canvas:
109 |
110 | 
111 |
--------------------------------------------------------------------------------
/mdn-drafts/CanvasRenderingContext2D.roundRect.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: roundRect
3 | title: 'CanvasRenderingContext2D.roundRect()'
4 | mdn_url: /en-US/docs/Web/API/CanvasRenderingContext2D/roundRect
5 | specifications: https://html.spec.whatwg.org/#dom-context-2d-roundrect
6 | browser_compatibility: api.CanvasRenderingContext2D.roundRect
7 | ---
8 |
9 |
10 | ## Description
11 |
12 | The `roundRect` method of the `CanvasRenderingContext2D` interface adds a rounded rectangle to the
13 | current sub-path.
14 |
15 | ## Syntax
16 |
17 | `CanvasRenderingContext2D.roundRect(x, y, width, height, radii)`
18 |
19 | ### Parameters
20 |
21 | x
22 | A double that represents the x-axis (horizontal) coordinate of the rectangle's starting point.
23 |
24 | y
25 | A double that represents the y-axis (vertical) coordinate of the rectangle's starting point.
26 |
27 | width
28 | A double that represents the rectangle's width. A positive value means the path is drawn
29 | clockwise, and a negative value means it's drawn counterclockwise and flips the rectangle
30 | horizontally along the `y` axis. That means the radius values that were applied to the left corners
31 | are now applied to the right.
32 |
33 | height
34 | A double that represents the rectangle's height. A positive value means the path is drawn
35 | clockwise, and a negative value means it's drawn counterclockwise and flips the rectangle
36 | vertically along the `x` axis. That means the radius values that applied to the top corners are now applied
37 | to the bottom.
38 |
39 | radii
40 | An array of radii; each `r` in `radii` can be a double or an object with `{x, y}` properties. If `r` is
41 | a double, the corresponding corner(s) are drawn as a circular arc with radius `r`; if `r` is an
42 | object, the corresponding corner(s) are drawn as an
43 | [elliptical arc](https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/ellipse)
44 | whose `radiusX` and `radiusY` are equal to `x` and `y`, respectively. `r`, `x` and `y` must be
45 | non-negative:
46 |
47 | * If `radii`'s size is 1, then the arc created by `radii[0]` replaces all 4 corners of the rectangle.
48 |
49 | * If `radii`'s size is 2, then the arc created by `radii[0]` replaces the upper left and lower
50 | right corners of the rectangle; the arc created by `radii[1]` replaces the upper right and lower
51 | left corners of the rectangle.
52 |
53 | * If `radii`'s size is 3, then the arc created by `radii[0]` replaces the upper left corner; the
54 | arc created by `radii[1]` replaces the upper right and lower left corners; and the arc created by
55 | `radii[2]` replaces the lower right corner of the rectangle.
56 |
57 |
58 | * If `radii`'s size is 4, then the arcs created by `radii[0]`, `radii[1]`, `radii[2]` and `radii[3]`
59 | replace the upper left, upper right, lower right and lower left corners of the rectangle, respectively.
60 |
61 | * If `radii`'s size is not any of the values listed above, then roundRect aborts and throws a
62 | `RangeError`.
63 |
64 | Note that if the sum of the radii of two corners of the same edge is greater than the length of
65 | that edge, all the `radii` of the rounded rectangle are scaled by a factor of length / (r1 + r2).
66 | For example, a 100-pixel-wide edge whose corner radii sum to 120 scales every radius by 100 / 120.
67 | If multiple edges have this property, the smallest such scale factor is used.
68 |
69 |
70 |
71 | ## Examples
72 |
73 |
74 | ### Drawing roundRect with circular arc
75 |
76 | The following example draws four rounded rectangles with `radii` sizes equal to 1, 2, 3 or 4. Each `r` is a double.
77 |
78 | ```js
79 | const canvas = document.createElement('canvas');
80 | canvas.width = 1000;
81 | canvas.height = 500;
82 | const ctx = canvas.getContext('2d');
83 | document.body.appendChild(canvas);
84 | ctx.strokeStyle = '#0f0';
85 | ctx.lineWidth = 5;
86 | // radii = [20] and r = 20
87 | ctx.roundRect(50, 50, 100, 100, [20]);
88 | // radii = [20, 40], r1 = 20 and r2 = 40
89 | ctx.roundRect(200, 50, 100, 100, [20, 40]);
90 | // radii = [10, 25, 40], r1 = 10, r2 = 25 and r3 = 40
91 | ctx.roundRect(50, 200, 100, 100, [10, 25, 40]);
92 | // radii = [5, 15, 30, 50], r1 = 5, r2 = 15, r3 = 30 and r4 = 50
93 | ctx.roundRect(200, 200, 100, 100, [5, 15, 30, 50]);
94 | ctx.stroke();
95 | ```
96 |
97 | ### Drawing roundRect with elliptical arc
98 |
99 | The following example draws rounded rectangles with different elliptical arcs.
100 |
101 | ```js
102 | const canvas = document.createElement('canvas');
103 | canvas.width = 500;
104 | canvas.height = 500;
105 | const ctx = canvas.getContext('2d');
106 | document.body.appendChild(canvas);
107 | ctx.strokeStyle = '#0f0';
108 | ctx.lineWidth = 5;
109 | ctx.roundRect(50, 50, 120, 120, [new DOMPoint(25, 50)]);
110 |
111 | ctx.roundRect(210, 50, 120, 120, [new DOMPoint(20, 50), new DOMPoint(50, 20), new DOMPoint(20, 50),
112 | new DOMPoint(50, 20)]);
113 |
114 | var DOMPointInit = {
115 | x: 25,
116 | y: 50
117 | }
118 | ctx.roundRect(50, 210, 120, 120, [DOMPointInit]);
119 |
120 | ctx.roundRect(210, 210, 120, 120, [new DOMPoint(20, 50), new DOMPoint(50, 20), 50, 50]);
121 | ctx.stroke();
122 | ```
123 |
124 | ## See also
125 | [CSS border-radius](https://developer.mozilla.org/en-US/docs/Web/CSS/border-radius)
--------------------------------------------------------------------------------
/mdn-drafts/CanvasRenderingContext2D.textLetterSpacing.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: api-interface
3 | title: 'CanvasRenderingContext2D.textLetterSpacing'
4 | mdn_url: /en-US/docs/Web/API/CanvasRenderingContext2D/textLetterSpacing
5 | specifications: https://html.spec.whatwg.org/#dom-context-2d-textletterspacing
6 | browser_compatibility: api.CanvasRenderingContext2D.textLetterSpacing
7 | ---
8 |
9 | **When this feature ships, the content below will live on MDN under
10 | [developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/textLetterSpacing](https://developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/textLetterSpacing).**
11 |
12 | ## Description
13 |
14 | The `textLetterSpacing` property of the `CanvasRenderingContext2D` interface
15 | returns a double that represents the horizontal spacing between characters.
16 | Setting `textLetterSpacing` to positive values spreads characters further apart,
17 | while negative values bring them closer together. The default value is 0.
18 |
19 | ## Syntax
20 |
21 | `var textLetterSpacing = CanvasRenderingContext2D.textLetterSpacing`
22 | `CanvasRenderingContext2D.textLetterSpacing = textLetterSpacing`
23 |
24 | ### Value
25 |
26 | A `double` representing horizontal spacing behavior between characters.
27 |
28 | ### Example
29 |
30 | This example demonstrates the various `textLetterSpacing` property values:
31 |
32 | ```js
33 | const canvas = document.createElement('canvas');
34 | canvas.width = 1000;
35 | canvas.height = 500;
36 | const ctx = canvas.getContext('2d');
37 | document.body.appendChild(canvas);
38 | const letterSpacings = [-2, 0, 4];
39 | ctx.font = '20px serif';
40 |
41 | letterSpacings.forEach(function (letterSpacing, index) {
42 | ctx.textLetterSpacing = letterSpacing;
43 | const y = 50 + index * 50;
44 | ctx.fillText('Hello World (textLetterSpacing: ' + letterSpacing + ')', 20, y);
45 | });
46 | ```
47 |
48 | ## See also
49 | [CSS property letter-spacing](https://developer.mozilla.org/en-US/docs/Web/CSS/letter-spacing)
50 |
--------------------------------------------------------------------------------
/mdn-drafts/CanvasRenderingContext2D.textRendering.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: api-interface
3 | title: 'CanvasRenderingContext2D.textRendering'
4 | mdn_url: /en-US/docs/Web/API/CanvasRenderingContext2D/textRendering
5 | specifications: https://html.spec.whatwg.org/#dom-context-2d-textrendering
6 | browser_compatibility: api.CanvasRenderingContext2D.textRendering
7 | ---
8 |
9 | **When this feature ships, the content below will live on MDN under
10 | [developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/textRendering](https://developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/textRendering).**
11 |
12 | ## Description
13 |
14 | The `textRendering` property of `CanvasRenderingContext2D` provides information
15 | to the rendering engine about what to optimize for when rendering text.
16 |
17 | ## Syntax
18 |
19 | `CanvasRenderingContext2D.textRendering = "textRendering";`
20 | `var textRendering = CanvasRenderingContext2D.textRendering;`
21 |
22 | ### Value
23 | A `string` representing the current `textRendering` value. The possible values are:
24 |
25 | `"auto"`
26 | The browser makes educated guesses about when to optimize for speed, legibility,
27 | and geometric precision while drawing text. This is the default value.
28 |
29 | `"optimizeSpeed"`
30 | The browser emphasizes rendering speed over legibility and geometric precision
31 | when drawing text. It disables kerning and ligatures.
32 |
33 | `"optimizeLegibility"`
34 | The browser emphasizes legibility over rendering speed and geometric precision.
35 | This enables kerning and optional ligatures.
36 |
37 | `"geometricPrecision"`
38 | The browser emphasizes geometric precision over rendering speed and legibility.
39 | Certain aspects of fonts don't scale linearly (e.g. kerning). Geometric precision
40 | can make text using those fonts look good. This value also allows developers to
41 | scale text fluidly, and it accepts float values for the font size.
42 |
43 | ### Example
44 |
45 | This example demonstrates the various `textRendering` property values:
46 |
47 | #### optimizeSpeed vs optimizeLegibility
48 | ```js
49 | const canvas = document.createElement('canvas');
50 | canvas.width = 500;
51 | canvas.height = 500;
52 | const ctx = canvas.getContext('2d');
53 | document.body.appendChild(canvas);
54 |
55 | ctx.font = '20px serif';
56 | ctx.textRendering = "optimizespeed";
57 | ctx.fillText('LYoWAT - ff fi fl ffl', 20, 50);
58 |
59 | ctx.textRendering = "optimizelegibility";
60 | ctx.fillText('LYoWAT - ff fi fl ffl', 20, 100);
61 | ```
62 |
63 | #### optimizeSpeed vs geometricPrecision
64 | ```js
65 | const canvas = document.createElement('canvas');
66 | canvas.width = 500;
67 | canvas.height = 500;
68 | const ctx = canvas.getContext('2d');
69 | document.body.appendChild(canvas);
70 |
71 | ctx.font = '20px serif';
72 | ctx.textRendering = "optimizespeed";
73 | ctx.fillText('LYoWAT - ff fi fl ffl', 20, 50);
74 |
75 | ctx.textRendering = "geometricPrecision";
76 | ctx.fillText('LYoWAT - ff fi fl ffl', 20, 100);
77 | ```
78 |
79 | #### geometricPrecision
80 | ```js
81 | const canvas = document.createElement('canvas');
82 | canvas.width = 500;
83 | canvas.height = 500;
84 | const ctx = canvas.getContext('2d');
85 | document.body.appendChild(canvas);
86 |
87 | ctx.font = '30px serif';
88 | ctx.textRendering = "geometricPrecision";
89 | ctx.fillText('LYoWAT - ff fi fl ffl', 20, 50);
90 | ctx.font = '29.5px serif';
91 | ctx.fillText('LYoWAT - ff fi fl ffl', 20, 100);
92 | ctx.font = '29px serif';
93 | ctx.fillText('LYoWAT - ff fi fl ffl', 20, 150);
94 | ```
95 |
96 | ## See also
97 | [CSS property text-rendering](https://developer.mozilla.org/en-US/docs/Web/CSS/text-rendering)
98 |
--------------------------------------------------------------------------------
/mdn-drafts/CanvasRenderingContext2D.textWordSpacing.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: api-interface
3 | title: 'CanvasRenderingContext2D.textWordSpacing'
4 | mdn_url: /en-US/docs/Web/API/CanvasRenderingContext2D/textWordSpacing
5 | specifications: https://html.spec.whatwg.org/#dom-context-2d-textwordspacing
6 | browser_compatibility: api.CanvasRenderingContext2D.textWordSpacing
7 | ---
8 |
9 | **When this feature ships, the content below will live on MDN under
10 | [developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/textWordSpacing](https://developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/textWordSpacing).**
11 |
12 | ## Description
13 |
14 | The `textWordSpacing` property of the `CanvasRenderingContext2D` interface
15 | returns a double that represents the horizontal spacing between words.
16 | Setting `textWordSpacing` to positive values spreads words further apart,
17 | while negative values bring them closer together. The default value
18 | is 0.
19 |
20 | ## Syntax
21 |
22 | `var textWordSpacing = CanvasRenderingContext2D.textWordSpacing`
23 | `CanvasRenderingContext2D.textWordSpacing = textWordSpacing`
24 |
25 | ### Value
26 |
27 | A double representing horizontal spacing behavior between words.
28 |
29 | ### Example
30 |
31 | This example demonstrates the various `textWordSpacing` property values:
32 |
33 | ```js
34 | const canvas = document.createElement('canvas');
35 | canvas.width = 1000;
36 | canvas.height = 500;
37 | const ctx = canvas.getContext('2d');
38 | document.body.appendChild(canvas);
39 | const wordSpacings = [-7, 0, 10];
40 | ctx.font = '20px serif';
41 |
42 | wordSpacings.forEach(function (wordSpacing, index) {
43 | ctx.textWordSpacing = wordSpacing;
44 | const y = 50 + index * 50;
45 | ctx.fillText('Hello World (textWordSpacing: ' + wordSpacing + ')', 20, y);
46 | });
47 | ```
48 |
49 | ## See also
50 | [CSS property word-spacing](https://developer.mozilla.org/en-US/docs/Web/CSS/word-spacing)
51 |
--------------------------------------------------------------------------------
/mdn-drafts/ConvasRenderingContext2D.fontVariantCaps.md:
--------------------------------------------------------------------------------
1 | ---
2 | recipe: api-interface
3 | title: 'CanvasRenderingContext2D.fontVariantCaps'
4 | mdn_url: /en-US/docs/Web/API/CanvasRenderingContext2D/fontVariantCaps
5 | specifications: https://html.spec.whatwg.org/#dom-context-2d-fontvariantcaps
6 | browser_compatibility: api.CanvasRenderingContext2D.fontVariantCaps
7 | ---
8 |
9 | **When this feature ships, the content below will live on MDN under
10 | [developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/fontVariantCaps](https://developer.mozilla.org/en-US/docs/Web/CanvasRenderingContext2D/fontVariantCaps).**
11 |
12 | ## Description
13 |
14 | The `fontVariantCaps` property of `CanvasRenderingContext2D` allows developers
15 | to control the use of alternate glyphs for capital letters. If multiple sizes
16 | of capital letter glyphs are available for the chosen font, this
17 | property will choose the one with the appropriate size. Otherwise it synthesizes
18 | small-caps by adapting uppercase glyphs. The default value is `normal`.
19 |
20 |
21 | ## Syntax
22 |
23 | `CanvasRenderingContext2D.fontVariantCaps = "fontVariantCaps";`
24 | `var fontVariantCaps = CanvasRenderingContext2D.fontVariantCaps;`
25 |
26 | ### Values
27 |
28 | A `string` representing the current `fontVariantCaps` value. The possible values are:
29 |
30 | `"normal"`
31 | Deactivates the use of alternate glyphs.
32 |
33 | `"small-caps"`
34 | Enables display of small capitals (OpenType feature: `smcp`). "Small-caps"
35 | glyphs typically use the form of uppercase letters but are reduced to the
36 | size of lowercase letters.
37 |
38 | `"all-small-caps"`
39 | Enables display of small capitals for both upper and lowercase letters
40 | (OpenType features: `c2sc`, `smcp`).
41 |
42 | `"petite-caps"`
43 | Enables display of petite capitals (OpenType feature: `pcap`). Petite-caps glyphs
44 | typically use the form of uppercase letters but are reduced to the size of
45 | lowercase letters.
46 |
47 |
48 | `"all-petite-caps"`
49 | Enables display of petite capitals for both upper and lowercase letters
50 | (OpenType features: `c2pc`, `pcap`).
51 |
52 | `"unicase"`
53 | Enables display of mixture of small capitals for uppercase letters with normal
54 | lowercase letters (OpenType feature: `unic`).
55 |
56 | `"titling-caps"`
57 | Enables display of titling capitals (OpenType feature: `titl`). Uppercase
58 | letter glyphs are often designed for use with lowercase letters. When used in
59 | all uppercase titling sequences they can appear too strong. Titling capitals
60 | are designed specifically for this situation.
61 |
62 |
63 | ### Example
64 |
65 | This example demonstrates the various `fontVariantCaps` property values:
66 |
67 | ```js
68 | const canvas = document.createElement('canvas');
69 | canvas.width = 500;
70 | canvas.height = 500;
71 | const ctx = canvas.getContext('2d');
72 | document.body.appendChild(canvas);
73 | ctx.font = '20px serif';
74 |
75 | ctx.fontVariantCaps = "normal";
76 | ctx.fillText('Normal Caps', 20, 50);
77 |
78 | ctx.fontVariantCaps = "small-caps";
79 | ctx.fillText('Small-Caps', 20, 100);
80 |
81 | ctx.fontVariantCaps = "all-small-caps";
82 | ctx.fillText('All-Small-Caps', 20, 150);
83 |
84 | ctx.fontVariantCaps = "petite-caps";
85 | ctx.fillText('Petite-Caps', 20, 200);
86 |
87 | ctx.fontVariantCaps = "all-petite-caps";
88 | ctx.fillText('All-Petite-Caps', 20, 250);
89 |
90 | ctx.fontVariantCaps = "unicase";
91 | ctx.fillText('Unicase Case', 20, 300);
92 |
93 | ctx.fontVariantCaps = "titling-caps";
94 | ctx.fillText('Titling-Caps', 20, 350);
95 | ```
96 |
97 | ## See also
98 | [CSS property font-variant-caps](https://developer.mozilla.org/en-US/docs/Web/CSS/font-variant-caps)
99 |
--------------------------------------------------------------------------------
/mdn-drafts/QUICK-REFERENCE.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 | The `CanvasRenderingContext2D` interface of the [Canvas API](https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API) provides the 2D rendering context for the drawing surface of a `<canvas>` element. It is used for drawing shapes, text, images, and other objects.
4 |
5 | **You can find [existing documentation](https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D) for this interface on MDN. Chrome's work on [New Canvas 2D API](https://www.chromestatus.com/feature/6051647656558592) adds the following members.**
6 |
7 | ## Properties
8 |
9 | **[`CanvasRenderingContext2D.fontKerning`](CanvasRenderingContext2D.fontKerning.md)**
10 |
11 | Indicates whether kerning information stored in a font will be used. Kerning defines how letters are spaced. In well-kerned fonts, this feature makes character spacing appear to be more uniform and pleasant to read than it would otherwise be.
12 |
13 | **[`CanvasRenderingContext2D.fontStretch`](CanvasRenderingContext2D.fontStretch.md)**
14 |
15 | Sets or returns a font's font-face.
16 |
17 | **[`CanvasRenderingContext2D.textLetterSpacing`](CanvasRenderingContext2D.textLetterSpacing.md)**
18 |
19 | Returns a double that represents the horizontal spacing between characters. Setting `textLetterSpacing` to positive values spreads characters further apart, while negative values bring them closer together. The default value is `0`.
20 |
21 | **[`CanvasRenderingContext2D.textRendering`](CanvasRenderingContext2D.textRendering.md)**
22 |
23 | Provides information to the rendering engine about what to optimize for when rendering text.
24 |
25 | **[`CanvasRenderingContext2D.textWordSpacing`](CanvasRenderingContext2D.textWordSpacing.md)**
26 |
27 | Returns a double that represents the horizontal spacing between words. Setting `textWordSpacing` to positive values spreads words further apart, while negative values bring them closer together. The default value is `0`.
28 |
29 | **[`CanvasRenderingContext2D.fontVariantCaps`](CanvasRenderingContext2D.fontVariantCaps.md)**
30 |
31 | Controls use of alternate glyphs for capital letters. If multiple sizes of capital letter glyphs are available for the chosen font, this property chooses the one with appropriate size. Otherwise it synthesizes small-caps by adapting uppercase glyphs.
32 |
33 | ## Events
34 |
35 | **[`contextlost`](2dcontextlostevent.md)**
36 |
37 | Triggered when the user agent detects that the backing storage associated with `CanvasRenderingContext2D` on the page is "lost". Contexts can be lost for several reasons, such as a driver crash, the application running out of memory, etc.
38 |
39 | **[`contextrestored`](2dcontextrestoreevent.md)**
40 |
41 | Triggered when the user agent "restores" the backing storage associated with `CanvasRenderingContext2D` on the page after being "lost".
42 |
43 | ## Methods
44 |
45 | **[`CanvasRenderingContext2D.reset()`](CanvasRenderingContext2D.reset.md)**
46 |
47 | Resets the rendering context to its default state. This includes setting all pixels in the canvas to transparent black, clearing any saved states, clearing any stored path operations and resetting the drawing state to its initial values.
48 |
49 | **[`CanvasRenderingContext2D.roundRect()`](CanvasRenderingContext2D.roundRect.md)**
50 |
51 | Adds a rounded rectangle to the current sub-path.
52 |
--------------------------------------------------------------------------------
/rationale.md:
--------------------------------------------------------------------------------
1 |
2 | Rationale
3 | =========
4 |
5 | Canvas is the web’s direct mode rendering solution that closely matches traditional programming models. It is particularly targeted at games and full-featured apps.
6 |
7 | Modern 2D developers sometimes have to fall back to GL for features that are expected to be available in 2D but currently aren’t supported by Canvas 2D. There's always a balance to be struck when adding new APIs to the web. That said, it's important that Canvas2D is able to address developers' use cases in game development and text manipulation.
8 |
9 | The [current Canvas 2D API](https://html.spec.whatwg.org/multipage/canvas.html) was originally proposed in 2013. Since then, a lot of 2D graphics APIs have appeared and changed what developers expect from a good 2D API. This proposal tries to modernize the Canvas 2D API, considering current and future usage of Canvas and focusing on 3 pillars:
10 |
11 | 1. feature parity with other 2D APIs;
12 | 2. access to current capabilities of the Web/CSS;
13 | 3. performance improvement.
14 |
15 |
16 | Feature Parity with 2D APIs
17 | ---------------------------
18 |
19 | Modern 2D graphics developers have grown a set of expectations around what a 2D
20 | API should provide. By looking at other popular APIs, we can figure out blind
21 | spots of Canvas2D and focus on areas where there are developer-driven needs.
22 |
23 | Here's a small round-up of 2D APIs used in different libraries, specially focused on the APIs proposed in this change.
24 |
25 | [**PixiJS**](https://www.pixijs.com/) *[Web, Javascript]*: Filters: generic filters, AlphaFilter (alpha across a set of operations), DisplacementFilter, BlurFilter, ColorMatrixFilter (5x4), NoiseFilter, generic shader filters. RoundRect. Perspective transforms. Batch image drawing.
26 |
27 | [**Direct2D**](https://docs.microsoft.com/en-us/windows/win32/direct2d/direct2d-portal) *[Windows, C++]*: Non-affine transforms through filters. RoundRect. Big support for filters ([effects](https://docs.microsoft.com/en-us/windows/win32/direct2d/built-in-effects)): color matrix, premul, tint, white level, blending and compositing, blur, edge, displacement, light, emboss, brightness, contrast, exposure, sharpen, turbulence. DrawSpriteBatch.
28 |
29 | [**CoreGraphics**](https://developer.apple.com/documentation/coregraphics)/[**Quartz**](https://developer.apple.com/library/archive/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/Introduction/Introduction.html) *[OSX, Objective-C/Swift]*: Non-affine transforms through CATransform3D (CoreAnimation). RoundedRect. [Filters](https://developer.apple.com/library/archive/documentation/GraphicsImaging/Reference/CoreImageFilterReference/index.html).
30 |
31 | [**Skia**](https://skia.org/) *[C++]*: full perspective transform. drawRoundRect. Image filters.
32 |
33 |
34 | Current HTML/CSS rendering features
35 | -----------------------------------
36 |
37 | Modern browsers implement a rich set of rendering features that are currently unavailable to developers in an immediate mode API. Bridging that gap and giving more power to developers is a good thing. Canvas should have most (if not all) of the capabilities that a regular page rendered with CSS has.
38 |
39 |
40 | Canvas2D Specific improvements
41 | ------------------------------
42 |
43 | A common bottleneck for Canvas2D rendering is the number of Javascript calls needed to render a particular scene. With this in mind, adding more expressive APIs (that allow you to render the same scene with fewer commands) will result in better performance.
44 |
--------------------------------------------------------------------------------
/spec/batch-drawimage.md:
--------------------------------------------------------------------------------
1 | Batch drawImage
2 | ===============
3 | **Status**: explainer.
4 |
5 | Many web applications use `Canvas2D.drawImage` in sequence, where a large number of calls can occur on each frame. In those cases, Javascript call overhead can be a significant rendering bottleneck.
6 |
7 | - near-native performance for sprite/tile based animation and games.
8 | - easily polyfilled.
9 |
10 |
11 | Rationale
12 | ---------
13 |
14 | Batch image rendering is an expected and common 2D API pattern. Usually, those functions exist to improve performance (by increasing the maximum number of sprites that can be rendered) thanks to pipelining benefits. On top of that, Canvas2D also benefits from reducing the number of Javascript calls necessary per blit.
15 |
16 | Experiments have shown that `drawImage*Batch` can improve Canvas2D sprite rendering performance in the browser by at least 3-5x.
17 |
18 |
19 | Proposal
20 | --------
21 |
22 | ```webidl
23 | interface mixin CanvasDrawImageBatch {
24 |   void drawImagePositionBatch(CanvasImageSource source, (Float32Array or sequence<unrestricted double>) drawParameters);
25 |   void drawImageDestRectBatch(CanvasImageSource source, (Float32Array or sequence<unrestricted double>) drawParameters);
26 |   void drawImageSrcDestBatch(CanvasImageSource source, (Float32Array or sequence<unrestricted double>) drawParameters);
27 |   void drawImageTransformBatch(CanvasImageSource source, (Float32Array or sequence<unrestricted double>) drawParameters);
28 |   void drawImage3DTransformBatch(CanvasImageSource source, (Float32Array or sequence<unrestricted double>) drawParameters);
29 | };
30 |
31 | CanvasRenderingContext2D includes CanvasDrawImageBatch;
32 | OffscreenCanvasRenderingContext2D includes CanvasDrawImageBatch;
33 | ```
34 |
35 | The `drawParameters` argument is interpreted as a sequence of draw commands, where each command has a different size depending on the function:
36 |
37 | - `drawImagePositionBatch`
38 |
39 | 2 values per draw `dx, dy`.
40 |
41 | Equivalent to `drawImage(source, dx, dy)`.
42 |
43 | - `drawImageDestRectBatch`
44 |
45 | 4 values per draw `dx, dy, dwidth, dheight`.
46 |
47 | Equivalent to `drawImage(source, dx, dy, dwidth, dheight)`.
48 |
49 | - `drawImageSrcDestBatch`
50 |
51 | 8 values per draw `sx, sy, swidth, sheight, dx, dy, dwidth, dheight`.
52 |
53 | Equivalent to `drawImage(source, sx, sy, swidth, sheight, dx, dy, dwidth, dheight)`.
54 |
55 | - `drawImageTransformBatch`
56 |
57 | 10 values per draw `sx, sy, swidth, sheight, a, b, c, d, e, f`.
58 |
59 |   Equivalent to `save(); transform(a, b, c, d, e, f); drawImage(source, sx, sy, swidth, sheight, 0, 0, 1, 1); restore();`
60 |
61 | - `drawImage3DTransformBatch`
62 |
63 | 20 values per draw `sx, sy, swidth, sheight, m11...m44`.
64 |
65 |   Equivalent to `save(); transform(DOMMatrix(m11...m44)); drawImage(source, sx, sy, swidth, sheight, 0, 0, 1, 1); restore();`
66 |
67 |
68 | Throws an `INDEX_SIZE_ERR` DOMException if the size of `drawParameters` is not a multiple of the required length.
69 |
70 |
71 | ### Implementation
72 |
73 | - A naive implementation (calling the underlying `drawImage` multiple times, as in the sketch below) still gets performance improvements as it reduces Javascript overhead.
74 | - Much less type checking of parameters.
75 | - Allows the UA to truly batch those calls.
76 |
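A minimal polyfill sketch of `drawImagePositionBatch` under that naive strategy (semantics only, no real batching):

```js
// Sketch of a naive polyfill: forward each (dx, dy) pair to a plain drawImage call.
if (!CanvasRenderingContext2D.prototype.drawImagePositionBatch) {
  CanvasRenderingContext2D.prototype.drawImagePositionBatch =
      function(source, drawParameters) {
    if (drawParameters.length % 2 !== 0) {
      throw new DOMException('drawParameters must hold a multiple of 2 values',
                             'IndexSizeError');
    }
    for (let i = 0; i < drawParameters.length; i += 2) {
      this.drawImage(source, drawParameters[i], drawParameters[i + 1]);
    }
  };
}
```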
77 |
78 | ### Open issues and questions
79 |
80 | - Support for non-affine transforms on `drawImageTransformBatch`?
81 | - Support for a `sequence<CanvasImageSource>` as well as a single image.
82 | - Could we have less variants? Maybe remove `drawImageDestRectBatch`?
83 |
84 |
85 | Example usage
86 | -------------
87 |
88 | ```js
89 | const ctx = document.createElement('canvas').getContext('2d');
90 |
91 | const params = new Float32Array([0, 0, 15, 10]);
92 |
93 | fetch('sprite.png').then(response => response.blob()).then(createImageBitmap).then(source => {
94 | // draws 2 instances of sprite.png at (0,0) and (15, 10).
95 | ctx.drawImagePositionBatch(source, params);
96 | });
97 | ```
98 |
99 |
100 | Alternatives considered
101 | -----------------------
102 |
103 | ### Overload approach
104 |
105 | ```webidl
106 | enum CanvasDrawImageParameterFormat { "position", "destination-rectangle",
107 |     "source-and-destination-rectangles", "source-rectangle-and-transform" };
108 |
109 | void drawImageBatch(CanvasImageSource image, CanvasDrawImageParameterFormat parameterFormat,
110 |     Float32Array drawParameters);
111 |
112 | ```
113 |
114 | Overloading has a higher performance cost, is less explicit, and is less friendly to feature detection in the future.
115 |
116 |
117 | References
118 | ----------
119 |
120 | - https://wiki.whatwg.org/wiki/Canvas_Batch_drawImage
121 |
--------------------------------------------------------------------------------
/spec/color-input.md:
--------------------------------------------------------------------------------
1 | CSSColorValue color input
2 | =================
3 | **Status**: explainer.
4 |
5 | Allow color input on canvas to use a CSSColorValue object.
6 |
7 | Rationale
8 | ---------
9 |
10 | The current way to set colors forces developers to build strings that are then parsed into values.
11 | This is a quality-of-life change.
12 |
13 |
14 | Proposal
15 | --------
16 |
17 | ```webidl
18 | interface mixin CanvasFillStrokeStyles {
19 | attribute (DOMString or CSSColorValue or
20 | CanvasGradient or CanvasPattern) strokeStyle;
21 | attribute (DOMString or CSSColorValue or
22 | CanvasGradient or CanvasPattern) fillStyle;
23 | };
24 |
25 | interface mixin CanvasShadowStyles {
26 | attribute (DOMString or CSSColorValue) shadowColor;
27 | };
28 |
29 | interface CanvasGradient {
30 | void addColorStop(double offset, DOMString color);
31 |   void addColorStop(double offset, CSSColorValue color);
32 | };
33 | ```
34 |
35 | Example usage
36 | -------------
37 |
38 | ```js
39 | // Javascript example
40 | const canvas = document.createElement('canvas');
41 | const ctx = canvas.getContext('2d');
42 |
43 | ctx.fillStyle = new CSSRGB(1, 0, 1, 0.5); // half-transparent magenta
44 | ctx.strokeStyle = new CSSHSL(CSS.deg(0), 1, 0.5); // bright red
45 | ```
46 |
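Under this proposal the same objects would also work for gradient stops and shadows. A short sketch, assuming the `addColorStop(double, CSSColorValue)` overload from the WebIDL above:

```js
// Gradient stops and shadow color set from CSSColorValue objects, no string parsing.
const grad = ctx.createLinearGradient(0, 0, 200, 0);
grad.addColorStop(0, new CSSRGB(1, 0, 0)); // red
grad.addColorStop(1, new CSSRGB(0, 0, 1)); // blue
ctx.fillStyle = grad;

ctx.shadowColor = new CSSRGB(0, 0, 0, 0.25); // quarter-opacity black shadow
ctx.fillRect(0, 0, 200, 100);
```
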
47 | Notes on Workers
48 | ----------------
49 |
50 | CSSColorValue types can take doubles or CSS unit values as inputs. The CSS namespace is not (yet) exposed to workers and there are some security concerns involved with doing so. For example, CSS.registerProperty unconditionally casts the ExecutionContext to LocalDOMWindow and then acts on it. This sort of type confusion is a common source of security vulnerabilities.
51 |
52 | We can instead expose only the CSS unit value types that can be used to create CSSColorValues, these include:
53 | - CSS.percent
54 | - CSS.number
55 | - CSS.deg
56 | - CSS.rad
57 | - CSS.grad
58 |
59 | CSSUnitValue, CSSNumericValue and CSSStyleValue should also get exposed.
60 |
61 | We should also consider exposing:
62 | - CSSMathSum
63 | - CSSMathMin
64 | - CSSMathProduct
65 | - CSSMathMax
66 |
67 | Alternatives considered
68 | -----------------------
69 |
70 | ### Array color input
71 |
72 | `ctx.fillStyle = [0.5, 0.5, 0.5]`
73 |
74 |
75 | References
76 | ----------
77 |
78 | - [CSSColorValue](https://drafts.css-houdini.org/css-typed-om-1/#colorvalue-objects)
79 | - [WHATWG discussion thread](https://github.com/whatwg/html/issues/5616)
80 |
--------------------------------------------------------------------------------
/spec/conic-curve-to.md:
--------------------------------------------------------------------------------
1 | ConicCurveTo
2 | =========
3 | **Status**: explainer.
4 |
5 | Draw curves based on conic sections. These are useful as they can represent
6 | circular, elliptical and hyperbolic paths whereas bezier
7 | curves are limited to quadratic arcs.
8 |
9 |
10 | Rationale
11 | ---------
12 |
13 | Conic curves are lower-level primitives than beziers. They allow a better description of more types of curves.
14 |
15 |
16 | Proposal
17 | --------
18 |
19 | ```webidl
20 | interface mixin Canvas {
21 | // all doubles are unrestricted.
22 | void conicCurveTo(double cpx, double cpy, double x, double y, double weight);
23 | };
24 | ```
25 | This is a similar interface to [`bezierCurveTo`](https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/bezierCurveTo). `conicCurveTo` defines a curve from the starting point (_P0_) to
26 | destination point (_P2_) that bends towards control point (_P1_) as weighted by
27 | `weight`:
28 |
29 | 
30 |
31 | Fig 1 - Points on a conic curve [Source](http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/NURBS/RB-conics.html)
32 |
33 | - `x`, `y` are the coordinates of _P2_.
34 | - `cpx`, `cpy` are the coordinates of _P1_.
35 | - `weight` defines how much the line bends towards _P1_:
36 | - `weight = 0` defines a straight line from the _P0_ to _P2_.
37 | - `weight = 1` defines a quadratic path.
38 | - `weight < 1` is elliptical, while `weight > 1` is hyperbolic.
39 | - `weight = infinity` essentially makes two line segments _P0P1_ and _P1P2_.
40 |
41 | If _P0_, _P1_ and _P2_ are three corners of a square, then `weight = sqrt(2)/2` defines a circular (quarter-circle) arc. This can be used for rounded corners, as sketched below.
42 |
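A sketch of a rounded top-right corner built this way; the three points are corners of a 20×20 square, so the curve is a quarter circle of radius 20:

```js
ctx.beginPath();
ctx.moveTo(50, 150);
ctx.lineTo(50, 50);
ctx.lineTo(130, 50);                                 // P0
ctx.conicCurveTo(150, 50, 150, 70, Math.SQRT2 / 2);  // P1 = (150, 50), P2 = (150, 70)
ctx.lineTo(150, 150);
ctx.closePath();
ctx.stroke();
```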
43 | The mathematical derivation of these quantities can be found [here](http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/NURBS/RB-conics.html).
44 |
45 | ### Open issues and questions
46 |
47 | - How much demand is there for this among developers?
48 | - This is already implemented by Skia, but are there performance implications?
49 |
50 | Example usage
51 | -------------
52 |
53 | ```js
54 | // Javascript example
55 | const canvas = document.createElement('canvas');
56 | const ctx = canvas.getContext('2d');
57 |
58 | ctx.beginPath();
59 | ctx.moveTo(100, 100); // Starting Point is (100, 100)
60 | // Control Point is (200, 100)
61 | // End Point is (200, 200)
62 | // Weight is 1
63 | ctx.conicCurveTo(200, 100, 200, 200, 1);
64 | ctx.stroke();
65 |
66 | ```
67 |
68 | The above code will produce the following curve, shown in green.
69 | Lines _pc_ (start point to control point) and _cd_ (control point to end point) are shown in black:
70 |
71 | 
72 |
73 | References
74 | ----------
75 |
76 | - Skia: [SkPath.conicTo](https://skia.org/user/api/SkPath_Reference#SkPath_conicTo)
77 | - [Conic Sections](https://en.wikipedia.org/wiki/Conic_section)
78 | - [Mathematical derivation from NURBS](http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/NURBS/RB-conics.html)
79 |
--------------------------------------------------------------------------------
/spec/conic-gradient.md:
--------------------------------------------------------------------------------
1 | Conic gradient
2 | ==============
3 | **Status**: explainer.
4 |
5 | Adds conic gradients.
6 |
7 | This is equivalent to CSS's `conic-gradient()` function.
8 |
9 |
10 | Rationale
11 | ---------
12 |
13 | This is already available in CSS.
14 |
15 |
16 | Proposal
17 | --------
18 |
19 | ```webidl
20 | interface mixin CanvasFillStrokeStyles {
21 | // addition:
22 | CanvasGradient createConicGradient(unrestricted double startAngle, unrestricted double cx, unrestricted double cy);
23 | };
24 | ```
25 |
26 | When used with conic gradients, `CanvasGradient` stops are normalized from 0 to 1 (as opposed to being expressed in radians). This is consistent with the other gradient types, which also use normalized offsets.
27 |
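For example, to place a stop at a given angle past the start angle, divide by a full turn (a sketch, assuming a gradient `grad` created with `createConicGradient()`):

```js
// A stop a quarter turn (90°, i.e. π/2 radians) past the start angle:
grad.addColorStop((Math.PI / 2) / (2 * Math.PI), 'orange'); // offset 0.25
```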
28 |
29 | Example usage
30 | -------------
31 |
32 | ```js
33 | // Javascript example
34 | const canvas = document.createElement('canvas');
35 | const ctx = canvas.getContext('2d');
36 |
37 | const grad = ctx.createConicGradient(0, 100, 100);
38 |
39 | grad.addColorStop(0, "red");
40 | grad.addColorStop(0.25, "orange");
41 | grad.addColorStop(0.5, "yellow");
42 | grad.addColorStop(0.75, "green");
43 | grad.addColorStop(1, "blue");
44 |
45 | ctx.fillStyle = grad;
46 | ctx.fillRect(0, 0, 200, 200);
47 | document.body.append(canvas);
48 | ```
49 |
50 | This will result in the following image:
51 |
52 | 
53 |
54 |
55 |
56 | Alternatives considered
57 | -----------------------
58 |
59 | none.
60 |
61 | References
62 | ----------
63 |
64 | - https://developer.mozilla.org/en-US/docs/Web/CSS/conic-gradient
65 |
--------------------------------------------------------------------------------
/spec/context-loss.md:
--------------------------------------------------------------------------------
1 | Canvas context loss and context restored
2 | ===================
3 | **Status**: explainer.
4 |
5 |
6 | 2D Canvas Rendering contexts are currently required to have persistent backing stores. This proposal aims to relax that requirement by introducing an API that allows canvases to be discarded by the browser and re-drawn by the web application on demand.
7 |
8 |
9 | Rationale
10 | ---------
11 |
12 | This is a long-standing [request](https://github.com/whatwg/html/issues/4809) from developers. It's particularly useful on mobile, where context loss is more common.
13 |
14 |
15 | Proposal
16 | --------
17 |
18 | ```webidl
19 | // WebIDL changes
20 | interface mixin CanvasContextLost {
21 | boolean isContextLost();
22 | };
23 |
24 | CanvasRenderingContext2D includes CanvasContextLost;
25 | OffscreenCanvasRenderingContext2D includes CanvasContextLost;
26 | ```
27 |
28 | When the user agent detects that the backing storage associated with a Canvas2D context has been lost, it must run the following steps:
29 |
30 | 1. Let *canvas* be the context's canvas.
31 | 2. If *canvas*'s *context lost* flag is set, abort these steps.
32 | 3. Set *context lost* flag.
33 | 4. Queue a task to perform the following steps:
34 |     1. Fire an event named `contextlost` at *canvas*.
35 |     2. If the event's canceled flag is set, abort these steps. The backing buffer for the context will not be restored.
36 |     3. Queue a task to restore the backing buffer for the context.
37 |     4. Fire an event named `contextrestored` at *canvas* on completion of that task.
38 |
39 | UAs could add support for testing context loss by providing a
40 | `console.resetGraphicsContext()` method.
41 |
42 | ### Open issues and questions
43 |
44 | - Deprecate `webglcontextlost` in favor of `contextlost`?
45 | - Deprecate `webglcontextrestored` in favor of `contextrestored`?
46 | - What should the default behavior be?
47 | - Currently the canvas has to be modified in order for context lost/restored events to fire. Is that the right behavior?
48 | - The canvas has to be in the DOM and also visible in order for context lost/restored events to fire. Is that the right behavior?
49 |
50 | Example usage
51 | -------------
52 |
53 | ```js
54 | const canvas = document.createElement('canvas');
55 | const ctx = canvas.getContext('2d');
56 |
57 | canvas.addEventListener("contextlost", redraw);
58 | canvas.addEventListener("contextrestored", redraw);
59 | ```
60 |
61 |
62 | References
63 | ----------
64 |
65 | - https://wiki.whatwg.org/wiki/Canvas_Context_Loss_and_Restoration
66 |
--------------------------------------------------------------------------------
/spec/display-list-object.md:
--------------------------------------------------------------------------------
1 | Display List Object (DLO)
2 | =========================
3 |
4 | > This explainer is an incomplete working draft.
5 |
6 | **Status**: explainer.
7 |
8 | _A format and API for efficiently creating, transmitting, modifying and rendering UI and graphics on 2D Canvas._
9 |
10 |
11 | Rationale
12 | ---------
13 |
14 | HTML and the DOM provide a format and API respectively to manipulate content using high level abstractions. In the graphics sense, these abstractions operate in "retained mode."
15 |
16 | In contrast, HTML Canvas provides APIs to draw directly to logical and physical surfaces. These abstractions operate in "immediate mode."
17 |
18 | | Abstraction level | Immediate mode | Retained mode |
19 | |-------------------|----------------|-------------------------------------|
20 | | High level | -- | HTML & DOM |
21 | | Low level | 2D Canvas | _**Proposal**: Display List Object_ |
22 |
23 | Retained mode has several benefits over immediate mode:
24 |
25 | * **Accessibility**: retained mode graphics can be inspected by the platform and exposed to accessibility tools
26 | * **Faster loads of initial UI state**: initial state of an application for certain common display sizes can be serialized, cached, streamed and displayed quickly
27 | * **Faster updates of displayed state**: implementation can calculate minimal deltas and apply them more efficiently (e.g. using GPU-accelerated code)
28 | * **Indexability**: retained mode text can be exposed to Web crawlers and search engines
29 |
30 | Display List Object (DLO) is a proposal to add a retained mode API to the low level drawing abstraction provided by HTML Canvas.
31 |
32 | Use cases
33 | ---------
34 |
35 | ### Accessibility, Testability, Indexability
36 |
37 | Currently, applications drawing text to Canvas are inaccessible to browsers, extensions and tools without the use of complex, brittle and resource-intensive approaches like OCR or ML.
38 |
39 | A retained mode Canvas allows applications to present graphics and text to the implementation in a retained object that can be directly inspected by the application, by related code (e.g. testing frameworks), by extensions, and by web services (e.g. search engine crawlers). Text in this retained object remains in a machine readable UTF-8 format, with styling, positional and dimensional information preserved.
40 |
41 | It is expected that this will enable simpler and more robust pipelines for accessibility and indexability of Canvas-based content on the Web than are possible today.
42 |
43 | ### Efficient UI updates
44 |
45 | Currently, applications updating Canvas-based UI state must maintain their own representation of the UI to perform layout and compute invalidation. Although the application possesses a declarative representation of the new desired UI state, it must apply the needed changes imperatively and serially to a Canvas via the high-level, and comparatively slow, JavaScript APIs.
46 |
47 | A retained mode graphics object can serve as a common data structure for both applications and implementations to represent UI state. Updates to this shared representation can be performed by the application efficiently in batches and the resulting end state presented to the implementation for display. This allows the implementation to optimize invalidation and incremental paint using an efficient static internal representation, native platform code, hardware acceleration, and delegation to lower stages of the paint pipeline.
48 |
49 | ### Animation
50 |
51 | Animation simply tightens the bounds and exacerbates the challenges identified above. In addition, applications must maintain and compute updates to a state machine to track progress through an animation's timeline. These state machine updates must then be applied to the graphical elements, which are then repainted to a Canvas.
52 |
53 | A retained mode Canvas allows the Canvas state to be [parameterized](#variables). Future proposals like the Animation State Machine will allow applications to delegate frame-to-frame updates of the Canvas state to the implementation as well.
54 |
55 | This approach unburdens JavaScript execution, reduces call pressure along the API boundary and provides the opportunity for animation engines to support more complex graphics and animations at higher frame rates.
56 |
57 | Requirements
58 | ------------
59 |
60 | The retained mode Canvas will provide the following features:
61 |
62 | * **Legible text**: text should be programmatically inspectable in human-understandable spans like formatted multi-line paragraphs (not glyphs, words or other fragments) without the need for OCR
63 | * **Styled text**: applied text styles should be programmatically inspectable (e.g. size, bold, etc.)
64 | * **Fast**: updating a retained mode Canvas should scale proportional to the size of the update, not the size of the display list
65 | * **Inexpensive**: display lists should not consume backing memory for raster storage unless and until needed (e.g. when a raster image is requested or when a drawing is displayed on screen)
66 | * **Scalable**: scaling a retained mode Canvas does not produce pixelation artifacts like with the current immediate mode Canvas
67 | * **Incrementally adoptable**: applications using the current Canvas APIs should be able to gradually migrate to using a retained mode Canvas
68 |
69 |
70 | Strawman Proposal
71 | -----------------
72 |
73 | We propose a new lightweight low-level drawing object called the Display List Object (DLO) which stores draw commands but does not apply them to any raster backing memory. We further propose a new HTML Canvas context type called `2dretained` as a drawing interface with support for both immediate-mode draw commands and retained-mode DLOs:
74 |
75 | | | Canvas `2d` context | Canvas `2dretained` context new | Display List Object new |
76 | |--------|--------------------------------------------------------|-----------------------------------------------------------------------|------------------------------------------|
77 | | | _Draw commands applied to raster memory and discarded_ | _Draw commands applied to raster memory and appended to display list_ | _Draw commands appended to display list_ |
78 | | Memory | **High**: O(_canvas area_) | **High**: O(_canvas area_) + O(_# draw commands_) | **Low**: O(_# draw commands_) |
79 |
80 | For most vector-based UI and graphics, O(_canvas area_) >> O(# draw commands).
81 |
82 | ### Context
83 |
84 |
85 | A `2dretained` context type is a drop-in replacement for the current `2d` context type and supports the same drawing methods.
86 |
87 | As with existing `2d` contexts, the draw methods of `2dretained` context immediately draw to the context's raster backing memory (and display if on screen). However a `2dretained` context also retains the draw calls in an internal display list.
88 |
89 | ```js
90 | const canvas = document.getElementById("my-canvas-element");
91 | const ctx = canvas.getContext("2dretained");
92 | ctx.strokeRect(50, 50, 50, 50);
93 | ```
94 |
95 | > _**Why**: A drop-in replacement context type allows applications to incrementally adopt retained-mode Canvas. A separate context type ensures that the added internal storage of a retained display list is only required when requested by the application, rather than added to the memory footprint of all existing 2D Canvas contexts._
96 |
97 | ### Accessing a DLO
98 |
99 | The retained display list of a Canvas `2dretained` context can be accessed using `getDisplayList`:
100 |
101 | ```js
102 | dlo = ctx.getDisplayList();
103 | dlo.toJSON();
104 | ```
105 |
106 | ```json
107 | {
108 | "metadata": {
109 | "version": "0.0.1"
110 | },
111 | "commands": [
112 | ["strokeRect", 50, 50, 50, 50]
113 | ]
114 | }
115 | ```
116 |
117 | ### Modifying a DLO
118 |
119 | A DLO can be modified by issuing additional drawing commands using the methods of the DLO instead of the Canvas context. These commands are applied in retained mode and merely appended to the command list of the DLO.
120 |
121 | ```js
122 | dlo.fillText("Hello", 10, 10);
123 | dlo.toJSON();
124 | ```
125 |
126 |
127 | ```json
128 | {
129 | "metadata": {
130 | "version": "0.0.1"
131 | },
132 | "commands": [
133 | ["strokeRect", 50, 50, 50, 50],
134 | ["fillText", "Hello", 10, 10]
135 | ]
136 | }
137 | ```
138 |
139 | Modifications to a DLO do not result in changes to any Canvas contexts or any displayed graphics.
140 |
141 | > **Implementation note**: DLOs are inexpensive to create and modify. Implementations do not need to allocate raster backing memory. The draw methods of a DLO should run in amortized constant time.
142 |
143 |
144 | ### Nesting DLOs
145 |
146 | DLOs can be nested by inserting a display list into another display list. This creates a tree structure that allows for faster incremental updates to a complex scene:
147 |
148 | ```js
149 | dlo2 = DisplayList();
150 | dlo2.fillText("World", 30, 10);
151 |
152 | dlo.insert(dlo2);
153 | dlo.toJSON();
154 | ```
155 |
156 | ```json
157 | {
158 | "metadata": {
159 | "version": "0.0.1"
160 | },
161 | "commands": [
162 | ["strokeRect", 50, 50, 50, 50],
163 | ["fillText", "Hello", 10, 10],
164 | {
165 | "commands": [
166 | ["fillText", "World", 30, 10]
167 | ]
168 | }
169 | ]
170 | }
171 | ```
172 |
173 | Inserting a display list into another display list returns a handle that can be used to update the nested display list.
174 |
175 | ```js
176 | handle = dlo.insert(dlo2);
177 | handle.reset();
178 | handle.fillText("世界", 30, 10);
179 | dlo.toJSON();
180 | ```
181 |
182 | ```json
183 | {
184 | "metadata": {
185 | "version": "0.0.1"
186 | },
187 | "commands": [
188 | ["strokeRect", 50, 50, 50, 50],
189 | ["fillText", "Hello", 10, 10],
190 | {
191 | "commands": [
192 | ["fillText", "世界", 30, 10]
193 | ]
194 | }
195 | ]
196 | }
197 | ```
198 |
199 | An optional identifier can be provided to `insert()`. The identifier is serialized with the display list and can be used to obtain handles after deserializing a saved display list.
200 |
201 | ```js
202 | handle = dlo.insert(dlo2, "mySubDisplayList");
203 | jsonDLO = dlo.toJSON();
204 |
205 | newDLO = DisplayList();
206 | newDLO.fromJSON(jsonDLO);
207 | newHandle = newDLO.getById("mySubDisplayList"); // same sub-display list as above
208 | ```
209 |
210 | ```json
211 | {
212 | "metadata": {
213 | "version": "0.0.1"
214 | },
215 | "commands": [
216 | ["strokeRect", 50, 50, 50, 50],
217 | ["fillText", "Hello", 10, 10],
218 | {
219 | "commands": [
220 | ["fillText", "世界", 30, 10]
221 | ],
222 | "id": "mySubDisplayList"
223 | }
224 | ]
225 | }
226 | ```
227 |
228 | > _**Why**: nested DLOs create a tree of grouped draw commands which allows implementations to efficiently compute deltas between DLOs for fast incremental updates in the paint pipeline. This allows drawings to be updated with performance proportional to the change in the drawing rather than performance proportional to the size and complexity of the overall drawing. DLO trees can implement copy-on-write semantics to reduce the memory overhead of accessing, modifying and drawing complex scenes._
229 |
230 | ### Drawing and updating a Canvas with a DLO
231 |
232 | A DLO can be drawn into a Canvas `2dretained` context:
233 |
234 | ```js
235 | ctx.insert(dlo);
236 | ```
237 |
238 | Drawing a DLO applies the commands in the DLO immediately to the Canvas raster backing memory (and display if on screen). Drawing a DLO to a `2dretained` context also appends the commands in the DLO to the internal command list of the context.
239 |
240 | > _**Why**: The append behavior of `insert()` aids in incremental adoption: applications can draw some parts of their scene with unmodified code that calls `ctx.draw*()` methods directly and get the expected immediate-mode behavior, while newer application code can draw other parts of the scene into a retained-mode DLO which is then appended to the same context. The application can be updated over time to draw more of the scene into the DLO and issue fewer draw commands to the context. Implementations have efficient access to the entire DLO when `insert()` is used, rather than receiving draw commands one by one from the application when using the `2d` Canvas context._
241 |
242 | A Canvas context of type `2dretained` can be entirely _updated_ to match a given DLO:
243 |
244 | ```js
245 | ctx.updateDisplayList(dlo);
246 | console.assert(ctx.getDisplayList().equals(dlo));
247 | ```
248 |
249 | Updating a `2dretained` canvas context with a DLO is equivalent to resetting the context and drawing the DLO. However in reality, only the difference between the internal display list of the context and the DLO is applied to the canvas, which can be much faster for complex scenes and small updates.
250 |
251 | ```js
252 | // Equivalent approaches with different performance
253 |
254 | // 1. Runs in O(len(dlo)) time
255 | ctx.reset()
256 | ctx.insert(dlo);
257 |
258 | // 2. Runs in O(len(diff(ctx, dlo))) time
259 | ctx.updateDisplayList(dlo);
260 | ```
261 |
262 |
263 | > _**Why**: The replacement behavior of `updateDisplayList` allows applications that do all of their drawing for a given context into a DLO to get maximum performance by presenting the desired DLO in its entirety to the implementation. The implementation can then efficiently determine and apply the needed updates to the context._
264 |
265 | ### Save and Restore
266 |
267 | > Note: In a retained-mode Canvas application, methods like `save()` and `restore()` should be considered deprecated; DLO-native applications should create scenes by assembling nested DLOs as described above.
268 |
269 | The `save()` method creates a new unnamed sub-display list and moves the DLO's "cursor" into it.
270 |
271 | ```js
272 | dlo.fillText("Hello", 50, 50);
273 | dlo.save();
274 | dlo.fillRect(0, 0, 25, 25); // written into a new nested DLO
275 | dlo.toJSON();
276 | ```
277 |
278 | ```json
279 | {
280 | "metadata": {
281 | "version": "0.0.1"
282 | },
283 | "commands": [
284 | ["fillText", "Hello", 50, 50],
285 | {
286 | "commands": [
287 | ["fillRect", 0, 0, 25, 25]
288 |       ]
289 | }
290 | ]
291 | }
292 | ```
293 |
294 | The `restore()` method simply moves the "cursor" of the DLO out of the most recent sub-display list created by the `save()` method:
295 |
296 | ```js
297 | dlo.restore();
298 | dlo.fillText("world", 100, 50);
299 | dlo.toJSON();
300 | ```
301 |
302 | ```json
303 | {
304 | "metadata": {
305 | "version": "0.0.1"
306 | },
307 | "commands": [
308 | ["fillText", "Hello", 50, 50],
309 | {
310 | "commands": [
311 | ["fillRect", 0, 0, 25, 25]
312 |       ]
313 | },
314 | ["fillText", "world", 100, 50]
315 | ]
316 | }
317 | ```
318 |
319 | ### Changing canvas state
320 |
321 | Certain Canvas methods change the current drawing state of the Canvas and affect all _subsequent_ draw method calls. These methods include grid transformations (`transform()`, `translate()`, `rotate()`, and `scale()`), default styles (`strokeStyle()`, `fillStyle()`, `lineWidth()`, `font()`, etc.), and the current clipping path.
322 |
323 | These methods can be called against a `2dretained` Canvas context and a DLO object to achieve the same effect.
324 |
325 | ```js
326 | dlo = DisplayList();
327 | dlo.fillText("Hello", 50, 50);
328 |
329 | dlo.translate(5, 5); // DLO not empty, new nested DLO created
330 | dlo.font("bold 48px serif");
331 |
332 | dlo.fillText("world", 45, 45); // translated origin and font style applied
333 | dlo.toJSON();
334 | ```
335 |
336 | ```json
337 | {
338 | "metadata": {
339 | "version": "0.0.1"
340 | },
341 | "commands": [
342 | ["fillText", "Hello", 50, 50],
343 | {
344 | "transform": [1, 0, 5, 0, 1, 5, 0, 0, 1],
345 | "font": "bold 48px serif",
346 | "commands": [
347 | ["fillText", "world", 45, 45]
348 |       ]
349 | }
350 | ]
351 | }
352 | ```
353 |
354 | Since the nested DLOs created by these functions are unavailable to the application, the implementation can optimize the tree of DLOs by moving state transformations up or down in the tree in a way that balances the tree while preserving Canvas semantics:
355 |
356 | ```js
357 | dlo.font(""); // clear font selection made above
358 | dlo.fillText("How are you?", 50, 100); // implementation can move "world" to nested DLO and put this text in parent
359 | dlo.toJSON();
360 | ```
361 |
362 | ```json
363 | {
364 | "metadata": {
365 | "version": "0.0.1"
366 | },
367 | "commands": [
368 | ["fillText", "Hello", 50, 50],
369 | {
370 | "transform": [1, 0, 5, 0, 1, 5, 0, 0, 1],
371 | "commands": [
372 | {
373 | "font": "bold 48px serif",
374 | "commands": [
375 | ["fillText", "world", 45, 45]
376 | ]
377 | },
378 | ["fillText", "How are you?", 50, 100]
379 |       ]
380 |     }
381 | ]
382 | }
383 | ```
384 |
385 | ### Formatted Text
386 |
387 | Applications drawing text to a Canvas often apply their own layout rules (e.g. a document editor wrapping text at some document-defined page margin). To do this, applications need to know the dimensions of formatted text under some constraints, as well as apply line- and word-breaking according to language-specific rules.
388 |
389 | This proposal is meant to interoperate with the [WICG Canvas Formatted Text proposal](https://github.com/WICG/canvas-formatted-text) for handling formatted text. An application would usually create a formatted text metrics object, inspect the resulting dimensions to make application-specific layout decisions, and then draw the (possibly adjusted) text to a Canvas.
390 |
391 | ```js
392 | ftxt = FormattedText.format( [ "The quick ",
393 | {
394 | text: "brown",
395 | style: "color: brown; font-weight: bold"
396 | },
397 | " fox jumps over the lazy dog."
398 | ], "font-style: italic", 350 );
399 |
400 |
401 | // inspect ftxt to make layout decisions...
402 | // adjust text as needed (split, relayout, reformat)...
403 |
404 | // once it is ready, draw to DLO
405 | dlo.fillFormattedText(ftxt, 50, 50 );
406 | dlo.toJSON();
407 | ```
408 |
409 | ```json
410 | {
411 |   "metadata": {
412 |     "version": "0.0.1"
413 |   },
414 |   "commands": [
415 |     ["fillFormattedText",
416 |       [
417 |         "The quick ",
418 |         {
419 |           "text": "brown",
420 |           "style": "color: brown; font-weight: bold"
421 |         },
422 |         " fox jumps over the lazy dog."
423 |       ],
424 |       {"fontStyle": "italic"},
425 |       350,
426 |       50,
427 |       50
428 |     ]
429 |   ]
430 | }
431 | ```
432 |
433 | > _**Why**: As above, drawing formatted text makes the text and its associated style information available to the application, extensions and the implementation, improving the accessibility of Canvas-based applications._
434 |
435 | In Discussion
436 | -------------
437 |
438 | ### Variables
439 |
440 | Numeric values can be specified as variables with an initial value and efficiently updated later. Since variables are a retained-mode concept, they are only available on the display list object and not on the retained mode Canvas context.
441 |
442 | ```js
443 | myVar = dlo.variable("myHeight");
444 | myVar.setValue(50);
445 | dlo.drawRect(10, 10, 10, myVar);
446 | dlo.toJSON();
447 | ```
448 |
449 | ```json
450 | {
451 | "metadata": {
452 | "version": "0.0.1"
453 | },
454 | "commands": [
455 | ["drawRect", 10, 10, 10, {"var": "myHeight"}]
456 | ],
457 | "variables": [
458 | {"var": "myHeight", "value": 50}
459 | ]
460 | }
461 | ```
462 |
463 | Variables can be updated, for example in a tight animation loop:
464 |
465 | ```js
466 | dlo = ctx.getDisplayList();
467 | myVar = dlo.getVariable("myHeight");
468 | requestAnimationFrame(function frame() {
469 |   myVar.setValue(myVar.getValue() + 1);
470 |   ctx.updateDisplayList(dlo);
471 |   requestAnimationFrame(frame);  // schedule the next animation frame
472 | });
473 | ```
474 |
475 | > _**Why**: Variables allow the application to delegate multiple updates to a DLO to the implementation, which can compute and apply the delta to a Canvas context more efficiently than the application._
476 |
477 | > _**TODO**: Variables and nested display lists (expressions?)_
478 |
479 | > _**TODO**: Variables and embedded curves?_
480 |
481 | > _**Future**: The animation state machine proposal lets applications delegate even variable updates to the implementation, along pre-specified curves, allowing potentially all frame-to-frame updates of a DLO animation to run near the native speed of the implementation with minimal load on the JavaScript main thread._
482 |
483 |
484 | Resources
485 | ---------
486 |
487 | * [WICG Canvas Formatted Text proposal](https://github.com/WICG/canvas-formatted-text)
488 |
--------------------------------------------------------------------------------
/spec/enhanced-textmetrics.md:
--------------------------------------------------------------------------------
1 | # Enhanced TextMetrics
2 |
3 | **Status**: explainer.
4 |
5 | ## Goals
6 |
7 | Extend the capabilities of `TextMetrics` to support selection rectangles and bounding box queries over character ranges. This would additionally enable precise caret positioning.
8 |
9 | ## Rationale
10 |
11 | Users should be able to interact with canvas-based text input that correctly renders selection and caret positions.
12 |
13 | All metrics available through DOM APIs should also be available on `measureText()`. Furthermore, `measureText()` will always be limited to a single style, and therefore has the potential to be slightly faster (as it doesn’t need layout). `measureText()` must return the same values as the equivalent DOM APIs.
14 |
15 | We also want to provide more power to current canvas text rendering APIs.
16 |
17 | ## Proposal
18 |
19 | ```webidl
20 | dictionary TextClusterOptions {
21 | CanvasTextAlign align;
22 | CanvasTextBaseline baseline;
23 | double x;
24 | double y;
25 | };
26 |
27 | [Exposed=(Window,Worker)]
28 | interface TextCluster {
29 | readonly attribute double x;
30 | readonly attribute double y;
31 | readonly attribute unsigned long start;
32 | readonly attribute unsigned long end;
33 | readonly attribute CanvasTextAlign align;
34 | readonly attribute CanvasTextBaseline baseline;
35 | };
36 |
37 | [Exposed=(Window,Worker)] interface TextMetrics {
38 | // ... extended from current TextMetrics.
39 |
40 |   sequence<DOMRectReadOnly> getSelectionRects(unsigned long start, unsigned long end);
41 |   DOMRectReadOnly getActualBoundingBox(unsigned long start, unsigned long end);
42 |   sequence<TextCluster> getTextClusters(optional TextClusterOptions options);
43 |   sequence<TextCluster> getTextClusters(unsigned long start, unsigned long end, optional TextClusterOptions options);
44 |
45 | unsigned long getIndexFromOffset(double offset);
46 | };
47 |
48 | interface CanvasRenderingContext2D {
49 | // ... extended from current CanvasRenderingContext2D.
50 |
51 | void fillTextCluster(TextCluster textCluster, double x, double y, optional TextClusterOptions options);
52 | void strokeTextCluster(TextCluster textCluster, double x, double y, optional TextClusterOptions options);
53 | };
54 | ```
55 |
56 | `getSelectionRects()` returns the set of rectangles that the UA would render as selection to select a particular character range.
57 |
58 | `getActualBoundingBox()` returns an equivalent box to `TextMetrics.actualBoundingBox`, i.e., the bounding rectangle for the drawing of that range. Notice that this can be (and usually is) different from the selection rect, as those are about the flow and advance of the text. A font that is particularly slanted or whose accents go beyond the flow of text will have a different paint bounding box. For example: if you select this: ***W*** you will see that the end of the W is outside the selection area, which would be covered by the paint (actual bounding box) area.
59 |
60 | The `getIndexFromOffset` method returns the string index for the character at the given `offset` distance from the start position of the text run (accounting for `textAlign` and `textBaseline`), with the offset always increasing
61 | left to right (so negative offsets are valid). Values to the left or right of the text bounds will return 0 or
62 | `string.length` depending on the writing direction. The functionality is similar but not identical to [`document.caretPositionFromPoint`](https://developer.mozilla.org/en-US/docs/Web/API/Document/caretPositionFromPoint). In particular, there is no need to return the element containing the caret, and offsets beyond the boundaries of the string are acceptable.
63 |
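For example, a minimal caret-positioning sketch, assuming the text was drawn at `(textX, textY)` with the same text styles that were set when measuring (`placeCaret()` is an application-defined helper):

```js
canvas.addEventListener('click', (event) => {
  const tm = ctx.measureText(text);
  // Offset of the click relative to the text's start position; the result is
  // clamped to 0 or text.length when the click falls outside the text bounds.
  const caretIndex = tm.getIndexFromOffset(event.offsetX - textX);
  placeCaret(caretIndex); // application-defined
});
```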
64 | `getTextClusters()` provides the ability to render minimal grapheme clusters (in conjunction with a new method for the canvas rendering context, more on that later). That is, for the character range given as input, it returns the minimal rendering operations broken down as much as logically possible, with their corresponding positional data. The position is calculated with the original anchor point for the text as reference, while the `align` and `baseline` parameters in the options dictionary determine the desired alignment of each cluster. If no options dictionary is passed, these values are the same as for the anchor point. If no range is passed, the whole text gets split into clusters.
65 |
66 | To actually render these clusters on the screen, two new methods for the rendering context are proposed: `fillTextCluster()` and `strokeTextCluster()`. They render the cluster with the `align` and `baseline` stored in the object, ignoring the values set in the context. Additionally, to guarantee that the rendered cluster is accurate with the measured text, the rest of the `CanvasTextDrawingStyles` must be applied as they were when `ctx.measureText()` was called, regardless of any changes in these values on the context since. Note that to guarantee that the shaping of each cluster is indeed the same as it was when measured, it's necessary to use the whole string as context when rendering each cluster.
67 |
68 | To enable additional flexibility, an options dictionary can be passed to `fillTextCluster()` and `strokeTextCluster()` to override the values for `align`, `baseline`, `x`, and `y` that will be used to render that cluster. For example, calling `ctx.fillTextCluster(cluster, 10, 10, {x: 0, y:0})` will render the cluster exactly at position `(10, 10)`, instead of rendering as if the text as a whole was placed at `(10, 10)` (which is what the internal `x` and `y` values of the cluster represent). This same overriding applies to the `align` and `baseline` parameters if they are passed in the options dictionary. These options passed to `fillTextCluster()` don't modify the underlying cluster object, and only apply to the rendering of that specific call.
69 |
70 | For `align` specifically, the position is calculated with respect to the advance of said grapheme cluster in the text. For example: if the `align` passed to the function is `center`, for the letter **T** in the string **Test**, the position returned will not be exactly in the middle of the **T**. This is because the advance is reduced by the kerning between the first two letters, making it less than the width of a **T** rendered on its own.
71 |
72 | `getSelectionRects()`, `getActualBoundingBox()`, and `getTextClusters()` operate on character ranges and use positions relative to the text’s origin (i.e., `textBaseline`/`textAlign` is taken into account).
73 |
74 | `getIndexFromOffset()` is a recent rename; the previous name, `caretPositionFromPoint()`, is available in Chrome Canary starting from version `128.0.6587.0`.
75 |
76 | ## Example usage
77 |
78 | ```js
79 | const canvas = document.querySelector('canvas');
80 | const ctx = canvas.getContext("2d");
81 |
82 | const textMetrics = ctx.measureText("let's do this");
83 | ctx.fillStyle = 'red';
84 | const boxForSecondWord = textMetrics.getActualBoundingBox(6, 8);
85 | ctx.fillRect(
86 | boxForSecondWord.x,
87 | boxForSecondWord.y,
88 | boxForSecondWord.width,
89 | boxForSecondWord.height,
90 | );
91 | const selectionForThirdWord = textMetrics.getSelectionRects(9, 13);
92 | ctx.fillStyle = 'lightblue';
93 | for (const s of selectionForThirdWord) {
94 | ctx.fillRect(s.x, s.y, s.width, s.height);
95 | }
96 | ctx.fillStyle = 'black';
97 | ctx.fillText("let's do this", 0, 0);  // same origin used for the rects above
98 | ```
99 |
100 | Expected output:
101 |
102 | 
103 |
104 | `getSelectionRects()` and `getActualBoundingBox()` can be used on Chrome Canary (starting from version `127.0.6483.0` and `128.0.6573.0` respectively) by enabling the feature with `--enable-features=ExtendedTextMetrics` (or the general `--enable-experimental-web-platform-features`).
105 |
106 | ```js
107 | const canvas = document.getElementById('canvas');
108 | const ctx = canvas.getContext('2d');
109 |
110 | ctx.font = '60px serif';
111 | ctx.textAlign = 'left';
112 | ctx.textBaseline = 'middle';
113 |
114 | const text = 'Colors 🎨 are 🏎️ fine!';
115 | let tm = ctx.measureText(text);
116 | let clusters = tm.getTextClusters();
117 |
118 | const colors = ['orange', 'navy', 'teal', 'crimson'];
119 | for(let cluster of clusters) {
120 |   ctx.fillStyle = colors[cluster.start % colors.length];
121 | ctx.fillTextCluster(cluster, 0, 0);
122 | }
123 | ```
124 |
125 | Expected output:
126 |
127 | 
128 |
129 | ```js
130 | const canvas = document.getElementById("canvas");
131 | const ctx = canvas.getContext('2d');
132 |
133 | const center_x = 250;
134 | const center_y = 250;
135 | const radius = 150;
136 | ctx.font = '50px serif';
137 | ctx.textAlign = 'left';
138 | let text = "🐞 Render this text on a circle! 🐈⬛";
139 |
140 | const tm = ctx.measureText(text);
141 | // We want the x-position of the center of each cluster.
142 | const clusters = tm.getTextClusters({align: 'center'});
143 |
144 | for (const cluster of clusters) {
145 | // Since ctx.textAlign was set to 'left' before measuring, all values of
146 | // cluster.x are positive.
147 | let p = cluster.x / tm.width;
148 | let rad = 2 * Math.PI * p;
149 | let x = radius * Math.cos(rad) + center_x;
150 | let y = radius * Math.sin(rad) + center_y;
151 | ctx.save();
152 | ctx.translate(x, y);
153 | ctx.rotate(rad + Math.PI / 2);
154 | ctx.translate(-x, -y);
155 | // The cluster is rendered at precisely (x, y), using align as 'center'
156 | // and baseline as 'middle', even if different values were used when
157 | // measuring.
158 | ctx.fillTextCluster(cluster, x, y,
159 | {align: 'center', baseline: 'middle', x: 0, y: 0});
160 | ctx.restore();
161 | }
162 | ```
163 |
164 | Expected output:
165 |
166 | 
167 |
168 | `getTextClusters()` and `fillTextCluster()` can be used on Chrome Canary (starting from version `132.0.6783.0`) by enabling the feature with `--enable-features=ExtendedTextMetrics` (or the general `--enable-experimental-web-platform-features`). `strokeTextCluster()` is available in Chrome Canary from version `135.0.7039.0`.
169 |
170 | ## Alternatives and Open Questions
171 |
172 |
--------------------------------------------------------------------------------
/spec/filters-usage.md:
--------------------------------------------------------------------------------
1 | Filter syntax alternatives
2 | ===================
3 | **Status**: explainer.
4 |
5 | A javascript interface for using SVG filters within canvas.
6 |
7 | The SVG filter exposes deep, flexible drawing modifiers for 2d graphics.
8 | Integrating these into canvas 2d should be technically feasible once an
9 | interface is defined.
10 |
11 | boilerplate:
12 | ```js
13 | // Javascript example
14 | const canvas = document.createElement('canvas');
15 | const ctx = canvas.getContext('2d');
16 |
17 | ctx.beginLayer(/* filter parameter */)
18 | ctx.fillStyle = "magenta";
19 | ctx.fillRect(10, 10, 300, 200);
20 | ctx.endLayer();
21 | ```
22 |
23 | Last approved (but reverted) spec change (https://github.com/whatwg/html/pull/6763):
24 |
25 | ```js
26 | ctx.beginLayer(new CanvasFilter([
27 | {
28 | filter: "colorMatrix",
29 | type: "matrix",
30 | values: [
31 | 0, 1, 0, 0, 0,
32 | 1, 0, 0, 0, 0,
33 | 0, 0, 1, 0, 0,
34 | 0, 0, 0, 1, 0
35 | ],
36 | },
37 | {
38 | filter: "gaussianBlur",
39 | stdDeviation: 5,
40 | }
41 | ]));
42 |
43 | // If we overload `beginLayer` to add alpha and composite operation:
44 | ctx.beginLayer({
45 | filter: new CanvasFilter([
46 | {
47 | filter: "colorMatrix",
48 | type: "matrix",
49 | values: [
50 | 0, 1, 0, 0, 0,
51 | 1, 0, 0, 0, 0,
52 | 0, 0, 1, 0, 0,
53 | 0, 0, 0, 1, 0
54 | ],
55 | },
56 | {
57 | filter: "gaussianBlur",
58 | stdDeviation: 5,
59 | }
60 | ]),
61 | alpha: 0.5,
62 | compositeOperation: "xor"});
63 |
64 | // Optimizing away the temporary CanvasFilter object. This produces an
65 | // ambiguous API with the `filter` key meaning different things (e.g.
66 | // is the argument passed to `beginLayer` a filter or an options object?)
67 | ctx.beginLayer({
68 | filter: [
69 | {
70 | filter: "colorMatrix",
71 | type: "matrix",
72 | values: [
73 | 0, 1, 0, 0, 0,
74 | 1, 0, 0, 0, 0,
75 | 0, 0, 1, 0, 0,
76 | 0, 0, 0, 1, 0
77 | ],
78 | },
79 | {
80 | filter: "gaussianBlur",
81 | stdDeviation: 5,
82 | }
83 | ],
84 | alpha: 0.5,
85 | compositeOperation: "xor"});
86 |
87 | // Alternative key names, avoiding using `filter:` at two different levels:
88 | ctx.beginLayer({
89 | filter: { name: "gaussianBlur",
90 | stdDeviation: 5 },
91 | alpha: 0.5,
92 | compositeOperation: "xor"});
93 |
94 | ctx.beginLayer({
95 | filter: { filter_name: "gaussianBlur",
96 | stdDeviation: 5 },
97 | alpha: 0.5,
98 | compositeOperation: "xor"});
99 |
100 | ctx.beginLayer({
101 | filters: { filter: "gaussianBlur",
102 | stdDeviation: 5 },
103 | alpha: 0.5,
104 | compositeOperation: "xor"});
105 | ```
106 |
107 | Pros:
108 | - Simple to spec, document and implement. We can reuse the SVG specification
109 | and documentation. Implementation uses a simple IDL definition.
110 |
111 |
112 | Class-based, top-level object
113 | -----------------------------
114 | ```js
115 | const turbulence = new CanvasFilter.Turbulence(
116 | /*baseFrequency=*/0.05, /*numOctaves=*/2);
117 | const displacementMap = new CanvasFilter.DisplacementMap(
118 | /*displacementMap=*/turbulence, /*strength=*/30);
119 | const blur = new CanvasFilter.GaussianBlur(/*stdDeviation=*/2);
120 |
121 | ctx.beginLayer(new CanvasFilter.Sequence([displacementMap, blur]));
122 |
123 | ctx.beginLayer({
124 |   filter: new CanvasFilter.Sequence([displacementMap, blur]),
125 | alpha: 0.5,
126 | compositeOperation: "xor"
127 | });
128 | ```
129 |
130 | Pros:
131 | - Fully typed syntax, IDL-validated.
132 |
133 | Cons:
134 | - Complex to spec, document and implement. We would essentially need to
135 | re-write the SVG filter spec and write an IDL interface for every single
136 | filters.
137 |
138 |
139 | MongoDB-like syntax
140 | -------------------
141 | ```js
142 | ctx.beginLayer(new CanvasFilter([
143 | {displacementMap: {turbulence: [0.05, 2]}},
144 | {gaussianBlur: 2}
145 | ]));
146 |
147 | ctx.beginLayer(new CanvasFilter([
148 | {displacementMap: {
149 | strength: 30,
150 |     map: {turbulence: {baseFrequency: 0.05, numOctaves: 2}}}},
151 | {gaussianBlur: 2}
152 | ]));
153 |
154 | // single step version (no array)
155 | ctx.beginLayer(new CanvasFilter({gaussianBlur: 2}));
156 |
157 | // With alpha and composite operation:
158 | ctx.beginLayer({
159 | filter: new CanvasFilter([
160 | {displacementMap: {
161 | strength: 30,
162 |       map: {turbulence: {baseFrequency: 0.05, numOctaves: 2}}}},
163 | {gaussianBlur: 2}
164 | ]),
165 | alpha: 0.5,
166 | compositeOperation: "xor"
167 | });
168 | ```
169 |
170 | Pros:
171 | - Minimal syntax, fun to use.
172 |
173 | Cons:
174 | - Using filter type as key suggests that one could do:
175 | ```js
176 | new CanvasFilter({gaussianBlur: 2, displacementMap: {...}})
177 | ```
178 |
179 |
180 |
181 | Function-based, top-level object
182 | -----------------------------
183 | ```js
184 | const turbulence = CanvasFilter.Turbulence(
185 | /*baseFrequency=*/0.05, /*numOctaves=*/2);
186 | const displacementMap = CanvasFilter.DisplacementMap(
187 | /*displacementMap=*/turbulence, /*strength=*/30);
188 | const blur = CanvasFilter.GaussianBlur(/*stdDeviation=*/2);
189 |
190 | ctx.beginLayer(CanvasFilter.Sequence([displacementMap, blur]));
191 |
192 | // With alpha and composite operation:
193 | ctx.beginLayer({
194 | filter: CanvasFilter.Sequence([displacementMap, blur]),
195 | alpha: 0.5,
196 | compositeOperation: "xor"
197 | });
198 | ```
199 |
200 |
201 | Single-class, parameters
202 | ------------------------
203 | ```js
204 | // here either window.CanvasFilter or ctx.createCanvasFilter
205 |
206 | // Create filters
207 | const turbulence = new CanvasFilter("turbulence");
208 | turbulence.baseFrequency = 0.05;
209 | turbulence.numOctaves = 2;
210 | const displacementMap = new CanvasFilter("displacementMap");
211 | displacementMap.in = ctx; // Draw ops as inputs
212 | displacementMap.in2 = turbulence;
213 | displacementMap.scale = 30;
214 | const outputFilter = new CanvasFilter();
215 | outputFilter.type = "gaussianBlur";
216 | outputFilter.in = displacementMap;
217 | outputFilter.stdDeviation = 2;
218 |
219 | // Use output filter in canvas
220 | ctx.beginLayer(outputFilter);
221 | ```
222 |
--------------------------------------------------------------------------------
/spec/filters.md:
--------------------------------------------------------------------------------
1 | Filters
2 | =======
3 | **Status**: deprecated.
4 |
5 | A javascript interface for using SVG filters within canvas.
6 |
7 | The SVG filter exposes deep, flexible drawing modifiers for 2d graphics.
8 | Integrating these into canvas 2d should be technically feasible once an
9 | interface is defined.
10 |
11 | [Alternative usages](https://github.com/fserb/canvas2D/blob/master/spec/filters-usage.md)
12 |
13 |
14 | Rationale
15 | ---------
16 |
17 | A strong filter library is available on all major 2D APIs. A lot of those filters are currently implemented indirectly by Canvas2D through its support for URL filters (but not on OffscreenCanvas).
18 |
19 | This proposal tries to build up the basis for having more interesting filters available directly on canvas, programmatically.
20 |
21 |
22 | Proposal
23 | --------
24 |
25 | ```webidl
26 | // CanvasFilter, exposed to window and workers
27 | // with generic objects we don't need to define every filter within idl files
28 | [
29 | Exposed=(Window,Worker)
30 | ] interface CanvasFilter {
31 |
32 |   [CallWith=(ScriptState), RaisesException] constructor((object or FrozenArray<object>) init);
33 |
34 | };
35 | ```
36 |
37 | Example usage
38 | -------------
39 |
40 | ```js
41 | // Javascript example
42 | const canvas = document.createElement('canvas');
43 | const ctx = canvas.getContext('2d');
44 |
45 | /*
46 | Create the overall filter. The first primitive will get drawing operations as input;
47 | the output of each primitive will be sent to the input of the following element
48 | in the array. The final element will output to the screen.
49 |
50 | Here the final filter graph will look like this:
51 |
52 | turbulence
53 | |
54 | canvasDrawOps -----> displacementMap -------> blur ------> screen
55 | */
56 |
57 | // Defining this as a separate object will allow us to vary parameters later
58 | const filterArray = [{turbulence: {frequency: 0.05, numOctaves: 2}},
59 |                      {displacementMap: {in: "SourceGraphic", in2: "previous", scale: 30}},
60 | {blur: {stdDeviation: 2}}];
61 |
62 | // Construct the canvas filter
63 | ctx.filter = new CanvasFilter(filterArray);
64 |
65 | // Draw with created filter
66 | ctx.fillStyle = "magenta";
67 | ctx.fillRect(10, 10, 300, 200);
68 |
69 | // Modify filter
70 | filterArray[0]['turbulence']['frequency'] = 1.5; // Denser noise pattern
71 | filterArray[2]['blur']['stdDeviation'] = 0.5; // Less blur
72 |
73 | // Must construct a new filter object
74 | ctx.filter = new CanvasFilter(filterArray);
75 |
76 | // Draw on top with modified filter
77 | ctx.fillStyle = "cyan";
78 | ctx.beginPath();
79 | ctx.arc(160, 110, 80, 0, 2 * Math.PI);
80 | ctx.fill();
81 | ```
82 |
83 | The above code will produce the following canvas:
84 |
85 | 
86 |
87 | Alternatives considered
88 | -----------------------
89 |
90 | ### Explicit inputs and outputs
91 |
92 | ```webidl
93 | interface CanvasFilter {
94 | attribute CanvasImageSource in;
95 | attribute CanvasImageSource in2;
96 | }
97 |
98 | interface CanvasGaussianBlurFilter : CanvasFilter {
99 | attribute unrestricted double stdDeviation;
100 | attribute DOMString edgeMode; // "duplicate", "wrap", "none" (default "none")
101 | }
102 | ```
103 |
104 | ```js
105 | const blurFilter = new CanvasGaussianBlurFilter();
106 |
107 | // Javascript example
108 | const canvas = document.createElement('canvas');
109 | const ctx = canvas.getContext('2d');
110 |
111 | // Create filters
112 | const turbulence = new CanvasFilter("turbulence");
113 | turbulence.baseFrequency = 0.05;
114 | turbulence.numOctaves = 2;
115 | const displacementMap = new CanvasFilter("displacementMap");
116 | displacementMap.in = ctx; // Draw ops as inputs
117 | displacementMap.in2 = turbulence;
118 | displacementMap.scale = 30;
119 | const outputFilter = new CanvasFilter();
120 | outputFilter.type = "gaussianBlur";
121 | outputFilter.in = displacementMap;
122 | outputFilter.stdDeviation = 2;
123 |
124 | // Attach output filter to canvas
125 | ctx.filter = outputFilter;
126 |
127 | // Draw with created filter
128 | ctx.fillStyle = "magenta";
129 | ctx.fillRect(10, 10, 300, 200);
130 |
131 | // Modify filter
132 | turbulence.baseFrequency = 1.5; // Denser noise pattern
133 | outputFilter.stdDeviation = 0.5; // Less blur
134 | ctx.filter.update();
135 |
136 | // Draw on top with modified filter
137 | ctx.fillStyle = "cyan";
138 | ctx.beginPath();
139 | ctx.arc(160, 110, 80, 0, 2 * Math.PI);
140 | ctx.fill();
141 | ```
142 |
143 | This approach forgoes the `Sequence` class in favor of making inputs more
144 | explicit. The downside is that without the final filter construction phase it's
145 | less clear what's going on. Also, the code for using draw operations as inputs is not
146 | straightforward.
147 |
148 |
149 | References
150 | ----------
151 |
152 | - https://developer.mozilla.org/en-US/docs/Web/SVG/Element/filter
153 | - https://en.wikipedia.org/wiki/SVG_filter_effects
154 |
--------------------------------------------------------------------------------
/spec/layers-with-filters.md:
--------------------------------------------------------------------------------
1 | # Layers with filter parameter
2 |
3 | **Status**: explainer.
4 |
5 | ## Goals
6 | Extend the [canvas 2d layer proposal](https://github.com/fserb/canvas2D/blob/master/spec/layers.md) to support filter as a layer parameter.
7 |
8 | ## Rationale
9 |
10 | The main purpose of the layer API is to apply effects like transparency or filters to a group of draw calls as a whole, without having to use a temporary canvas. The only way to apply these effects right now is to set them as global rendering state in the context. Setting a filter at the context level has a cost, however. When drawing a path or an image with a context-level filter, implementations need to implicitly create a layer, as if the draw call was wrapped between `beginLayer()` and `endLayer()` calls. Thus, layers and filters go hand in hand: using filters always requires an implicit or explicit layer.
11 |
12 | Developers should therefore avoid setting a context-level filter and then using it across multiple independent draw calls. For instance, the following would be inefficient because each draw call would create its own implicit layer:
13 | ```js
14 | // Draw a sepia checkerboard:
15 | ctx.filter = 'sepia()';
16 | ctx.fillStyle = 'grey';
17 | for (let x = 0; x < 4; ++x) {
18 | for (let y = 0; y < 8; ++y) {
19 | ctx.fillRect((x * 2 + y % 2) * 10, y * 10, 10, 10);
20 | }
21 | }
22 | ```
23 |
24 | The correct way to write this code would be to wrap the whole thing in a single layer:
25 | ```js
26 | // Draw a sepia checkerboard:
27 | ctx.filter = 'sepia()';
28 | ctx.fillStyle = 'grey';
29 | ctx.beginLayer();
30 | for (let x = 0; x < 4; ++x) {
31 | for (let y = 0; y < 8; ++y) {
32 | ctx.fillRect((x * 2 + y % 2) * 10, y * 10, 10, 10);
33 | }
34 | }
35 | ctx.endLayer();
36 | ```
37 |
38 | With this in mind, it's a good practice to always clear the context filter after the layer, to avoid accidentally using it for more draw calls later:
39 | ```js
40 | ctx.save();
41 | ctx.filter = 'sepia()';
42 | ctx.beginLayer();
43 | // ...
44 | ctx.endLayer();
45 | ctx.restore();
46 | ```
47 | or:
48 | ```js
49 | ctx.filter = 'sepia()';
50 | ctx.beginLayer();
51 | // ...
52 | ctx.endLayer();
53 | ctx.filter = 'none';
54 | ```
55 |
56 | The first approach is quite verbose, and it's inefficient because `save()` and `restore()` save and restore a lot more state than is necessary. The second approach requires manual state and scope management, which is error prone. A better solution would be to have the filter be a property of the layer itself:
57 | ```js
58 | ctx.beginLayer({filter: 'sepia()'});
59 | // ...
60 | ctx.endLayer();
61 | ```
62 |
63 |
64 | ## Proposal
65 |
66 | ```webidl
67 | typedef record<DOMString, any> CanvasFilterPrimitive;
68 | typedef (DOMString or
69 |          CanvasFilterPrimitive or
70 |          sequence<CanvasFilterPrimitive>) CanvasFilterInput;
71 |
72 | dictionary BeginLayerOptions {
73 | CanvasFilterInput? filter;
74 | };
75 |
76 | interface mixin CanvasLayers {
77 | undefined beginLayer(optional BeginLayerOptions options = {});
78 | undefined endLayer();
79 | };
80 | ```
81 |
82 | With this proposal, `beginLayer()` can be called with an optional filter as argument, in which case the layer's resulting texture will be rendered in the canvas using that filter. Filters are specified as a CSS filter string or as CanvasFilterInput objects ([originally proposed here](https://github.com/whatwg/html/issues/5621)), which describe SVG filters with a JavaScript syntax. [See below](#possible-api-improvements) for possible future improvements, [and here](https://docs.google.com/document/d/1jeLn8TbCYVuFA9soUGTJnRjFqLkqDmhJElmdW3w_O4Q/edit#heading=h.52ab2yqq661g) for a full analysis of alternatives considered.
83 |
84 | ## Example usage
85 |
86 | ```js
87 | // Javascript example
88 | const canvas = document.createElement('canvas');
89 | const ctx = canvas.getContext('2d');
90 |
91 | ctx.globalAlpha = 0.5;
92 |
93 | ctx.beginLayer({filter: 'blur(4px)'});
94 |
95 | ctx.fillStyle = 'rgba(225, 0, 0, 1)';
96 | ctx.fillRect(50, 50, 75, 50);
97 | ctx.fillStyle = 'rgba(0, 255, 0, 1)';
98 | ctx.fillRect(70, 70, 75, 50);
99 | ctx.endLayer();
100 | ```
101 |
102 | Would produce the same outcome as,
103 |
104 | ```js
105 | // Javascript example
106 | const canvas = document.createElement('canvas');
107 | const ctx = canvas.getContext('2d');
108 |
109 | const canvas2 = document.createElement('canvas');
110 | const ctx2 = canvas2.getContext('2d');
111 | ctx2.fillStyle = 'rgba(225, 0, 0, 1)';
112 | ctx2.fillRect(50, 50, 75, 50);
113 | ctx2.fillStyle = 'rgba(0, 255, 0, 1)';
114 | ctx2.fillRect(70, 70, 75, 50);
115 |
116 | ctx.globalAlpha = 0.5;
117 | ctx.filter = 'blur(4px)';
118 | ctx.drawImage(canvas2, 0, 0);
119 | ```
120 |
121 | Filters can be specified as a `CanvasFilterPrimitive` object:
122 | ```js
123 | ctx.beginLayer({filter: {name: 'gaussianBlur', stdDeviation: 4}});
124 | ```
125 |
126 | Filters can also be specified as a list, to chain filter effects:
127 | ```js
128 | ctx.beginLayer({filter: [
129 | {name: 'gaussianBlur', stdDeviation: 4},
130 | {name: 'dropShadow', dx: 5, dy: 5}
131 | ]});
132 | ```
133 |
134 | ## Possible API improvements
135 | In addition to supporting a `CanvasFilterInput` argument, we could also support a standalone `CanvasFilter` object. This could provide optimization opportunities by allowing filters to be parsed and resolved once and reused in multiple layers. Note that filters like `dropShadow` can have colors that depend on the Canvas element style (e.g. `currentColor`, `color-scheme`, `forced-colors`, etc.), meaning that these filters can only be entirely resolved once they're used in a particular context.
136 |
137 | ```webidl
138 | typedef record<DOMString, any> CanvasFilterPrimitive;
139 | typedef (DOMString or
140 |          CanvasFilterPrimitive or
141 |          sequence<CanvasFilterPrimitive>) CanvasFilterInput;
142 |
143 | [
144 | Exposed=(Window, Worker)
145 | ] interface CanvasFilter {
146 | constructor(CanvasFilterInput init);
147 |
148 | };
149 |
150 | dictionary BeginLayerOptions {
151 | (CanvasFilterInput or CanvasFilter)? filter = null;
152 | };
153 |
154 | interface mixin CanvasLayers {
155 | undefined beginLayer(optional BeginLayerOptions options = {});
156 | undefined endLayer();
157 | };
158 | ```
159 |
160 | Example usage:
161 | ```js
162 | // No filter:
163 | ctx.beginLayer();
164 |
165 | // Without intermediate CanvasFilter object:
166 | ctx.beginLayer({filter: {name: 'gaussianBlur', stdDeviation: 4}});
167 |
168 | // Composite filters without a CanvasFilter object:
169 | ctx.beginLayer(
170 | {filter: [{name: 'gaussianBlur', stdDeviation: 4},
171 | {name: 'dropShadow', dx: 5, dy: 5}]});
172 |
173 | // CanvasFilter object:
174 | const reusableFilter = new CanvasFilter(
175 | {name: 'gaussianBlur', stdDeviation: 4});
176 | ctx1.beginLayer({filter: reusableFilter});
177 | ctx2.beginLayer({filter: reusableFilter});
178 | ```
179 |
180 | This API would be easily extendable, allowing for more arguments to be added to `beginLayer` if we ever need to. For instance, beginLayer could accept parameters like `alpha`, `compositeOperation` or `antialiasing`:
181 | ```js
182 | ctx.beginLayer({filter: [{name: 'gaussianBlur', stdDeviation: 2},
183 | {name: 'dropShadow', dx: 5, dy: 5}],
184 | compositeOp: "source-over",
185 | antialiasing: "disabled"});
186 | ```
187 |
188 | ## Corner cases
189 |
190 | ### Interaction with `ctx.filter = ...;`
191 | If a context-level filter and a layer filter are both specified, the context-level filter applies to the result of the layer filter. Thus, the following two snippets produce the same results:
192 |
193 | ```js
194 | ctx.filter = 'drop-shadow(10px 10px 0px blue)';
195 | ctx.beginLayer({filter: 'drop-shadow(5px 5px 0px red)'});
196 | ctx.fillRect(20, 60, 100, 20);
197 | ctx.fillRect(60, 20, 20, 100);
198 | ctx.endLayer();
199 | ```
200 |
201 | ```js
202 | ctx.filter = 'drop-shadow(10px 10px 0px blue)';
203 | ctx.beginLayer();
204 | ctx.filter = 'drop-shadow(5px 5px 0px red)';
205 | ctx.beginLayer();
206 | ctx.fillRect(20, 60, 100, 20);
207 | ctx.fillRect(60, 20, 20, 100);
208 | ctx.endLayer();
209 | ctx.endLayer();
210 | ```
211 |
212 | ### Filter dimensions
213 | In SVG, filters can have their own dimensions, specified using the `width`, `height`, `x`, and `y` properties. Canvas layers can achieve the same effect using clipping.
214 |
215 | For instance, the following two snippets produce the same results:
216 | ```html
217 |
220 |
221 |
222 |
223 |
224 |
225 | ```
226 |
227 | ```html
228 |
240 | ```
--------------------------------------------------------------------------------
/spec/mesh2d.md:
--------------------------------------------------------------------------------
1 | # Mesh2D
2 |
3 | **Status**: explainer
4 |
5 | ## Goals
6 | Provide a simple and high-performance canvas 2D triangle mesh API that can be used to batch-render a large number of textured triangles.
7 |
8 | Offer full control of the mesh geometry and texture mapping, to support complex effects that require an intentional mesh design (custom topology, variable density, ordering, etc) -- as opposed to auto-generating the mesh.
9 |
10 | ## Rationale
11 | Triangle meshes are a basic building block of most low-level graphics APIs, and their rendering is heavily optimized on modern hardware. They can represent complex geometries, and provide an efficient mechanism for texture mapping and morphing.
12 |
13 | Mapping textures to arbitrary geometries is of great importance to animation engines (e.g. Lottie, Rive), both as a direct animation mechanism and as a utility for implementing various morphological effects. Since Canvas2D does not currently support drawing triangle meshes, such animation engines must either resort to inefficient workarounds or limit the set of features they support on the web.
14 |
15 | ## Use cases
16 |
17 | ### 2D texture mapping
18 | Mapping textures to complex 2D shapes is a useful technique for implementing various distortion effects (stretch/compress, fish eye, etc).
19 |
20 | 
21 |
22 | ### 2D shape animations
23 | Currently, shapes can be animated as 2D paths - which offer great geometric accuracy, but very few fill options (solid color, gradients, patterns).
24 |
25 | Meshes enable natural animation of heavily textured shapes, as they seamlessly translate the geometric transformation to texture space.
26 |
27 | 
28 | 
29 |
30 | ### Color/gradient meshes
31 | Instead of sampling from a texture, meshes can also use color information assigned to each vertex to generate 2D multi-point gradients.
32 |
33 | 
34 |
35 | ### 3D effect simulation
36 | While a 2D mesh does not offer native support for 3D transformations, it does simplify the emulation of such effects by separating the vertex geometry from texture mapping concerns.
37 |
38 | Clients can easily create 3D-like effects by manipulating a small number (relative to the shape and texture size) of vertices, and then passing them to the 2D drawing API.
39 |
40 | 
41 |
42 |
43 | ## Proposal
44 | ### Mesh2D API
45 | Mesh2D is a collection of APIs for drawing 2D meshes and managing mesh-related data.
46 |
47 | At a high level, drawing a triangle mesh requires
48 |
49 | - a **mesh geometry** (vertex positions and triangle topology)
50 | - a **texture source** (images, gradients, colors, patterns, etc)
51 | - vertex -> texture **mapping information** (per-vertex uv coordinates or per-vertex colors)
52 |
53 |
54 | ```webidl
55 | // Describes the mesh geometry (vertex positions), and can be constructed
56 | // from a float32 typed array (two floats per vertex).
57 | [ Exposed=(Window,Worker,PaintWorklet) ] interface Mesh2DVertexBuffer {};
58 |
59 | // Describes the mapping of vertices to UV coordinates. Similar format and
60 | // factory to Mesh2DVertexBuffer.
61 | [ Exposed=(Window,Worker,PaintWorklet) ] interface Mesh2DUVBuffer {};
62 |
63 | // Describes the mapping of vertices to colors (one RGBA color per vertex).
64 | // Can be constructed from a uint8 typed array (four uints per color).
65 | [ Exposed=(Window,Worker,PaintWorklet) ] interface Mesh2DColorBuffer {};
66 |
67 | // Describes the mesh topology (triangle grouping), as an array of vertex indices,
68 | // three indices per triangle. Can be constructed from a uint16 typed array.
69 | // (n.b. these are indices of vertices, not of individual float32 entries, in the vertex buffer)
70 | [ Exposed=(Window,Worker,PaintWorklet) ] interface Mesh2DIndexBuffer {};
71 |
72 | typedef (CanvasImageSource or
73 | CanvasGradient or
74 | CanvasPattern) MeshTextureSource;
75 |
76 | interface CanvasRenderingContext2D {
77 | // Mesh buffer factories.
78 | [RaisesException] Mesh2DVertexBuffer createMesh2DVertexBuffer(Float32Array buffer);
79 | [RaisesException] Mesh2DUVBuffer createMesh2DUVBuffer(Float32Array buffer);
80 | [RaisesException] Mesh2DColorBuffer createMesh2DColorBuffer(Uint8ClampedArray buffer);
81 | [RaisesException] Mesh2DIndexBuffer createMesh2DIndexBuffer(Uint16Array buffer);
82 |
83 | // Triangle mesh using a texture source and UV mapping.
84 | [HighEntropy, RaisesException] void drawMesh(Mesh2DVertexBuffer vertex_buffer,
85 | Mesh2DUVBuffer uv_buffer,
86 | Mesh2DIndexBuffer index_buffer,
87 | MeshTextureSource texture_source);
88 |
89 | // Triangle mesh using explicit vertex colors.
90 | [HighEntropy, RaisesException] void drawMesh(Mesh2DVertexBuffer vertex_buffer,
91 | Mesh2DColorBuffer color_buffer,
92 | Mesh2DIndexBuffer index_buffer);
93 | };
94 | ```
95 |
96 | ## Example usage
97 | ### Allocating mesh buffers
98 | ```js
99 | const ctx = document.getElementById("canvas").getContext("2d");
100 |
101 | // A trivial 4-vertex, 2-triangle mesh:
102 | // 0 1
103 | // *--*
104 | // |\ |
105 | // | \|
106 | // *--*
107 | // 3 2
108 |
109 | // Vertex position buffer: Float32Array, two float32s per vertex.
110 | const vbuf = ctx.createMesh2DVertexBuffer(new Float32Array([
111 | 0, 0,
112 | 50, 0,
113 | 50, 100,
114 | 0, 100
115 | ]));
116 |
117 | // Index buffer: Uint16Array, three uints per triangle.
118 | const ibuf = ctx.createMesh2DIndexBuffer(new Uint16Array([
119 | 0, 2, 1,
120 | 0, 2, 3
121 | ]));
122 |
123 | // Color buffer: four uint8s per vertex, RGBA format.
124 | const cbuf = ctx.createMesh2DColorBuffer(new Uint8ClampedArray([
125 | 255, 0, 0, 255,
126 | 0, 255, 0, 255,
127 | 0, 0, 255, 255,
128 | 0, 0, 0, 255
129 | ]));
130 |
131 | // UV buffer: Float32Array, two float32s per vertex.
132 | const uvbuf = ctx.createMesh2DUVBuffer(new Float32Array([
133 | 0, 0,
134 | 1, 0,
135 | 1, 1,
136 | 0, 1
137 | ]));
138 | ```
139 |
140 | ### Drawing a mesh
141 |
142 | ```js
143 | const ctx = document.getElementById("canvas").getContext("2d");
144 |
145 | // A color mesh, which does not require a texture.
146 | ctx.drawMesh(vbuf, cbuf, ibuf);
147 |
148 | // A textured mesh, using ImageBitmap, HTMLImageElement,
149 | // SVGImageElement, OffscreenCanvas, HTMLCanvasElement, VideoFrame,
150 | // HTMLVideoElement or CanvasPattern texture sources.
151 | ctx.drawMesh(vbuf, uvbuf, ibuf, texture_source);
152 | ```
153 |
154 | ## Rendering model
155 | In order to minimize friction, Mesh2D adheres to the same model as other existing mesh APIs.
156 |
157 | ### Draw order
158 | Conceptually, all triangles are rendered individually in the order specified by the index buffer. In practice, this process is heavily parallelized in hardware.
159 |
160 | ### Anti-aliasing
161 | In order to avoid interior seaming artifacts, anti-aliasing is always disabled.
162 |
163 | ### Global state
164 | The render context transformation, clip state, blend mode and opacity are observed when rendering the mesh.
165 |
166 | ### Overlap
167 | Triangle overlap is allowed, and the behavior is well defined thanks to the deterministic draw order.
168 |
169 | ### Triangle interpolation
170 | Vertex attributes (UV coordinates and colors) are interpolated across the triangle area using [barycentric weights](https://tinyurl.com/msjb6f89).
171 |
172 | ### UV coordinates range
173 | UV coordinates are normalized ([0..1]) relative to the current texture size. Values outside this range are clamped.
174 |
175 | When used in conjunction with a generated texture with an explicit repeat mode (e.g. pattern), UV coordinates are not clamped but are instead tiled based on the specified repeat mode.
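A short sketch of the tiling case (hedged; it reuses `vbuf` and `ibuf` from the example above and assumes `imageBitmap` is an already-decoded image source):

```js
// UVs outside [0..1] tile when the texture source is a repeating pattern.
const pattern = ctx.createPattern(imageBitmap, 'repeat');
const uvTiled = ctx.createMesh2DUVBuffer(new Float32Array([
  0, 0,
  2, 0,
  2, 2,
  0, 2   // the texture repeats twice along each axis of the quad
]));
ctx.drawMesh(vbuf, uvTiled, ibuf, pattern);
```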
176 |
177 |
178 | ### Triangle winding
179 | In the absence of backface culling provisions, the order of vertices for a given triangle does not matter.
180 |
181 | ## References
182 |
183 | Some examples of the same idea outside Canvas:
184 | - [*`glDrawElements()`*](https://registry.khronos.org/OpenGL-Refpages/gl4/html/glDrawElements.xhtml) in OpenGL.
185 | - [*`vkCmdDrawIndexed()`*](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/vkCmdDrawIndexed.html) in Vulkan.
186 | - [*`MDLMesh`*](https://developer.apple.com/documentation/modelio/mdlmesh) in Apple's Model I/O.
187 | - [*`drawVertices()`* and *`drawMesh()`*](https://api.skia.org/classSkCanvas.html) in Skia.
188 | - [*`drawVertices()`*](https://api.flutter.dev/flutter/dart-ui/Canvas/drawVertices.html) in Flutter.
189 | - [*`Mesh`*](https://threejs.org/docs/#api/en/objects/Mesh) in Three.js.
190 |
--------------------------------------------------------------------------------
/spec/perspective-transforms.md:
--------------------------------------------------------------------------------
1 | Perspective transforms
2 | ======================
3 | **Status**: explainer.
4 |
5 | Allow for perspective (non-affine) 2D transforms. Non-affine transforms are
6 | transformations where parallel lines don't remain parallel after the transform.
7 |
8 |
9 | Rationale
10 | ---------
11 |
12 | Non-affine/perspective transforms are supported in all major 2D APIs (either directly or through filters). For flat fills, a perspective transform can be emulated with regular paths. Unfortunately, this doesn't work for drawImage, where the non-affine transform needs to be applied to the rendered image.
13 |
14 | Use cases include rendering images with flipping and perspective effects.
15 |
16 | Proposal
17 | --------
18 |
19 | ```webidl
20 | interface mixin CanvasTransform {
21 | // Already exists:
22 | [NewObject] DOMMatrix getTransform();
23 | void resetTransform();
24 |
25 | void transform(unrestricted double a, unrestricted double b, unrestricted double c, unrestricted double d, unrestricted double e, unrestricted double f);
26 | void setTransform(unrestricted double a, unrestricted double b, unrestricted double c, unrestricted double d, unrestricted double e, unrestricted double f);
27 |
28 | // updates:
29 | void scale(unrestricted double x, unrestricted double y, optional unrestricted double z);
30 | void translate(unrestricted double x, unrestricted double y, optional unrestricted double z);
31 | void rotate3d(unrestricted double angleZ, optional unrestricted double angleY, optional unrestricted double angleX);
32 | void rotateAxis(unrestricted double axisX, unrestricted double axisY, unrestricted double axisZ, unrestricted double angle);
33 | void perspective(unrestricted double length);
34 |
35 | void setTransform(optional DOMMatrixInit transform = {});
36 | void transform(DOMMatrixInit transform);
37 | };
38 | ```
39 |
40 | We now allow a full 4x4 Matrix as the state of a Canvas transform. We also create
41 | a new `transform()` function that multiplies the current matrix by the passed
42 | parameter.
43 |
44 | Finally, we support `scale`/`translate`/`rotate` with extra optional parameters.
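A small sketch of the `DOMMatrixInit` overload (hedged; `img` is assumed to be an already-loaded image):

```js
const m = new DOMMatrix()
    .translate(50, 50)
    .rotateAxisAngle(0, 1, 0, 30)  // rotate 30 degrees around the y axis
    .translate(-50, -50);
ctx.transform(m);  // multiplies the current (now 4x4) transform by m
ctx.drawImage(img, 0, 0);
```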
45 |
46 |
47 | ### Open issues and questions
48 |
49 | * do we need to specify non-affine perspective texture transforms for drawImage?
50 | * support pre-multiply for transform?
51 |
52 | Example usage
53 | -------------
54 |
55 | ```js
56 | // Javascript example, draws a trapezoid
57 | const canvas = document.createElement('canvas');
58 | const ctx = canvas.getContext('2d');
59 |
60 | const w = canvas.width;
61 | const h = canvas.height;
62 | ctx.fillStyle = "magenta";
63 | ctx.fillRect(w/2 - 50, h/2 - 50, 100, 100);
64 | ctx.fillStyle = "rgba(0, 2550, 255, 0.8)";
65 | ctx.translate(w/2, h/2);
66 | ctx.perspective(100);
67 | ctx.rotate3d(1.0, 0, 0);
68 | ctx.translate(-w/2, -h/2);
69 | ctx.fillRect(w/2 - 50, h/2 - 50, 100, 100);
70 | ```
71 |
72 | The above code will produce the following canvas:
73 |
74 | 
75 |
76 | References
77 | ----------
78 |
79 | - https://drafts.csswg.org/css-transforms-2
80 | - https://drafts.fxtf.org/geometry/#DOMMatrix
81 |
--------------------------------------------------------------------------------
/spec/recording.md:
--------------------------------------------------------------------------------
1 | Recorded Pictures
2 | =================
3 | **Status**: explainer.
4 |
5 | This feature allows users to batch multiple 2D commands in a single object that
6 | can be drawn ("replayed") multiple times on the current Canvas.
7 |
8 | This feature enables big performance improvements across canvas usages, as it
9 | removes a large amount of JavaScript call overhead.
10 |
11 |
12 | Proposal
13 | --------
14 |
15 | ```webidl
16 | interface mixin CanvasRecorder {
17 | Canvas2DRecording createCanvasRecording();
18 | void playback(Canvas2DRecording rec);
19 | };
20 |
21 | interface Canvas2DRecording {
22 | readonly attribute HTMLCanvasElement canvas;
23 | };
24 |
25 | Canvas2DRecording includes CanvasState;
26 | Canvas2DRecording includes CanvasTransform;
27 | Canvas2DRecording includes CanvasCompositing;
28 | Canvas2DRecording includes CanvasImageSmoothing;
29 | Canvas2DRecording includes CanvasFillStrokeStyles;
30 | Canvas2DRecording includes CanvasShadowStyles;
31 | Canvas2DRecording includes CanvasFilters;
32 | Canvas2DRecording includes CanvasRect;
33 | Canvas2DRecording includes CanvasDrawPath;
34 | Canvas2DRecording includes CanvasUserInterface;
35 | Canvas2DRecording includes CanvasText;
36 | Canvas2DRecording includes CanvasDrawImage;
37 | Canvas2DRecording includes CanvasImageData;
38 | Canvas2DRecording includes CanvasPathDrawingStyles;
39 | Canvas2DRecording includes CanvasTextDrawingStyles;
40 | Canvas2DRecording includes CanvasPath;
41 |
42 | CanvasRenderingContext2D includes CanvasRecorder;
43 | OffscreenCanvasRenderingContext2D includes CanvasRecorder;
44 | ```
45 | `Canvas2DRecording` objects are canvas-independent (they can be replayed on any canvas).
46 | They are also transferable.
47 |
48 | All rendering state set while recording is replayed at playback time.
49 |
50 | ### Open issues and questions
51 | - do the recordings need to be Canvas dependent?
52 | - do they need to be frozen?
53 | - can they be transfered?
54 | - should they be on the global namespace? create vs new?
55 |
56 |
57 | Example usage
58 | -------------
59 |
60 | ```js
61 | // Javascript example
62 | const canvas = document.createElement('canvas');
63 | const ctx = canvas.getContext('2d');
64 |
65 | const rec = ctx.createCanvasRecording();
66 |
67 | rec.fillStyle = "red";
68 | rec.fillRect(0, 0, 10, 10);
69 |
70 | ctx.playback(rec);
71 | ```
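A short follow-up sketch, replaying the same recording on a second canvas (hedged; it relies on the canvas-independence described above):

```js
const canvas2 = document.createElement('canvas');
const ctx2 = canvas2.getContext('2d');

// `rec` was created from `ctx`, but recordings are not tied to that canvas.
ctx2.playback(rec);
```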
72 |
73 | Alternatives considered
74 | -----------------------
75 |
76 | - change canvas state to a "now recording" mode.
77 | - add a `freeze` or `done` method to `Canvas2DRecording`
78 |
79 |
80 | References
81 | ----------
82 |
--------------------------------------------------------------------------------
/spec/reset.md:
--------------------------------------------------------------------------------
1 | clear function
2 | ==============
3 | **Status**: explainer.
4 |
5 | Provide a `reset()` function that resets the state of the Canvas.
6 |
7 |
8 | Rationale
9 | ---------
10 |
11 | Currently, there's no canonical way of clearing a Canvas; some of the commonly used
12 | approaches are actually wrong, and users are overall [confused](https://stackoverflow.com/questions/2142535/how-to-clear-the-canvas-for-redrawing) about it. Current ways of clearing a canvas:
13 |
14 | 1. `ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);` is wrong, as it doesn't reset the path.
15 | 2. `ctx.fillStyle = "white"; ctx.fillRect(0, 0, ctx.canvas.width, ctx.canvas.height);` can lead to performance issues.
16 | 3. `ctx.canvas.width = ctx.canvas.width` clears the canvas, but also resets all context state. Very opaque.
17 | 4. `ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height); ctx.beginPath();` is the only good way of doing so.
18 |
19 | This is a small quality of life improvement.
20 |
21 |
22 | Proposal
23 | --------
24 |
25 | ```webidl
26 | interface CanvasRenderingContext2D {
27 | // extending original
28 | void reset();
29 | };
30 |
31 | interface OffscreenCanvasRenderingContext2D {
32 | // extending original
33 | void reset();
34 | };
35 | ```
36 |
37 | `reset()` clears the backing buffer to transparent black, clears the current path, and
38 | empties the transformation stack.
39 |
40 | ### Open issues and questions
41 |
42 | - support an optional color parameter
43 |
44 |
45 | Example usage
46 | -------------
47 |
48 | ```js
49 | // Javascript example
50 | const canvas = document.createElement('canvas');
51 | const ctx = canvas.getContext('2d');
52 |
53 | ctx.reset();
54 | ```
55 |
56 | Alternatives considered
57 | -----------------------
58 |
59 | None.
60 |
61 |
62 | References
63 | ----------
64 |
65 | None.
66 |
--------------------------------------------------------------------------------
/spec/roundrect.md:
--------------------------------------------------------------------------------
1 | RoundRect
2 | =========
3 | **Status**: explainer.
4 |
5 | Addition to CanvasPath that allows users to render rectangles with rounded corners.
6 |
7 |
8 | Rationale
9 | ---------
10 |
11 | Almost all 2D APIs support a roundrect primitive.
12 |
13 | Even though, theoretically, one could reproduce this with existing path functions, it's hard to get it right (with unusual radius values) and performant.
14 |
15 |
16 | Proposal
17 | --------
18 |
19 | ```webidl
20 |
21 |
22 | interface mixin CanvasPath {
23 | // all doubles are unrestricted.
24 | void roundRect(unrestricted double x, unrestricted double y, unrestricted double w, unrestricted double h, sequence<(unrestricted double or DOMPoint)> radii);
25 | };
26 | ```
27 |
28 | `radii` specifies the radii of the corners. Each corner is represented by a single radius.
29 |
30 | If `radii.length == 1` then all 4 corners have the same radius.
31 |
32 | If `radii.length == 2` then the first value applies to the top-left and bottom-right corners, and the second value applies to the top-right and bottom-left corners.
33 |
34 | If `radii.length == 3` then the first value applies to the top-left corner, the second value applies to the top-right and bottom-left corners, and the third value applies to the bottom-right corner.
35 |
36 | If `radii.length == 4` then each corner is specified, in order: top-left, top-right, bottom-right, bottom-left.
37 |
38 | If `w` and `h` are both greater than or equal to 0, or if both are smaller than 0, then the primitive is drawn clockwise. Otherwise, it is drawn counterclockwise.
39 |
40 | When `w` is negative, the rounded rectangle is flipped horizontally, which means that the radius values that apply to the left corners are actually used on the right, and vice versa. Similarly, when `h` is negative, the rounded rect is flipped vertically.
41 |
42 | When a value `r` in `radii` is a `DOMPoint`, the corresponding corner(s) are drawn as elliptical arcs whose x and y radii are equal to `r.x` and `r.y`, respectively.
43 |
44 | When a value `r` in `radii` is a `double`, the corresponding corner(s) are drawn as circular arcs of radius `r`.
45 |
46 | When the sum of the radii of two corners of the same edge is greater than the length of the edge, then all the radii of the rounded rectangle are scaled by a factor of len/(r1+r2). If multiple edges have this property, the scale factor of the edge with the smallest scale factor is used. This is consistent with CSS behaviour.
47 |
48 | If a value in `radii` is a negative number, then roundRect() throws an `IndexSizeError` DOM Exception.
49 |
50 | If a value in `radii` is a `DOMPoint` whose `x` or `y` attributes are negative numbers, then roundRect() throws an `IndexSizeError` DOM Exception.
51 |
52 | If any of `x`, `y`, `width` or `height` are non-finite numbers, or if a value in `radii` is a non-finite number, or if a value in `radii` is a `DOMPoint` whose `x` or `y` attributes are non-finite numbers, then roundRect() aborts without throwing an exception and without adding anything to the current path.
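A brief sketch of per-corner and elliptical radii (hedged; it simply follows the semantics listed above):

```js
ctx.beginPath();
// Four circular radii: top-left, top-right, bottom-right, bottom-left.
ctx.roundRect(10, 10, 100, 60, [5, 10, 15, 20]);
// A single DOMPoint radius: every corner becomes an elliptical arc with rx = 20, ry = 10.
ctx.roundRect(130, 10, 100, 60, [new DOMPoint(20, 10)]);
ctx.fill();
```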
53 |
54 |
55 | ### Open issues and questions
56 |
57 | - is `sequence` better than explicit list of radii?
58 | - support integer as an optional parameter
59 | - what happens to other radius lengths?
60 |
61 |
62 | Example usage
63 | -------------
64 |
65 | ```js
66 | // Javascript example
67 | const canvas = document.createElement('canvas');
68 | const ctx = canvas.getContext('2d');
69 |
70 | ctx.beginPath();
71 | ctx.roundRect(10, 10, 50, 50, [10]);
72 | ctx.fill();
73 |
74 | ```
75 |
76 | Alternatives considered
77 | -----------------------
78 |
79 | ### DOMRoundRect
80 |
81 | Specify a `DOMRoundRect` object and make functions `*RoundRect(DOMRoundRect obj)`. It seems a bit at odds with the rest of Canvas2D APIs.
82 |
83 | ### RoundRect as standalone drawing primitive
84 |
85 | To make RoundRect more quickly accessible, it could exist on its own like fillRect and strokeRect:
86 |
87 | ```webidl
88 | interface mixin CanvasRoundRect {
89 | // all doubles are unrestricted.
90 | void clearRoundRect(double x, double y, double w, double h, sequence<(double or DOMPoint)> radius);
91 | void fillRoundRect(double x, double y, double w, double h, sequence<(double or DOMPoint)> radius);
92 | void strokeRoundRect(double x, double y, double w, double h, sequence<(double or DOMPoint)> radius);
93 | };
94 | ```
95 |
96 | Producing code like:
97 | ```js
98 | // Javascript example
99 | const canvas = document.createElement('canvas');
100 | const ctx = canvas.getContext('2d');
101 |
102 | ctx.fillRoundRect(10, 10, 50, 50, [10]);
103 | ctx.fillRoundRect(100, 10, 50, 50, [10, 10, 0, 10]);
104 | ```
105 |
106 | This has the benefit of producing one-liner versions of round rects, but does not allow the user to interact with `clip`, `isPointIn...`, `scrollToPath`. It would also make it impossible to draw multiple round rects with a single draw command.
107 |
108 | References
109 | ----------
110 |
111 | None.
112 |
--------------------------------------------------------------------------------
/spec/shaders.md:
--------------------------------------------------------------------------------
1 | WebGPU Shaders
2 | =======
3 | **Status**: explainer.
4 |
5 | As a follow up to [WebGPU Interop](webgpu.md), we want to design a clean way
6 | to use WebGPU fragment shaders as Canvas2D filters.
7 |
8 | In theory, the interop primitives could allow this to be polyfilled, but
9 | encapsulating the shaders as filter objects would allow them to be used on layers,
10 | which have much better ergonomics as well as potentially better optimization on
11 | some architectures.
12 |
13 |
14 | Proposal
15 | --------
16 |
17 | ```webidl
18 |
19 | interface mixin CanvasShaderCreation {
20 | CanvasShader? createCanvasShader({
21 | DOMString WGSLCode,
22 | GPUBlendState? blendState,
23 | GPUSamplerDescriptor? sampler,
24 | sequence<GPUFeatureName>? features,
25 | record<DOMString, GPUSize64>? limits = {};
26 | });
27 | };
28 |
29 | CanvasRenderingContext2D includes CanvasShaderCreation;
30 | OffscreenCanvasRenderingContext2D includes CanvasShaderCreation;
31 |
32 | [Exposed=(Window,Worker)] interface CanvasShader {
33 | };
34 |
35 | typedef record<DOMString, any> CanvasFilterPrimitive;
36 | ```
37 |
38 | `createCanvasShader` allows the creation of a filter that encapsulates a WGSL shader,
39 | with some limitations. Namely:
40 |
41 | - there's only one binding group (0), which is the default. Uniforms, textures and samplers are defined and used in this binding group.
42 | - there's only one entry point.
43 | - there's always a texture available for sampling with the layer content.
44 | - extra textures and samplers can be passed as uniforms on filter usage.
45 |
46 | ### Open issues and questions
47 |
48 |
49 | Example usage
50 | -------------
51 |
52 | ```js
53 | // Use a WebGPU shader to draw a red square.
54 | const canvas = document.createElement('canvas');
55 | const ctx = canvas.getContext('2d');
56 |
57 | const gpufilter = ctx.createCanvasShader(`
58 | const width : f32 = 100;
59 |
60 | @filter
61 | fn MyFilter(texcoord: vec2<f32>) -> vec4<f32> {
62 |   return vec4<f32>(1.0, 0.0, 0.0, 1.0);
63 | }
64 | `);
65 |
66 | ctx.beginLayer({filter: gpufilter, width: 100});
67 | ctx.fillRect(10, 10, 100, 100);
68 | ctx.endLayer();
69 |
70 | ```
71 |
72 |
73 | References
74 | ----------
75 |
76 | - [WebGPU spec](https://gpuweb.github.io/gpuweb)
77 |
--------------------------------------------------------------------------------
/spec/text-modifiers.md:
--------------------------------------------------------------------------------
1 | Text Modifiers
2 | ==============
3 | **Status**: explainer.
4 |
5 | Currently, some CSS text rendering properties can't be used for canvas text.
6 | Some browsers do support setting them on the style object of an attached
7 | canvas element. This spec brings those properties into canvas.
8 |
9 | These should be trivial to implement, as they are already supported by CSS.
10 |
11 |
12 | Rationale
13 | ---------
14 |
15 | There are many applications that want to use more modern text parameters that browsers currently support but that are not available on Canvas.
16 |
17 | Many web apps (from Adobe, Microsoft, Google) are looking forward to this feature.
18 |
19 |
20 | Proposal
21 | --------
22 |
23 | ```webidl
24 | interface mixin CanvasTextDrawingStyles {
25 | // current values (font, textAlign, textBaseline, direction)...
26 |
27 | // new values
28 | attribute DOMString letterSpacing; // CSS letter-spacing
29 | attribute DOMString wordSpacing; // CSS word-spacing
30 | attribute DOMString fontVariantCaps; // CSS font-variant-caps
31 | attribute DOMString fontKerning; // CSS font-kerning
32 | attribute DOMString fontStretch; // CSS font-stretch
33 | attribute DOMString textRendering; // CSS text-rendering
34 | };
35 | ```
36 |
37 | Those properties behave similarly to other text properties on Canvas and have
38 | the same effect as their CSS equivalents.
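A hedged sketch exercising all six properties (the values are their usual CSS keywords):

```js
ctx.font = '24px serif';
ctx.letterSpacing = '2px';                 // CSS letter-spacing
ctx.wordSpacing = '4px';                   // CSS word-spacing
ctx.fontKerning = 'none';                  // CSS font-kerning
ctx.fontStretch = 'condensed';             // CSS font-stretch
ctx.fontVariantCaps = 'small-caps';        // CSS font-variant-caps
ctx.textRendering = 'optimizeLegibility';  // CSS text-rendering
ctx.fillText('Typography on canvas', 10, 40);
```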
39 |
40 | ### Open issues and questions
41 |
42 | - should we normalize the prefix to be always `text` instead of `text` and `font`?
43 |
44 | Example usage
45 | -------------
46 |
47 | ```js
48 | // Javascript example
49 | const canvas = document.createElement('canvas');
50 | const ctx = canvas.getContext('2d');
51 |
52 | ctx.letterSpacing = "3px";
53 | ctx.fontVariantCaps = "all-small-caps";
54 |
55 | ```
56 |
57 | Alternatives considered
58 | -----------------------
59 |
60 | None.
61 |
62 |
63 | References
64 | ----------
65 |
66 | None.
67 |
--------------------------------------------------------------------------------
/spec/webText-background.md:
--------------------------------------------------------------------------------
1 | # WebText
2 |
3 | **Status**: explainer
4 |
5 | ## Rationale
6 | The 2D canvas exposes limited layout information – text can be measured and
7 | rendered as a single line only, optionally compressed to fit within a certain
8 | width. If developers want to display a paragraph of text (e.g. for a Google
9 | Docs-like app), they need to manage the exact (x, y) location of the start of each
10 | line and handle the line break at the end of each line. Layout needs to be
11 | recalculated after each insertion and deletion. This makes rendering
12 | paragraphs with canvas very hard.
13 |
14 | The 2D canvas offers limited text placement information (via measureText), so
15 | apps that require accurate cursor placement in text with kerning and ligatures
16 | cannot be implemented perfectly. For example, kerning on the "/"
17 | character results in inaccurate cursor placement over the word "google":
18 |
19 | 
20 | *wrong cursor placement in google *
21 | ## Use Case
22 | 1. Use case: multiple line text placement (P0)
23 | Given a text element with desired styles applied, Canvas can generate a
24 | multi-line paragraph with a reference coordinate (x, y) and a given width.
25 |
26 | Note: It's the users’ responsibility to ensure rendered text is properly
27 | constrained to fit in the space provided by adjusting font-size, line width,
28 | line-spacing, etc., on the input text objects.
29 |
30 | 1. Use Case: grapheme info (P0)
31 | Cursor placement: Users are able to place the cursor precisely after each
32 | grapheme (solving the Google.com example above).
33 |
34 | Selection placement: Similarly, when users select a block of text, the
35 | bounding box is placed correctly.
36 |
37 | Highlight placement: Users are able to implement text highlighting for a
38 | specific block of text in the layout (example: search for a phrase in the
39 | paragraph)
40 |
41 | 1. Use Case: customized per-line lengths (P1)
42 | Users can specify the constraints per-line, such as line width, line spacing.
43 | This is useful to render images that flow with the text. (example)
44 |
45 | 1. Use Case: text style edit (P2)
46 | Text addition and deletion: Users can add or remove graphemes at the start,
47 | middle or end of the paragraph (layout).
48 |
49 | Edit paragraph style (example style: font): Users can select text in a
50 | paragraph and change its style. The change of layout of the paragraph should
51 | not cause any change in text styles.
52 |
53 | Move: Users can select some text and move it to a different place in the
54 | paragraph, where the moved text keeps its original style.
55 |
56 | Click: If a point on the layout is clicked, the API allows the users to find
57 | which grapheme was clicked.
58 |
59 | Multiple paragraph changes: Users can select one or more lines from one or
60 | more span or div and change the style (font, size, color, highlight, underline,
61 | etc).
62 |
63 | Link: Users can make a selection of text and add a link to it, and follow the
64 | link if the bounding box of the text is clicked.
65 |
66 |
67 | ## Background
68 | ### Terminology:
69 | **Glyph**: A glyph is a specific shape, design, or representation of a
70 | character, which is stored as an image in a font. More than one glyph may be used
71 | to draw a single symbol – for example, the symbol “é” may be drawn by combining
72 | the glyph “e” with the glyph “´”.
73 |
74 | **Code points** (UTF-16): Code points are the atomic parts of Unicode text. Most
75 | of them fit into one JavaScript character (ex: letter ‘A’), some of them occupy
76 | two (ex: emojis). The latter is because they are not in the basic multilingual
77 | plane of Unicode, so they take more than 16 bits to encode. UTF-16 handles
78 | this by using surrogate pairs, that is, two 16-bit code units that can be
79 | combined to get a higher value Unicode code point. (ex: 🙂 \ud83d \ude42)
80 |
81 | ```javascript
82 | '🙂'.length => 2
83 | '🙂'.charCodeAt(0).toString(16) => 'd83d'
84 | '🙂'.charCodeAt(1).toString(16) => 'de42'
85 | ```
86 |
87 | **Grapheme cluster** (user-perceived characters): represents the written symbol,
88 | as displayed on screen or paper. It is the smallest visual unit of a writing
89 | system. For languages like English, the terms grapheme, letter
90 | and glyph are interchangeable. For emojis, each emoji is a grapheme cluster.
91 | Sometimes, one or more code points are needed to encode a single grapheme
92 | cluster (see below).
93 |
94 | | Grapheme | A | 👨👩👧 | क्षि |
95 | |----------|---|----|----|
96 | | Code point | U+0041 | U+1F468, U+200D, U+1F469, U+200D, U+1F467| U+0915, U+094D, U+0937, U+093F |
97 | | Meaning | A | family: man, woman, girl | क, ् , ष , ि
98 |
99 | Users are expected to see cursors move by one grapheme cluster at a time,
100 | selections start and end at grapheme cluster boundaries, and pressing backspace
101 | once deletes the last grapheme cluster even if it took several keystrokes to
102 | enter (e.g., À takes A plus the accent; users don’t expect the accent to get deleted
103 | while the A remains after pressing backspace).
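A minimal sketch of splitting text into grapheme clusters with the built-in `Intl.Segmenter` (the locale is left undefined so mixed-language text still segments):

```javascript
const segmenter = new Intl.Segmenter(undefined, { granularity: 'grapheme' });
const clusters = [...segmenter.segment('À👨‍👩‍👧')].map(s => s.segment);
// clusters => ['À', '👨‍👩‍👧'] — two grapheme clusters, even though the string holds many more code units.
```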
104 |
105 | With combining marks, grapheme clusters can exceed the line height. Consider
106 | the following extreme example: 5 grapheme clusters (Z, A, L, G, O) encoded with 58
107 | JavaScript code units.
108 |
109 |
110 | Z͑ͫ̓ͪ̂ͫ̽͏̴̙̤̞͉͚̯̞̠͍A̴̵̜̰͔ͫ͗͢L̠ͨͧͩ͘G̴̻͈͍͔̹̑͗̎̅͛́Ǫ̵̹̻̝̳͂̌̌͘
111 |
112 | ## Kerning
113 | Kerning is a typographic technique used to create visually appealing and
114 | readable text by adjusting the space between certain letter pairs. In the
115 | example below, without kerning, blindly applying the same letter spacing to
116 | the letters "AW" leaves an awkwardly large space, while the same
117 | spacing crowds “WE”. Correcting this requires adjusting the
118 | placement of each character, overlapping or spreading out their bounding
119 | regions depending on the context of adjacent characters.
120 |
121 | 
122 | *AWE in kerning *
123 |
124 | In canvas, metrics such as width are available for individual letters or
125 | words, but no information is available about the layout after kerning is applied. This is
126 | the main cause of the wrong cursor position in the http://google.com example
127 | above.
128 |
--------------------------------------------------------------------------------
/spec/webText-data-model.md:
--------------------------------------------------------------------------------
1 | # WebText - Data Model
2 |
3 | **Status**: explainer
4 |
5 | ## Creation
6 | Text is hard. Luckily, the browser has already solved this problem as indicated
7 | above. Canvas can leverage the existing browser capabilities for rendering
8 | multi-line formatted text. This solves use cases 1, 3 and 4 (partially). We
9 | can then add metrics incorporating grapheme clusters to solve the cursor
10 | placement issues (use cases 2 and 4). With this solution, the new metrics
11 | will be available for other components of the web platform to use as well.
12 |
13 | Note: It's the users’ responsibility to ensure rendered text is properly constrained
14 | to fit in the space provided by adjusting font-size, line width, line-spacing,
15 | etc., on the input text objects.
16 |
17 | ```javascript
18 | // Creation
19 | const g = document.createElement("div");
20 | g.style.textAlign = "center";
21 | g.style.fontFamily = "Arial, sans-serif";
22 | g.innerText = "Hello 👪";
23 |
24 | // Users can:
25 | // 1. set up a container box for display for the div element
26 | // 2. set up a viewbox for text to render temporary to and a container box for the result display on the screen.
27 | ctx.drawElement(divElement, dx, dy, dWidth, dHeight)
28 | ctx.drawElement(divElement, vx, vy, vWidth, vHeight, dx, dy, dWidth, dHeight)
29 |
30 | canvas = document.createElement("canvas");
31 | ctx = canvas.getContext('2d');
32 |
33 | ctx.canvas.width = 100;
34 | ctx.canvas.height = 50;
35 | ctx.drawElement(g, 10, 10, 100, 20); // everything fits in nicely.
36 |
37 | ctx.canvas.width = 20; // change canvas size to 20x50.
38 | // viewbox: 0, 0, 100x20, output box 10, 0, 20x10. So "Hello 👪" is scaled down to fit in 20x10.
39 | ctx.drawElement(g, 0, 0, 100, 20, 10, 0, 20, 10);
40 | ```
41 |
42 | ### Create paragraphs with variable length
43 | Text blocks need to be flexible, so that users can insert images, animations or
44 | videos in between the text. The following is an example of having a picture
45 | floating on the left.
46 |
47 | 
48 | *Example of paragraphs with variable length*
49 |
50 | ```javascript
51 | const g = document.createElement("div");
52 | g.style.textAlign = "center";
53 | g.style.fontFamily = "Arial, sans-serif";
54 | g.innerText = "text and more text ...";
55 |
56 | // Set up image
57 | const img = document.createElement("img");
58 | img.src = "images/dog.png"; // Set the image source
59 | img.style.width = "100px"; // Example width
60 | img.style.height = "auto"; // Keep aspect ratio
61 | img.style.float = "left"; // Float to the left of the text
62 | g.appendChild(img);
63 |
64 | canvas = document.createElement("canvas");
65 | ctx = canvas.getContext('2d');
66 |
67 | ctx.canvas.width = 300;
68 | ctx.canvas.height = 3000;
69 | ctx.drawElement(g, 10, 10, 100, 20);
70 | ```
71 |
72 | ### Special case: Inconsistent style over a grapheme cluster
73 | If the user forces a grapheme cluster to have two styles by applying 2
74 | different styles to its glyphs, the grapheme cluster should still be
75 | rendered with one uniform style: the style the glyphs start with.
76 | For example, the symbol “क्षि” in the example above is drawn by combining
77 | the four glyphs क, ् , ष and ि. A user could force the first two glyphs ‘क’
78 | and ‘ ् ’ to have one style (e.g. bold, size 12) and force the last two glyphs ‘ष’ and
79 | ‘ ि’ to have a different style (e.g. size 10). The resulting grapheme,
80 | क्षि, should follow the first style (bold, size 12).
81 |
82 | ```javascript
83 | // Existing code
84 | const g = document.createElement("div");
85 | g.style.textAlign = "center";
86 | g.style.fontFamily = "Arial, sans-serif";
87 |
88 | // Create a span for the first two glyphs (क and ्)
89 | const firstPart = document.createElement("span");
90 | firstPart.innerText = "\u0915\u094D";
91 | firstPart.style.fontWeight = "bold";
92 | firstPart.style.fontSize = "12px";
93 |
94 | const secondPart = document.createElement("span");
95 | secondPart.innerText = "\u0937\u093F";
96 | secondPart.style.color = "blue";
97 | secondPart.style.fontSize = "10px";
98 |
99 | g.appendChild(firstPart); g.appendChild(secondPart);
100 | document.body.appendChild(g);
101 |
102 | // क्षि should be rendered bold, black, 12px (the style of the first glyphs).
103 | ```
104 |
105 | ## Edit
106 | This section describes how insertion and deletion are done.
107 |
108 | ### Insertion
109 | Text insertion works the same way it is currently done with HTML.
110 |
111 | ``` javascript
112 | // Initial set up:
113 | const g = document.createElement("div");
114 | g.style.textAlign = "center";
115 | g.style.fontFamily = "Arial, sans-serif";
116 | g.innerText = "Hello 👪";
117 |
118 | // Add more text to g.innerText:
119 | g.innerText += ", welcome.";
120 |
121 | // Add more text with different style
122 | const additionalText = document.createElement("span");
123 | additionalText.style.color = "blue"; // different style
124 | additionalText.style.fontWeight = "bold";
125 | additionalText.innerText = " More styled text!";
126 |
127 | // Append the new element to the div
128 | g.appendChild(additionalText);
129 |
130 | ```
131 | ### Deletion
132 | The API should support 2 deletion modes:
133 | - Delete by grapheme clusters
134 | - Delete by glyphs
135 |
136 | Delete by grapheme clusters:
137 | Consider graphemes like emojis, e.g. 👨👩👧👧 (U+1F468 U+200D U+1F469 U+200D
138 | U+1F467 U+200D U+1F467, which means "family: man, woman, girl, girl"), or letters
139 | with accents, e.g. Ç (C U+0043 + ̧ U+0327); the “backspace” key deletes the
140 | entire grapheme cluster. With the example above, the emoji 👨👩👧👧 is removed
141 | instead of turning into 👨👩👧 (deleting one girl); the letter Ç is removed instead
142 | of turning into C.
143 |
144 | Delete by glyphs:
145 | Consider languages like Sanskrit, where backspace deletes glyphs instead of graphemes.
146 | This is because new grapheme clusters can be created by alternating glyphs.
147 | Example: किमपि (pronunciation: kimapi, meaning: something) is made of 3
148 | grapheme clusters, e.g. (ka, i), (ma), (pa, i). In the ideal implementation
149 | (the implementation on Android and Mac), backspace deletes the last glyph
150 | (i.e., i), so users can insert another vowel to make a new grapheme cluster,
151 | e.g. (pa, u) instead of (pa, i). In this example, किमपि takes 5 backspaces to delete
152 | completely:
153 | किमपि→किमप(remove i)→किम(remove pa)→कि(remove ma)→क(remove i)→’’ (remove ka).
154 |
155 | However, with the same language, Sanskrit, the delete key (delete after the
156 | cursor) behaves differently. Delete deletes the whole grapheme cluster. This is
157 | because the grapheme clusters cannot start with a vowel. With the example
158 | above, it would take 3 deletes to delete all: किमपि→मपि→पि→’’.
159 |
160 | *Developer notes: Emojis should always be deleted as a grapheme cluster. For
161 | grapheme clusters of regular characters, I think we can use utf-8 as a standard.
162 | The main difference between the two grapheme clusters is that Ç is a single
163 | precomposed code point (U+00C7), while grapheme clusters like ‘पि’, ‘म’ or ‘कि’ are
164 | not.*
165 |
166 | ```javascript
167 | // Pseudocode: handle a backspace hit on grapheme1.
168 | const emojiRegex = /\p{Emoji}/u; // Uses the built-in JavaScript Unicode emoji property. By checking unicode ranges for emojis, a simplified version for C++ could be:
169 | // std::wregex emojiRegex(L"[\\x{1F600}-\\x{1F64F}]");
170 |
171 | if (emojiRegex.test(grapheme1)) { // it's an emoji: delete the whole grapheme cluster
172 |   grapheme1.destroy();
173 | } else if (grapheme1.length() > 1) {
174 |   return grapheme1.substring(0, grapheme1.length() - 1); // remove the last glyph
175 | } else {
176 |   grapheme1.destroy(); // single-glyph grapheme: remove it entirely
177 | }
180 | ```
181 |
182 | ### Change style
183 | This section describes how to change styles for text on the screen.
184 |
185 |
186 | ``` javascript
187 | // Existing code
188 | const g = document.createElement("div");
189 | g.style.textAlign = "center";
190 | g.style.fontFamily = "Arial, sans-serif";
191 |
192 | // Create a span for 'Hello 👪'
193 | const helloText = document.createElement("span");
194 | helloText.innerText = "Hello 👪";
195 | // Apply initial styles to 'Hello 👪'
196 | helloText.style.color = "red"; // Example style
197 |
198 |
199 | const additionalText = document.createElement("span");
200 | additionalText.style.color = "blue";
201 | additionalText.style.fontWeight = "bold";
202 | additionalText.innerText = " More styled text!";
203 |
204 | g.appendChild(additionalText);
205 | document.body.appendChild(g);
206 |
207 | // Now change style for helloText
208 | helloText.style.color = "green"; // Change color
209 | helloText.style.fontSize = "24px"; // Change font size
210 |
211 | // Now change style for additionalText.
212 | additionalText.style.color = "green"; // Change color to green
213 | additionalText.style.fontSize = "20px"; // Change font size
214 | ```
--------------------------------------------------------------------------------
/spec/webText-metrics.md:
--------------------------------------------------------------------------------
1 | # WebText - Metrics
2 |
3 | **Status**: explainer
4 |
5 | Metrics for the text element, so that users are able to place the cursor
6 | precisely after each grapheme cluster (this solves the Google.com example
7 | above). Note that I use grapheme instead of grapheme cluster in the code
8 | for simplicity.
9 |
10 | ## Proposal #1
11 | ### Background
12 | The cursor placement issue is already solved in the browser: when users search
13 | for a specific phrase on the web, the browser needs to know how to place the
14 | range, where to place the beginning and the ending caret so that the result is
15 | highlighted properly. To achieve that, browsers support functions such as
16 | [caretPositionFromPoint (x, y)](https://drafts.csswg.org/cssom-view/#dom-document-caretpositionfrompoint)
17 | to place the caret, and which returns a
18 | [CaretPosition](https://drafts.csswg.org/cssom-view/#caretposition) object
19 | containing information like
20 | [getClientRect()](https://drafts.csswg.org/cssom-view/#dom-caretposition-getclientrect)
21 | for the exact caret position. For selections, browsers support functions like
22 | [window.getSelection()](https://developer.mozilla.org/en-US/docs/Web/API/Window/getSelection),
23 | then users can call functions like selection.getRangeAt(0) to get the range;
24 | with this information, users can then get the start and the end caret location.
25 |
26 | This new API should take advantage of these existing functions to place the
27 | cursors properly.
28 |
29 | ### Implementation Details
30 | Examples of precise cursor placement, text selection, horizontal and vertical
31 | cursor moves:
32 | ``` javascript
33 | // When users click on (x, y) and it's within the text boundary
34 | function getCursorPosition(x, y) {
35 |   const position = document.caretPositionFromPoint(x, y);
36 |   return position.getClientRect();
37 | }
38 |
39 | // When users selected some text, getting the start and the ending caret position:
40 | const selection = window.getSelection();
41 | const range = selection.getRangeAt(0);
42 | // start caret:
43 | range.collapse(true); // Collapse to start
44 | const startRect = range.getClientRects()[0];
45 | // end caret:
46 | range.collapse(false); // Collapse to end
47 | const endRect = range.getClientRects()[0];
48 |
49 | // Horizontal moves:
50 | // user clicked on (x, y) and moves to the right
51 | index = caretPositionFromPoint(x, y).offset;
52 | function getPreviousGrapheme(text, codePointIndex) {
53 |   // Create a segmenter for grapheme clustering. The text could mix different languages, so the locale is left undefined.
54 | const segmenter = new Intl.Segmenter(undefined, { granularity: 'grapheme' });
55 | const segments = segmenter.segment(text);
56 |
57 | let currentLength = 0;
58 | let lastSegmentLength = 0;
59 |
60 | for (const segment of segments) {
61 | if (currentLength + segment.segment.length >= codePointIndex) {
62 | break;
63 | }
64 | currentLength += segment.segment.length;
65 | lastSegmentLength = segment.segment.length;
66 | }
67 |
68 | return lastSegmentLength;
69 | }
70 | // Then move the caret to the previous grapheme:
71 | const newCodePointIndex = index - getPreviousGrapheme(text, index);
72 |
73 | // Set the new caret position
74 | const range = document.createRange();
75 | range.setStart(element, newCodePointIndex);
76 | range.collapse(true);
77 |
78 | const selection = window.getSelection();
79 | selection.removeAllRanges();
80 | selection.addRange(range);
81 |
82 | // Vertical Moves:
83 | function moveCaretUpDown(direction) {
84 | const selection = window.getSelection();
85 | if (!selection.rangeCount) return;
86 |
87 | const range = selection.getRangeAt(0);
88 | const rects = range.getClientRects();
89 | if (rects.length === 0) return;
90 |
91 | const lineHeight = parseInt(getComputedStyle(range.startContainer.parentElement).lineHeight);
92 | const currentRect = rects[0];
93 | const newX = currentRect.left;
94 | const newY = direction === 'up' ? currentRect.top - lineHeight : currentRect.bottom + lineHeight;
95 |
96 | const newCaretPos = document.caretPositionFromPoint(newX, newY);
97 | return newCaretPos;
98 | }
99 |
100 | // Then we can use the same technique to draw the new cursor.
101 | ```
102 |
103 | ## Alternative considered
104 | ``` javascript
105 | //Properties for text element: (for text in the same span)
106 | start_location; // (x, y)
107 | lines[]; // line iterator
108 | line_spacing; // the space between 2 lines
109 | getPosition(grapheme, index); // takes an index and finds the position of the next grapheme matching the given one
110 | getGraphemeAtPoint(x,y) // returns the grapheme object at (x, y). If (x, y) is in between 2 grapheme boxes, then return the one after it.
111 | getGrapheme(index) // return the grapheme object at the given index.
112 | getSetting() // the settings used for this span, font, font-weight, font-size, textAlignment, etc.
113 | getIndex(Grapheme) // return the index of the given grapheme
114 |
115 | // Properties for line object:
116 | start_location; // (x, y)
117 | bounding_box; // The bounding box for the line object
118 | grapheme_iter; // grapheme iterator.
119 | next(); // returns the line object after this line (line below for text written in horizontal direction).
120 | previous(); // returns the line object before this line (line above for text written in horizontal direction).
121 | getGraphemeAtPosition(x, y); // returns the grapheme at position (x, y). If (x, y) is between 2 graphemes (ex: the overlap area in the kerning example) return the first grapheme.
122 | getGraphemePosition(textValue, int index); // returns the position of the first grapheme matching the text value after index.
123 | getLineNumber() // return an int represent the line number in the text.
124 |
125 | // Properties for grapheme object:
126 | start_location; // (x, y)
127 | bounding_box; // The bounding box of the grapheme
128 | getCodePoints(); // Returns the inner text of the grapheme, a single letter for English
129 | next(); // returns the grapheme object after this grapheme (the one to the right in ltr text).
130 | previous(); // returns the grapheme object before this grapheme (the one to the left in ltr text)
131 | isEmoji(); // returns a boolean value specifies if the grapheme is an emoji
132 | length(); // returns the number of code points in this grapheme. क्षि in the example above would return 4, क, ् , ष , ि
133 | getSetting() // inherit the setting from text.
134 | line* line // a reference to the line object
135 |
136 | // General API:
137 | // the existing textmetrics object should be available in the new API as well.
138 | measureText(grapheme[]) // returns the text metrics object for the list of graphemes.
139 | ```
140 |
141 | *Developer notes: the property list above doesn’t need to be built for the entire text object. After receiving a request for location (x, y), we can use measureText() or some other function to estimate for which line we need to build the grapheme property map.*
142 |
143 | *Developer notes: Split grapheme from glyphs is needed in many different
144 | scenarios. Some programming languages offer options to split by grapheme
145 | cluster, like [swift](https://docs.swift.org/swift-book/documentation/the-swift-programming-language/stringsandcharacters/);
146 | and some standalone tools exist as well. I found this implementation on
147 | github to split graphemes for a set of glyphs.
148 | [Grapheme splitter](https://github.com/orling/grapheme-splitter/tree/master)*
149 |
150 | ### Usage example
151 | #### Grapheme
152 | ``` javascript
153 | const g = document.createElement("div");
154 | g.innerText = "Hello 👪, क्षि";
155 |
156 | // properties for 'H'
157 | start_location: (0,10)
158 | bounding_box: 0, 10, 10x10
159 | getTextValue() -> H
160 | next(); -> grapheme object for 'e'
161 | previous(); -> null
162 | isEmoji(); -> false
163 | length(); -> 1
164 | line: line("Hello 👪, क्षि")
165 | getSetting() -> default setting, Arial, font 10px, ...
166 |
167 | // properties for '👪'
168 | start_location: (60,10)
169 | bounding_box: 60, 10, 10x10
170 | getTextValue() -> \ud83d, \udc6a
171 | next(); -> grapheme object for ','
172 | previous(); -> grapheme object for ' '
173 | isEmoji(); -> true
174 | length(); -> 2
175 | line: line("Hello 👪, क्षि")
176 | getSetting() -> default setting, Arial, font 10px, ...
177 |
178 | // properties for 'क्षि'
179 | start_location: (90,10)
180 | bounding_box: 90, 10, 10x10
181 | getTextValue() -> क ् ष ि
182 | next(); -> null
183 | previous(); -> grapheme object for ' '
184 | isEmoji(); -> false
185 | length(); -> 4
186 | line: line("Hello 👪, क्षि")
187 | getSetting() -> default setting, Arial, font 10px, ...
188 | ```
189 | #### Line
190 | ``` javascript
191 | const g = document.createElement("div");
192 | g.innerText = "Hello 👪, क्षि";
193 |
194 | start_location: (0, 10);
195 | bounding_box:(0, 10, 100x10);
196 | previous() -> null;
197 | next() -> null;
198 | graphme[] = grapheme('H') // points to H to start the iteration;
199 | getGraphemeAtPosition(x, y); // getGraphemeAtPosition(15, 5): return e (as a grapheme object)
200 | getGraphemePosition(textValue, int index); // getGraphemePosition('l', 3): return l (the grapheme object for second l).
201 | getLineNumber(): 0
202 | ```
203 |
204 | ### Cursor Position
205 | A cursor is a reference to an inter-grapheme cluster point. Horizontally, the
206 | cursor position always aligns with the x coordinate for the end of the previous
207 | grapheme cluster; and vertically, the cursor position aligns with the height of
208 | the font bounding box of the grapheme before it (as shown in the image below,
209 | the cursor height changes when it moves from ‘Hello’ to ‘world’). For kerning
210 | cases, horizontally, the cursor position is in the middle of the overlapped
211 | area, i.e., if one grapheme’s bounding box ends at x = a horizontally and the next one’s
212 | bounding box starts at x = b, then the cursor is at (a + b)/2 (as
213 | shown in the picture below, the cursor between ‘A’ and ‘W’.)
214 |
215 | 
216 | *Cursor Position *
217 |
218 | When the cursor moves, it should follow these rules: the cursor moves
219 | horizontally (right or left) by the width of the next grapheme cluster, and
220 | moves vertically (up or down) by the height of the line plus the line spacing
221 | between the two lines.
222 |
223 | 
224 | *Vertical for cursor *
225 |
226 | The following are examples of how web developers could use our API to solve the
227 | use cases we have above:
228 |
229 | ``` javascript
230 | // Cursor properties:
231 | Cursor {
232 | x; // x position
233 | y; // y position
234 | height; // height of the cursor
235 | width; // width of the cursor
236 | Grapheme grapheme_before;
237 | Grapheme grapheme_after;
238 | }
239 |
240 |
241 | const g = document.createElement("div");
242 | g.innerText = "Hello 👪, क्षि";
243 |
244 | // find the cursor position when the user clicks somewhere on the screen.
245 | // assuming the click is at position (35, 5):
246 | function getCursorAtPosition(x, y) {
247 | current_grapheme = getGraphemeAtPoint(x, y); // with (35, 5), it should return 'l'
248 | next_grapheme = current_grapheme.next(); // with (35, 5), it should be 'o'
249 | // find the horizontal cursor position
250 | cursor_x = (current_grapheme.x + current_grapheme.width + next_grapheme.x)/2;
251 | // find the vertical cursor position
252 | cursor_y = measureText(current_grapheme).fontBoundingBoxAscent + measureText(current_grapheme).fontBoundingBoxDescent;
253 | return Cursor(cursor_x, cursor_y);
254 | }
255 |
256 | function moveRight(cursor) {
257 | current_grapheme = getGraphemeAtPoint(cursor.x, cursor.y);
258 | next_grapheme = current_grapheme.next();
259 | return getCursorAtPosition(next_grapheme.x, next_grapheme.y);
260 | }
261 |
262 | function moveDown(cursor) {
263 | current_grapheme = getGraphemeAtPoint(cursor.x, cursor.y);
264 | location_x = cursor.x;
265 | location_y = current_grapheme.line.next().start_location.y;
266 | return getCursorAtPosition(location_x, location_y);
267 | }
268 | ```
269 | #### Insertion and deletion at cursor point
270 | Users also need to make simple edits on the text element. Note that users are
271 | responsible for re-layout after the edits.
272 | ```
273 | const g = document.createElement("div");
274 | g.innerText = "Hello 👪, क्षि";
275 |
276 | // assuming the cursor moved to between ' ' and '👪'. Users want to insert 'family'
277 | index = getIndex('👪');
278 | text = g.innerText;
279 | let newText = text.slice(0, index) + 'family' + text.slice(index);
280 | g.innerText = newText;
281 | ```
282 |
283 | ### Selection
284 | A selection contains two cursors, start and end, which can be used to modify
285 | text. Selections may contain text from different spans or divs. Implementations
286 | can create, break, or merge spans as appropriate.
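A small sketch of reading the two cursors of a selection with existing browser APIs (hedged; this mirrors the Proposal #1 approach above):

``` javascript
const selection = window.getSelection();
if (selection.rangeCount > 0) {
  const range = selection.getRangeAt(0);
  const start = range.cloneRange();
  start.collapse(true);                        // collapse to the start cursor
  const end = range.cloneRange();
  end.collapse(false);                         // collapse to the end cursor
  const startRect = start.getClientRects()[0]; // rectangle for the start cursor
  const endRect = end.getClientRects()[0];     // rectangle for the end cursor
}
```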
287 |
288 | ## Further
289 | - Indexability
290 | - Accessibility
291 | - Vertical text metrics
292 |
293 | ## Further reading
294 | - [Text Rendering Hates You](https://faultlore.com/blah/text-hates-you/)
295 | - [Adventures in Text Rendering: Kerning and Glyph Atlases | Warp](https://www.warp.dev/blog/adventures-text-rendering-kerning-glyph-atlases)
296 | - [Breaking paragraphs into lines](http://www.eprg.org/G53DOC/pdfs/knuth-plass-breaking.pdf)
297 | - [Swift - String and Characters](https://docs.swift.org/swift-book/documentation/the-swift-programming-language/stringsandcharacters/)
298 | - [Proposal: Intl Segmenter](https://docs.swift.org/swift-book/documentation/the-swift-programming-language/stringsandcharacters/)
299 | - [Javascript: Strings](https://exploringjs.com/impatient-js/ch_strings.html)
300 | - [W3C: Text-Processing](https://www.w3.org/TR/international-specs/#char_indexing)
301 | - [Unicode Text Segmentation](https://www.unicode.org/reports/tr29/)
302 | - [Browser Rendering pipeline](https://webperf.tips/tip/browser-rendering-pipeline/)
--------------------------------------------------------------------------------
/spec/webgpu.md:
--------------------------------------------------------------------------------
1 | WebGPU Transfer
2 | =======
3 | **Status**: explainer.
4 |
5 | This proposal tries to create better interoperability between Canvas 2D and WebGPU, addressing both performance and ergonomics problems. It provides a low level primitive to transfer a canvas' backbuffer to a WebGPU texture (resetting the canvas to an empty image), and a matching primitive to bring back that texture to Canvas2D.
6 |
7 | There are two use cases in mind for this proposal:
8 |
9 | 1. Having access to text and path drawing in WebGPU.
10 |
11 | 2. Being able to apply WebGPU rendering (like filter effects, shaders, or simple rendering) in a canvas that is ultimately handled by Canvas2D.
12 |
13 |
14 | Proposal
15 | --------
16 |
17 | ```webidl
18 | dictionary Canvas2dGPUTransferOption {
19 | // This GPUDevice will be given access to the canvas' texture.
20 | GPUDevice device;
21 |
22 | // This label will be assigned to the GPUTexture returned by transferToGPUTexture.
23 | DOMString? label;
24 |
25 | // This controls the GPUTextureUsage flags of the GPUTexture returned by
26 | // transferToGPUTexture. The default value will return a canvas texture that is
27 | // usable as a render attachment, and bindable as a 2D texture.
28 | GPUTextureUsageFlags usage = 0x14; // TEXTURE_BINDING | RENDER_ATTACHMENT
29 | };
30 |
31 | [
32 | RuntimeEnabled=Canvas2dGPUTransfer,
33 | Exposed=(Window, Worker),
34 | SecureContext
35 | ] interface mixin Canvas2dGPUTransfer {
36 | [RaisesException] GPUTexture transferToGPUTexture(Canvas2dGPUTransferOption options);
37 | [RaisesException] undefined transferBackFromGPUTexture();
38 | GPUTextureFormat getTextureFormat();
39 | };
40 |
41 | CanvasRenderingContext2D includes Canvas2dGPUTransfer;
42 | OffscreenCanvasRenderingContext2D includes Canvas2dGPUTransfer;
43 | ```
44 |
45 | `transferToGPUTexture()` returns a [GPUTexture](https://gpuweb.github.io/gpuweb/#gputexture) that can be used in a WebGPU pipeline. After the function is called, the Canvas2D is returned to an empty, newly-initialized state.
46 |
47 | `transferBackFromGPUTexture()` moves the `GPUTexture` back to the canvas, preserving any changes that were made to it in the interim. The `GPUTexture` enters a destroyed state, and becomes unavailable for further use in WebGPU.
48 |
49 | An exception is raised if `transferBackFromGPUTexture()` is invoked before any calls to `transferToGPUTexture()`.
50 |
51 | It is legal to invoke drawing commands on the Canvas2D context after `transferToGPUTexture` is invoked. The `GPUTexture` from the canvas remains in a valid state and can continue to be used. However, `transferBackFromGPUTexture()` is no longer allowed once the canvas has been drawn to; invoking it will raise an exception.
52 |
53 | It is legal to invoke `transferToGPUTexture()` more than once without an intervening call to `transferBackFromGPUTexture()`. When this occurs, the previously-created GPUTexture will immediately enter a destroyed state and will no longer be usable. A new texture will be returned which holds the canvas' current contents.
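
A short sketch of these rules, assuming `ctx` and `device` are set up as in the examples below:

```js
const tex1 = ctx.transferToGPUTexture({device: device});

// Drawing on the 2D context is still allowed, and tex1 remains valid...
ctx.fillRect(0, 0, 10, 10);
// ...but transferring back is no longer permitted once the canvas has been drawn to:
// ctx.transferBackFromGPUTexture();  // would throw

// A second transfer destroys tex1 and returns a new texture holding the
// canvas' current contents.
const tex2 = ctx.transferToGPUTexture({device: device});
```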
54 |
55 | Polyfill for the current proposal [here](../webgpu/webgpu-polyfill.js).
56 |
57 | ### Open issues and questions
58 |
59 |
60 | Example usage
61 | -------------
62 |
63 | [Using Canvas2D as WebGPU texture with no copy](../webgpu/interop-demo.html)
64 |
65 | [Using Canvas2D and WebGPU together with no copy](../webgpu/interop-demo-2.html)
66 |
67 | Example for using 2D text on WebGPU:
68 |
69 | ```js
70 | const device = await (await navigator.gpu.requestAdapter()).requestDevice();
71 | const canvas = new OffscreenCanvas(256, 256);
72 | const ctx = canvas.getContext('2d');
73 | ctx.fillText("some text", 10, 50);
74 |
75 | const canvasTexture = ctx.transferToGPUTexture({device: device});
76 |
77 | const pipeline = device.createRenderPipeline(...);
78 |
79 | const sampler = device.createSampler({
80 | magFilter: 'linear',
81 | minFilter: 'linear',
82 | });
83 |
84 | device.createBindGroup({
85 | layout: pipeline.getBindGroupLayout(0),
86 | entries: [
87 | {
88 | binding: 0,
89 | resource: sampler,
90 | },
91 | {
92 | binding: 1,
93 | resource: canvasTexture.createView(),
94 | },
95 | ],
96 | });
97 |
98 | // ... continue WebGPU work.
99 | ```
100 |
101 | Example for using WebGPU in Canvas 2D:
102 |
103 | ```js
104 | const device = await (await navigator.gpu.requestAdapter()).requestDevice();
105 | const canvas = document.createElement("canvas");
106 | const ctx = canvas.getContext('2d');
107 |
108 | // ... some Canvas2D work.
109 | ctx.fillRect(0, 0, 1, 1);
110 |
111 | const canvasTexture = ctx.transferToGPUTexture({device: device});
112 |
113 | const pipeline = device.createRenderPipeline({fragment: {targets: [{
114 | format: ctx.getTextureFormat(),
115 | }]}});
116 |
117 | const commandEncoder = device.createCommandEncoder();
118 |
119 | const renderPassColorAttachment = {
120 | view: canvasTexture.createView(),
121 | clearValue: {r: 0, g: 0, b: 0, a: 1},
122 | loadOp:'clear',
123 | storeOp:'store'
124 | };
125 | const renderPassEncoder = commandEncoder.beginRenderPass({
126 | colorAttachments: [renderPassColorAttachment]
127 | });
128 | renderPassEncoder.setPipeline(pipeline);
129 | renderPassEncoder.setBindGroup(0, uniformBindGroup);  // bind group creation elided
130 | renderPassEncoder.draw(3, 1, 0, 0);
131 | renderPassEncoder.end();
132 | device.queue.submit([commandEncoder.finish()]);
133 |
134 | ctx.transferBackFromGPUTexture();
135 |
136 | // ... continue Canvas2D work.
137 | ctx.fillRect(1, 1, 1, 1);
138 | ```
139 |
140 | References
141 | ----------
142 |
143 | - [WebGPU spec](https://gpuweb.github.io/gpuweb)
144 |
--------------------------------------------------------------------------------
/spec/will-read-frequently.md:
--------------------------------------------------------------------------------
1 | willReadFrequently
2 | ==================
3 | **Status**: explainer.
4 |
5 | Adds a `willReadFrequently` option to `CanvasRenderingContext2DSettings` to allow
6 | signaling that this is a canvas that gets directly read a lot.
7 |
8 | This allows the UA to optimize Canvas2D for direct read access through `getImageData`.
9 |
10 |
11 | Rationale
12 | ---------
13 |
14 | There's an important use case for apps/games that are very reliant on `getImageData`.
15 | This operation is particularly slow when Canvas2D is backed by accelerated graphics (GPU), which considerably speeds up most other use cases.
16 | Historically, browsers have used heuristics to decide whether a canvas is being read frequently, and switched code paths accordingly. This is unreliable, complex and brittle. Instead, we propose a hint option that lets developers tell the browser that this canvas relies on `getImageData` performance.
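
For illustration, a read-heavy pattern that this hint targets (a sketch; `video` and `analyze` stand in for an app-specific source and per-frame CPU processing):

```js
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d', {willReadFrequently: true});

function frame() {
  ctx.drawImage(video, 0, 0);  // e.g. sample a <video> element
  const pixels = ctx.getImageData(0, 0, canvas.width, canvas.height);
  analyze(pixels.data);        // per-frame processing on the CPU
  requestAnimationFrame(frame);
}
requestAnimationFrame(frame);
```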
17 |
18 |
19 | Proposal
20 | --------
21 |
22 | ```webidl
23 | dictionary CanvasRenderingContext2DSettings {
24 | // ... current values
25 | boolean willReadFrequently = false;
26 | };
27 | ```
28 |
29 | When the user sets `willReadFrequently` to true, the UA can optimize for read access, usually by not using the GPU for rendering.
30 |
31 |
32 | ### Open issues and questions
33 |
34 | None.
35 |
36 | Example usage
37 | -------------
38 |
39 | ```js
40 | // Javascript example
41 | const canvas = document.createElement('canvas');
42 | const ctx = canvas.getContext('2d', {willReadFrequently: true});
43 |
44 | ```
45 |
46 | Alternatives considered
47 | -----------------------
48 |
49 |
50 | References
51 | ----------
52 |
53 | - https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/getContext
54 |
--------------------------------------------------------------------------------
/template.md:
--------------------------------------------------------------------------------
1 | Feature
2 | =======
3 | **Status**: explainer.
4 |
5 | {summary}
6 |
7 | {goals and use cases}
8 |
9 |
10 | Proposal
11 | --------
12 |
13 | ```webidl
14 | interface mixin CanvasFeature {
15 |
16 | };
17 |
18 | CanvasRenderingContext2D includes CanvasFeature;
19 | OffscreenCanvasRenderingContext2D includes CanvasFeature;
20 | ```
21 |
22 | {overall usage}
23 |
24 | ### Open issues and questions
25 |
26 |
27 | Example usage
28 | -------------
29 |
30 | ```js
31 | // Javascript example
32 | const canvas = document.createElement('canvas');
33 | const ctx = canvas.getContext('2d');
34 |
35 | ```
36 |
37 | Alternatives considered
38 | -----------------------
39 |
40 | ### Alternative 1
41 |
42 | {notes}
43 |
44 |
45 | References
46 | ----------
47 |
48 | - {link1}
49 |
--------------------------------------------------------------------------------
/va/f1.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/va/f2.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/va/hand1.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/va/hand2.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/va/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Parametrized Display Lists
8 |
89 |
90 |
91 |
92 |
93 |
94 |
103 |
104 |
105 |
106 |
107 | Parametrized Display Lists
108 |
109 |
110 |
111 |
112 | Squircle CSS
113 |
114 | #round-rect {
115 | background-color: #BCAF9C;
116 | border-radius: 5px;
117 | }
118 |
119 | #squircle-1 {
120 | background-image:
121 | url("squircle.dlo" param(color #DC3973) param(curve 10%));
122 | }
123 |
124 | #squircle-2 {
125 | background-image:
126 | url("squircle.dlo" param(color #36382E) param(curve 20%));
127 | }
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 | There's no current way to parametrize SVG paths, so each path has to be manually constructed. With DLOs, a generic Squircle object can be created that supports any curvature and color.
139 |
140 |
141 |
142 |
143 |
144 | Responsive design for shapes
145 |
146 | #original {
147 | /*
148 | The original SVG and DLO have identical outputs.
149 | */
150 | background-image: url("header.dlo");
151 | }
152 |
153 | #svg {
154 | /*
155 | SVG without preserving aspect ratio, will get stretched like
156 | any other image.
157 | */
158 | background-image: url("header.svg");
159 | }
160 |
161 | #dlo {
162 | /*
163 | The display list object is able to parametrize on the size of
164 | the object. In the example, the columns can remain fixed
165 | size, while the center gets stretched to fill the area.
166 | */
167 | background-image: url("header.dlo");
168 | }
169 |
170 |
171 |
172 | Original
173 | Stretched SVG
174 | Stretched DLO
175 |
176 | Original
177 | Stretched SVG
178 | Stretched DLO
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 | Formatted Text Highlight
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 |
199 | Button animation with states
200 |
201 |
202 |
203 |
204 |
205 |
206 | Display List performance
207 |
208 |
209 |
210 |
211 |
212 | Display List with parameters
213 |
214 |
215 |
216 |
217 |
218 | Parametrized animations
219 |
220 |
221 |
222 |
223 |
224 |
225 |
226 |
227 |
228 |
229 |
--------------------------------------------------------------------------------
/va/theme.css:
--------------------------------------------------------------------------------
1 | :root {
2 | --text: #FFFFFF;
3 | --variable: #73DFA5;
4 | --variable2: #FFFFFF;
5 | --variable3: #E08569;
6 | --definition: #73DFA5;
7 | --keyword: #FF659C;
8 | --operator: #FF659C;
9 | --property: #1AC8FF;
10 | --attribute: #73DFA5;
11 | --meta: #F1ECFE;
12 | --tag: #FF659C;
13 | --number: #7A7FFD;
14 | --string: #DFD473;
15 | --comment: #807796;
16 | }
17 |
18 | .hljs {
19 | color: var(--text);
20 | background-color: rgba(0,0,0,0.7);
21 | }
22 |
23 | .hljs-meta {
24 | color: var(--meta);
25 | }
26 |
27 | .hljs-number {
28 | color: var(--number);
29 | }
30 |
31 | .hljs-tag {
32 | color: var(--tag);
33 | }
34 |
35 | .hljs-variable {
36 | color: var(--variable);
37 | }
38 |
39 | .hljs-variable2 {
40 | color: var(--variable2);
41 | }
42 | .hljs-variable3 {
43 | color: var(--variable3);
44 | }
45 |
46 | .hljs-title,
47 | .hljs-title.function_,
48 | .hljs-title.class_,
49 | .hljs-title.class_.inherited__,
50 | .hljs-definition {
51 | color: var(--definition);
52 | }
53 |
54 | .hljs-name,
55 | .hljs-selector-tag,
56 | .hljs-keyword {
57 | color: var(--keyword);
58 | }
59 |
60 | .hljs-operator {
61 | color: var(--operator);
62 | }
63 |
64 | .hljs-attribute,
65 | .hljs-property {
66 | color: var(--property);
67 | }
68 |
69 | .hljs-attr,
70 | .hljs-literal,
71 | .hljs-operator,
72 | .hljs-variable,
73 | .hljs-variable.language_,
74 | .hljs-selector-attr,
75 | .hljs-selector-class {
76 | color: var(--attribute);
77 | }
78 |
79 | .hljs-built_in,
80 | .hljs-selector-id,
81 | .hljs-regexp,
82 | .hljs-string,
83 | .hljs-meta .hljs-string {
84 | color: var(--string);
85 | }
86 |
87 | .hljs-comment,
88 | .hljs-code,
89 | .hljs-formula {
90 | color: var(--comment);
91 | }
92 |
93 | .hljs-symbol,
94 | .hljs-quote,
95 | .hljs-selector-pseudo,
96 | .hljs-subst,
97 | .hljs-section,
98 | .hljs-bullet,
99 | .hljs-emphasis,
100 | .hljs-strong,
101 | .hljs-addition,
102 | .hljs-deletion,
103 | .hljs-doctag,
104 | .hljs-meta .hljs-keyword,
105 | .hljs-template-tag,
106 | .hljs-template-variable,
107 | .hljs-type {
108 | background-color: red;
109 | color: #000;
110 | }
111 |
112 | /* purposely ignored */
113 | .hljs-char.escape_,
114 | .hljs-link,
115 | .hljs-params,
116 | .hljs-property,
117 | .hljs-punctuation {
118 | /* background-color: green;
119 | color: #000;
120 | */
121 | }
122 |
--------------------------------------------------------------------------------
/webgpu/Di-3d.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fserb/canvas2D/17dde06b80fe3ea0aaa35e3b427964080ea02e50/webgpu/Di-3d.png
--------------------------------------------------------------------------------
/webgpu/interop-demo-2.html:
--------------------------------------------------------------------------------
7 | Use WebGPU combined with Canvas2D
--------------------------------------------------------------------------------
/webgpu/interop-demo.html:
--------------------------------------------------------------------------------
7 | Use a canvas 2D as a texture in WebGPU
--------------------------------------------------------------------------------
/webgpu/shader-demo.html:
--------------------------------------------------------------------------------
6 | Canvas Shaders
--------------------------------------------------------------------------------
/webgpu/shader-polyfill.js:
--------------------------------------------------------------------------------
1 | import "./webgpu-polyfill.js";
2 |
3 | class CanvasShader {
4 | constructor(opts) {
5 | this.opts = opts;
6 | }
7 |
8 | async init() {
9 | const adapter = await navigator.gpu.requestAdapter();
10 | this.device = await adapter.requestDevice();
11 |
12 | this.vert = this.device.createShaderModule({ code: `
13 | @vertex fn mainvs(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4f {
14 | var pos = array(vec2(3.0, 0.0), vec2(-1.0, -2.0), vec2(-1.0, 2.0));
15 | return vec4f(pos[VertexIndex], 0.0, 1.0);
16 | }
17 | `});
18 | this.frag = this.device.createShaderModule({code: this.opts.code});
19 | }
20 |
21 | applyFilter(octx) {
22 | const canvas = new OffscreenCanvas(octx.canvas.width, octx.canvas.height);
23 | const ctx = canvas.getContext("webgpu");
24 | const format = navigator.gpu.getPreferredCanvasFormat();
25 | ctx.configure({
26 | device: this.device,
27 | format: format,
28 | alphaMode: 'premultiplied',
29 | });
30 |
31 | const texture = octx.transferToGPUTexture({device: this.device}); // provided by webgpu-polyfill.js
32 |
33 | const pipeline = this.device.createRenderPipeline({
34 | layout: 'auto',
35 | vertex: {module: this.vert, entryPoint: 'mainvs'},
36 | fragment: {module: this.frag, entryPoint: 'mainfs',
37 | targets: [{ format: format}]},
38 | });
39 | const bindGroup = this.device.createBindGroup({
40 | layout: pipeline.getBindGroupLayout(0),
41 | entries: [{ binding: 0, resource: texture.createView() }],
42 | });
43 | const encoder = this.device.createCommandEncoder();
44 | const pass = encoder.beginRenderPass({
45 | colorAttachments: [{
46 | view: ctx.getCurrentTexture().createView(),
47 | clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
48 | loadOp: 'clear',
49 | storeOp: 'store',
50 | }],
51 | });
52 | pass.setPipeline(pipeline);
53 | pass.setBindGroup(0, bindGroup);
54 | pass.draw(3);
55 | pass.end();
56 | this.device.queue.submit([encoder.finish()]);
57 |
58 | return canvas;
59 | }
60 | }
61 |
62 | class CanvasShaderPolyfill {
63 | async createCanvasShader(opts) {
64 | const cs = new CanvasShader(opts);
65 | await cs.init();
66 | return cs;
67 | }
68 |
69 | beginLayer(filter) {
70 | if (this._beginLayer && !(filter instanceof CanvasShader)) {
71 | return this._beginLayer(filter);
72 | }
73 |
74 | this._layerCanvas = new OffscreenCanvas(this.canvas.width, this.canvas.height);
75 | this._layer = this._layerCanvas.getContext("2d");
76 | this._layerFilter = filter;
77 | }
78 |
79 | endLayer() {
80 | if (!this._layer && this._endLayer) {
81 | return this._endLayer();
82 | }
83 |
84 | const ctx = this._layer;
85 | const canvas = this._layerCanvas;
86 | const filter = this._layerFilter;
87 | delete this._layer;
88 | delete this._layerCanvas;
89 | delete this._layerFilter;
90 |
91 | const out = filter.applyFilter(ctx);
92 |
93 | this.save();
94 | this.drawImage(out, 0, 0);
95 | this.restore();
96 | }
97 | }
98 |
99 | for (const ctx of [CanvasRenderingContext2D, OffscreenCanvasRenderingContext2D]) {
100 | ctx.prototype._beginLayer = ctx.prototype.beginLayer;
101 | ctx.prototype._endLayer = ctx.prototype.endLayer;
102 | ctx.prototype.beginLayer = CanvasShaderPolyfill.prototype.beginLayer;
103 | ctx.prototype.endLayer = CanvasShaderPolyfill.prototype.endLayer;
104 |
105 | for (const f of Object.getOwnPropertyNames(CanvasShaderPolyfill.prototype)) {
106 | if (f === 'constructor' || ctx.prototype.hasOwnProperty(f)) continue;
107 | if (f === 'beginLayer' || f === 'endLayer') continue;
108 | ctx.prototype[f] = CanvasShaderPolyfill.prototype[f];
109 | }
110 | }
111 |
112 | export function CanvasShaderContext(ctx) {
113 | return new Proxy(ctx, {
114 | get: function(target, key) {
115 | if (target._layer && key !== 'endLayer') {
116 | if (typeof target._layer[key] === "function") {
117 | return function(...args) {
118 | return target._layer[key](...args);
119 | }
120 | } else {
121 | return target._layer[key];
122 | }
123 | } else {
124 | if (typeof target[key] === "function") {
125 | return function(...args) {
126 | return target[key](...args);
127 | }
128 | } else {
129 | return target[key];
130 | }
131 | }
132 | },
133 |
134 | set: function(target, key, value) {
135 | if (target._layer) {
136 | target._layer[key] = value;
137 | } else {
138 | target[key] = value;
139 | }
140 | return true;
141 | }
142 | });
143 | }
144 |
--------------------------------------------------------------------------------
/webgpu/webgpu-polyfill.js:
--------------------------------------------------------------------------------
1 | class WebGPUInteropPolyfill {
2 | getTextureFormat() {
3 | return 'rgba8unorm';
4 | }
5 |
6 | transferToGPUTexture(opts) {
7 | this._device = opts.device;
8 | this._outTexture = this._device.createTexture({
9 | size: [this.canvas.width, this.canvas.height],
10 | format: 'rgba8unorm',
11 | usage:
12 | GPUTextureUsage.TEXTURE_BINDING |
13 | GPUTextureUsage.COPY_DST |
14 | GPUTextureUsage.RENDER_ATTACHMENT,
15 | });
16 | this._device.queue.copyExternalImageToTexture(
17 | { source: this.canvas },
18 | { texture: this._outTexture },
19 | [this.canvas.width, this.canvas.height]);
20 | return this._outTexture;
21 | }
22 |
23 | transferBackFromGPUTexture() {
24 | const canvas = new OffscreenCanvas(this.canvas.width, this.canvas.height);
25 | const ctx = canvas.getContext("webgpu");
26 | ctx.configure({
27 | device: this._device,
28 | format: this._outTexture.format,
29 | });
30 | const mod = this._device.createShaderModule({ code: `
31 | @vertex fn mainvs(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4f {
32 | var pos = array(vec2(3.0, 0.0), vec2(-1.0, -2.0), vec2(-1.0, 2.0));
33 | return vec4f(pos[VertexIndex], 0.0, 1.0);
34 | }
35 |
36 | @group(0) @binding(0) var myTexture: texture_2d<f32>;
37 | @fragment fn mainfs(@builtin(position) position : vec4f) -> @location(0) vec4f {
38 | return textureLoad(myTexture, vec2i(position.xy), 0);
39 | }`
40 | });
41 | const pipeline = this._device.createRenderPipeline({
42 | layout: 'auto',
43 | vertex: {module: mod, entryPoint: 'mainvs'},
44 | fragment: {module: mod, entryPoint: 'mainfs',
45 | targets: [{ format: this._outTexture.format }]},
46 | });
47 |
48 | const bindGroup = this._device.createBindGroup({
49 | layout: pipeline.getBindGroupLayout(0),
50 | entries: [{ binding: 0, resource: this._outTexture.createView() }],
51 | });
52 |
53 | const encoder = this._device.createCommandEncoder();
54 | const pass = encoder.beginRenderPass({
55 | colorAttachments: [{
56 | view: ctx.getCurrentTexture().createView(),
57 | clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
58 | loadOp: 'clear',
59 | storeOp: 'store',
60 | }],
61 | });
62 | pass.setPipeline(pipeline);
63 | pass.setBindGroup(0, bindGroup);
64 | pass.draw(3);
65 | pass.end();
66 | this._device.queue.submit([encoder.finish()]);
67 |
68 | this._outTexture.destroy();
69 | delete this._outTexture;
70 |
71 | this.clearRect(0, 0, canvas.width, canvas.height);
72 | this.drawImage(canvas, 0, 0, canvas.width, canvas.height);
73 | }
74 | }
75 |
76 | for (const ctx of [CanvasRenderingContext2D, OffscreenCanvasRenderingContext2D]) {
77 | for (const f of Object.getOwnPropertyNames(WebGPUInteropPolyfill.prototype)) {
78 | if (f === 'constructor' || ctx.prototype.hasOwnProperty(f)) continue;
79 | ctx.prototype[f] = WebGPUInteropPolyfill.prototype[f];
80 | }
81 | }
82 |
--------------------------------------------------------------------------------