├── .gitignore ├── cc0-80x15.png ├── .pr-preview.json ├── w3c.json ├── CODE_OF_CONDUCT.md ├── copyright.include ├── Makefile ├── .editorconfig ├── .github └── workflows │ └── auto-publish.yml ├── README.md ├── status.include ├── generate-markdown.py ├── questionnaire.markdown ├── LICENCE └── index.bs /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | -------------------------------------------------------------------------------- /cc0-80x15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/w3c/security-questionnaire/HEAD/cc0-80x15.png -------------------------------------------------------------------------------- /.pr-preview.json: -------------------------------------------------------------------------------- 1 | { 2 | "src_file": "index.bs", 3 | "type": "bikeshed", 4 | "params": { 5 | "force": 1 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /w3c.json: -------------------------------------------------------------------------------- 1 | { 2 | "group": ["wg/privacy", "ig/security"], 3 | "contacts": ["tjwhalen", "simoneonofri"], 4 | "repo-type": "note" 5 | } 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | All documentation, code, communication and discussion in this repository are covered by the [W3C Code of Ethics and Professional Conduct](https://www.w3.org/Consortium/cepc/). 4 | -------------------------------------------------------------------------------- /copyright.include: -------------------------------------------------------------------------------- 1 | CC0 To the extent 2 | possible under law, the editors have waived all copyright and related or neighboring rights to this work. 3 | This document is also made available under the W3C Software and Document License. 
4 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | specs = $(patsubst %.bs,build/%.html,$(wildcard *.bs)) 2 | 3 | .PHONY: all clean 4 | .SUFFIXES: .bs .html 5 | 6 | all: $(specs) questionnaire.markdown 7 | 8 | clean: 9 | rm -rf build questionnaire.markdown *~ 10 | 11 | build: 12 | mkdir -p build 13 | 14 | build/%.html: %.bs Makefile build 15 | bikeshed --die-on=warning spec $< $@ 16 | 17 | questionnaire.markdown: index.bs generate-markdown.py 18 | ./generate-markdown.py < $< > $@ 19 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | charset = utf-8 7 | indent_style = space 8 | indent_size = 8 9 | end_of_line = lf 10 | insert_final_newline = true 11 | trim_trailing_whitespace = true 12 | 13 | [*.bs] 14 | indent_size = 4 15 | 16 | [Makefile] 17 | indent_style = tab 18 | 19 | [*.md] 20 | indent_size = 4 21 | trim_trailing_whitespace = false 22 | 23 | [*.py] 24 | indent_size = 4 25 | 26 | [*.sh] 27 | indent_size = 4 28 | 29 | [*.{yml,yaml,yamllint}] 30 | indent_size = 2 31 | -------------------------------------------------------------------------------- /.github/workflows/auto-publish.yml: -------------------------------------------------------------------------------- 1 | # Configuration options https://w3c.github.io/spec-prod/ 2 | name: Publish the ED and Note 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | pull_request: {} 9 | 10 | jobs: 11 | validate-and-publish: 12 | name: Validate and Publish 13 | runs-on: ubuntu-latest # only linux supported at present 14 | permissions: 15 | contents: write 16 | steps: 17 | - uses: actions/checkout@v4 18 | - uses: w3c/spec-prod@v2 19 | with: 20 | TOOLCHAIN: bikeshed 21 | GH_PAGES_BRANCH: gh-pages 22 | W3C_ECHIDNA_TOKEN: ${{ secrets.ECHIDNA_TOKEN }} 23 | W3C_WG_DECISION_URL: "https://github.com/w3ctag/security-questionnaire/pull/171" 24 | W3C_NOTIFICATIONS_CC: "${{ secrets.CC }}" 25 | W3C_BUILD_OVERRIDE: | 26 | status: NOTE 27 | shortname: security-privacy-questionnaire -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This repository contains the [Editor's Draft](https://w3c.github.io/security-questionnaire/) of the [Self-Review Questionnaire: Security and Privacy](https://www.w3.org/TR/security-privacy-questionnaire/) document, which spec authors can use to identify and work through possible security and privacy concerns related to their spec. 2 | 3 | The questionnaire is a joint product of two groups: the [Privacy Working Group](https://www.w3.org/groups/wg/privacy/), and the [Security Interest Group](https://www.w3.org/groups/ig/security/). 4 | 5 | When folks request a [design review](https://github.com/w3ctag/design-reviews) from the TAG, [filling out](questionnaire.markdown) the security and privacy questionnaire helps the TAG to understand potential security and privacy issues and mitigations for the design, and can save us asking redundant questions. 
6 | 7 | Before requesting security and 8 | privacy review 9 | from the Security Interest Group and the Privacy Working Group, respectively, documents must 10 | contain both "Security Considerations" and "Privacy Considerations" 11 | sections, as described in Section 2.15. While 12 | your answers to the questions in this document will inform your 13 | writing of those sections, it is not appropriate to merely copy this 14 | questionnaire into those sections. 15 | 16 | [Further instructions on requesting security and privacy reviews can be found in the Guide.](https://w3c.github.io/documentreview/#how_to_get_horizontal_review) -------------------------------------------------------------------------------- /status.include: -------------------------------------------------------------------------------- 1 |

This section describes the status of this document at the time of its publication. A list of current W3C publications and the latest revision of this technical report can be found in the W3C technical reports index at https://www.w3.org/TR/.

2 | 3 |

This document was published by the Privacy Working Group and the Security Interest Group as a Group Note using the Note track.

4 | 5 |

Group Notes are not endorsed by W3C nor its Members.

6 | 7 |

8 | This is a draft document and may be updated, replaced or obsoleted 9 | by other documents at any time. It is inappropriate to cite 10 | this document as other than work in progress. 11 |

12 | 13 | 14 |

[STATUSTEXT]

15 | 16 |

17 | Feedback and comments on this document are welcome. Please 18 | file an issue 19 | in this document’s 20 | GitHub repository. 21 |

22 | 23 |

24 | The W3C Patent Policy does not carry any licensing requirements or commitments on this document. 25 |

26 | 27 |

This document is governed by the 03 November 2023 W3C Process Document.

28 | -------------------------------------------------------------------------------- /generate-markdown.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | 5 | class MarkdownGenerator: 6 |     def __init__(self): 7 |         self.in_question = False 8 |         self.first_line_in_question = False 9 |         self.prefix = "" 10 |         self.qnum = 0 11 | 12 |     def process_line(self, line, outfile): 13 |         if self.in_question: 14 |             if not line.startswith("</h3>"): 15 |                 line = line.lstrip() 16 |                 if self.first_line_in_question: 17 |                     self.qnum += 1 18 |                     self.prefix = "%02d. " % self.qnum 19 |                     self.first_line_in_question = False 20 |                 print(self.prefix, line, end='', file=outfile) 21 |                 self.prefix = "    " 22 |             else: 23 |                 self.in_question = False 24 |         elif line.startswith("<h3 class=question"): 25 |             self.in_question = True 26 |             self.first_line_in_question = True 27 | 28 | # Driver: read index.bs from stdin, write the numbered questionnaire markdown to stdout. 29 | generator = MarkdownGenerator() 30 | for line in sys.stdin: 31 |     generator.process_line(line, sys.stdout) -------------------------------------------------------------------------------- /index.bs: -------------------------------------------------------------------------------- 1 |

2 | Title: Self-Review Questionnaire: Security and Privacy 3 | Status: ED 4 | TR: https://www.w3.org/TR/security-privacy-questionnaire/ 5 | ED: https://w3c.github.io/security-questionnaire/ 6 | Shortname: security-privacy-questionnaire 7 | Repository: w3c/security-questionnaire 8 | Level: None 9 | Editor: Peter Snyder, w3cid 109401, Brave Software https://brave.com, pes@brave.com 10 | Editor: Simone Onofri, w3cid 38211, W3C https://w3.org, simone@w3.org 11 | Former Editor: Jason Novak, Apple Inc., https://apple.com 12 | Former Editor: Lukasz Olejnik, Independent researcher, https://lukaszolejnik.com 13 | Former Editor: Mike West, Google Inc., mkwst@google.com 14 | Former Editor: Theresa O’Connor, w3cid 40614, Apple Inc. https://apple.com, hober@apple.com 15 | Former Editor: Yan Zhu, Yahoo Inc., yan@brave.com 16 | Group: privacywg 17 | Markup Shorthands: css no, markdown yes 18 | Local Boilerplate: status yes 19 | Local Boilerplate: copyright yes 20 | Boilerplate: conformance no 21 | Abstract: This document contains a set of questions to be used when 22 | evaluating the security and privacy implications of web platform 23 | technologies. 24 | 25 | 26 |

Introduction

27 | 28 | When designing new features for the Web platform, 29 | we must always consider the security and privacy implications of our work. 30 | New Web features should always 31 | maintain or enhance 32 | the overall security and privacy of the Web. 33 | 34 | This document contains a set of questions 35 | intended to help spec authors 36 | as they think through 37 | the security and privacy implications 38 | of their work and write the narrative Security Considerations and Privacy 39 | Considerations sections for inclusion in-line in their specifications, 40 | as described below in [[#considerations]]. 41 | It also documents mitigation strategies 42 | that spec authors can use to address 43 | security and privacy concerns they encounter as they work on their spec. 44 | 45 | This document is itself a work in progress, 46 | and there may be security or privacy concerns 47 | which this document does not (yet) cover. 48 | Please [let us know](https://github.com/w3c/security-questionnaire/issues/new) 49 | if you identify a security or privacy concern 50 | this questionnaire should ask about. 51 | 52 |

How To Use The Questionnaire

53 | 54 | Work through these questions 55 | early on in the design process, 56 | when things are easier to change. 57 | When privacy and security issues are only found later, 58 | after a feature has shipped, 59 | it's much harder to change the design. 60 | If security or privacy issues are found late, 61 | user agents may need to adopt breaking changes 62 | to fix the issues. 63 | 64 | Keep these questions in mind while working on specifications. 65 | Periodically revisit this questionnaire and continue to consider the questions, 66 | particularly as a design changes over time. 67 | 68 |

Additional resources

69 | 70 | The Mitigating Browser Fingerprinting in Web Specifications 71 | [[FINGERPRINTING-GUIDANCE]] document published by the Privacy WG goes into 72 | further depth about browser fingerprinting and should be considered in 73 | parallel with this document. 74 | 75 | The IETF's RFC about privacy considerations, [[RFC6973]], is a 76 | wonderful resource, particularly Section 7. 77 | 78 |

TAG, Privacy WG, Security IG, and this questionnaire

79 | 80 | Before requesting 81 | privacy and 82 | security reviews from 83 | the [Privacy Working Group](https://www.w3.org/groups/wg/privacy/) and 84 | the [Security Interest Group](https://www.w3.org/groups/ig/security/), 85 | write "Security Considerations" and 86 | "Privacy Considerations" sections in your document, as described in 87 | [[#considerations]]. Answering the questions in this 88 | document will, we hope, inform your writing of those sections. It is not 89 | appropriate, however, to merely copy this questionnaire into those sections. 90 | Instructions for requesting security and privacy reviews can be 91 | found in the document 92 | [How to do Wide Review](https://www.w3.org/guide/documentreview/#how_to_get_horizontal_review). 93 | 94 | When requesting 95 | a [review](https://github.com/w3ctag/design-reviews) 96 | from the [Technical Architecture Group (TAG)](https://www.w3.org/2001/tag/), 97 | please provide the TAG with answers 98 | to the questions in this document. 99 | [This Markdown 100 | template](https://raw.githubusercontent.com/w3c/security-questionnaire/main/questionnaire.markdown) 101 | may be useful when doing so. 102 | 103 | 104 |

Questions to Consider

105 | 106 |

107 | What information does this feature expose, 108 | and for what purposes? 109 |

110 | 111 | User agents should only expose information to the Web 112 | when doing so is necessary to serve a clear user need. 113 | Does your feature expose information to websites? 114 | If so, how does exposing this information benefit the user? 115 | Are the risks to the user outweighed by the benefits to the user? 116 | If so, how? 117 | 118 | See also 119 | 120 | * [[DESIGN-PRINCIPLES#priority-of-constituencies]] 121 | 122 | When answering this question, please consider each of these four possible 123 | areas of information disclosure / sharing. 124 | 125 | For the sub-questions below, 126 | please take the term *potentially identifying information* 127 | to mean information that describes the browser user, 128 | distinct from others who use the same browser version. 129 | Examples of such *potentially identifying information* include information 130 | about the browser user's environment (e.g., operating system configuration, 131 | browser configuration, hardware capabilities), and the user's prior activities 132 | and interests (e.g., browsing history, purchasing preferences, personal 133 | characteristics). 134 | 135 | 1. What information does your spec expose to the **first party** that 136 | the **first party** cannot currently easily determine? 137 | 2. What information does your spec expose to **third parties** that 138 | **third parties** cannot currently easily determine? 139 | 3. What *potentially identifying information* does your spec expose to the 140 | **first party** that the **first party** can already access (i.e., what 141 | identifying information does your spec duplicate or mirror)? 142 | 4. What *potentially identifying information* does your spec expose to 143 | **third parties** that **third parties** can already access? 144 | 145 |

146 | Do features in your specification expose the minimum amount of information 147 | necessary to implement the intended functionality? 148 |

149 | 150 | Features should only expose information 151 | when it's absolutely necessary. 152 | If a feature exposes more information than is necessary, 153 | why does it do so, and can the same functionality be achieved by 154 | exposing less information? 155 | 156 | See also 157 | 158 | * [[#data-minimization]] 159 | 160 |

161 | Content Security Policy [[CSP]] unintentionally exposed redirect targets 162 | cross-origin by allowing one origin to infer details about another origin 163 | through violation reports (see [[HOMAKOV]]). The working group eventually 164 | mitigated the risk by reducing a policy's granularity after a redirect. 165 |

166 | 167 |

168 | Do the features in your specification expose personal information, 169 | personally-identifiable information (PII), or information derived from 170 | either? 171 |

172 | 173 | Personal information is any data about a user 174 | (for example, their home address), 175 | or information that could be used to identify a user, 176 | such as an alias, email address, or identification number. 177 | 178 | Note: Personal information is 179 | distinct from personally identifiable information 180 | (PII). 181 | PII is a legal concept, 182 | the definition of which varies from jurisdiction to jurisdiction. 183 | When used in a non-legal context, 184 | PII tends to refer generally 185 | to information 186 | that could be used to identify a user. 187 | 188 | When exposing 189 | personal information, PII, or derivative information, 190 | specification authors must prevent or, when prevention is not possible, minimize 191 | potential harm to users. 192 | 193 |

194 | A feature 195 | which gathers biometric data 196 | (such as fingerprints or retina scans) 197 | for authentication 198 | should not directly expose this biometric data to the web. 199 | Instead, 200 | it can use the biometric data 201 | to look up or generate some temporary key which is not shared across origins 202 | which can then be safely exposed to the origin. [[WEBAUTHN]] 203 |

204 | 205 | Personal information, PII, or their derivatives 206 | should not be exposed to origins 207 | without [[DESIGN-PRINCIPLES#user-intent|meaningful user consent]]. 208 | Many APIs 209 | use the Permissions API to acquire meaningful user consent. 210 | [[PERMISSIONS]] 211 | 212 | Keep in mind 213 | that each permission prompt 214 | added to the web platform 215 | increases the risk 216 | that users will ignore 217 | the contents of all permission prompts. 218 | Before adding a permission prompt, consider your options for using 219 | a less obtrusive way to gain meaningful user consent. 220 | [[ADDING-PERMISSION]] 221 | 222 |
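As a non-normative sketch of what acquiring such consent can look like in practice (assuming the Permissions API, with geolocation standing in for any consent-gated feature), a site can consult existing permission state and only trigger a prompt from a user gesture, so the request arrives in context:

```typescript
// Sketch: consult permission state first, and only prompt from a user
// gesture so the request arrives in context. Geolocation stands in for
// any consent-gated feature here.
async function wireUpLocateButton(button: HTMLButtonElement): Promise<void> {
  const status = await navigator.permissions.query({ name: "geolocation" });
  if (status.state === "denied") {
    return; // respect the user's earlier decision instead of re-prompting
  }
  button.addEventListener("click", () => {
    navigator.geolocation.getCurrentPosition(
      (pos) => console.log("position", pos.coords.latitude, pos.coords.longitude),
      () => console.log("user declined or lookup failed"),
    );
  });
}
```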

223 | `<input type=file>` can be used to upload 224 | documents containing personal information 225 | to websites. 226 | It makes use of 227 | the underlying native platform's file picker 228 | to ensure the user understands 229 | that the file and its contents 230 | will be exposed to the website, 231 | without a separate permissions prompt. 232 |

233 | 234 | See also 235 | 236 | * [[#user-mediation]] 237 | * [[DESIGN-PRINCIPLES#user-intent]] 238 | 239 |

240 | How do the features in your specification deal with sensitive information? 241 |

242 | 243 | Personal information is not the only kind of sensitive information. 244 | Many other kinds of information may also be sensitive. 245 | What is or isn't sensitive information can vary 246 | from person to person 247 | or from place to place. 248 | Information that would be harmless if known about 249 | one person or group of people 250 | could be dangerous if known about 251 | another person or group. 252 | Information about a person 253 | that would be harmless in one country 254 | might be used in another country 255 | to detain, kidnap, or imprison them. 256 | 257 | Examples of sensitive information include: 258 | caste, 259 | citizenship, 260 | color, 261 | credentials, 262 | criminal record, 263 | demographic information, 264 | disability status, 265 | employment status, 266 | ethnicity, 267 | financial information, 268 | health information, 269 | location data, 270 | marital status, 271 | political beliefs, 272 | profession, 273 | race, 274 | religious beliefs or nonbeliefs, 275 | sexual preferences, 276 | and 277 | trans status. 278 | 279 | When a feature exposes sensitive information to the web, 280 | its designers must take steps 281 | to mitigate the risk of exposing the information. 282 | 283 |
284 | 285 | The Credential Management API allows sites 286 | to request a user's credentials 287 | from a password manager. [[CREDENTIAL-MANAGEMENT-1]] 288 | If it exposed the user's credentials to JavaScript, 289 | and if the page using the API were vulnerable to [=XSS=] attacks, 290 | the user's credentials could be leaked to attackers. 291 | 292 | The Credential Management API 293 | mitigates this risk 294 | by not exposing the credentials to JavaScript. 295 | Instead, it exposes 296 | an opaque {{FormData}} object 297 | which cannot be read by JavaScript. 298 | The spec also recommends 299 | that sites configure Content Security Policy [[CSP]] 300 | with reasonable [=connect-src=] and [=form-action=] values 301 | to further mitigate the risk of exfiltration. 302 | 303 |
304 | 305 | Many use cases 306 | which require location information 307 | can be adequately served 308 | with very coarse location data. 309 | For instance, 310 | a site which recommends restaurants 311 | could adequately serve its users 312 | with city-level location information 313 | instead of exposing the user's precise location. 314 | 315 | See also 316 | 317 | * [[DESIGN-PRINCIPLES#do-not-expose-use-of-assistive-tech]] 318 | 319 |

320 | Does data exposed by your specification carry related but distinct 321 | information that may not be obvious to users? 322 |

323 | 324 | Features which enable users 325 | to share data with origins 326 | should ensure that such data 327 | does not carry embedded, possibly hidden, information 328 | without the user's awareness, understanding, and consent. 329 | 330 | Documents 331 | such as image or video files 332 | often contain metadata about 333 | where and when the image, video, or audio was captured 334 | and 335 | what kind of device captured or produced the data. 336 | When uploaded, 337 | this kind of metadata 338 | may reveal to origins 339 | information the user did not intend to reveal, 340 | such as the user's present or past location 341 | and socioeconomic status. 342 | 343 | User agents should enable users to choose 344 | whether or not to share such data with sites, 345 | and the default should be that such data 346 | is not shared. 347 | 348 |

349 | Do the features in your specification introduce state 350 | that persists across browsing sessions? 351 |

352 | 353 | The Web platform already includes many mechanisms 354 | origins can use to 355 | store information. 356 | Cookies, 357 | `ETag`, 358 | `Last-Modified`, 359 | {{localStorage}}, 360 | and 361 | {{indexedDB}}, 362 | are just a few examples. 363 | 364 | Allowing a website 365 | to store data 366 | on a user’s device 367 | in a way that persists across browsing sessions 368 | introduces the risk 369 | that this state may be used 370 | to track a user 371 | without their knowledge or control, 372 | either in [=first-party-site context|first-=] or [=third-party context|third-party=] contexts. 373 | 374 | One way 375 | user agents prevent origins from 376 | abusing client-side storage mechanisms 377 | is by providing users with the ability 378 | to clear data stored by origins. 379 | Specification authors should include similar 380 | protections to make sure that new 381 | client-side storage mechanisms 382 | cannot be misused to track users across domains 383 | without their control. 384 | However, just giving users the ability 385 | to delete origin-set state is usually 386 | not sufficient, since users rarely 387 | manually clear browser state. 388 | Spec authors should consider ways 389 | to make new features more privacy-preserving without full storage clearing, 390 | such as 391 | reducing the uniqueness of values, 392 | rotating values, 393 | or otherwise making features no more identifying than is needed. 394 | 395 | 396 | Additionally, specification authors 397 | should carefully consider and specify, when possible, 398 | how their features should interact with browser caching 399 | features. Additional mitigations may be necessary to 400 | prevent origins from abusing caches to 401 | identify and track users across sites or sessions without user consent. 402 | 403 |
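As an illustrative, hypothetical sketch of the "rotating values" idea (the storage key and the one-week lifetime are assumptions, not recommendations), an origin-visible value can be made to expire and re-mint itself rather than persist indefinitely:

```typescript
// Sketch: a deliberately short-lived, self-expiring value rather than a
// permanent identifier. Key name and lifetime are illustrative only.
const LIFETIME_MS = 7 * 24 * 60 * 60 * 1000; // one week, for illustration

function getRotatingToken(): string {
  const raw = localStorage.getItem("example-token");
  if (raw !== null) {
    const stored = JSON.parse(raw) as { value: string; created: number };
    if (Date.now() - stored.created < LIFETIME_MS) {
      return stored.value;
    }
  }
  // Expired or absent: mint a fresh value, so old state cannot be used
  // to re-identify the user indefinitely.
  const value = crypto.randomUUID();
  localStorage.setItem("example-token", JSON.stringify({ value, created: Date.now() }));
  return value;
}
```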

404 | Platform-specific DRM implementations 405 | (such as [=content decryption modules=] in [[ENCRYPTED-MEDIA]]) 406 | might expose origin-specific information 407 | in order to help identify users 408 | and determine whether they ought to be granted access 409 | to a specific piece of media. 410 | These kinds of identifiers 411 | should be carefully evaluated 412 | to determine how abuse can be mitigated; 413 | identifiers which a user cannot easily change 414 | are very valuable from a tracking perspective, 415 | and protecting such identifiers 416 | from an [=active network attacker=] 417 | is vital. 418 |

419 | 420 |

421 | Do the features in your specification expose information about the 422 | underlying platform to origins? 423 |

424 | 425 | 426 | (Underlying platform information includes 427 | user configuration data, 428 | the presence and attributes of hardware I/O devices such as sensors, 429 | and the availability and behavior of various software features.) 430 | 431 | If so, is the same information exposed across origins? 432 | Do different origins see different data or the same data? 433 | Does the data change frequently or rarely? 434 | Rarely-changing data exposed to multiple origins 435 | can be used to uniquely identify a user across those origins. 436 | This may be direct 437 | (when the piece of information is unique) 438 | or indirect 439 | (because the data may be combined with other data to form a fingerprint). [[FINGERPRINTING-GUIDANCE]] 440 | 441 | When considering whether or not to expose such information, 442 | specs and user agents 443 | should not consider the information in isolation, 444 | but should evaluate the risk of adding it 445 | to the existing fingerprinting surface of the platform. 446 | 447 | Keep in mind that 448 | the fingerprinting risk of a particular piece of information 449 | may vary between platforms. 450 | The fingerprinting risk of some data 451 | on the hardware and software platforms *you* use 452 | may differ from 453 | the fingerprinting risk on other platforms. 454 | 455 | When you do decide to expose such information, 456 | you should take steps to mitigate the harm of such exposure. 457 | 458 | Sometimes the right answer is to not expose the data in the first place (see [[#drop-feature]]). 459 | In other cases, 460 | reducing fingerprintability may be as simple as 461 | ensuring consistency—for instance, 462 | by ordering a list of available resources—but sometimes, 463 | more complex mitigations may be necessary. 464 | See [[#mitigations]] for more. 465 | 466 | If features in your spec expose such data 467 | and do not define adequate mitigations, 468 | you should ensure that such information 469 | is not revealed to origins 470 | without [[DESIGN-PRINCIPLES#user-intent|meaningful user consent]], 471 | and 472 | you should clearly describe this 473 | in your specification's Security and Privacy Considerations sections. 474 | 475 |

476 | WebGL's `RENDERER` string 477 | enables some applications to improve performance. 478 | It's also valuable fingerprinting data. 479 | This privacy risk must be carefully weighed 480 | when considering exposing such data to origins. 481 |

482 | 483 |

484 | The [=PDF viewer plugin objects=] list almost never changes. 485 | Some user agents have [disabled direct enumeration of the plugin list](https://bugzilla.mozilla.org/show_bug.cgi?id=757726) 486 | to reduce the fingerprinting harm of this interface. 487 |

488 | 489 | See also: 490 | 491 | * [[DESIGN-PRINCIPLES#device-ids|Use care when exposing identifying information about devices]] 492 | * [[DESIGN-PRINCIPLES#device-enumeration|Use care when exposing APIs for selecting or enumerating devices]] 493 | 494 |
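To make the indirect, combinatorial risk concrete, this illustrative sketch (not part of any specification) hashes a handful of rarely-changing values that are already visible to every origin; none is unique on its own, but together they can form a stable cross-origin identifier:

```typescript
// Sketch: combining rarely-changing, cross-origin-visible values into a
// stable fingerprint, with no stored state at all.
async function naiveFingerprint(): Promise<string> {
  const inputs = [
    navigator.userAgent,
    navigator.language,
    String(navigator.hardwareConcurrency),
    `${screen.width}x${screen.height}x${screen.colorDepth}`,
    Intl.DateTimeFormat().resolvedOptions().timeZone,
  ].join("|");
  const digest = await crypto.subtle.digest(
    "SHA-256",
    new TextEncoder().encode(inputs),
  );
  return Array.from(new Uint8Array(digest))
    .map((byte) => byte.toString(16).padStart(2, "0"))
    .join("");
}
```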

495 | Does this specification allow an origin to send data to the underlying 496 | platform? 497 |

498 | 499 | If so, what kind of data can be sent? 500 | 501 | Platforms differ in how they process data passed into them, 502 | which may present different risks to users. 503 | 504 | Don't assume the underlying platform will safely handle the data that is passed. 505 | Where possible, mitigate attacks by limiting or structuring the kind of data that is passed to the platform. 506 | 507 |
508 | URLs may or may not be dereferenced by a platform API, 509 | and if they are dereferenced, 510 | redirects may or may not be followed. 511 | If your specification sends URLs to underlying platform APIs, 512 | the potential harm of *your* API 513 | may vary depending on 514 | the behavior of the various underlying platform APIs it's built upon. 515 | 516 | What happens when `file:`, `data:`, or `blob:` URLs 517 | are passed to the underlying platform API? 518 | These can potentially read sensitive data 519 | directly from the user's hard disk or from memory. 520 | 521 | Even if your API only allows `http:` and `https:` URLs, 522 | such URLs may be vulnerable to [=CSRF=] attacks, 523 | or be redirected to `file:`, `data:`, or `blob:` URLs. 524 |
525 | 526 |
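One way to apply that structuring is to parse and narrow URLs before they ever reach the platform; in this sketch the allowlist of schemes is an assumption chosen for illustration:

```typescript
// Sketch: narrow what reaches the underlying platform API. Only http(s)
// URLs pass; file:, data:, and blob: URLs are rejected up front.
const ALLOWED_SCHEMES = new Set(["http:", "https:"]);

function toPlatformSafeUrl(input: string): URL {
  const url = new URL(input); // throws on unparseable input
  if (!ALLOWED_SCHEMES.has(url.protocol)) {
    throw new TypeError(`scheme ${url.protocol} is not allowed here`);
  }
  return url;
}
```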

527 | Do features in this specification enable access to device sensors? 528 |

529 | 530 | If so, what kinds of information from or about the sensors are exposed to origins? 531 | 532 | Information from sensors may serve as a fingerprinting vector across origins. 533 | Additionally, 534 | sensors may reveal something sensitive about the device or its environment. 535 | 536 | If sensor data is relatively stable 537 | and consistent across origins, 538 | it could be used as a cross-origin identifier. 539 | If two User Agents expose such stable data from the same sensors, 540 | the data could even be used as a cross-browser, or potentially even a cross-device, identifier. 541 | 542 |

543 | Researchers discovered that 544 | it's possible to use 545 | a sufficiently fine-grained gyroscope 546 | as a microphone [[GYROSPEECHRECOGNITION]]. 547 | This can be mitigated by lowering the gyroscope's sample rates. 548 |

549 | 550 |

551 | Ambient light sensors could allow an attacker to learn whether or not a 552 | user had visited given links [[OLEJNIK-ALS]]. 553 |

554 | 555 |

556 | Even relatively short lived data, like the battery status, may be able to 557 | serve as an identifier [[OLEJNIK-BATTERY]]. 558 |

559 | 560 |
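A sketch of what reduced precision looks like from the page's side, using `devicemotion` events as the sensor source; rounding to one decimal place is an illustrative choice, not a normative one:

```typescript
// Sketch: quantize motion readings before using them, so high-precision
// values are not available as a side channel or fingerprinting input.
const quantize = (value: number | null): number | null =>
  value === null ? null : Math.round(value * 10) / 10;

window.addEventListener("devicemotion", (event) => {
  const acceleration = event.acceleration;
  if (acceleration === null) return;
  console.log(
    quantize(acceleration.x),
    quantize(acceleration.y),
    quantize(acceleration.z),
  );
});
```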

561 | Do features in this specification enable new script execution/loading 562 | mechanisms? 563 |

564 | 565 | New mechanisms for executing or loading scripts have a risk of enabling novel attack surfaces. 566 | Generally, if a new feature needs this, you should consult with a wider audience, 567 | and think about whether an existing mechanism can be used 568 | or whether the feature is really necessary. 569 | 570 | 571 | 572 |

573 | JSON modules are expected to be treated only as data, 574 | but the initial proposal allowed an adversary to swap them out for executable code without the importing page knowing. 575 | Import assertions were implemented 576 | as a mitigation for this vulnerability. 577 |

578 | 579 |
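In code, the mitigation looks like the following (the module specifier is illustrative, and newer drafts spell the clause `with { type: "json" }` rather than `assert`):

```typescript
// Sketch: the type assertion guarantees the module is parsed as JSON.
// If the server swaps in executable code, the import fails with an error
// instead of running that code.
import config from "./config.json" assert { type: "json" };

console.log(config);
```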

580 | Do features in this specification allow an origin to access other devices? 581 |

582 | 583 | If so, what devices do the features in this specification allow an origin to 584 | access? 585 | 586 | Accessing other devices, both via network connections and via 587 | direct connection to the user's machine (e.g. via Bluetooth, 588 | NFC, or USB), could expose vulnerabilities: some of 589 | these devices were not created with web connectivity in mind and may be inadequately 590 | hardened against malicious input or unprepared for use on the web. 591 | 592 | Exposing other devices on a user’s local network also has significant privacy 593 | risk: 594 | 595 | * If two user agents have the same devices on their local network, an 596 | attacker may infer that the two user agents are running on the same host 597 | or are being used by two separate users who are in the same physical 598 | location. 599 | * Enumerating the devices on a user’s local network provides significant 600 | entropy that an attacker may use to fingerprint the user agent. 601 | * If features in this spec expose persistent or long-lived identifiers of 602 | local network devices, that provides attackers with a way to track a user 603 | over time even if a user takes steps to prevent such tracking (e.g. 604 | clearing cookies and other stateful tracking mechanisms). 605 | * Direct connections might also be used to bypass security checks that 606 | other APIs would provide. For example, attackers used the WebUSB API to 607 | access other sites' credentials on a hardware security key, bypassing 608 | same-origin checks in an early U2F API. [[YUBIKEY-ATTACK]] 609 | 610 |

611 | The Network Service Discovery API [[DISCOVERY-API]] recommended CORS 612 | preflights before granting access to a device, and required user agents to 613 | involve the user with a permission request of some kind. 614 |

615 | 616 |

617 | Likewise, the Web Bluetooth specification [[WEB-BLUETOOTH]] has an extensive discussion of 618 | such issues in [[WEB-BLUETOOTH#privacy]], which is worth 619 | reading as an example for similar work. 620 |

621 | 622 |

623 | [[WEBUSB]] addresses these risks through a combination of user mediation / 624 | prompting, secure origins, and feature policy. 625 | See [[WEBUSB#security-and-privacy]] for more. 626 |

627 | 628 |
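A sketch of that combination from the page's side, assuming Chromium's WebUSB implementation; `navigator.usb` is not available in every engine, and the vendor id filter below is a placeholder:

```typescript
// Sketch: access is gated on a user-mediated chooser, and the request
// narrows which devices are even offered to the user via filters.
// Must be called from a user gesture; navigator.usb is Chromium-only.
const usb = (navigator as { usb?: any }).usb;

async function pickDevice(): Promise<void> {
  if (usb === undefined) return; // engine does not expose WebUSB
  const device = await usb.requestDevice({
    filters: [{ vendorId: 0x1234 }], // placeholder vendor id
  });
  console.log(`user granted access to ${device.productName}`);
}
```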

629 | Do features in this specification allow an origin some measure of control over 630 | a user agent's native UI? 631 |

632 | 633 | Features that allow for control over a user agent’s UI (e.g. full screen 634 | mode) or changes to the underlying system (e.g. installing an ‘app’ on a 635 | smartphone home screen) may surprise users or obscure security / privacy 636 | controls. To the extent that your feature does allow for the changing of a 637 | user agent’s UI, can it affect security / privacy controls? What analysis 638 | confirmed this conclusion? 639 | 640 |

641 | What temporary identifiers do the features in this specification create or 642 | expose to the web? 643 |

644 | 645 | If a standard exposes a temporary identifier to the web, the identifier 646 | should be short-lived and should rotate at some regular interval to mitigate 647 | the risk of this identifier being used to track a user over time. When a 648 | user clears state in their user agent, these temporary identifiers should be 649 | cleared to prevent re-correlation of state using a temporary identifier. 650 | 651 | If features in this spec create or expose temporary identifiers to the 652 | web, how are they exposed, when, to what entities, and how frequently are 653 | those temporary identifiers rotated? 654 | 655 | Example temporary identifiers include TLS Channel ID, Session Tickets, and 656 | IPv6 addresses. 657 | 658 |

659 | The index attribute in the Gamepad API [[GAMEPAD]] — an integer that starts 660 | at zero, increments, and is reset — is a good example of a privacy friendly 661 | temporary identifier. 662 |

663 | 664 | 665 |

666 | How does this specification distinguish between behavior in first-party and 667 | third-party contexts? 668 |

669 | 670 | The behavior of a feature should be considered not just in the context of its 671 | being used by a first-party origin that a user is visiting, but also in the 672 | context of its being used by an arbitrary third party that the first 673 | party includes. When developing your specification, consider the implications 674 | of its use by third-party resources on a page, and consider whether support for 675 | use by third-party resources should be optional in order to conform to the 676 | specification. If supporting use by third-party resources is mandatory for 677 | conformance, please explain why and what privacy mitigations are in place. 678 | This is particularly important as user agents may take steps to reduce the 679 | availability or functionality of certain features to third parties if the 680 | third parties are found to be abusing the functionality. 681 | 682 |

683 | How do the features in this specification work in the context of a browser’s 684 | Private Browsing or Incognito mode? 685 |

686 | 687 | Most browsers implement a private browsing or incognito mode, 688 | though they vary significantly in what functionality they provide and 689 | how that protection is described to users [[WU-PRIVATE-BROWSING]]. 690 | 691 | One commonality is that they provide a different set of state 692 | than the browser's 'normal' state. 693 | 694 | Do features in this spec provide information that would allow for the 695 | correlation of a single user's activity across normal and private 696 | browsing / incognito modes? Do features in the spec result in 697 | information being written to a user’s host that would persist 698 | after a private browsing / incognito mode session ends? 699 | 700 | There has been research into both: 701 | 702 | * Detecting whether a user agent is in private browsing mode [[RIVERA]] 703 | using non-standardized methods such as [window.requestFileSystem()](https://developer.mozilla.org/en-US/docs/Web/API/Window/requestFileSystem). 704 | * Using features to fingerprint a browser and correlate private and 705 | non-private mode sessions for a given user. [[OLEJNIK-PAYMENTS]] 706 | 707 | Spec authors should avoid, as much as possible, making the presence of 708 | private browsing mode detectable to sites. [[DESIGN-PRINCIPLES#do-not-expose-use-of-private-browsing-mode]] 709 | 710 |

711 | Does this specification have both "Security Considerations" and "Privacy 712 | Considerations" sections? 713 |

714 | 715 | Specifications should have both "Security Considerations" and "Privacy 716 | Considerations" sections to help implementers and web developers 717 | understand the risks that a feature presents and to ensure that 718 | adequate mitigations are in place. While your answers to the 719 | questions in this document will inform your writing of those sections, 720 | do not merely copy this questionnaire into those sections. Instead, 721 | craft language specific to your specification that will be helpful to 722 | implementers and web developers. 723 | 724 | [[RFC6973]] is an excellent resource to consult when considering 725 | privacy impacts of your specification, particularly Section 7 of 726 | RFC6973. [[RFC3552]] provides general advice as to writing Security 727 | Considerations sections, and Section 5 of RFC3552 has specific requirements. 728 | 729 | Generally, these sections should contain clear descriptions of the 730 | privacy and security risks for the features your spec introduces. It is also 731 | appropriate to document risks that are mitigated elsewhere in the 732 | specification and to call out details that, if implemented 733 | other-than-according-to-spec, are likely to lead to vulnerabilities. 734 | 735 | If it seems like none of the features in your specification have security or 736 | privacy impacts, say so in-line, e.g.: 737 | 738 | > There are no known security impacts of the features in this specification. 739 | 740 | Be aware, though, that most specifications include features that have at least some 741 | impact on the fingerprinting surface of the browser. If you believe 742 | your specification is an outlier, justifying that claim is in 743 | order. 744 | 745 |

746 | Do features in your specification enable origins to downgrade default 747 | security protections? 748 |

749 | 750 | Do features in your spec 751 | enable an origin to opt-out of security settings 752 | in order to accomplish something? 753 | If so, 754 | in what situations do these features allow such downgrading, and why? 755 | 756 | Can this be avoided in the first place? 757 | If not, are mitigations in place 758 | to make sure this downgrading doesn’t dramatically increase risk to users? 759 | For instance, 760 | [[PERMISSIONS-POLICY]] defines a mechanism 761 | that can be used by sites to prevent untrusted <{iframe}>s from using such a feature. 762 | 763 |
764 | The {{Document/domain|document.domain}} setter can be used to relax the [=same-origin policy=]. 765 | The most effective mitigation 766 | would be to remove it from the platform (see [[#drop-feature]]), 767 | though that 768 | [may be challenging](https://github.com/mikewest/deprecating-document-domain/) 769 | for compatibility reasons. 770 |
771 | 772 |
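For concreteness, the downgrade itself is a one-liner; this sketch assumes a page on a hypothetical subdomain of example.com:

```typescript
// Sketch: run on https://a.example.com, this widens the effective origin
// to example.com. Any other example.com frame that does the same can now
// script this page, which is why the setter is deprecated.
document.domain = "example.com";
```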
773 | The Fullscreen API enables 774 | a (portion of a) web page 775 | to expand to fill the display. [[FULLSCREEN]] 776 | This can hide 777 | several User Agent user interface elements 778 | which help users to understand 779 | what web page they are visiting 780 | and whether or not the User Agent believes they are [safe](https://w3ctag.github.io/design-principles/#safe-to-browse). 781 | 782 | Several mitigations are defined in the specification 783 | and are widely deployed in implementations. 784 | For instance, the Fullscreen API is a [=policy-controlled feature=], 785 | which enables sites to disable the API in <{iframe}>s. 786 | [[FULLSCREEN#security-and-privacy-considerations]] encourages implementations 787 | to display an overlay which informs the user that they have entered fullscreen, 788 | and to advertise a simple mechanism to exit fullscreen (typically the `Esc` key). 789 |
790 | 791 |

792 | What happens when a document that uses your feature is kept alive in BFCache 793 | (instead of getting destroyed) after navigation, and potentially gets reused 794 | on future navigations back to the document? 795 |

796 | 797 | After a user navigates away from a document, 798 | the document might stay around in a non-"[=Document/fully active=]" state 799 | and be kept in the "back/forward cache (BFCache)", 800 | and might be reused when the user navigates back to the document. 801 | From the user’s perspective, 802 | the non-[=Document/fully active=] document is already discarded 803 | and thus should not get updates/events that happen after they navigated away from it, 804 | especially privacy-sensitive information (e.g. geolocation). 805 | 806 | Also, as a document might be reused even after navigation, 807 | be aware that tying something to a document’s lifetime 808 | also means reusing it after navigations. 809 | If this is not desirable, 810 | consider listening to changes to the [=Document/fully active=] state 811 | and doing cleanup as necessary. 812 | 813 | For more detailed guidance on how to handle BFCached documents, 814 | see [[DESIGN-PRINCIPLES#support-non-fully-active]] and the [Supporting BFCached Documents](https://w3ctag.github.io/bfcache-guide/) guide. 815 | 816 | Note: It is possible for a document to become non-[=Document/fully active=] for other reasons not related to BFCaching, 817 | such as when the iframe holding the document [=becomes disconnected=]. 818 | Our advice is that all non-[=Document/fully active=] documents should be treated the same way. 819 | The only difference is that BFCached documents might become [=Document/fully active=] again, 820 | whereas documents in detached iframes will stay inactive forever. 821 | Thus, we suggest paying extra attention to the BFCache case. 822 | 823 |
824 | Screen WakeLock API [releases the wake lock](https://w3c.github.io/screen-wake-lock/#handling-document-loss-of-full-activity) 825 | when a document becomes no longer fully active. 826 |
827 |
828 | [=Sticky activation=] is determined by the "last activation timestamp", 829 | which is tied to a document. 830 | This means after a user triggers activation once on a document, 831 | the document will have sticky activation forever, 832 | even after the user navigated away and back to it again. 833 |
834 | 835 |
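A page-side sketch of that cleanup advice, assuming a geolocation watch as the privacy-sensitive update source; the `pagehide` and `pageshow` events (the latter with its `persisted` flag) approximate transitions out of and back into the fully active state:

```typescript
// Sketch: stop privacy-sensitive updates when the document may become
// non-fully active (e.g. entering BFCache), and resume only on restore.
let watchId: number | null = null;

window.addEventListener("pagehide", () => {
  if (watchId !== null) {
    navigator.geolocation.clearWatch(watchId);
    watchId = null;
  }
});

window.addEventListener("pageshow", (event) => {
  if (event.persisted) {
    // Restored from BFCache: re-request rather than replaying stale state.
    watchId = navigator.geolocation.watchPosition((position) => {
      console.log("position update", position.coords);
    });
  }
});
```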

836 | What happens when a document that uses your feature gets disconnected? 837 |

838 | If the iframe element containing a document [=becomes disconnected=], 839 | the document will no longer be [=Document/fully active=]. 840 | The document will never become fully active again, 841 | because if the iframe element [=becomes connected=] again, it will load a new document. 842 | The document is gone from the user's perspective, 843 | and should be treated as such by your feature as well. 844 | You may follow the guidelines for BFCache mentioned above, 845 | as we expect BFCached and detached documents to be treated the same way, 846 | with the only difference being that BFCached documents can become [=Document/fully active=] again. 847 | 848 |

849 | Does your spec define when and how new kinds of errors should be raised? 850 |

851 | 852 | Error handling, 853 | and what conditions constitute error states, 854 | can be the source of unintended information leaks and privacy vulnerabilities. 855 | Triggering an error, 856 | what information is included with (or learnable by) the error, 857 | and which parties in an application can learn about the error can all 858 | affect (or weaken) user privacy. 859 | Proposal authors should carefully think 860 | through each of these dimensions to ensure that user privacy and security are 861 | not harmed through error handling. 862 | 863 | A partial list of how error definitions and error handling can put 864 | users at risk includes: 865 | 866 | - If your spec defines an error state based on whether certain system resources 867 | are available, 868 | applications can use that error state as a probe to learn 869 | about the availability of those system resources. 870 | This can harm user privacy 871 | when user agents do not intend for applications to learn about those system 872 | resources (one mitigation is sketched below). 873 | - Specs often include information with error objects that are intended to help 874 | authors identify and debug issues in applications. 875 | Spec authors should 876 | carefully think through what information such debugging information exposes, 877 | and whether (and which) actors on a page are able to access that information. 878 | 879 |
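One defensive pattern for the first risk, sketched here with a hypothetical `probeDevice()` helper (not a real API), is to collapse distinct internal failures into a single coarse error, so that error variety cannot be used as a probe:

```typescript
// Sketch: collapse distinct internal failures into one coarse error.
// probeDevice() stands in for internal platform plumbing (assumption).
declare function probeDevice(): Promise<unknown>;

async function requestDevice(): Promise<unknown> {
  try {
    return await probeDevice();
  } catch {
    // Absent, busy, or blocked by policy: the page sees the same error
    // in every case, and cannot distinguish the underlying cause.
    throw new DOMException("Device unavailable", "NotFoundError");
  }
}
```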

880 | Does your feature allow sites to learn about the user's use of assistive technology? 881 |

882 | The Web is designed to work for everyone, and Web standards should be designed 883 | for people using assistive technology (AT) just as much as for users relying 884 | on mice, keyboards, and touch screens. Accessibility and universal access 885 | are core to the W3C's mission. 886 | 887 | Specification authors should keep in mind that Web users who rely on 888 | assistive technology face some unique risks when using the Web. 889 | The use of assistive technologies may cause those Web users to stand 890 | out among other Web users, increasing the risk of unwanted reidentification 891 | and privacy harm. Similarly, some Web site operators may try to 892 | discriminate against Web users who rely on assistive technology. 893 | 894 | Feature designers and spec authors should therefore be thoughtful and 895 | careful to limit whether, and what, websites can learn about the use of assistive 896 | technologies. Spec authors must minimize the explicit and implicit information that 897 | their features reveal about assistive technology use. 898 | Examples of explicit information about assistive technology 899 | include device identifiers or model names. Examples of implicit 900 | information about the use of assistive technology might include 901 | user interaction patterns that are unlikely to be generated by a 902 | mouse, keyboard, or touch screen. 903 | 904 |

905 | [[wai-aria-1.3]] defines additional markup authors can use to make 906 | their pages easier to navigate with assistive technology. The spec 907 | includes the <{html-global/aria-hidden}> 908 | attribute, that site authors can use to indicate that certain content 909 | should be hidden from assistive technology. 910 | 911 | A malicious site author might 912 | abuse the `aria-hidden` attribute to learn if a user is using assistive 913 | technology, possibly by revealing certain page content to assistive technology, 914 | while showing very different page content to other users. A malicious 915 | site author could then possibly infer from the user's behavior which 916 | content the user was interacting with, and so whether assistive technology 917 | was being used. 918 |

919 | 920 |

921 | What should this questionnaire have asked? 922 |

923 | 924 | This questionnaire is not exhaustive. 925 | After completing a privacy review, 926 | it may be that 927 | there are privacy aspects of your specification 928 | that a strict reading of, and response to, this questionnaire 929 | would not have revealed. 930 | If this is the case, 931 | please convey those privacy concerns, 932 | and indicate if you can think of improved or new questions 933 | that would have covered this aspect. 934 | 935 | Please consider [filing an issue](https://github.com/w3c/security-questionnaire/issues/new) 936 | to let us know what the questionnaire should have asked. 937 | 938 |

Threat Models

939 | 940 | To consider security and privacy, it is convenient to think in terms of threat 941 | models, a way to illuminate the possible risks. 942 | 943 | There are some concrete privacy concerns that should be considered when 944 | developing a feature for the web platform [[RFC6973]]: 945 | 946 | * Surveillance: Surveillance is the observation or monitoring of an 947 | individual's communications or activities. 948 | * Stored Data Compromise: End systems that do not take adequate measures to 949 | secure stored data from unauthorized or inappropriate access expose individuals to risk. 950 | * Intrusion: Intrusion consists of invasive acts that disturb or interrupt 951 | one's life or activities. 952 | * Misattribution: Misattribution occurs when data or communications related 953 | to one individual are attributed to another. 954 | * Correlation: Correlation is the combination of various pieces of 955 | information that relate to an individual or that obtain that characteristic 956 | when combined. 957 | * Identification: Identification is the linking of information to a 958 | particular individual to infer an individual's identity or to allow the 959 | inference of an individual's identity. 960 | * Secondary Use: Secondary use is the use of collected information about an 961 | individual without the individual's consent for a purpose different from 962 | that for which the information was collected. 963 | * Disclosure: Disclosure is the revelation of information about an 964 | individual that affects the way others judge the individual. 965 | * Exclusion: Exclusion is the failure to allow individuals to know about 966 | the data that others have about them and to participate in its handling 967 | and use. 968 | 969 | In the mitigations section, this document outlines a number of techniques 970 | that can be applied to mitigate these risks. 971 | 972 | Enumerated below are some broad classes of threats that should be 973 | considered when developing a web feature. 974 | 975 |

976 | Passive Network Attackers 977 |

978 | 979 | A passive network attacker has read-access to the bits going over 980 | the wire between users and the servers they're communicating with. She can't 981 | *modify* the bytes, but she can collect and analyze them. 982 | 983 | Due to the decentralized nature of the internet, and the general level of 984 | interest in user activity, it's reasonable to assume that practically every 985 | unencrypted bit that's bouncing around the network of proxies, routers, and 986 | servers you're using right now is being read by someone. It's equally likely 987 | that some of these attackers are doing their best to understand the encrypted 988 | bits as well, including storing encrypted communications for later 989 | cryptanalysis (though that requires significantly more effort). 990 | 991 | * The IETF's "Pervasive Monitoring Is an Attack" document [[RFC7258]] is 992 | useful reading, outlining some of the impacts on privacy that this 993 | assumption entails. 994 | 995 | * Governments aren't the only concern; your local coffee shop is likely to 996 | be gathering information on its customers, your ISP at home is likely to 997 | be doing the same. 998 | 999 |

1000 | Active Network Attackers 1001 |

1002 | 1003 | An active network attacker has both read- and write-access to the 1004 | bits going over the wire between users and the servers they're communicating 1005 | with. She can collect and analyze data, but also modify it in-flight, 1006 | injecting and manipulating JavaScript, HTML, and other content at will. 1007 | This is more common than you might expect, for both benign and malicious 1008 | purposes: 1009 | 1010 | * ISPs and caching proxies regularly cache and compress images before 1011 | delivering them to users in an effort to reduce data usage. This can be 1012 | especially useful for users on low-bandwidth, high-latency devices like 1013 | phones. 1014 | 1015 | * ISPs also regularly inject JavaScript [[COMCAST]] and other identifiers 1016 | [[VERIZON]] for less benign purposes. 1017 | 1018 | * If your ISP is willing to modify substantial amounts of traffic flowing 1019 | through it for profit, it's difficult to believe that state-level 1020 | attackers will remain passive. 1021 | 1022 |

1023 | Same-Origin Policy Violations 1024 |

1025 | 1026 | The same-origin policy is the cornerstone of security on the web; 1027 | one origin should not have direct access to another origin's data (the policy 1028 | is more formally defined in Section 3 of [[RFC6454]]). A corollary to this 1029 | policy is that an origin should not have direct access to data that isn't 1030 | associated with *any* origin: the contents of a user's hard drive, 1031 | for instance. Various kinds of attacks bypass this protection in one way or 1032 | another. For example: 1033 | 1034 | * Cross-site scripting attacks involve an 1035 | attacker tricking an origin into executing attacker-controlled code in 1036 | the context of a target origin. 1037 | 1038 | * Cross-site request forgery attacks trick user 1039 | agents into exerting a user's ambient authority on sites where they've 1040 | logged in by submitting requests on their behalf. 1041 | 1042 | * Data leakage occurs when bits of information are inadvertently made 1043 | available cross-origin, either explicitly via CORS headers [[CORS]], 1044 | or implicitly, via side-channel attacks like [[TIMING]]. 1045 | 1046 |

1047 | Third-Party Tracking 1048 |

1049 | 1050 | Part of the power of the web is its ability for a page to pull in content 1051 | from other third parties — from images to JavaScript — to enhance the content 1052 | and/or a user's experience of the site. However, when a page pulls in 1053 | content from third parties, it inherently leaks some information to third 1054 | parties — referrer information and other information that may be used to track 1055 | and profile a user. This includes the fact that cookies go back to the 1056 | domain that initially stored them, allowing for cross-origin tracking. 1057 | Moreover, third parties can gain execution power through third-party 1058 | JavaScript being included by a webpage. While pages can take steps to 1059 | mitigate the risks of third-party content and browsers may differentiate 1060 | how they treat first- and third-party content from a given page, the risk of 1061 | new functionality being executed by third parties rather than the first-party 1062 | site should be considered in the feature development process. 1063 | 1064 | The simplest example is injecting a link to a site that behaves differently 1065 | under specific conditions, for example based on whether the user is or is not 1066 | logged in to the site. This may reveal that the user has an account on a site. 1067 | 1068 |

1069 | Legitimate Misuse 1070 |

1071 | 1072 | Even when powerful features are made available to developers, it does not 1073 | mean that all uses are always a good idea, or justified; in fact, 1074 | data privacy regulations around the world may even put limits on certain uses 1075 | of data. In a first-party context, a legitimate website is potentially 1076 | able to interact with powerful features to learn about user behavior or 1077 | habits. For example: 1078 | 1079 | * Tracking the user while browsing the website via mechanisms such as mouse 1080 | move tracking 1081 | 1082 | * Behavioral profiling of the user based on usage patterns 1083 | 1084 | * Accessing powerful features that enable the first party to learn about 1085 | the user's system, the user themselves, or the user's surroundings, such 1086 | as could be done through a webcam or sensors 1087 | 1088 | This point is admittedly different from the others, and underlines that even if 1089 | something may be possible, it does not mean it should always be done, 1090 | including the need for considering a privacy impact assessment or even an 1091 | ethical assessment. When designing features with security and privacy 1092 | in mind, both use and misuse cases should be in scope. 1093 | 1094 |

1095 | Mitigation Strategies 1096 |

1097 | 1098 | To mitigate the security and privacy risks you’ve identified in your 1099 | specification, 1100 | you may want to apply one or more of the mitigations described below. 1101 | 1102 |

1103 | Data Minimization 1104 |

1105 | 1106 | Minimization is a strategy that involves exposing as little information to 1107 | other communication partners as is required for a given operation to 1108 | complete. More specifically, it requires not providing access to more 1109 | information than was apparent in the user-mediated access, or allowing the 1110 | user some control over exactly which information is provided. 1111 | 1112 | For example, if the user has provided access to a given file, the object 1113 | representing that should not make it possible to obtain information about 1114 | that file's parent directory and its contents, as that is clearly not what is 1115 | expected. 1116 | 1117 | In the context of data minimization it is natural to ask what data is passed 1118 | around between the different parties, how persistent the data items and 1119 | identifiers are, and whether there are correlation possibilities between 1120 | different protocol runs. 1121 | 1122 | For example, the W3C Device APIs Working Group has defined a number of 1123 | requirements in their Privacy Requirements document. [[DAP-PRIVACY-REQS]] 1124 | 1125 | Data minimization is applicable to specification authors and implementers, as 1126 | well as to those deploying the final service. 1127 | 1128 | As an example, consider mouse events. When a page is loaded, the application 1129 | has no way of knowing whether a mouse is attached, what type of mouse it is 1130 | (e.g., make and model), what kind of capabilities it exposes, how many are 1131 | attached, and so on. Only when the user decides to use the mouse — presumably 1132 | because it is required for interaction — does some of this information become 1133 | available. And even then, only a minimum of information is exposed: you could 1134 | not know whether it is a trackpad, for instance, and the fact that it may have 1135 | a right button is only exposed if it is used. For instance, the Gamepad API 1136 | makes use of this data minimization capability. It is impossible for a Web game 1137 | to know if the user agent has access to gamepads, how many there are, what 1138 | their capabilities are, etc. It is simply assumed that if the user wishes to 1139 | interact with the game through the gamepad then she will know when to action 1140 | it — and actioning it will provide the application with all the information 1141 | that it needs to operate (but no more than that). 1142 | 1143 | The way in which the functionality is supported for the mouse is simply by 1144 | only providing information on the mouse's behaviour when certain events take 1145 | place. The approach is therefore to expose event handling (e.g., triggering 1146 | on click, move, button press) as the sole interface to the device; a code sketch of this event-driven pattern follows below. 1147 | 1148 | Two specifications that have minimized the data their features expose 1149 | are: 1150 | 1151 | * [[BATTERY-STATUS]] The user agent should not expose high-precision readouts 1152 | * [[GENERIC-SENSOR]] Limit maximum sampling frequency and 1153 | reduce accuracy 1154 | 1155 |
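In code, the event-driven pattern described above reduces to reacting only to user-initiated events; this sketch logs what becomes visible only after the user actually presses a button on a gamepad:

```typescript
// Sketch: nothing about gamepads is knowable up front; the connection
// event, fired on user interaction, is the only discovery channel.
window.addEventListener("gamepadconnected", (event) => {
  const pad = event.gamepad;
  console.log(`gamepad ${pad.index} active with ${pad.buttons.length} buttons`);
});
```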

1156 | ## Default Privacy Settings ## 1157 |

1158 | 1159 | Users often do not change defaults; as a result, it is important that the 1160 | default mode of a specification minimizes the amount, identifiability, and 1161 | persistence of the data and identifiers exposed. This is particularly true 1162 | if a protocol comes with flexible options so that it can be tailored to 1163 | specific environments. 1164 | 1165 |
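One existing example of such a default (shown here as an illustrative sketch, not a requirement of this document) is the [[GEOLOCATION]] API's `enableHighAccuracy` option, which defaults to `false`, so that callers must opt in to more precise, and therefore more sensitive, readings:

```ts
// Sketch: a privacy-preserving default. enableHighAccuracy defaults to
// false; sites must explicitly opt in to more precise readings.
navigator.geolocation.getCurrentPosition(
  (position) => {
    console.log(position.coords.latitude, position.coords.longitude);
  },
  (error) => console.error(error.message),
  { enableHighAccuracy: false } // the default, spelled out for clarity
);
```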

1166 | ## Explicit user mediation ## 1167 |

1168 | 1169 | If the security or privacy risk of a feature cannot otherwise be mitigated in 1170 | a specification, optionally allowing an implementer to prompt a user may 1171 | be the best mitigation available, with the understanding that it does not entirely 1172 | remove the privacy risk. If the specification does not allow for the implementer to 1173 | prompt, it may result in divergent implementations by different user agents, 1174 | as some user agents choose to implement a more privacy-friendly version. 1175 | 1176 | It is possible that the risk of a feature cannot be mitigated because the 1177 | risk is endemic to the feature itself. For instance, [[GEOLOCATION]] 1178 | reveals a user’s location intentionally; user agents generally gate access to 1179 | the feature on a permission prompt which the user may choose to accept. This 1180 | risk is also present, and should be accounted for, in features that expose 1181 | personal data or identifiers. 1182 | 1183 | Designing such prompts is difficult, as is determining how long a granted 1184 | permission should last. 1185 | 1186 | Often, the best prompt is one that is clearly tied to a user action, like the 1187 | file picker: in response to a user action, the picker is brought 1188 | up, and the user grants an individual site access to one specific file 1189 | (sketched at the end of this section). 1190 | Generally speaking, the duration and timing of the prompt should be inversely 1191 | proportional to the risk posed by the data exposed. In addition, the prompt 1192 | should consider issues such as: 1193 | 1194 | * How should permission requests be scoped, especially when requested by an 1195 | embedded third-party iframe? 1196 | * Should persistence be based on the pair of top-level/embedded origins or a 1197 | different scope? 1198 | * How can it be ensured that the prompt occurs in the context in which the 1199 | data is required, at a time when it is clear to the user why the prompt is occurring? 1200 | * How should the implications of the permission be explained before prompting the user, in a 1201 | way that is accessible and localized -- _who_ is asking, _what_ are they 1202 | asking for, and _why_ do they need it? 1203 | * What happens if the user rejects the request at the time of the prompt, or 1204 | later changes their mind and revokes access? 1205 | 1206 | These prompts should also include considerations for what, if any, control a 1207 | user has over their data after it has been shared with other parties. For 1208 | example, are users able to determine what information was shared with other 1209 | parties? 1210 | 1211 |
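Returning to the file-picker example, the sketch below shows access mediated entirely by a user gesture; `showOpenFilePicker()` (from the File System Access API) is not available in all engines, so treat this as illustrative rather than universal:

```ts
// Sketch: access mediated by an explicit user action. The picker itself
// serves as the permission prompt: it opens only in response to the
// click, and the site sees only the single file the user chose.
const button = document.querySelector("#open-file") as HTMLButtonElement;

button.addEventListener("click", async () => {
  // Cast because showOpenFilePicker() is not yet in all engines/typings.
  const [handle] = await (window as any).showOpenFilePicker();
  const file: File = await handle.getFile();
  console.log(`User granted access to: ${file.name}`);
});
```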

1212 | ## Explicitly restrict the feature to first party origins ## 1213 |

1214 | 1215 | As described in the "Third-Party Tracking" section, web pages mix 1216 | first- and third-party content into a single application, which 1217 | introduces the risk that third-party content can misuse the same set of web 1218 | features as first-party content. 1219 | 1220 | Authors should explicitly specify a feature's scope of availability: 1221 | 1222 | * Whether a feature should be made available to embedded third parties -- and 1223 | often first parties should be able to explicitly control that (using 1224 | iframe attributes or Permissions Policy; see the sketch after this list). 1225 | * Whether a feature should be available in the background or only in the 1226 | top-most, visible tab. 1227 | * Whether a feature should be available to offline service workers. 1228 | * Whether events will be fired in multiple browsing contexts simultaneously. 1229 | 1230 | Making a feature available to third parties should be optional for a 1231 | conforming implementation. 1232 |
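As a sketch of the first-party control mentioned in the first bullet above, a page can set the iframe `allow` attribute (the Permissions Policy integration) to deny an embedded third party access to specific features; the widget URL below is only a placeholder:

```ts
// Sketch: a first party explicitly denying an embedded third party
// access to powerful features via the iframe "allow" attribute
// (Permissions Policy). The widget URL is a placeholder.
const frame = document.createElement("iframe");
frame.src = "https://third-party.example/widget";
frame.allow = "geolocation 'none'; camera 'none'; microphone 'none'";
document.body.append(frame);
```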

1234 | ## Secure Contexts ## 1235 |

1236 | 1237 | If the primary risk that you’ve identified in your specification is the 1238 | threat posed by an [=active network attacker=], offering a feature to an 1239 | insecure origin is the same as offering that feature to every origin, because 1240 | the attacker can inject frames and code at will. Requiring an encrypted and 1241 | authenticated connection in order to use a feature can mitigate this kind of 1242 | risk (see the sketch at the end of this section). 1243 | 1244 | Secure contexts also protect against [=passive network attackers=]. For 1245 | example, if a page uses the Geolocation API and sends the sensor-provided 1246 | latitude and longitude back to the server over an insecure connection, then 1247 | any passive network attacker can learn the user's location, without any 1248 | feasible path to detection by the user or others. 1249 | 1250 | However, requiring a secure context is not sufficient to mitigate many 1251 | privacy risks, or even security risks from threat actors other than active 1252 | network attackers. 1253 |
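In specifications this requirement is typically expressed with the `[SecureContext]` WebIDL extended attribute; at the script level, the effect can be sketched as follows (`window.isSecureContext` is the standard check):

```ts
// Sketch: gating a feature on a secure context. isSecureContext is true
// for https: (and localhost) pages, false for plain http:.
if (window.isSecureContext) {
  // An active network attacker cannot tamper with code delivered to
  // this context in transit, so exposing the feature here is safer.
  navigator.geolocation.getCurrentPosition((pos) => {
    console.log(pos.coords.latitude, pos.coords.longitude);
  });
} else {
  console.warn("Feature unavailable: insecure context");
}
```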

1255 | ## Drop the feature ## 1256 |

1257 | 1258 | Possibly the simplest way 1259 | to mitigate potential negative security or privacy impacts of a feature 1260 | is to drop the feature, 1261 | though you should keep in mind that some security or privacy risks 1262 | may be removed or mitigated 1263 | by adding features to the platform. 1264 | Every feature in a specification 1265 | should be seen as 1266 | potentially adding security and/or privacy risk 1267 | until proven otherwise. 1268 | Discussing dropping the feature 1269 | as a mitigation for security or privacy impacts 1270 | is a helpful exercise, 1271 | as it illuminates the tradeoffs 1272 | between the feature, 1273 | whether it exposes the minimum amount of data necessary, 1274 | and other possible mitigations. 1275 | 1276 | Consider also the cumulative effect 1277 | of adding features 1278 | on users' overall impression 1279 | that [it is safe to visit a web page](https://w3ctag.github.io/design-principles/#safe-to-browse). 1280 | Anything that complicates users' understanding 1281 | that it is safe to visit websites, 1282 | or that expands what users need to understand 1283 | about the safety of the web 1284 | (e.g., adding features that are less safe), 1285 | reduces users' ability 1286 | to act on that understanding of safety, 1287 | or to act in ways that correctly reflect the safety that exists. 1288 | 1289 | Every specification should seek to be as small as possible, even if only 1290 | to minimize its security and privacy attack surface. 1291 | By doing so we reduce the overall attack surface 1292 | not only of a particular feature, but of a module (a related set of 1293 | features), a specification, and the web platform as a whole. 1294 | 1295 | Examples: 1296 | 1297 | * [Mozilla](https://bugzilla.mozilla.org/show_bug.cgi?id=1313580) and 1298 | [WebKit](https://bugs.webkit.org/show_bug.cgi?id=164213) 1299 | dropped the Battery Status API 1300 | * [Mozilla dropped](https://bugzilla.mozilla.org/show_bug.cgi?id=1359076) 1301 | the devicelight, deviceproximity and userproximity events 1302 |

1304 | ## Making a privacy impact assessment ## 1305 |

1306 | 1307 | Some features potentially supply sensitive data, and it is 1308 | the responsibility of the end developer, system owner, or manager to realize 1309 | this and act accordingly in the design of their system. Some uses may 1310 | warrant conducting a privacy impact assessment, especially when data 1311 | relating to individuals may be processed. 1312 | 1313 | Specifications that include features that expose sensitive data should include 1314 | recommendations that websites and applications adopting the API conduct a 1315 | privacy impact assessment of the data that they collect. 1316 | 1317 | One specification that does this is: 1318 | 1319 | * [[GENERIC-SENSOR]], which advises considering a privacy impact 1320 | assessment 1321 | 1322 | Documenting these impacts is important for organizations, although it should 1323 | be noted that there are limitations to putting this onus on organizations. 1324 | Research has shown that sites often do not comply with security/privacy 1325 | requirements in specifications. For example, in [[DOTY-GEOLOCATION]], it was 1326 | found that none of the studied websites informed users of their privacy 1327 | practices before prompting for location. 1328 | 1329 | # Appendix A: Markdown Template # {#template} 1330 | 1331 | See this rendered at 1332 | [https://github.com/w3c/security-questionnaire/blob/main/questionnaire.markdown](https://github.com/w3c/security-questionnaire/blob/main/questionnaire.markdown). 1333 | 1334 | <pre class=include-code>
1335 | path: questionnaire.markdown
1336 | highlight: markdown
1337 | </pre>
1338 | 1339 | # Acknowledgements #
1340 | 1341 | Many thanks to 1342 | Alex Russell, 1343 | Alice Boxhall, 1344 | Anne van Kesteren, 1345 | Chris Cunningham, 1346 | Coralie Mercier, 1347 | Corentin Wallez, 1348 | Daniel Appelquist, 1349 | David Baron, 1350 | Domenic Denicola, 1351 | Dominic Battre, 1352 | Hadley Beeman, 1353 | Jeffrey Yasskin, 1354 | Jeremy Roman, 1355 | Jonathan Kingston, 1356 | Kenneth Rohde Christiansen, 1357 | Marcos Caceres, 1358 | Marijn Kruisselbrink, 1359 | Mark Nottingham, 1360 | Martin Thomson, 1361 | Michael(tm) Smith, 1362 | Mike Perry, 1363 | Nick Doty, 1364 | Piotr Bialecki, 1365 | Robert Linder, 1366 | Rossen Atanassov, 1367 | Samuel Weiler, 1368 | Sangwhan Moon, 1369 | Tantek Çelik, 1370 | Thomas Steiner, 1371 | Wendy Seltzer, 1372 | Yves Lafon, 1373 | and 1374 | the many current and former participants in PING, the Privacy Working Group, 1375 | the Security Interest Group, and the TAG 1376 | for their contributions to this document. 1377 | 1378 | Special thanks to 1379 | Rakina Zata Amni 1380 | for her edits, which helped spec authors take the bfcache into account. 1382 | 1383 | Mike West 1384 | wrote the initial version of this document 1385 | and edited it for a number of years. 1386 | Yan Zhu 1387 | took over from Mike. 1388 | Jason Novak 1389 | and 1390 | Lukasz Olejnik 1391 | took it over from her, 1392 | and the TAG as a whole evolved and published the document for several years 1393 | before handing it over to the current maintainers. 1394 | The current editors are indebted to them for all of their hard work. 1395 | We hope we haven't made it (much) worse. 1396 | 1397 | <pre class=anchors>
1398 | urlPrefix: https://www.w3.org/TR/encrypted-media/; spec: ENCRYPTED-MEDIA
1399 |     text: content decryption module; url: #dfn-cdm; type: dfn
1400 | urlPrefix: https://privacycg.github.io/storage-access/; spec: STORAGE-ACCESS
1401 |     text: first-party-site context; url: #first-party-site-context; type: dfn
1402 |     text: third-party context; url: #third-party-context; type: dfn
1403 | urlPrefix: https://html.spec.whatwg.org/multipage/; spec: HTML
1404 |     text: PDF viewer plugin objects; url: system-state.html#pdf-viewer-plugin-objects; type: dfn
1405 | urlPrefix: https://www.w3.org/TR/wai-aria-1.3/; spec: wai-aria-1.3
1406 |     text: aria-hidden; for: html-global; url: #aria-hidden; type: element-attr
1407 | </pre>
1408 | 1409 | 1415 | 1416 | <pre class=biblio>
1417 | {
1418 |   "ADDING-PERMISSION": {
1419 |     "href": "https://github.com/w3c/adding-permissions",
1420 |     "title": "Adding another permission? A guide",
1421 |     "authors": [ "Nick Doty" ],
1422 |     "publisher": "W3C Privacy Working Group"
1423 |   },
1424 |   "COMCAST": {
1425 |     "href": "https://arstechnica.com/tech-policy/2014/09/why-comcasts-javascript-ad-injections-threaten-security-net-neutrality/",
1426 |     "title": "Comcast Wi-Fi serving self-promotional ads via JavaScript injection",
1427 |     "publisher": "Ars Technica",
1428 |     "authors": [ "David Kravets" ]
1429 |   },
1430 |   "DOTY-GEOLOCATION": {
1431 |     "href": "https://escholarship.org/uc/item/0rp834wf",
1432 |     "title": "Privacy Issues of the W3C Geolocation API",
1433 |     "authors": [ "Nick Doty", "Deirdre K. Mulligan", "Erik Wilde" ],
1434 |     "publisher": "UC Berkeley School of Information"
1435 |   },
1436 |   "GYROSPEECHRECOGNITION": {
1437 |     "href": "https://www.usenix.org/system/files/conference/usenixsecurity14/sec14-paper-michalevsky.pdf",
1438 |     "title": "Gyrophone: Recognizing Speech from Gyroscope Signals",
1439 |     "publisher": "Proceedings of the 23rd USENIX Security Symposium",
1440 |     "authors": [ "Yan Michalevsky", "Dan Boneh", "Gabi Nakibly" ]
1441 |   },
1442 |   "HOMAKOV": {
1443 |     "href": "http://homakov.blogspot.de/2014/01/using-content-security-policy-for-evil.html",
1444 |     "title": "Using Content-Security-Policy for Evil",
1445 |     "authors": [ "Egor Homakov" ]
1446 |   },
1447 |   "OLEJNIK-ALS": {
1448 |     "href": "https://blog.lukaszolejnik.com/stealing-sensitive-browser-data-with-the-w3c-ambient-light-sensor-api/",
1449 |     "title": "Stealing sensitive browser data with the W3C Ambient Light Sensor API",
1450 |     "publisher": "Lukasz Olejnik",
1451 |     "authors": [ "Lukasz Olejnik" ]
1452 |   },
1453 |   "OLEJNIK-BATTERY": {
1454 |     "href": "https://eprint.iacr.org/2015/616",
1455 |     "title": "The leaking battery: A privacy analysis of the HTML5 Battery Status API",
1456 |     "publisher": "Cryptology ePrint Archive, Report 2015/616",
1457 |     "authors": [ "Lukasz Olejnik", "Gunes Acar", "Claude Castelluccia", "Claudia Diaz" ]
1458 |   },
1459 |   "OLEJNIK-PAYMENTS": {
1460 |     "href": "https://blog.lukaszolejnik.com/privacy-of-web-request-api/",
1461 |     "title": "Privacy of Web Request API",
1462 |     "authors": [ "Lukasz Olejnik" ],
1463 |     "publisher": "Lukasz Olejnik"
1464 |   },
1465 |   "RIVERA": {
1466 |     "href": "https://gist.github.com/jherax/a81c8c132d09cc354a0e2cb911841ff1",
1467 |     "title": "Detect if a browser is in Private Browsing mode",
1468 |     "authors": [ "David Rivera" ],
1469 |     "publisher": "David Rivera"
1470 |   },
1471 |   "TIMING": {
1472 |     "href": "https://media.blackhat.com/us-13/US-13-Stone-Pixel-Perfect-Timing-Attacks-with-HTML5-WP.pdf",
1473 |     "title": "Pixel Perfect Timing Attacks with HTML5",
1474 |     "authors": [ "Paul Stone" ],
1475 |     "publisher": "Context Information Security"
1476 |   },
1477 |   "VERIZON": {
1478 |     "href": "https://adage.com/article/digital/verizon-target-mobile-subscribers-ads/293356",
1479 |     "title": "Verizon looks to target its mobile subscribers with ads",
1480 |     "publisher": "Advertising Age",
1481 |     "authors": [ "Mark Bergen", "Alex Kantrowitz" ]
1482 |   },
1483 |   "WU-PRIVATE-BROWSING": {
1484 |     "href": "https://dl.acm.org/citation.cfm?id=3186088",
1485 |     "title": "Your Secrets Are Safe: How Browsers' Explanations Impact Misconceptions About Private Browsing Mode",
1486 |     "publisher": "WWW '18 Proceedings of the 2018 World Wide Web Conference",
1487 |     "authors": [ "Yuxi Wu", "Panya Gupta", "Miranda Wei", "Yasemin Acar", "Sascha Fahl", "Blase Ur" ]
1488 |   },
1489 |   "YUBIKEY-ATTACK": {
1490 |     "href": "https://www.wired.com/story/chrome-yubikey-phishing-webusb/",
1491 |     "title": "Chrome Lets Hackers Phish Even 'Unphishable' YubiKey Users",
1492 |     "authors": [ "Andy Greenberg" ],
1493 |     "publisher": "Wired"
1494 |   }
1495 | }
1496 | </pre>
1497 | --------------------------------------------------------------------------------