├── .gitattributes ├── .github ├── ARTICLE_TEMPLATE.md ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── chore.md │ └── doc.md └── REVIEW_TEMPLATE.md ├── .gitignore ├── .remarkignore ├── .spelling ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE.txt ├── README.md ├── code └── framework │ ├── README.md │ ├── cypress │ ├── .gitignore │ ├── Makefile │ ├── README.md │ ├── cypress.json │ ├── cypress │ │ ├── fixtures │ │ │ └── example.json │ │ ├── integration │ │ │ ├── functional.spec.js │ │ │ ├── sanity.spec.js │ │ │ └── smoke.spec.js │ │ ├── plugins │ │ │ └── index.js │ │ └── support │ │ │ ├── commands.js │ │ │ └── index.js │ └── tsconfig.json │ ├── karate │ ├── .gitignore │ ├── Makefile │ ├── README.md │ └── src │ │ └── test │ │ ├── fixtures │ │ └── postman-post-payload.json │ │ └── karate │ │ ├── api-postman-contract.feature │ │ ├── api-postman-get.feature │ │ ├── api-postman-post.feature │ │ └── ui-amazon.feature │ ├── playwright │ ├── .gitignore │ ├── Makefile │ ├── README.md │ ├── package.json │ └── tests │ │ ├── functional.spec.js │ │ └── smoke.spec.js │ └── robot │ ├── .gitignore │ ├── Makefile │ ├── README.md │ ├── Resources │ ├── AmazonActions.robot │ ├── Assert.robot │ ├── CommonActions.robot │ ├── PageObjects │ │ ├── NavHeader.robot │ │ ├── ProductAddedToCart.robot │ │ ├── ProductDetail.robot │ │ └── SearchResults.robot │ └── StepDefinitions.robot │ └── Tests │ ├── Amazon.robot │ └── AmazonRefactored.robot ├── docs ├── .markdownlint.json ├── .nojekyll ├── _coverpage.md ├── _media │ ├── articles │ │ ├── Test-Strategy-(SADDEST-PPIRATEE).png │ │ ├── Test-Strategy-Overview-(SADDEST-PPIRATEE).png │ │ ├── modified-htsm1.png │ │ ├── test-pyramid-icecream.png │ │ ├── test-pyramid-rosie-circles.png │ │ ├── test-pyramid-trophy.png │ │ ├── test-pyramid.png │ │ ├── test-scope-1.png │ │ ├── test-scope-2.png │ │ └── test-types-layered.png │ ├── logo.png │ ├── notebook │ │ ├── specification-by-example-1.png │ │ ├── user-story-mapping-1.png │ │ └── user-story-mapping-2.jpg │ ├── supporters │ │ └── Equal_Experts.png │ └── talks │ │ └── J B Rainsberger - Integrated tests are a scam.png ├── _sidebar.md ├── _template.md ├── concepts │ ├── requirements.md │ ├── testability.md │ ├── tester-responsibilities.md │ ├── testing-purpose.md │ ├── what-testing-is-not.md │ └── what-testing-is.md ├── fields │ └── usability.md ├── index.html ├── index.md ├── next │ └── index.md ├── notebook │ ├── specification-by-example.md │ └── user-story-mapping.md ├── roles │ ├── agile-team-member.md │ ├── automation-tester.md │ ├── bug-hunter.md │ ├── certifications.md │ ├── coach.md │ ├── exploratory-tester.md │ ├── index.md │ ├── lead-tester.md │ ├── manual-tester.md │ ├── mentor.md │ ├── recruiter.md │ └── technical-tester.md ├── toolbox │ ├── charters.md │ ├── dev-methodologies.md │ ├── framework │ │ ├── cypress.md │ │ ├── karate.md │ │ ├── playwright.md │ │ └── robot.md │ ├── heuristics.md │ ├── mnemonics.md │ ├── note-taking.md │ ├── oracles.md │ ├── test-strategy.md │ └── tester-tools.md └── types │ ├── test-pyramid.md │ └── test-types.md ├── package-lock.json ├── package.json └── time-counter.txt /.gitattributes: -------------------------------------------------------------------------------- 1 | # Linguist overrides 2 | *.md linguist-detectable=true 3 | README.md linguist-detectable=false 4 | -------------------------------------------------------------------------------- /.github/ARTICLE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # TITLE_HERE 2 | 3 | > **tl;dr** 
SUMMARY_HERE 4 | 5 | ## Theory 6 | 7 | … 8 | 9 | ## Practice 10 | 11 | … 12 | 13 | ## Teachers 14 | 15 | - [Name](#link) 16 | 17 | ## Sources 18 | 19 | - [title](#link) 20 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: dialex 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: # Replace with a single custom sponsorship URL 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/chore.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Chore 3 | about: Fix or improve something related to the platform 4 | title: 'chore: SUMMARY' 5 | labels: 0.project 6 | assignees: dialex 7 | 8 | --- 9 | 10 | ### What I propose 11 | 12 | … 13 | 14 | ### Why it should be done 15 | 16 | … 17 | 18 | ### Admin 19 | 20 | - [ ] Choose the appropriate labels 21 | - [ ] Choose **Maintenance** project 22 | - [ ] Choose **v0.9** milestone 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/doc.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Doc 3 | about: Create a new documentation page or review an existing one 4 | title: 'doc: TITLE' 5 | labels: create, review 6 | assignees: dialex 7 | 8 | --- 9 | 10 | ### What I propose 11 | 12 | … 13 | 14 | ### Why it should be done 15 | 16 | … 17 | 18 | ### References 19 | 20 | … 21 | 22 | ### Admin 23 | 24 | - [ ] Choose the appropriate labels 25 | - [ ] Choose **Writing** project 26 | - [ ] Choose a milestone 27 | -------------------------------------------------------------------------------- /.github/REVIEW_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Something 2 | 3 | > Something is ... (get definition from official website, 2 sentences max) 4 | > 5 | > — [Official website](#) 6 | 7 | ## Code 8 | 9 | Example of automation at [GitHub](#).
10 | 11 | ## Review 12 | 13 | | Category | Opinion | Score | 14 | | ----------------- | ------- | :-----: | 15 | | _Use cases_ | | | 16 | | _Learning curve_ | | | 17 | | _Language_ | | | 18 | | _Ecosystem_ | | | 19 | | _Readability_ | | | 20 | | _Extensibility_ | | | 21 | | _Maintainability_ | | | 22 | | _Documentation_ | | | 23 | | **VERDICT** | | **X/5** | 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | node_modules/ 3 | .DS_Store 4 | -------------------------------------------------------------------------------- /.remarkignore: -------------------------------------------------------------------------------- 1 | docs/_coverpage.md 2 | docs/_template.md 3 | docs/types/test-types.md 4 | -------------------------------------------------------------------------------- /.spelling: -------------------------------------------------------------------------------- 1 | # markdown-spellcheck dictionary 2 | # one word per line 3 | docsify 4 | tl 5 | dr 6 | subteam 7 | devops 8 | acs 9 | pos 10 | trello 11 | advocation 12 | explainability 13 | navbar 14 | lichaw 15 | gojko 16 | adzic 17 | zig 18 | ziglar 19 | pax 20 | gdocs 21 | hypercard 22 | connor 23 | paulk 24 | lina 25 | zubyte 26 | phillipe 27 | bojorquez 28 | se 29 | restassured 30 | pycharm 31 | bret 32 | pettichord 33 | automatability 34 | kedemo 35 | javascript 36 | intellisense 37 | pageobject 38 | QAs 39 | mockups 40 | ilities 41 | plugins 42 | async 43 | learnability 44 | emilsson 45 | it-bility 46 | deployability 47 | reusability 48 | alister 49 | simo 50 | cem 51 | kaner 52 | diogo 53 | nunes 54 | configs 55 | flakyness 56 | greenlees 57 | quinert 58 | lifecycle 59 | deery 60 | charrett 61 | clokie 62 | inattentional 63 | bartel 64 | ashby 65 | quaere 66 | connolly 67 | pageobjects 68 | wynne 69 | winteringham 70 | microservices 71 | keogh 72 | gáspár 73 | ToC 74 | screensaver 75 | SaaS 76 | JSONs 77 | Win10 78 | cli 79 | a11y 80 | pentesters 81 | pentest 82 | pejgan 83 | brickarp 84 | vallone 85 | png 86 | descope 87 | kanban 88 | onboarding 89 | whitespace 90 | standalone 91 | envs 92 | docusaurus 93 | codeceptjs 94 | frontend 95 | dodds 96 | linters 97 | gareev 98 | backend 99 | maaret 100 | pyhäjärvi 101 | - docs/toolbox/note-taking.md 102 | udos 103 | deas 104 | ssues 105 | uestions 106 | - docs/concepts/testability.md 107 | alue 108 | i 109 | ntrinsic 110 | roject 111 | ubjective 112 | - docs/_template.md 113 | TITLE_HERE 114 | SUMMARY_HERE 115 | - docs/toolbox/mnemonics.md 116 | NoNeLaNe 117 | - docs/toolbox/tester-tools.md 118 | GitLens 119 | markdownlint 120 | vscode-icons 121 | KeepingYouAwake 122 | BugMagnet 123 | Clipy 124 | F.lux 125 | Vysor 126 | InVision 127 | PerfectPixel 128 | ColorZilla 129 | Wiremock 130 | Wireshark 131 | TestBuddy 132 | RapidReporter 133 | ExploratoryTesting 134 | Nightwatch 135 | Testcafe 136 | SpecFlow 137 | Capybara 138 | Watir 139 | Mabl 140 | Applitools 141 | Chai 142 | Sinon 143 | RSpec 144 | Codeception 145 | Appium 146 | Katalon 147 | Stryker 148 | Mailinator 149 | Mapil 150 | BrowserStack 151 | SauceLabs 152 | JMeter 153 | Sitespeed.io 154 | chaosmonkey 155 | Ghostery 156 | TunnelBear 157 | Grafana 158 | Datadog 159 | Splunk 160 | PowerBI 161 | TestRail 162 | Mochawesome 163 | Licecap 164 | Lightshot 165 | Typora 166 | Marp 167 | Mermaid.js 168 | Mindmup 169 | XMind 170 | Diagrams.net 171 | Testers.io 172 | Webhint 173 | JSVerify 174 | 
junit-quickcheck 175 | FreeLearningResourcesForSoftwareTesters 176 | scrcpy 177 | Maildev 178 | Zalenium 179 | k6 180 | Trivy 181 | Frida 182 | Magisk 183 | BackstopJS 184 | Tota11y 185 | bazel 186 | httpstat.us 187 | Hoppscotch 188 | - docs/toolbox/heuristics.md 189 | Heu-risk 190 | - docs/toolbox/test-strategy.md 191 | Ws 192 | - docs/toolbox/framework/playwright.md 193 | 3x 194 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at www.diogonunes.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 
38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Welcome! Here's what you need to know when contributing to this repo. 4 | 5 | ## Folders & Files 6 | 7 | - `docs`: markdown files that will be converted into HTML pages by [docsify](https://docsify.js.org/#/) 8 | - `.spelling`: used by [markdown-spellcheck](https://www.npmjs.com/package/markdown-spellcheck) to whitelist words 9 | 10 | ## Tasks 11 | 12 | ```shell 13 | deps:install # install all dependencies required to run this project 14 | lint # performs lint validations on all relevant *.md files 15 | pr:prepare # checks if your branch is good enough for a Pull Request 16 | pr:preview # runs the website locally on your machine 17 | pr:ready # makes your branch ready for Pull Request (increments version) 18 | test # checks if the live website is ok 19 | clean # deletes unversioned generated files 20 | ``` 21 | 22 | To run a task use `npm run <task>`, e.g. `npm run pr:prepare`. 23 | 24 | ## Details 25 | 26 | ### How to use [Markdown linting](https://github.com/DavidAnson/markdownlint) 27 | 28 | - Install: `npm i -g markdownlint-cli` 29 | - Run validation: `markdownlint docs --config .markdownlint.json` 30 | 31 | Configure which [rules](https://github.com/DavidAnson/markdownlint#rules--aliases) or [groups of rules](https://github.com/DavidAnson/markdownlint#tags) should be globally [used](https://github.com/DavidAnson/markdownlint#optionsconfig) with a `.markdownlint.json` file. 32 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Progress](https://img.shields.io/badge/progress-alpha%20version-blue.svg)](https://github.com/dialex/start-testing/milestones?direction=asc&sort=title&state=open) [![Live Demo](https://img.shields.io/badge/status-live-brightgreen.svg)](https://dialex.github.io/start-testing/) 2 | 3 | 👉 **[check the course here](https://dialex.github.io/start-testing)** 👈 4 | 5 | ## About the project 6 | 7 | This repo contains the source of a testing course.
8 | 9 | - Written in [Markdown](http://commonmark.org/), so that it's **cross-platform** 🌍 10 | - Hosted in [GitHub](https://dialex.github.io/start-testing), so that **anyone can contribute** 🤝 11 | - Rendered with [Docsify](https://github.com/QingWei-Li/docsify/), so that it **looks awesome** 😎 12 | -------------------------------------------------------------------------------- /code/framework/README.md: -------------------------------------------------------------------------------- 1 | # Guide to review test frameworks 2 | 3 | ## Review criteria 4 | 5 | | Category | Opinion | Score | 6 | | ----------------- | ------- | :------: | 7 | | _Use cases_ | | ❓ | 8 | | _Learning curve_ | | ❓ | 9 | | _Language_ | | ❓ | 10 | | _Ecosystem_ | | ❓ | 11 | | _Readability_ | | ❓ | 12 | | _Extensibility_ | | ❓ | 13 | | _Maintainability_ | | ❓ | 14 | | _Documentation_ | | ❓ | 15 | | **VERDICT** | | **\_/5** | 16 | 17 | ## Automation scope: [Todo list](http://todomvc.com/examples/react/#/) 18 | 19 | - [ ] **Smoke Tests** 20 | - [ ] Access homepage on browser 21 | - [ ] **Functional Tests** 22 | - [ ] Add item to list 23 | - [ ] Mark item as done 24 | - [ ] Edit item text 25 | - [ ] Remove item from list 26 | 27 | ## Automation scope: [Amazon](https://amazon.com/) 28 | 29 | - [ ] **Smoke Tests** 30 | - [ ] Access homepage on browser 31 | - [ ] **Functional Tests** 32 | - [ ] Search for book 33 | - [ ] Add book to cart 34 | 35 | ## Automation scope: [Postman API](https://docs.postman-echo.com/) 36 | 37 | - [ ] Successful GET 38 | - [ ] Successful POST 39 | - [ ] Failed method 40 | - [ ] Assert response structure 41 | 42 | ## TODO 43 | 44 | What about these sites? 45 | 46 | - http://automationpractice.com/index.php 47 | - http://uitestingplayground.com/click 48 | - https://demoqa.com/ 49 | - https://the-internet.herokuapp.com/ 50 | -------------------------------------------------------------------------------- /code/framework/cypress/.gitignore: -------------------------------------------------------------------------------- 1 | assets/ 2 | -------------------------------------------------------------------------------- /code/framework/cypress/Makefile: -------------------------------------------------------------------------------- 1 | install: 2 | echo "⚠️ Assuming you have Node.js and npm installed..." 3 | npm install cypress 4 | 5 | clean: 6 | rm -rf assets 7 | 8 | test: clean 9 | npx cypress run 10 | 11 | test-iterative: clean 12 | npx cypress open 13 | -------------------------------------------------------------------------------- /code/framework/cypress/README.md: -------------------------------------------------------------------------------- 1 | # Cypress 2 | 3 | Review [here](https://github.com/dialex/start-testing/tree/main/docs/toolbox/framework/cypress.md).
4 | 5 | ## Commands 6 | 7 | ```sh 8 | make install # Install dependencies 9 | make test # Run tests 10 | make test-iterative # Run tests iteratively 11 | ``` 12 | 13 | ## Setup 14 | 15 | - [IDE auto-complete for Cypress code](https://docs.cypress.io/guides/tooling/intelligent-code-completion.html#Set-up-in-your-Dev-Environment) 16 | - [IDE auto-complete for Cypress config](https://docs.cypress.io/guides/tooling/intelligent-code-completion.html#Set-up-in-your-Dev-Environment-1) 17 | 18 | ## Automation scope: Amazon 19 | 20 | - **Sanity Tests** 21 | - Run a test with an assertion 22 | - **Smoke Tests** 23 | - Access homepage on browser 24 | - **Functional Tests** 25 | - Search for item 26 | - Add item to cart 27 | -------------------------------------------------------------------------------- /code/framework/cypress/cypress.json: -------------------------------------------------------------------------------- 1 | { 2 | "video": true, 3 | "videosFolder": "assets/videos", 4 | "screenshotsFolder": "assets/screenshots", 5 | "trashAssetsBeforeRuns": true, 6 | "watchForFileChanges": false 7 | } 8 | -------------------------------------------------------------------------------- /code/framework/cypress/cypress/fixtures/example.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Using fixtures to represent data", 3 | "email": "hello@cypress.io", 4 | "body": "Fixtures are a great way to mock data for responses to routes" 5 | } -------------------------------------------------------------------------------- /code/framework/cypress/cypress/integration/functional.spec.js: -------------------------------------------------------------------------------- 1 | describe("Users on Amazon store", function() { 2 | var searchTerm = "Explore It"; 3 | 4 | it("search for product", function() { 5 | // ARRANGE 6 | var searchTermEncoded = searchTerm.replace(/ /g, "+"); 7 | cy.visit("https://amazon.com/"); 8 | 9 | // ACT 10 | cy.get("#twotabsearchtextbox") 11 | .type(searchTerm) 12 | .type("{enter}"); 13 | 14 | // ASSERT 15 | cy.url().should("include", "s?k=" + searchTermEncoded); 16 | cy.get(".s-result-item") 17 | .first() 18 | .should("contain", "Reduce Risk and Increase Confidence") 19 | .should("contain", "Elisabeth Hendrickson"); 20 | }); 21 | 22 | it("add item to cart", function() { 23 | // ARRANGE 24 | cy.visit("https://amazon.com/"); 25 | cy.get("#twotabsearchtextbox").type(searchTerm); 26 | cy.get(".nav-search-submit > .nav-input").click(); 27 | 28 | // ACT 29 | cy.get(".s-result-item .s-image") 30 | .first() 31 | .click(); 32 | cy.get("#add-to-cart-button").click(); 33 | 34 | // ASSERT 35 | cy.contains("Cart").should("be.visible"); 36 | cy.get("#nav-cart-count").contains(1); 37 | }); 38 | }); 39 | -------------------------------------------------------------------------------- /code/framework/cypress/cypress/integration/sanity.spec.js: -------------------------------------------------------------------------------- 1 | describe('Test Framework', function () { 2 | it('runs an assertion that always passes', function () { 3 | expect(true).to.equal(true) 4 | }) 5 | }) 6 | -------------------------------------------------------------------------------- /code/framework/cypress/cypress/integration/smoke.spec.js: -------------------------------------------------------------------------------- 1 | describe('Amazon homepage', function () { 2 | it('is online', function () { 3 | cy.visit("https://amazon.com/") 4 | }) 5 | 6 | it('renders the UI', function () { 7 | 
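    // Visit the homepage and confirm its key landmarks (search box, departments menu, cart) are present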
cy.visit("https://amazon.com/") 8 | cy.get('#twotabsearchtextbox') 9 | cy.contains("Departments") 10 | cy.contains("Cart") 11 | }) 12 | }) 13 | -------------------------------------------------------------------------------- /code/framework/cypress/cypress/plugins/index.js: -------------------------------------------------------------------------------- 1 | // *********************************************************** 2 | // This example plugins/index.js can be used to load plugins 3 | // 4 | // You can change the location of this file or turn off loading 5 | // the plugins file with the 'pluginsFile' configuration option. 6 | // 7 | // You can read more here: 8 | // https://on.cypress.io/plugins-guide 9 | // *********************************************************** 10 | 11 | // This function is called when a project is opened or re-opened (e.g. due to 12 | // the project's config changing) 13 | 14 | module.exports = (on, config) => { 15 | // `on` is used to hook into various events Cypress emits 16 | // `config` is the resolved Cypress config 17 | } 18 | -------------------------------------------------------------------------------- /code/framework/cypress/cypress/support/commands.js: -------------------------------------------------------------------------------- 1 | // *********************************************** 2 | // This example commands.js shows you how to 3 | // create various custom commands and overwrite 4 | // existing commands. 5 | // 6 | // For more comprehensive examples of custom 7 | // commands please read more here: 8 | // https://on.cypress.io/custom-commands 9 | // *********************************************** 10 | // 11 | // 12 | // -- This is a parent command -- 13 | // Cypress.Commands.add("login", (email, password) => { ... }) 14 | // 15 | // 16 | // -- This is a child command -- 17 | // Cypress.Commands.add("drag", { prevSubject: 'element'}, (subject, options) => { ... }) 18 | // 19 | // 20 | // -- This is a dual command -- 21 | // Cypress.Commands.add("dismiss", { prevSubject: 'optional'}, (subject, options) => { ... }) 22 | // 23 | // 24 | // -- This will overwrite an existing command -- 25 | // Cypress.Commands.overwrite("visit", (originalFn, url, options) => { ... }) 26 | -------------------------------------------------------------------------------- /code/framework/cypress/cypress/support/index.js: -------------------------------------------------------------------------------- 1 | // *********************************************************** 2 | // This is processed and loaded automatically before your test 3 | // files. This is a great place to put global configuration and 4 | // behavior that modifies Cypress.
5 | // 6 | // Read more at https://on.cypress.io/configuration 7 | // *********************************************************** 8 | 9 | // Import commands.js using ES2015 syntax: 10 | import './commands' 11 | 12 | // Pass anything here you'd normally pass to cy.server() 13 | Cypress.Server.defaults({ 14 | //whitelist: (xhr) => true // Mutes XHR requests 15 | }) 16 | -------------------------------------------------------------------------------- /code/framework/cypress/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowJs": true, 4 | "baseUrl": "node_modules", 5 | "types": [ 6 | "cypress" 7 | ] 8 | }, 9 | "include": [ 10 | "**/*.*" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /code/framework/karate/.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | karate.jar 3 | -------------------------------------------------------------------------------- /code/framework/karate/Makefile: -------------------------------------------------------------------------------- 1 | install: 2 | echo "⚠️ Assuming you have Java 8 or above installed..." 3 | curl -L https://dl.bintray.com/ptrthomas/karate/karate-0.9.5.jar -o karate.jar 4 | 5 | clean: 6 | rm -rf target/ 7 | 8 | test: clean 9 | java -jar karate.jar --threads=4 src/test/karate --clean 10 | -------------------------------------------------------------------------------- /code/framework/karate/README.md: -------------------------------------------------------------------------------- 1 | # Karate 2 | 3 | Review [here](https://github.com/dialex/start-testing/tree/main/docs/toolbox/framework/karate.md). 4 | 5 | ## Commands 6 | 7 | ```sh 8 | make install # Install dependencies 9 | make test # Run tests 10 | ``` 11 | 12 | ## Automation scope: [Postman API](https://docs.postman-echo.com/) 13 | 14 | - [x] Successful GET 15 | - [x] Successful POST 16 | - [x] Failed method 17 | - [x] Assert response structure 18 | 19 | ## Automation scope: Amazon 20 | 21 | - [x] **Smoke Tests** 22 | - [x] Access homepage on browser 23 | - [x] **Functional Tests** 24 | - [x] Search for item 25 | - [x] Add item to cart 26 | -------------------------------------------------------------------------------- /code/framework/karate/src/test/fixtures/postman-post-payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "some-field": "some value", 3 | "another-field": 2, 4 | "list-of-stuff": [ 5 | { 6 | "name": "stuff1" 7 | }, 8 | { 9 | "name": "stuff2" 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /code/framework/karate/src/test/karate/api-postman-contract.feature: -------------------------------------------------------------------------------- 1 | Feature: Postman Echo contract 2 | 3 | Background: 4 | * url 'https://postman-echo.com' 5 | 6 | # Inspired by https://github.com/intuit/karate/blob/master/karate-junit4/src/test/java/com/intuit/karate/junit4/demos/schema-like.feature 7 | Scenario: Contract of /get response 8 | 9 | Given path "/get" 10 | And param word1 = "Domo arigato" 11 | And param word2 = "Sayonara" 12 | And def schema = 13 | """ 14 | { args: { word1: #string, word2: #string } } 15 | """ 16 | When method get 17 | Then match response contains schema 18 | --------------------------------------------------------------------------------
/code/framework/karate/src/test/karate/api-postman-get.feature: -------------------------------------------------------------------------------- 1 | Feature: Postman Echo GET 2 | 3 | Background: 4 | * url 'https://postman-echo.com' 5 | * def endpoint = "get" 6 | 7 | Scenario: Endpoint /get exists 8 | 9 | Given path endpoint 10 | When method get 11 | Then status 200 12 | 13 | Scenario: Returns the parameters received 14 | 15 | Given path endpoint 16 | And param param1 = "Hajime" 17 | And param arg2 = "Dozo" 18 | When method get 19 | Then status 200 20 | And match response == "#notnull" 21 | And match response.args.param1 == "Hajime" 22 | # Assertion using JsonPath (see https://stackoverflow.com/questions/62280561/) 23 | # Option 1: compare JsonPath result with an array (which is misleading) 24 | And match $..arg2 == ["Dozo"] 25 | # Option 2: store JsonPath result in tmp var (which is cumbersome) 26 | * def tmpKarateSupportForJsonPathIsPooPoo = $..arg2 27 | And match tmpKarateSupportForJsonPathIsPooPoo[0] == "Dozo" 28 | -------------------------------------------------------------------------------- /code/framework/karate/src/test/karate/api-postman-post.feature: -------------------------------------------------------------------------------- 1 | Feature: Postman Echo POST 2 | 3 | Background: 4 | * url 'https://postman-echo.com' 5 | * def endpoint = "post" 6 | 7 | Scenario: Endpoint /post exists 8 | 9 | Given path endpoint 10 | And request {} 11 | When method post 12 | Then status 200 13 | 14 | Scenario: Returns the payload received 15 | 16 | Given path endpoint 17 | And request read('../fixtures/postman-post-payload.json') 18 | When method post 19 | Then status 200 20 | And match response.data['some-field'] == "some value" 21 | And match response.data['list-of-stuff'][1].name == "stuff2" 22 | 23 | # this is actually the "get started" example from the docs...
24 | # you know, the kind of code you want to always work, 25 | # because it's your first interaction with the tool (ಠ _ ಠ) 26 | Scenario: Failed request returns 500 27 | 28 | Given url "http://myhost.com/v1/cats" 29 | And request { name: "Billie"} 30 | When method post 31 | Then status 500 32 | # Then status 201 33 | # And match response == { id: "#notnull", name: "Billie" } 34 | -------------------------------------------------------------------------------- /code/framework/karate/src/test/karate/ui-amazon.feature: -------------------------------------------------------------------------------- 1 | Feature: Amazon UI automation 2 | 3 | Background: 4 | * def baseUrl = 'https://www.amazon.com/' 5 | * configure driver = { type: 'chrome' } # this is optional 6 | 7 | Scenario: Access homepage on browser 8 | Given driver baseUrl 9 | Then match driver.url == baseUrl 10 | And assert locate("#twotabsearchtextbox").exists 11 | And assert locate("{}Cart").exists 12 | 13 | Scenario: Search for item 14 | Given driver baseUrl 15 | And def searchTerm = "Explore It" 16 | And def searchTermEncoded = "Explore+It" 17 | # This one-liner should work, but doesn't ¯\_(ツ)_/¯ 18 | # When input("#twotabsearchtextbox", [searchTerm, Key.ENTER]) 19 | When input("#twotabsearchtextbox", searchTerm) 20 | # This should work, but doesn't 21 | # And click("#nav-search-submit-text") 22 | And mouse("#nav-search-submit-text").click() 23 | # This should not be necessary, but it is (╯°□°)╯︵ ┻━┻ 24 | And waitFor('#nav-search-submit-text').click() 25 | Then match driver.url contains "s?k=" + searchTermEncoded 26 | And assert locate("div.s-result-list").exists 27 | And assert locate("{span}Explore It!: Reduce Risk and Increase Confidence with Exploratory Testing").exists 28 | # Read more about this issue at https://github.com/intuit/karate/issues/1169 29 | 30 | # FIXME: I can't get this to work, I give up (╯°□°)╯︵ ┻━┻ 31 | # Scenario: Add item to cart 32 | # Given driver baseUrl 33 | # And def searchTerm = "Cypress" 34 | # And input("#twotabsearchtextbox", searchTerm) 35 | # And mouse('#nav-search-submit-text').click() 36 | # When mouse(".s-result-item .s-image").click() 37 | # And mouse('#add-to-cart-button').click() 38 | # Then match text("#nav-cart-count") == 1 -------------------------------------------------------------------------------- /code/framework/playwright/.gitignore: -------------------------------------------------------------------------------- 1 | results/ 2 | package-lock.json 3 | -------------------------------------------------------------------------------- /code/framework/playwright/Makefile: -------------------------------------------------------------------------------- 1 | install: 2 | echo "⚠️ Assuming you have Node/npm installed..." 3 | npm install 4 | 5 | clean: 6 | rm -rf results 7 | 8 | test: clean 9 | npm run test 10 | 11 | test-record: clean 12 | npm run test-record 13 | -------------------------------------------------------------------------------- /code/framework/playwright/README.md: -------------------------------------------------------------------------------- 1 | # Playwright 2 | 3 | Review [here](https://github.com/dialex/start-testing/tree/main/docs/toolbox/framework/playwright.md).
4 | 5 | ## Commands 6 | 7 | ```sh 8 | make install # Install dependencies 9 | make test # Run tests 10 | make test-record # Record user steps as test code 11 | ``` 12 | 13 | ## Automation scope: Amazon 14 | 15 | - [x] **Smoke Tests** 16 | - [x] Access homepage on browser 17 | - [x] **Functional Tests** 18 | - [x] Search for book 19 | - [x] Add book to cart 20 | -------------------------------------------------------------------------------- /code/framework/playwright/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "playwright", 3 | "version": "1.1.0", 4 | "description": "", 5 | "main": "", 6 | "directories": { 7 | "test": "tests" 8 | }, 9 | "scripts": { 10 | "test": "npx playwright test", 11 | "test-record": "npx playwright codegen" 12 | }, 13 | "author": "Diogo Nunes", 14 | "license": "MIT", 15 | "devDependencies": { 16 | "@playwright/test": "^1.16.3", 17 | "expect-playwright": "^0.8.0", 18 | "playwright": "^1.16.3" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /code/framework/playwright/tests/functional.spec.js: -------------------------------------------------------------------------------- 1 | const { chromium } = require("playwright"); 2 | const { test, expect } = require("@playwright/test"); 3 | 4 | const AMAZON_HOME = "https://amazon.com/"; 5 | 6 | test.describe("Users on Amazon store", function () { 7 | const searchTerm = "Explore It testing"; 8 | const searchFull = "Explore It!: Reduce Risk and Increase Confidence with Exploratory Testing"; 9 | 10 | let browser, page, context; 11 | 12 | test.beforeAll(async () => { 13 | browser = await chromium.launch(); 14 | context = await browser.newContext(); 15 | page = await context.newPage(); 16 | }); 17 | 18 | test.afterAll(async () => { 19 | await browser.close(); 20 | }); 21 | 22 | test("should search for product", async () => { 23 | // ARRANGE 24 | await page.goto(AMAZON_HOME); 25 | 26 | // ACT 27 | await page.fill('[aria-label="Search"]', searchTerm); 28 | await page.keyboard.press("Enter"); 29 | await page.waitForNavigation(); // without it I get "page.$$: Protocol error: Cannot find context with specified id" 30 | 31 | // ASSERT 32 | const results = await page.$$('div[data-component-type="s-search-result"]'); 33 | await expect(results.length).toBeGreaterThan(0); 34 | const firstResult = await results[0].innerText(); 35 | await expect(firstResult).toContain("Reduce Risk and Increase Confidence"); 36 | await expect(firstResult).toContain("by Elisabeth Hendrickson"); 37 | }); 38 | 39 | test("add item to cart", async () => { 40 | // ARRANGE 41 | await page.goto(AMAZON_HOME); 42 | await page.fill('[aria-label="Search"]', searchTerm); 43 | await page.keyboard.press("Enter"); 44 | await page.waitForNavigation(); 45 | 46 | // ACT 47 | await page.click(`text=${searchFull}`); 48 | await expect(page.locator("#add-to-cart-button")).toBeVisible(); 49 | await page.click("#add-to-cart-button"); 50 | 51 | // ASSERT 52 | await expect(page).toHaveSelector("text=1 Cart"); 53 | }); 54 | }); 55 | -------------------------------------------------------------------------------- /code/framework/playwright/tests/smoke.spec.js: -------------------------------------------------------------------------------- 1 | const { chromium } = require("playwright"); 2 | const { test, expect } = require("@playwright/test"); 3 | 4 | const AMAZON_HOME = "https://amazon.com/"; 5 | 6 | test.describe("Amazon", function () { 7 | let browser, page, context; 8 | 
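  // Launch one shared browser, context and page for the whole suite (closed again in afterAll)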
9 | test.beforeAll(async () => { 10 | browser = await chromium.launch(); 11 | context = await browser.newContext(); 12 | page = await context.newPage(); 13 | }); 14 | 15 | test.afterAll(async () => { 16 | await browser.close(); 17 | }); 18 | 19 | test("should be online", async () => { 20 | await page.goto(AMAZON_HOME); 21 | expect(page).not.toBeNull(); 22 | }); 23 | 24 | test("should render the UI", async () => { 25 | await page.goto(AMAZON_HOME); 26 | await expect(page).toHaveTitle(/Amazon.+/); 27 | await expect(page.locator("#twotabsearchtextbox")).toBeVisible(); 28 | await expect(page.locator("#nav-search-dropdown-card")).toHaveText(/Departments/); // ! only worked with regex 29 | await expect(page.locator("#nav-tools")).toHaveText(/Cart/); 30 | }); 31 | }); 32 | -------------------------------------------------------------------------------- /code/framework/robot/.gitignore: -------------------------------------------------------------------------------- 1 | Results/ 2 | -------------------------------------------------------------------------------- /code/framework/robot/Makefile: -------------------------------------------------------------------------------- 1 | install: 2 | echo "⚠️ Assuming you have Python, pip and brew installed..." 3 | pip install robotframework 4 | pip install robotframework-seleniumlibrary 5 | pip install robotframework-requests 6 | brew cask install chromedriver 7 | brew cask install geckodriver 8 | 9 | clean: 10 | rm -rf Results 11 | mkdir Results 12 | 13 | test: clean 14 | # robot -d Results Tests 15 | robot -d Results Tests/AmazonRefactored.robot 16 | -------------------------------------------------------------------------------- /code/framework/robot/README.md: -------------------------------------------------------------------------------- 1 | # Robot Framework 2 | 3 | Review [here](https://github.com/dialex/start-testing/tree/main/docs/toolbox/framework/robot.md). 
4 | 5 | ## Commands 6 | 7 | ```sh 8 | make install # Install dependencies 9 | make test # Run tests 10 | ``` 11 | 12 | ## Automation scope: Amazon 13 | 14 | - **Sanity Tests** 15 | - Run a test with an assertion 16 | - **Smoke Tests** 17 | - Access homepage on browser 18 | - **Functional Tests** 19 | - Search for item 20 | - Add item to cart 21 | -------------------------------------------------------------------------------- /code/framework/robot/Resources/AmazonActions.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Resource PageObjects/NavHeader.robot 4 | Resource PageObjects/ProductDetail.robot 5 | Resource PageObjects/SearchResults.robot 6 | 7 | *** Variables *** 8 | 9 | ${BookName} explore it 10 | 11 | *** Keywords *** 12 | 13 | Search For Product 14 | NavHeader.Search ${BookName} 15 | # Set Test Variable ${LastSearchTerm} ${BookName} 16 | 17 | Select Product From Results 18 | SearchResults.Select Product 0 19 | 20 | Add Product To Cart 21 | ProductDetail.Add To Cart 22 | -------------------------------------------------------------------------------- /code/framework/robot/Resources/Assert.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Library SeleniumLibrary run_on_failure=Capture Page Screenshot 4 | 5 | *** Keywords *** 6 | 7 | Page Has Text 8 | [Arguments] ${text} 9 | Page Should Contain ${text} 10 | 11 | Page Has Element 12 | [Arguments] ${locator} 13 | Page Should Contain Element ${locator} 14 | 15 | Element Has Text 16 | [Arguments] ${locator} ${text} 17 | Element Should Contain ${locator} ${text} 18 | -------------------------------------------------------------------------------- /code/framework/robot/Resources/CommonActions.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Library SeleniumLibrary run_on_failure=Capture Page Screenshot 4 | 5 | *** Keywords *** 6 | 7 | Begin Test 8 | [Arguments] ${browser} ${url} 9 | Open Browser about:blank ${browser} 10 | Go To ${url} 11 | 12 | End Test 13 | Close Browser 14 | -------------------------------------------------------------------------------- /code/framework/robot/Resources/PageObjects/NavHeader.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Library SeleniumLibrary run_on_failure=Capture Page Screenshot 4 | 5 | *** Keywords *** 6 | 7 | Wait Until Page Loads 8 | Wait Until Page Contains Element id:twotabsearchtextbox 9 | 10 | Search 11 | [Arguments] ${term} 12 | Input Text id:twotabsearchtextbox ${term} 13 | Press Key id:twotabsearchtextbox \\13 14 | SearchResults.Wait Until Page Loads 15 | -------------------------------------------------------------------------------- /code/framework/robot/Resources/PageObjects/ProductAddedToCart.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Library SeleniumLibrary run_on_failure=Capture Page Screenshot 4 | 5 | *** Keywords *** 6 | 7 | Wait Until Page Loads 8 | Wait Until Page Contains Element id:huc-v2-confirm-text-container 9 | -------------------------------------------------------------------------------- /code/framework/robot/Resources/PageObjects/ProductDetail.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Library SeleniumLibrary run_on_failure=Capture Page Screenshot 4 |
Resource ProductAddedToCart.robot 5 | 6 | *** Keywords *** 7 | 8 | Wait Until Page Loads 9 | Page Should Contain Stock 10 | 11 | Add To Cart 12 | Click Button id:add-to-cart-button 13 | ProductAddedToCart.Wait Until Page Loads 14 | -------------------------------------------------------------------------------- /code/framework/robot/Resources/PageObjects/SearchResults.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Library SeleniumLibrary run_on_failure=Capture Page Screenshot 4 | Resource ProductDetail.robot 5 | 6 | *** Keywords *** 7 | 8 | Wait Until Page Loads 9 | Wait Until Page Contains results for 10 | 11 | Select Product 12 | [Arguments] ${index} 13 | Click Element xpath://*[@id="result_${index}"]//a/h2 14 | ProductDetail.Wait Until Page Loads 15 | -------------------------------------------------------------------------------- /code/framework/robot/Resources/StepDefinitions.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Resource AmazonActions.robot 4 | Resource Assert.robot 5 | 6 | *** Keywords *** 7 | 8 | # Given 9 | 10 | I search for a product 11 | AmazonActions.Search For Product 12 | AmazonActions.Select Product From Results 13 | 14 | # When 15 | 16 | I add a product to an empty cart 17 | AmazonActions.Add Product To Cart 18 | 19 | # Then 20 | 21 | my cart has that item 22 | Assert.Page Has Text Added to Cart 23 | Assert.Page Has Element id:nav-cart-count 24 | Assert.Element Has Text id:nav-cart-count 1 25 | -------------------------------------------------------------------------------- /code/framework/robot/Tests/Amazon.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Documentation This is an example of how you can test Amazon using Robot Framework 4 | Library SeleniumLibrary run_on_failure=Capture Page Screenshot 5 | 6 | *** Variables *** 7 | 8 | *** Test Cases *** 9 | 10 | ################### 11 | # Navigation.robot 12 | ################### 13 | 14 | Homepage renders on Chrome 15 | [Documentation] Access homepage on browser. Should support multiple browsers.
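    # This suite drives the browser directly in every test; AmazonRefactored.robot shows the page-object version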
16 | [Tags] Smoke 17 | Open Browser https://amazon.com/ chrome 18 | Close Browser 19 | 20 | Homepage renders on Firefox 21 | [Documentation] Should support multiple browsers 22 | [Tags] Smoke 23 | Open Browser https://amazon.com/ ff 24 | Close Browser 25 | 26 | ################# 27 | # Shopping.robot 28 | ################# 29 | 30 | Search for item 31 | [Documentation] Should display a list of results based on the search criteria 32 | [Tags] Functional 33 | Open Browser https://amazon.com/ chrome 34 | Wait Until Page Contains Element id:twotabsearchtextbox 35 | Input Text id:twotabsearchtextbox explore it 36 | Press Key id:twotabsearchtextbox \\13 37 | Wait Until Page Contains results for 38 | Page Should Contain Element css:.s-result-item 39 | Close Browser 40 | 41 | Add item to cart 42 | [Documentation] Should display a success message and increment the cart total items 43 | [Tags] Functional 44 | Open Browser https://amazon.com/ chrome 45 | Wait Until Page Contains Element id:twotabsearchtextbox 46 | Input Text id:twotabsearchtextbox explore it 47 | Press Key id:twotabsearchtextbox \\13 48 | Wait Until Page Contains results for 49 | Click Element xpath://*[@id="result_0"]//a/h2 50 | Wait Until Page Contains price 51 | Page Should Contain Stock 52 | Click Button id:add-to-cart-button 53 | Wait Until Page Contains Element id:huc-v2-confirm-text-container 54 | Page Should Contain Added to Cart 55 | Page Should Contain Element id:nav-cart-count 56 | Element Should Contain id:nav-cart-count 1 57 | # Element Text Should Be id:nav-cart-count 1 58 | Close Browser 59 | 60 | *** Keywords *** 61 | -------------------------------------------------------------------------------- /code/framework/robot/Tests/AmazonRefactored.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | 3 | Documentation This is an example of how you can test Amazon using Robot Framework 4 | Resource ../Resources/Assert.robot 5 | Resource ../Resources/AmazonActions.robot 6 | Resource ../Resources/CommonActions.robot 7 | Resource ../Resources/StepDefinitions.robot 8 | Force Tags Example 9 | Test Setup CommonActions.Begin Test chrome https://amazon.com/ 10 | Test Teardown CommonActions.End Test 11 | 12 | *** Variables *** 13 | 14 | *** Test Cases *** 15 | 16 | ################### 17 | # Navigation.robot 18 | ################### 19 | 20 | Homepage renders on Chrome 21 | [Documentation] Should support multiple browsers. 
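    # The browser is opened and closed by Test Setup / Test Teardown (CommonActions), so each test stays minimal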
22 | [Tags] Smoke 23 | Assert.Page Has Text Today's Deals 24 | 25 | ################# 26 | # Shopping.robot 27 | ################# 28 | 29 | Search for item 30 | [Documentation] Should display a list of results based on the search criteria 31 | [Tags] Functional 32 | AmazonActions.Search For Product 33 | Assert.Page Has Element css:.s-result-item 34 | 35 | Add item to cart 36 | [Documentation] Should display a success message and increment the cart total items 37 | [Tags] Functional 38 | AmazonActions.Search For Product 39 | AmazonActions.Select Product From Results 40 | AmazonActions.Add Product To Cart 41 | Assert.Page Has Text Added to Cart 42 | Assert.Page Has Element id:nav-cart-count 43 | Assert.Element Has Text id:nav-cart-count 1 44 | 45 | Add item to cart (Gherkin style) 46 | [Documentation] Should display a success message and increment the cart total items 47 | [Tags] Acceptance 48 | Given I search for a product 49 | When I add a product to an empty cart 50 | Then my cart has that item 51 | 52 | *** Keywords *** 53 | -------------------------------------------------------------------------------- /docs/.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "default": true, 3 | "line_length": false, 4 | "MD007": { "indent": 2 }, 5 | "MD026": false 6 | } 7 | -------------------------------------------------------------------------------- /docs/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/.nojekyll -------------------------------------------------------------------------------- /docs/_coverpage.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # Start Testing 5 | 6 | > A community-driven testing course 7 | 8 | - [Markdown](http://commonmark.org/) 🌍 **cross-platform** 9 | - [GitHub](https://github.com/dialex/start-testing) 🤝 **anyone can contribute** 10 | - [Docsify](https://github.com/QingWei-Li/docsify/) 😎 **looks awesome** 11 | 12 | [Let's Start](#syllabus) 13 | 14 |

![color](#f0f0f0)

15 | -------------------------------------------------------------------------------- /docs/_media/articles/Test-Strategy-(SADDEST-PPIRATEE).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/Test-Strategy-(SADDEST-PPIRATEE).png -------------------------------------------------------------------------------- /docs/_media/articles/Test-Strategy-Overview-(SADDEST-PPIRATEE).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/Test-Strategy-Overview-(SADDEST-PPIRATEE).png -------------------------------------------------------------------------------- /docs/_media/articles/modified-htsm1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/modified-htsm1.png -------------------------------------------------------------------------------- /docs/_media/articles/test-pyramid-icecream.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/test-pyramid-icecream.png -------------------------------------------------------------------------------- /docs/_media/articles/test-pyramid-rosie-circles.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/test-pyramid-rosie-circles.png -------------------------------------------------------------------------------- /docs/_media/articles/test-pyramid-trophy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/test-pyramid-trophy.png -------------------------------------------------------------------------------- /docs/_media/articles/test-pyramid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/test-pyramid.png -------------------------------------------------------------------------------- /docs/_media/articles/test-scope-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/test-scope-1.png -------------------------------------------------------------------------------- /docs/_media/articles/test-scope-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/test-scope-2.png -------------------------------------------------------------------------------- /docs/_media/articles/test-types-layered.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/articles/test-types-layered.png 
-------------------------------------------------------------------------------- /docs/_media/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/logo.png -------------------------------------------------------------------------------- /docs/_media/notebook/specification-by-example-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/notebook/specification-by-example-1.png -------------------------------------------------------------------------------- /docs/_media/notebook/user-story-mapping-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/notebook/user-story-mapping-1.png -------------------------------------------------------------------------------- /docs/_media/notebook/user-story-mapping-2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/notebook/user-story-mapping-2.jpg -------------------------------------------------------------------------------- /docs/_media/supporters/Equal_Experts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/supporters/Equal_Experts.png -------------------------------------------------------------------------------- /docs/_media/talks/J B Rainsberger - Integrated tests are a scam.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/_media/talks/J B Rainsberger - Integrated tests are a scam.png -------------------------------------------------------------------------------- /docs/_sidebar.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | - [**Home**](/) 8 | - **Concepts** 9 | - [Testing's purpose](/concepts/testing-purpose.md) 10 | - [Testing is…](/concepts/what-testing-is.md) 11 | - [Testing is not…](/concepts/what-testing-is-not.md) 12 | - [Tester's responsibilities](/concepts/tester-responsibilities.md) 13 | - [Requirements](/concepts/requirements.md) 14 | - [Testability](/concepts/testability.md) 15 | - **Toolbox** 16 | - [Note-taking](/toolbox/note-taking.md) 17 | - [Heuristics](/toolbox/heuristics.md) 18 | - [Charters](/toolbox/charters.md) 19 | - [Oracles](/toolbox/oracles.md) 20 | - [Mnemonics](/toolbox/mnemonics.md) 21 | - [Development methodologies](/toolbox/dev-methodologies.md) 22 | - [Test strategy](/toolbox/test-strategy.md) 23 | - [Tools](/toolbox/tester-tools.md) 24 | - **Test types** 25 | - [Categories](/types/test-types?id=categories) 26 | - [Functional vs Non-functional](/types/test-types?id=functional-testing) 27 | - [Black box vs White box](/types/test-types?id=black-box-testing) 28 | - [Types](/types/test-types?id=types) 29 | - [Unit tests](/types/test-types?id=🏝️-unit-tests) 30 | - [Integration tests](/types/test-types?id=🧩-integration-tests) 31 | - [End-to-end tests](/types/test-types?id=🧑-end-to-end-tests) 32 | - [Regression 
tests](/types/test-types?id=✅-regression-tests) 33 | - […and many more](/types/test-types.md) 34 | - [Approaches](/types/test-types?id=testing-approaches) 35 | - [Exploratory testing](/types/test-types?id=🧭-exploratory-testing) 36 | - [Automation in testing](/types/test-types?id=🤖-automation-in-testing) 37 | - [Test pyramid](/types/test-pyramid.md) 38 | - **Tester roles** 39 | - ~~Exploration tester~~ 40 | - ~~Automation tester~~ 41 | - ~~Manual vs Technical tester~~ 42 | - ~~Bug hunter~~ 43 | - ~~Agile team member~~ 44 | - ~~Mentor vs Coach~~ 45 | - ~~Lead tester~~ 46 | - ~~Recruiter~~ 47 | - ~~Certifications~~ 48 | - **Fields** 49 | - ~~Usability (UI/UX)~~ 50 | - ~~Performance & Load~~ 51 | - ~~Security~~ 52 | - ~~Risk~~ 53 | - ~~Data~~ 54 | - ~~Platform (DevOps)~~ 55 | - ~~Metrics~~ 56 | - ~~Monitoring~~ 57 | - ~~Logging~~ 58 | - **Next steps** 59 | - ~~Staying up to date~~ 60 | - **EXTRA: Test Frameworks** 61 | - [Cypress](/toolbox/framework/cypress.md) 62 | - [Karate](/toolbox/framework/karate.md) 63 | - [Robot Framework](/toolbox/framework/robot.md) 64 | - [Playwright](/toolbox/framework/playwright.md) 65 | - **EXTRA: Notebook** 66 | - [User Story Mapping](/notebook/user-story-mapping.md) 67 | - [Specification by Example](/notebook/specification-by-example.md) 68 | -------------------------------------------------------------------------------- /docs/_template.md: -------------------------------------------------------------------------------- 1 | # TITLE_HERE 2 | 3 | > **tl;dr** SUMMARY_HERE 4 | 5 | ## Theory 6 | 7 | ## Practice 8 | 9 | ## Teachers 10 | 11 | - [XXX](YYY) 12 | 13 | ## Sources 14 | 15 | - [XXX](YYY) 16 | -------------------------------------------------------------------------------- /docs/concepts/requirements.md: -------------------------------------------------------------------------------- 1 | # Requirements 2 | 3 | > **tl;dr** What your stakeholders require to achieve their goals. Testers clarify and detail those needs. 4 | 5 | ## Theory 6 | 7 | Here are some **concepts** you should keep in mind: 8 | 9 | - _Requirement:_ Some behaviour or property needed by a stakeholder to achieve a goal. 10 | - _Specification:_ The list containing all requirements for a given project/product. 11 | - _User story:_ Concise description of a requirement, told from the perspective of the stakeholder who desires it. Typically follows the structure `As a <role>, I want to <action> so that <benefit>`. 12 | - _Acceptance criteria_ (AC): Statements that are expected to be true if the user story is correctly implemented. If the ACs are met, the implementation is _accepted_. If there's a mismatch, either the ACs are revised or the implementation redone. 13 | - _Backlog:_ The list containing all user stories to implement for a given project/product. 14 | 15 | These are the **main roles** that engage in breaking down a product into requirements: 16 | 17 | - The _Stakeholder:_ Anyone who has authority to influence the specification. They might have that power because: they will use it (users); they will maintain it (architects); they will help users (support); they are paying for it (client). 18 | - The _Product Owner_ (PO): Discovers who the relevant stakeholders are. Gathers requirements from stakeholders. Takes decisions as a spokesperson for all stakeholders. 19 | - The _Business Analyst_ (BA): Improves the specification based on his/her experience and business context, without biasing it. Can act as Product Owner. 20 | - The _"three amigos"_ are a PO/BA, a developer and a tester.
Seems like the start of a joke, but it is really just a nickname for a temporary subteam with those three roles. 21 | 22 | There are several **ways to write the specification** of a product, but there are two main approaches: 23 | 24 | - In a _Waterfall project_, the client gathers requirements from their relevant stakeholders and writes a document containing them. That document is casually referred to as "the specification" or more formally as Software Requirements Specification (SRS). Once approved, the specification does not change and the team starts implementing it. 25 | - In an _Agile project_, the PO engages in conversations with the client's stakeholders and gathers their goals and requirements. The three amigos collaborate to write user stories that will implement those requirements. The list of stories not yet implemented is called the backlog. The details of those stories, as well as their priority, can be changed during the project. 26 | 27 | ## Practice 28 | 29 | > A requirement is a quality that matters to someone who matters. 30 | 31 | Nowadays there are several Agile techniques to write requirements effectively: 32 | 33 | - [User story mapping](http://amzn.to/2mW1rkx). Starts with stakeholders and breaks down into Goals > Activities > Tasks > Stories. 34 | - [User journey](http://amzn.to/2mWyt4d). Starts with users and breaks down into Goals > User Journeys > Actions > Stories. 35 | - [Specification by example](http://amzn.to/2FZy1ux). Details your stories using conversations to extract rules and examples. 36 | 37 | Since POs/BAs are usually the drivers of requirement analysis, we won't go into detail (recommended reading in the [section](#sources) below). Nevertheless, attend the workshops if you can — the closer you are to the [source of truth](https://en.wikipedia.org/wiki/Chinese_whispers) the better. 38 | 39 | So you might be asking **how can testers add value to this process?** 40 | 41 | > Business perspective: Build the right thing meeting the client's expectations and requirements. 42 | > 43 | > _Concern: Will it be useful?_ 44 | > 45 | > Technical perspective: Build it right using the correct technology, architecture, tools and practices. 46 | > 47 | > _Concern: Will it work?_ 48 | 49 | - **Align perspectives**. Each side has its own concerns, assumptions and biases. Chat with stakeholders (askers) and developers (givers) to check if they have a common understanding of what needs to be done. 50 | - **Raise risks**. That's why you are one of the three amigos. Usually the POs are focused on functionality and your developers on implementation details. You can remind them of risks such as non-functional requirements, impacts on previous stories or the cost of automating a specific test. 51 | - **Ask questions**. Discuss "what if" scenarios. Use personas to discover user-specific issues. Clarify the rules for extreme or unusual values. It's cheaper to improve the design than it is to fix the implementation. 52 | - **Write scenarios**. When doing [specification by example](/notebook/specification-by-example.md), you should be writing those examples. Most likely you will [automate](/roles/automation-tester.md) them later on, using the Gherkin syntax `Given <context> When <action> Then <outcome>` (see the sketch at the end of this section). 53 | - **Bring your toolbox**. [Mnemonics](/toolbox/mnemonics.md) such as the five W's are useful to detail stories and create scenarios with fewer assumptions. Your [list of biases](/toolbox/biases.md) might also uncover weak requirements. 54 | - **Clarify stories**. Your questions lead to explicit requirements and more examples. By doing so, you increase the probability of meeting the stakeholder's requirement. 55 | - **Think again**. The more you know, the more assumptions you make and the more casual you become when testing. Fresh eyes find failure, so stay sharp. 56 | - **Don't be fooled**. Question requirements and extract their value/usefulness. Be aware of echo chambers. 57 | 58 | > It's easier to fool people than to convince them that they have been fooled. 59 | > 60 | > — Mark Twain 
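To make the **Write scenarios** bullet concrete, here is a minimal sketch of an example turned into an automated scenario. It is illustrative only: the discount rule, the step texts and the JavaScript step definitions (using [cucumber-js](https://github.com/cucumber/cucumber-js)) are all invented for this sketch, not taken from a real project.

```js
// Gherkin scenario agreed by the three amigos (kept here as a comment):
//
//   Scenario: Loyal customers get a discount
//     Given a customer with 12 previous orders
//     When they place an order of 100 EUR
//     Then the final price is 90 EUR

const assert = require("node:assert");
const { Given, When, Then } = require("@cucumber/cucumber");

Given("a customer with {int} previous orders", function (orders) {
  this.customer = { orders };
});

When("they place an order of {int} EUR", function (amount) {
  // The rule extracted during refinement: 10% off after 10 orders.
  // In a real project this step would drive the product; the rule is
  // inlined here only to keep the sketch self-contained.
  const discount = this.customer.orders > 10 ? 0.1 : 0;
  this.finalPrice = amount * (1 - discount);
});

Then("the final price is {int} EUR", function (expected) {
  assert.strictEqual(this.finalPrice, expected);
});
```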
For instance, [logging](/fields/logging.md) is a feature that improves _testability_ because it helps humans inspect how the system works; browser cookies enhance _automatability_ because they allow automation to control a user session. 18 | 19 | ## Practice 20 | 21 | There are four main groups of variables that influence testability: value-related, intrinsic, project-related and subjective. Below are some [heuristics](/toolbox/heuristics.md) adapted from [James Bach](http://www.satisfice.com/tools/testable.pdf). To discover even more dimensions that influence testability, refer to [Maria Kedemo](https://mkedemo.wordpress.com/2015/11/22/dimensions-of-testability-v1-1/)'s **dimensions of testability**. 22 | 23 | - **Value**: changing the quality standard or our knowledge of it. 24 | - [_Oracles_](/toolbox/oracles.md). We need ways to detect each kind of problem that is worth looking for. 25 | - _Users_. The more we can talk to and observe users, the easier it is to test for them. 26 | - _Environment_. Testing is more accurate when performed in the users' environment (or similar). 27 | - **Intrinsic** (aka. product): changing the system itself. 28 | - _Reliability_. Issues slow down testing since we must stop to report them or work around them. 29 | - _Tolerance_. The less quality required or the more risk that can be taken, the less testing is needed. 30 | - _Controllability_. Ideally we can provide any possible input and invoke any possible state or combination of states easily and on demand. 31 | - **Project**: changing the conditions under which we test. 32 | - _Information_. We get all information we want or need to test well. 33 | - _Sandboxing_. We are free to do any testing without fear of disrupting users or team members. 34 | - _Time_. We need time to think, prepare and deal with surprises. 35 | - **Subjective** (aka. tester): changing the tester or the test process. 36 | - [_Test strategy_](/toolbox/test-strategy.md). A strategy will reduce waste by focusing the testing efforts on what matters. 37 | - _Context knowledge_. The more we know about the users and the system, the better we can test. 38 | - _Technical knowledge_. Our knowledge of technology and tools makes testing easier for us. 39 | 40 | Here's a [mnemonic](/toolbox/mnemonics.md) to remember these dimensions: usability, security and other -ilities are equally important; testability is VIP as well; thus testability dimensions are **`VIPS`** (**v**alue, **i**ntrinsic, **p**roject, **s**ubjective). Here's another: [`SOCKS`](https://www.a-sisyphean-task.com/2012/07/putting-your-testability-socks-on.html). 41 | 42 | ### Checklist 43 | 44 | This checklist adapted from [Ash Winter](https://testingisbelieving.blogspot.com/2017/08/the-team-test-for-testability.html) can be used for a quick **health check on your testability**. For each question answer Yes (+1) or No (+0). If your final score is below 8, you are working under unnecessary risk (see the scoring sketch after this list). 45 | 46 | 1. Do developers react positively when a bug is reported? 47 | 2. Can anyone access a prioritised list of the open bugs? 48 | 3. Does your team measure critical metrics about the system? 49 | 4. Is it possible to simulate a failure of a dependency (e.g. a 3rd party)? 50 | 5. Is it possible to test a specific system behaviour in isolation? 51 | 6. Can any team member test an unfinished feature from their machines? 52 | 7. Can you set your system into a given state to repeat a test? 53 | 8. Can any team member create a test environment? 54 | 9. Can you test in production (e.g. feature flags)? 55 | 10. Is it possible to see and query logs from production? 56 | 11. Does your team have regular contact with the users of the system? 57 | 12. Does your team maintain a knowledge base on how their system is built and tested? 58 | 
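Scoring the checklist is simple arithmetic, but a tiny script makes it trivial to repeat over time. A minimal sketch in JavaScript, assuming you record one boolean per question (the property names are invented abbreviations of the twelve questions above):

```js
// Testability health check: +1 per "yes"; below 8 means unnecessary risk.
const answers = {
  bugsWelcomed: true, // 1. developers react positively to bug reports
  bugListAccessible: true, // 2. prioritised bug list available to anyone
  metricsMeasured: false, // 3. critical system metrics are measured
  dependencyFailureSimulable: true, // 4. can simulate a dependency failure
  // ...continue with one boolean per remaining question, 12 in total
};

const score = Object.values(answers).filter(Boolean).length;
console.log(`Testability score: ${score}/12`);
if (score < 8) console.log("Warning: you are working under unnecessary risk.");
```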
59 | If you were unsatisfied with the score you got, there are methods to improve it. Of course you have ~~boring~~ ~~expensive~~ [maturity models](https://www.tmmi.org/tmmi-documents/#) in the market to formally evaluate your testability. However, if you prefer something simple and tailored for your team, you can use the [**Test Improvement Assessment**](http://www.huibschoots.nl/wordpress/wp-content/uploads/2017/02/Test-Improvement-Huib-Schoots-Joep-Schuurkes.pdf). Essentially, your team selects which testability criteria are relevant for your context, scores them and finally agrees on how to improve. If you need hints on practices that can improve your system's testability, [Michael Bolton](http://www.developsense.com/blog/2009/07/testability/) has a few. 60 | 61 | Both the checklist and the assessment are practical methods to discuss testability. To visually **report the state of testability** you can use a [mind map](/toolbox/note-taking.md), as illustrated (pun intended) by [Adam Knight](https://www.a-sisyphean-task.com/2014/07/a-map-for-testability.html). 62 | 63 | ## Teachers 64 | 65 | - [Ash Winter](https://testingisbelieving.blogspot.com/2019/02/ask-me-anything-testability.html) 66 | - [Bret Pettichord](http://www.pettichord.com/) 67 | - [James Bach](https://vimeo.com/78912852) 68 | - [Maria Kedemo](https://mkedemo.wordpress.com/2015/11/22/dimensions-of-testability-v1-1/) 69 | - [Michael Bolton](http://www.developsense.com/blog/category/testability/) 70 | 71 | ## Sources 72 | 73 | - [Test improvement in an agile/CD environment](http://www.huibschoots.nl/wordpress/?p=2543) 74 | - [The team test for testability](https://testingisbelieving.blogspot.com/2017/08/the-team-test-for-testability.html) 75 | - [Deeper Testing: Testability](http://www.developsense.com/blog/2017/09/deeper-testing-3-testability/) 76 | - [Heuristics of Software Testability](http://www.satisfice.com/tools/testable.pdf) 77 | - [Putting Your Testability Socks On](https://www.a-sisyphean-task.com/2012/07/putting-your-testability-socks-on.html) 78 | - [A Map For Testability](https://www.a-sisyphean-task.com/2014/07/a-map-for-testability.html) 79 | - [Design for Testability](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.94.5966&rep=rep1&type=pdf) 80 | - [30 days of Testability challenge](https://dojo.ministryoftesting.com/dojo/lessons/30-days-of-testability) 81 | -------------------------------------------------------------------------------- /docs/concepts/tester-responsibilities.md: -------------------------------------------------------------------------------- 1 | # Tester's responsibilities 2 | 3 | > **tl;dr** To master the necessary skills to fulfil the [purpose of testing](/concepts/testing-purpose.md). 4 | 5 | ## Theory 6 | 7 | Your responsibility as a tester is to fulfil the [purpose of testing](/concepts/testing-purpose.md). 
8 | 9 | That means you will be performing a [diverse set of activities](/concepts/what-testing-is.md), which include: 10 | 11 | - Clarify [requirements](/concepts/requirements.md) between stakeholders and developers; 12 | - Elaborate a [test strategy](/toolbox/test-strategy.md) for your product; 13 | - Advocate the right [development methodology](/toolbox/dev-methodologies.md) for your team; 14 | - Collect and develop [tools and techniques](/toolbox/tester-tools.md) to support your testing; 15 | - Choose which [test types](/types/index.md) bring the most value at a given time; 16 | - Adapt your [role](/roles/index.md) to the current team's needs. 17 | 18 | While you do all that, stay alert and [avoid the common pitfalls](/concepts/what-testing-is-not.md). 19 | 20 | ## Practice 21 | 22 | > We're extensions to the senses of our stakeholders. We help the team sense things that they might not be able to sense on their own, due to their limited time and the mindset required to do their job. 23 | > 24 | > — [Michael Bolton](http://www.developsense.com/blog/2010/05/testers-get-out-of-the-quality-assurance-business/) 25 | > 26 | > If you feel that your value is not appreciated or you can be easily replaced, then shame on you. Think to yourself: am I doing all I can to help my team succeed? Does my team value my contributions? 27 | > 28 | > — [John Andrews](https://testingfromthehip.wordpress.com/2016/01/08/am-i-really-a-valuable-member-of-my-team/) 29 | 30 | If you want an (almost) exhaustive list of responsibilities and activities, this [link](https://dojo.ministryoftesting.com/dojo/lessons/what-do-software-testers-do-version-0-1) is for you. 31 | 32 | ### 🐞 Bug catcher 33 | 34 | > The tester does not "break the product". The tester finds a product that is already broken. 35 | > 36 | > — ["The life of one man"](https://thelifeofoneman.com/the-role-of-the-tester) 37 | 38 | Finding bugs — probably the most widely known mission of a tester. Why? It is the job of the tester to inform the stakeholders about anything that threatens the value of the product. To ensure the core functionality still works after adding new features. To ensure the critical issues are detected before the enhancement opportunities. To ensure the team is aware of the known issues and their risk (probability + impact). 39 | 40 | Issues that slow down testing are terribly important, because they give bugs the opportunity to hide for longer. So report not only _bugs_ in the product but also _issues_ that slow down testing. 41 | 42 | Keep in mind that the goal of a tester is not finding issues (per se). If that were the case, you would become a perfectionist, someone who raises an excessive number of low-value issues. 43 | 44 | Your _ultimate goal_ should be pursuing quality and keeping it at a high level. Using that mindset, finding issues becomes just a consequence of that pursuit. And naturally you will focus on issues that threaten quality. 45 | 46 | ### 👥 Teammate 47 | 48 | Part of your role is keeping the two groups below aligned. First, you need to align yourself with them. More about stakeholders when you get to [requirements](/concepts/requirements.md). 49 | 50 | - **Development**: Some developers think that all testers do is question their work and expose their flaws. On the contrary, one of the tester's goals is to help developers look good (by finding issues early) and save them debugging time (by investigating themselves). 
51 | - **Business**: Set expectations: explain that software development is not a precise number on an Excel sheet or Gantt chart. Provide the information they need to make informed decisions, and then let them make the decisions. The only person who should be signing off the product is its owner. 52 | 53 | You are often the bearer of bad news. Own it and deliver the information with compassion and humility. Attempt to fix the bad news before reporting it, and you might end up with good news. 54 | 55 | Strive to earn the respect of your team. As a tester you need to know the product like the back of your hand. Do your due diligence to become a reliable and knowledgeable source of information to the rest of the team. 56 | 57 | ### ❤️ (User) Friend 58 | 59 | Developers focus on code and functionality, while managers focus on business growth and profit. Besides these groups, there are two more that would appreciate some love from you: 60 | 61 | - **Support**: The bugs waiting to be fixed become a burden for the customer team or the customer support team. This group will appreciate it if you tell them what is currently buggy and whether there is a workaround. 62 | - **End user**: Often a feature works but the user experience is not taken into account. Ask yourself what type of emotions you would feel when using the product. Be the user's voice and "complain" to your team so that they don't have to. 63 | 64 | Share some of that love with your team too. Seek a joyful and friendly environment in your team. Celebrate success and keep a journal of small victories or praises. 65 | 66 | ### 🔦 Guiding light 67 | 68 | > Half my day is facilitating conversations between stakeholders and attempting to understand what each person is expecting from a release. The goal is to get the release into a place where all stakeholders' expectations are met. 69 | > 70 | > If this is not possible I warn the stakeholder that this will require new timelines and I provide data to demonstrate why and how. 71 | > 72 | > — [Phillipe Bojorquez](https://club.ministryoftesting.com/t/what-do-testers-do-on-a-daily-basis/12687/2) 73 | 74 | James Bach compares a tester to the headlights of a car. The analogy aims to explain that your role is to _illuminate_ what is unknown and ahead of the team. You do not control the system; instead, you provide input for others to act. You are not the _driver_ (Product Owner), but what your light uncovers surely influences the driver. 75 | 76 | Testers attempt to forecast multiple scenarios that might hurt the team, so that it can prepare in advance and reduce the risk. However, it's impossible to think about every problem ahead of time. It's part of your role to keep gathering information along the way, so that your team can react and make better decisions. 77 | 78 | Your (business) stakeholders will not always know what they want. Sometimes they are transparent about it and reach out to you for advice, e.g. "What are your thoughts on X? How should we do Y?". Other times you must observe them carefully to notice their hesitation or their fragile/biased reasoning. That's a silent call for your help. 79 | 80 | Even though you don't own the product, you can give them your advice and support it with data (your experience, domain knowledge, market benchmarks, competitors, etc.). This strengthens your relationship because you show that you care without being prescriptive. 
81 | 82 | ### 🔎 Inspector 83 | 84 | > Your testing should provide enough information for the team to make its own perceived view of quality (…) to help decide things like: when to release, when to bug fix, when to scale back, when to move on, etc. 85 | > 86 | > — [John Andrews](https://testingfromthehip.wordpress.com/2016/06/14/what-is-the-main-purpose-of-our-testing/) 87 | 88 | One of your responsibilities is to uncover information and deliver it. To do so, there are many questions a tester could ask. [Mnemonics](/toolbox/mnemonics.md) are a clever way to avoid forgetting them. If you can only memorise four questions, these are the ones: 89 | 90 | 1. **What are we building?** _What features? What are the components? How do they integrate?_ 91 | 2. **For who?** _What value are they expecting? How will they use it? How can they get help?_ 92 | 3. **What could go wrong?** _What's the impact? Who would suffer? How long would it take to fix it?_ 93 | 4. **How would we find out?** _Can we detect a failure? Can we prevent or mitigate it?_ 94 | 95 | Once you have gathered that information, share it with your team and other relevant stakeholders. Be mindful of your audience — deliver just the right amount of data using the most effective medium for them (e.g. using diagrams for non-technical people). 96 | 97 | Get the team to sit down and agree on your project's goal. In the future, if anyone starts losing track, revisit the agreed goal. After all, one of your duties is to verify that what is being built is as expected. 98 | 99 | ### 🏕️ Scout 100 | 101 | > Always leave the campsite better than you found it. 102 | > 103 | > — [The boy scout rule](http://programmer.97things.oreilly.com/wiki/index.php/The_Boy_Scout_Rule) 104 | 105 | Besides the code, the team and the processes can also benefit from your testing. Work closely with your team to identify bottlenecks and keep on improving. Help your colleagues maintain a high velocity by recommending tools and practices that expedite your work. Use [retrospectives](/roles/agile-team-member.md) to share your concerns and suggestions. 106 | 107 | Working as a team, you achieve more, faster and with less pain. 108 | 109 | ### 🍎 Thinker 110 | 111 | Testers add value to teams by contributing different perspectives. If you always use the same thinking, you get biased and you might miss important aspects. Next time, try to combine different approaches: 112 | 113 | - **Technical thinking**: usage of _experience_ to select the right tools, techniques and technologies. This is useful to minimise the effort of [testing](/concepts/testing-purpose.md) and make development more efficient. 114 | - **Creative thinking**: usage of _creativity_ to analyse the same context using a different perspective. This is useful to uncover new information that no one thought about before (aka. _unknown unknowns_). 115 | - **Critical thinking**: usage of _scepticism_ to question what is known or assumed to be the truth. This is useful to detect assumptions or biases and review the "why" and "how" of a requirement. 116 | - **Practical thinking**: usage of _visualisation_ to draw the ideas under discussion or ask for examples. This is useful to predict how an idea will be done and remove any obstacles to its implementation. 117 | - **Black box thinking**: usage of _ignorance_ to skip implementation details. This is useful to focus on behaviour and end-to-end flow, impersonating a user. 118 | 119 | ### 📚 Learner 120 | 121 | Skilled testers come in all "flavours". 
Some are highly technical and will highlight technical issues you haven't noticed. Some are skilled at seeing the app from the users' perspective and will highlight problems that will cost you customers. Some will let you know if you are breaking any regulation or standard. 122 | 123 | A good tester should be meticulous, curious, creative, determined and mindful of biases. A great tester nurtures the [student mindset](/next/index.md) and is humble enough to keep learning. 124 | 125 | ## Sources 126 | 127 | - [Four and more questions](http://www.developsense.com/blog/2018/03/four-and-more-questions/) 128 | - [Why you might need testers](http://www.investigatingsoftware.co.uk/2017/04/why-you-might-need-testers.html) 129 | - [Am I really a valuable member of my team?](https://testingfromthehip.wordpress.com/2016/01/08/am-i-really-a-valuable-member-of-my-team/) 130 | - [The role of the tester](https://thelifeofoneman.com/the-role-of-the-tester) 131 | - [Ten Misconceptions About Software Testing That Non-Testers Share](https://dojo.ministryoftesting.com/dojo/lessons/ten-misconceptions-about-software-testing-that-non-testers-share) 132 | - [What's the difference between a good test and a bad test?](https://dojo.ministryoftesting.com/dojo/lessons/designing-tests-what-s-the-difference-between-a-good-test-and-a-bad-test) 133 | - [How To Think Like a Software Tester](https://thelifeofoneman.com/think-like-software-tester) 134 | - [What testers find](http://www.satisfice.com/blog/archives/572) 135 | - [Testers: Get Out of the Quality Assurance Business](http://www.developsense.com/blog/2010/05/testers-get-out-of-the-quality-assurance-business/) 136 | - [99 Second Introduction to Lateral and Critical Thinking](https://dojo.ministryoftesting.com/dojo/lessons/99-second-introduction-to-lateral-and-critical-thinking) 137 | - [Modern Testing Principles](https://dojo.ministryoftesting.com/dojo/lessons/modern-testing-principles) 138 | - [What do software testers do](https://dojo.ministryoftesting.com/dojo/lessons/what-do-software-testers-do-version-0-1) 139 | -------------------------------------------------------------------------------- /docs/concepts/testing-purpose.md: -------------------------------------------------------------------------------- 1 | # Testing's purpose 2 | 3 | > **tl;dr** To measure and maximise quality delivered, while minimising risk to an acceptable level. 4 | 5 | ## Theory 6 | 7 | Are you looking for the official definition of testing? You won't like it. Fine, here it is: 8 | 9 | > The process consisting of all lifecycle activities, both static and dynamic, concerned with planning, preparation and evaluation of software products and related work products to determine that they satisfy specified requirements, to demonstrate that they are fit for purpose and to detect defects. 10 | > 11 | > — International Software Testing Qualifications Board [(ISTQB)](http://glossary.istqb.org/search/testing) 12 | 13 | You just read 43 words in a single sentence. [Hemingway](http://www.hemingwayapp.com/) hates it and your brain hates it too. Instead, let's focus on the purpose of testing — why we need it and what it is. From there, we can think about the [responsibilities of a tester](/concepts/tester-responsibilities.md). 14 | 15 | Quality is **value to some person, who matters**. A bug is anything that threatens quality. This simple definition shows how subjective quality really is. 
Here are a few other perspectives: 16 | 17 | - Aesthetic view: _Quality is emotive and simplicity._ 18 | - ✔ Fosters excellence and pride in workmanship. 19 | - ✘ Can become a cloak for perfectionists. 20 | - Manufacturing view: _Quality is conformance to specifications._ 21 | - ✔ Fits the mindset of highly regulated contexts, e.g. the health industry. 22 | - ✘ Can produce products that satisfy no one, besides the specification's author. 23 | - Customer view: _Quality is whatever satisfies the customer._ 24 | - ✔ Increases customer loyalty. 25 | - ✘ Can lead to eternal chasing of trends and competitors. 26 | 27 | Quality is a moving target — it adapts to its changing context (e.g. time, priorities, trends). Stakeholders will form a _perception_ of the current quality based on their values, knowledge, skills and past experiences. Given the diversity of stakeholders, each will have a different perception of quality. That's one of the reasons why you cannot _assure quality_. 28 | 29 | > Apple shipped the first version of Hypercard with about 500 known bugs in it, yet the product was a smashing success. The Hypercard QA team chose the right bugs to ship with. They also chose the right features. 30 | > 31 | > — [James Bach](http://www.satisfice.com/articles/gooden2.pdf) 32 | 33 | Even though you cannot guarantee quality, you can do your best to pursue it. Thus, the **purpose of testing** is to measure and maximise quality delivered, while minimising risk to an acceptable level. The more we test, the more we know about our products. With that knowledge, teams can make informed decisions on how to improve and when to release the product. 34 | 35 | ## Practice 36 | 37 | For specific examples, continue reading [what testing is](/concepts/what-testing-is.md) and [what testing is not](/concepts/what-testing-is-not.md). 38 | 39 | To summarise: 40 | 41 | - Testing uncovers information that enables better decisions; 42 | - Testing uses creativity to discover assumptions and unexpected behaviours; 43 | - Testing wears multiple hats to understand each stakeholder's perspective; 44 | - Testing democratises information and helps stakeholders talk to each other. 45 | 46 | ## Teachers 47 | 48 | - [Ministry of Testing](https://dojo.ministryoftesting.com/) 49 | - [Rapid Software Testing](http://www.satisfice.com/testmethod.shtml) 50 | 51 | ## Sources 52 | 53 | - [Quality is a journey – but do you know your destination?](https://mavericktester.com/2018/03/14/quality-engineering/) 54 | - [The Anatomy of a Definition of Testing](https://qahiccupps.blogspot.pt/2016/11/the-anatomy-of-definition-of-testing.html) 55 | - [Testing Terminology](http://pixelgrill.com/testing-terminology/) 56 | - [The Challenge of "Good Enough" Software](http://www.satisfice.com/articles/gooden2.pdf) 57 | -------------------------------------------------------------------------------- /docs/concepts/what-testing-is-not.md: -------------------------------------------------------------------------------- 1 | # Testing is not… 2 | 3 | ## ☑️ Checking 4 | 5 | > When you check, you confirm what you already know. When you test, you search for new information. 6 | > 7 | > — [Connor Roberts](http://pixelgrill.com/what-is-testing/) 8 | 9 | As you should know by now, testing encompasses many activities — checking is just one of those. 10 | 11 | According to [Rapid Software Testing](http://www.satisfice.com/blog/archives/856), checking is "the process of making evaluations by applying algorithmic decisions to observations of a product". 
"Algorithmic" means objective and repeatable. That's why checking is an activity that can be performed by a tool instead of a human. 12 | 13 | However, testing is cognitive work that can only be performed by a human, optionally supported by tools. 14 | 
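Here is a minimal sketch of what a check looks like in practice — the `cartTotal` function and the expected value are invented for illustration. The decision rule is fully algorithmic, so a tool can run it; deciding whether the behaviour is _right_ for the business remains a human judgement:

```js
const assert = require("node:assert");

// Hypothetical production code: totals a cart, in cents to avoid float issues.
function cartTotal(items) {
  return items.reduce((sum, item) => sum + item.priceInCents * item.quantity, 0);
}

// A check: one observation, one algorithmic pass/fail decision — nothing more.
const observed = cartTotal([{ priceInCents: 250, quantity: 2 }]);
assert.strictEqual(observed, 500);
```

A human tester, by contrast, might wonder what a negative quantity should mean for the business — no assertion will ask that question on its own.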
15 | ## 🤖 Automation 16 | 17 | > Much of what we find as testers comes off-script and high-value unknowns are found by letting humans do what humans do best - thinking creatively! 18 | > 19 | > — [Connor Roberts](http://pixelgrill.com/what-is-testing/) 20 | 21 | Tools can be used to support many testing activities — automating checks is just one of those. 22 | 23 | Testing is about using and creating tools to support your work, not trying to get them to replace you. Testing, as an exploratory, intellectual activity, cannot be replaced by automated checks. No automation will ever replace the tester reaction of "hmm, that's odd". 24 | 25 | Automation is a tool that frees us from repetitive, monotonous tasks; a means to save time and invest it in using our brains towards our testing goal. And since words matter, we prefer to say ["Automation in Testing"](https://automationintesting.com/about/) rather than "Test Automation". 26 | 27 | Remember: your product will be used by humans, like you. If only bots "test" your product, what kind of product will you deliver? 28 | 29 | ## 💥 Breaking software 30 | 31 | > Somehow, code that worked just fine for the developer doesn't work for the tester. The reason is that the tester did something the developer didn't expect. 32 | > 33 | > — [Kate Paulk](https://dojo.ministryoftesting.com/dojo/lessons/ten-misconceptions-about-software-testing-that-non-testers-share) 34 | 35 | Testing is about exploring and discovering new information. Sometimes, testing attacks the software to check how it stands its ground (e.g. Penetration Testing). But most of the time, testers just search for places that are broken and report them. They might not look broken; they might just look unexpected or unpleasant to the user. 36 | 37 | It's like holding an object in your hand and gently looking for cracks. In dysfunctional teams, testing can be blamed for not finding bugs or finding too many issues. Testing is just "the messenger", so focus on bringing reliable and relevant "news" to your team. 38 | 39 | ## 🐵 Straightforward 40 | 41 | > Testing is often thought of as something anyone can do (…) It takes real skill to do these things well and in a systematic way. 42 | > 43 | > — [Claire Reckless](https://dojo.ministryoftesting.com/dojo/lessons/so-what-is-software-testing) 44 | 45 | Anyone can "play around" with a product. But testers explore it in a structured way. They use their intuition to look for problematic areas and their empathy to think/feel like a user. They report their findings objectively, together with recommendations. 46 | 47 | Anyone, even bots, can perform an action and compare the actual result with an expectation. But testers design scenarios to maximise coverage while minimising execution time. Some will be automated, and for those they will use/create tools and frameworks. 48 | 49 | > If you have skilled testers with the freedom and knowledge to investigate beyond test scripts, you will find your testers doing (…) a number of things to add value to your software, that an untrained person wouldn't know are even possible. 50 | > 51 | > — [Kate Paulk](https://dojo.ministryoftesting.com/dojo/lessons/ten-misconceptions-about-software-testing-that-non-testers-share) 52 | 53 | ## 💯 Quality Assurance (QA) 54 | 55 | > Assuring quality requires control and when there are so many variables in play, control comes down to everyone doing their best work to make the software as good as they can get it. 56 | > 57 | > — [Kate Paulk](https://dojo.ministryoftesting.com/dojo/lessons/ten-misconceptions-about-software-testing-that-non-testers-share) 58 | 59 | Quality is a team effort. If something fails in production, it's because the whole team failed: maybe the PO had unclear requirements, the developer forgot to consider an extra scenario, the DevOps deployed at the wrong time and the tester did not explore enough to spot the issue. 60 | 61 | The people who test are as human as the people who code, and all humans make mistakes. Testing is neither invincible nor a gatekeeper. Even with medical software, where lives are at stake, mistakes happen. 62 | 63 | > There is a powerful alternative to the orthodox, expensive, and boring methodologies that aim at the best possible quality: (…) the discipline of good enough software development. 64 | > 65 | > — [James Bach](http://www.satisfice.com/articles/gooden2.pdf) 66 | 67 | Testing can inform whether a product has _enough quality_ for release, or whether the user will _perceive_ the product as stable and useful. Otherwise, you risk never finishing your testing. 68 | 69 | ## ⏳ Finite 70 | 71 | > Does my (current) testing concentrate on making the product better or perfect? Be smart with your priorities: work on making the product better, not perfect. 72 | > 73 | > — [Lina Zubyte](https://letmetrysoftwaretesting.wordpress.com/2018/01/22/testing-to-make-product-better-vs-perfect/) 74 | 75 | Nothing can be tested completely. With an unlimited budget and an unlimited deadline (e.g. billions of years), it would be possible to check every combination of inputs that would lead to every bug in a particular piece of software. But that's impossible. 76 | 77 | Part of the skill of being a tester is deciding what to test. You will have to compromise and prioritise. That will be your [test strategy](/toolbox/test-strategy.md) to achieve ~~perfect~~ good enough software. And when do you stop testing? 78 | 79 | > Ultimately, testing is _finished_ when management has enough information to enable them to make the decision whether or not to release the product. 
80 | > 81 | > — [Claire Reckless](https://dojo.ministryoftesting.com/dojo/lessons/so-what-is-software-testing) 82 | 83 | ## Sources 84 | 85 | - [The Challenge of "Good Enough" Software](http://www.satisfice.com/articles/gooden2.pdf) 86 | - [Ten Misconceptions About Software Testing](https://dojo.ministryoftesting.com/dojo/lessons/ten-misconceptions-about-software-testing-that-non-testers-share) 87 | - [Testers: Get Out of the Quality Assurance Business](http://www.developsense.com/blog/2010/05/testers-get-out-of-the-quality-assurance-business) 88 | - [Pressing the Green Button](http://www.developsense.com/blog/2018/12/pressing-the-green-button/) 89 | - ["Testers just Validate Acceptance Criteria"](https://medium.com/@blakenorrish/testers-just-validate-acceptance-criteria-4c25566b591e) 90 | -------------------------------------------------------------------------------- /docs/concepts/what-testing-is.md: -------------------------------------------------------------------------------- 1 | # Testing is… 2 | 3 | ## 🔎 Investigation 4 | 5 | **Testing uncovers information that enables better decisions.** 6 | 7 | - Noticing what everyone looks at but no one sees. 8 | - Looking for what is inconsistent or counterintuitive. 9 | - Finding problems before they happen or become critical. 10 | - Providing information that improves the team's decisions. 11 | - Gathering examples or patterns before reporting a bug. 12 | - Chasing some pattern which might be elusive or hard to understand. 13 | - Going beyond optimism and actually trying it. 14 | 15 | ## ⛺️ Exploration 16 | 17 | **Testing uses creativity to discover assumptions and unexpected behaviours.** 18 | 19 | - Using the product like a well-behaved user would. 20 | - Abusing the product like an ill-intentioned user would. 21 | - Examining different aspects of the product to prevent surprises. 22 | - Identifying the product's subtleties and extremities. 23 | - Studying the product beyond the formal requirements. 24 | - Using questions to validate assumptions and prompt new scenarios. 25 | - Exploring products imaginatively to find unexpected behaviours. 26 | 27 | ## 🤝 Empathy 28 | 29 | **Testing wears multiple hats to understand each stakeholder's perspective.** 30 | 31 | - Defining personas for the product's stakeholders. 32 | - Applying different perspectives to the same situation. 33 | - Identifying your team's biases and assumptions. 34 | - Speaking with different stakeholders to detect misunderstandings and misalignments. 35 | - Showing the client the gap between what they want and what the users want. 36 | - Thinking critically about features in order to evaluate their true value. 37 | - Negotiating with the client a balance between "good", "cheap" and "pretty". 38 | - Striving to deliver a reliable product that can be used with confidence. 39 | 40 | ## 💬 Communication 41 | 42 | **Testing democratises information and helps stakeholders talk to each other.** 43 | 44 | - Combining scattered data into relevant and accessible information. 45 | - Sharing just the right information, at the right time, using the right format. 46 | - Reporting bugs in a way that developers can fix them and clients prioritise them. 47 | - Communicating clearly and helping others do the same, using questions and examples. 48 | - Keeping discussions visual and people engaged and feeling heard. 49 | - Using your "helicopter view" to onboard people and "flying down" to details when needed. 50 | - Maintaining documentation and training users on how to use the product. 
51 | 52 | ## Sources 53 | 54 | - [41 Definitions of Software Testing](https://chroniclesoftesting.blogspot.pt/2017/11/41-definitions-of-software-testing.html) 55 | - [Testing Terminology](http://pixelgrill.com/testing-terminology/) 56 | - [99 Second Introduction to Testing](https://dojo.ministryoftesting.com/lessons/99-second-introduction-to-testing) 57 | - [Testing is…](http://www.developsense.com/blog/2014/10/testing-is/) 58 | - [So, What Is Software Testing?](https://dojo.ministryoftesting.com/dojo/lessons/so-what-is-software-testing) 59 | - [What is the main purpose of our testing?](https://testingfromthehip.wordpress.com/2016/06/14/what-is-the-main-purpose-of-our-testing/) 60 | -------------------------------------------------------------------------------- /docs/fields/usability.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/fields/usability.md -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Start Testing 5 | 6 | 7 | 11 | 15 | 19 | 20 | 21 | 22 | 23 |
Loading…
24 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Syllabus 2 | 3 | > _Your attitude, not your aptitude, will determine your altitude._ — Zig Ziglar 4 | 5 | 🚧⬇ _this will be an A4 page summary of how to start testing_ ⬇🚧 6 | 7 | ## Concepts 8 | 9 | The [purpose](/concepts/testing-purpose.md) of testing is to measure and maximise quality delivered, while minimising risk to an acceptable level. Testing [is not](/concepts/what-testing-is-not.md) straightforward. Testing [is](/concepts/what-testing-is.md) investigation, exploration, empathy, communication, and [it's not](/concepts/what-testing-is-not.md) limited to checking or automation. 10 | 11 | As a tester, it will be your [responsibility](/concepts/tester-responsibilities.md) to be a guiding light, an inspector, a scout, a thinker, a bug detector, a (user) friend, a teammate and… a learner — and that I know you are, since you're reading this :) 12 | 13 | Stakeholders use or are affected by the product you are testing. You will clarify and detail their expectations into [requirements](/concepts/requirements.md). By advocating for [testability](/concepts/testability.md), you make testing less painful and more valuable for everyone involved. 14 | 15 | ## Toolbox 16 | 17 | It all starts by describing how your team will test and report any discoveries. This is your [test strategy](/toolbox/test-strategy.md). 18 | 19 | The testing community (and your experience) will show you which [tools and technologies](/toolbox/tester-tools.md) improve your testing. As a quality advocate, you should recommend to your team tools and [development methodologies](/toolbox/dev-methodologies.md) that increase your product's testability. 20 | 21 | There are many techniques you can use to perform your role: a [charter](/toolbox/charters.md) summarises your testing goals and guides your exploration; [oracles](/toolbox/oracles.md) are sources of knowledge that improve your testing; [heuristics](/toolbox/heuristics.md) can be used to pick what to test next or to make a decision when you don't have an oracle. 22 | 23 | During your testing, you will discover new information. [Taking notes](/toolbox/note-taking.md) will structure your thinking, expand your memory and make it easier to share that information across the team. And [mnemonics](/toolbox/mnemonics.md) will reduce the effort of retrieving that information from your memory. 24 | 25 | ## Test types 26 | 27 | Talking about different test types is difficult. It is important that you and your team speak the same testing language. There are so many different [types of tests](/types/test-types.md). The most popular are unit, integration, end-to-end and regression tests, but there are many more. 28 | 29 | You can group those and many more tests into [functional vs non-functional testing](/types/test-types?id=functional-testing) or [black box vs white box testing](/types/test-types?id=black-box-testing). Not to mention different approaches to testing, like [exploratory](/types/test-types?id=🧭-exploratory-testing) or [automation](/types/test-types?id=🤖-automation-in-testing). And did you know about advanced approaches like [chaos](/types/test-types?id=💥-chaos-testing) and [mutation](/types/test-types?id=🧬-mutation-testing) testing? 30 | 31 | And once you decide what types of tests you want to have, you need to think about how many are good enough. 
The [testing pyramid](/types/test-pyramid.md) is a heuristic to help you find the balance that is right for your context. 32 | 33 | ## Tester roles 34 | 35 | (in progress) 36 | 37 | ## Fields 38 | 39 | (in progress) 40 | 41 | ## Future 42 | 43 | (in progress) 44 | 45 | 🚧⬆ _offer a way to download this summary as a pdf_ ⬆🚧 46 | 47 | --- 48 | 49 | [GitHub repository](https://github.com/dialex/start-testing) 50 | -------------------------------------------------------------------------------- /docs/next/index.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dialex/start-testing/3d611ef15fce3fe08ed73862b3430d8a3b357478/docs/next/index.md -------------------------------------------------------------------------------- /docs/notebook/specification-by-example.md: -------------------------------------------------------------------------------- 1 | # Specification By Example (SBE) 2 | 3 | > **tl;dr** A practice that translates business goals into scenarios that can be tested and automated. 4 | 5 | This particular way of doing SBE is called Example Mapping. 6 | 7 | **What you will be using**: 8 | 9 | - User Stories (⭐️ yellow) are written by POs/BAs and summarise the desired functionality 10 | - Examples (✅ green) detail a scenario that our feature must support 11 | - Rules (🔵 blue) summarise a group of examples or describe a constraint 12 | - Questions (🔴 red) are doubts that nobody in the conversation can answer — if they remain unanswered, they turn into assumptions 13 | 14 | **How it [works](https://medium.com/@mattwynne/introducing-example-mapping-42ccd15f8adf)**: 15 | 16 | 1. Pick a story ⭐️ to discuss and place it at the top of the table. 17 | 1. Write the known acceptance criteria 🔵 and put them beneath the story ⭐️. 18 | 1. For each rule, write at least one example ✅ to illustrate it. Put the ✅ under the respective 🔵. 19 | 1. During the discussion, you might uncover questions 🔴 that nobody in the room can answer. Put them aside. 20 | 1. Continue until the scope of the story is clear (or time runs out). 21 | 1. Find out who would be suitable to answer the questions 🔴 raised. 22 | 23 | ![example](../_media/notebook/specification-by-example-1.png) 24 | 25 | **Exercises**: [Story](https://medium.com/@mattwynne/introducing-example-mapping-42ccd15f8adf) / [Rules vs Examples](https://speakerdeck.com/mattwynne/rules-vs-examples-bddx-london-2014) 26 | 27 | - The process is visual and gives you fast feedback 28 | - Invite the three amigos: a developer, a tester and a product person 29 | - Time-box each story to 25 minutes; run it every other day 30 | - For distributed teams: use GDocs, or a coloured spreadsheet, or a mind map 31 | - Focus on getting clarity and reducing the unknowns — leave the Gherkin scenarios for later 32 | - Reach a shared understanding of what it will take for the story to be done 33 | - Let the PO focus on rules/examples; then the Tester drafts the Gherkin scenarios and asks for a review 34 | - Use [thumb-vote](https://www.conferencesthatwork.com/index.php/event-design/2012/06/testing-consensus-using-roman-voting/) to decide if the story is detailed enough for development 35 | 36 | **How it should [flow](https://blog.red-badger.com/blog/2012/07/31/what-is-specification-by-example)**: 37 | 38 | 1. Deriving scope from goals 39 | 1. Specifying collaboratively 40 | 1. Illustrating specifications using examples 41 | 1. Refining the specifications 42 | 1. Automating validation without changing the specifications 43 | 1. 
Validating the system frequently 44 | 1. Evolving living documentation 45 | 46 | **Tips**: 47 | 48 | - What do we want to achieve? Why and how will we measure it? 49 | - Who are the relevant stakeholders? 50 | - How can they help the project achieve its goals? 51 | - Specify stories in a (3-5 pax) group -> shared understanding, diff perspectives `#WisdomOfCrowds` 52 | - Examples: − assumptions, − risk, + alignment, + coverage 53 | - Good example: self-explanatory, focused on one functionality, domain language, measurable 54 | - Refine examples to describe a feature unambiguously but not necessarily cover all cases 55 | - Key examples are called Acceptance Criteria (AC) or Acceptance/Functional Tests 56 | - Too many examples compromise automation, maintainability and readability (live doc) 57 | - Living doc is in sync with code and anyone in the team can understand it 58 | - Unit tests are technology-facing; the ACs are business-facing; both support the team and are fully automated 59 | 60 | ## Teachers 61 | 62 | - [Matt Wynne](https://cucumber.io/blog/authors/matt/) (Example Mapping) 63 | - [Gojko Adzic](https://gojko.net/) (Specification By Example) 64 | 65 | ## Sources 66 | 67 | - [What is Specification by Example](https://blog.red-badger.com/blog/2012/07/31/what-is-specification-by-example) 68 | - [Introducing Example Mapping](https://medium.com/@mattwynne/introducing-example-mapping-42ccd15f8adf) 69 | - [Specification by Example](https://www.thoughtworks.com/insights/blog/specification-example) (exercise) 70 | -------------------------------------------------------------------------------- /docs/notebook/user-story-mapping.md: -------------------------------------------------------------------------------- 1 | # User Story Mapping 2 | 3 | > **tl;dr** A top-down approach to break down Goals into Activities > Tasks > Stories. 4 | 5 | **How it [works](https://www.thoughtworks.com/insights/blog/story-mapping-visual-way-building-product-backlog)**: 6 | 7 | Story mapping is an engaging activity where all stakeholders participate in building the product backlog on a wall (instead of a long document). 8 | 9 | - Start with the company's vision 🌈 10 | - A vision 🌈 is achieved via goals 🎯 11 | - A goal 🎯 is reached by completing activities 💪 12 | - An activity 💪 is completed after the user performs some tasks ✅ 13 | - And these tasks can be transformed into user stories for software development 14 | 15 | [Example](https://www.thoughtworks.com/insights/blog/story-mapping-visual-way-building-product-backlog) _goal (red), activity (blue), task (green), user story (yellow)_ 16 | 17 | ![diagram](../_media/notebook/user-story-mapping-1.png) 18 | 19 | **How to prepare a session**: 20 | 21 | - Book a large meeting room with empty walls 22 | - Get coloured post-its, one for each level 23 | - Get thick permanent markers (to read from afar) 24 | - A good camera to take photos of the wall (detail and panorama) 25 | - Optional: stickers, like dots or stars (to flag special cases) 26 | 27 | ![structure](../_media/notebook/user-story-mapping-2.jpg) 28 | 29 | **Tips**: 30 | 31 | - You can adapt the structure for user journeys 🚶‍: User > Goals > User Journeys > Actions > Stories 32 | - Large projects may require up to 6 levels in a story map. 
Smaller projects, 3 levels 33 | - Use stickers like dots or stars to represent special notations (out of scope, spikes) 34 | - Use smaller post-its to capture assumptions, follow-ups or questions 35 | 36 | ## Teachers 37 | 38 | - [Jeff Patton](https://jpattonassociates.com/blog/) 39 | 40 | ## Sources 41 | 42 | - [Story Mapping, Visual Way of Building Product Backlog](https://www.thoughtworks.com/insights/blog/story-mapping-visual-way-building-product-backlog) 43 | -------------------------------------------------------------------------------- /docs/roles/agile-team-member.md: -------------------------------------------------------------------------------- 1 | # Agile team member 2 | -------------------------------------------------------------------------------- /docs/roles/automation-tester.md: -------------------------------------------------------------------------------- 1 | # Automation tester 2 | -------------------------------------------------------------------------------- /docs/roles/bug-hunter.md: -------------------------------------------------------------------------------- 1 | # Bug hunter 2 | -------------------------------------------------------------------------------- /docs/roles/certifications.md: -------------------------------------------------------------------------------- 1 | # Certifications 2 | -------------------------------------------------------------------------------- /docs/roles/coach.md: -------------------------------------------------------------------------------- 1 | # Coach 2 | -------------------------------------------------------------------------------- /docs/roles/exploratory-tester.md: -------------------------------------------------------------------------------- 1 | # Exploratory tester 2 | 3 | --- 4 | 5 | ## THIS WAS EXTRACTED AS IS FROM `note-taking.md` 6 | 7 | > James and Jon Bach developed [Session-Based Test Management](http://www.satisfice.com/sbtm/) (SBTM) as a method for measuring and managing exploratory testing. 8 | > 9 | > Exploratory testing is learning. Learning what is and what might be. Learning about our perception of the world, what's in front of us and how that influences the choices we make. It's learning together to build better products. It's creating an environment that enables a continuous "testing is learning" loop. 10 | > 11 | > The [following template](https://docs.google.com/document/d/1rKYmujVhUlNgfeYIBot12Z8E7S0Y_Z4pk5pefK7xO3g/edit?usp=sharing) provides triggers for an exploratory testing session. It's a way for you to experiment with your own structure. To help you tell a testing story – whether debriefing in real-time or asynchronously. I've had success with this template. It satisfies two audiences: One that just needs a summary and another who loves detail. 12 | > 13 | > I take notes like I'm creating a story for someone else to read. This includes a bit of show and tell. Great storytelling uses show more than tell. Sometimes it feels more natural to _tell_ your notes what you're doing. The _show_ comes alive with screenshots, GIFs and video. 14 | > 15 | > As I explore I write notes imagining I'm taking someone through my actual process, steps and thoughts. Imagine saying it out loud and just type. ‪You can always edit later. 16 | 17 | ### 🧭 Exploratory sessions 18 | 19 | > TO EDIT: INTRO 20 | > 21 | > James and Jon Bach developed [Session-Based Test Management](http://www.satisfice.com/sbtm/) (SBTM) as a method for measuring and managing exploratory testing. 22 | > 23 | > Exploratory testing is learning. 
Learning what is and what might be. Learning about our perception of the world, what's in front of us and how that influences the choices we make. It's learning together to build better products. It's creating an environment that enables a continuous "testing is learning" loop. 24 | > 25 | > The [following template](https://docs.google.com/document/d/1rKYmujVhUlNgfeYIBot12Z8E7S0Y_Z4pk5pefK7xO3g/edit?usp=sharing) provides triggers for an exploratory testing session. It's a way for you to experiment with your own structure. To help you tell a testing story – whether debriefing in real-time or asynchronously. I've had success with this template. It satisfies two audiences: One that just needs a summary and another who loves detail. 26 | > 27 | > I take notes like I'm creating a story for someone else to read. This includes a bit of show and tell. Great storytelling uses show more than tell. Sometimes it feels more natural to _tell_ your notes what you're doing. The _show_ comes alive with screenshots, GIFs and video. 28 | > 29 | > As I explore I write notes imagining I'm taking someone through my actual process, steps and thoughts. Imagine saying it out loud and just type. You can always edit later. 30 | > 31 | > Setup -> Explore -> Summarise 32 | 33 | #### Prepare your TORCH 34 | 35 | These questions will structure and guide your exploratory testing session: 36 | 37 | - Time: How much time will you put aside for this session? _(aim for 45-60 mins)_ 38 | - Oracles: Who or what will help you act on the information you discover? (see [oracles](/toolbox/oracles.md)) 39 | - Risks: What do you want to learn about? (see [charters](/toolbox/charters.md)) 40 | - Consider: What questions do you want to answer? (see [**`5W1H`** mnemonic](/toolbox/mnemonics.md)) 41 | - Heuristics: What triggers will guide your session? (see [heuristics](/toolbox/heuristics.md)) 42 | 43 | #### KIIQ exploring 44 | 45 | I group discoveries into four categories: Kudos, Ideas, Issues and Questions (KIIQ). 46 | 47 | - Kudos 48 | - Ideas 49 | - Issues 50 | - Questions 51 | 52 | #### Bring PROOF 53 | 54 | In addition to walking through the session sheet, we use an agenda summarised by the acronym "PROOF": 55 | 56 | - Past: What happened during the session? 57 | - Results: What was achieved during the session? 58 | - Obstacles: What got in the way of good testing? 59 | - Outlook: What still needs to be done? 60 | - Feelings: How does the tester feel about all this? 
61 | 62 | [ALL TOGETHER: example of Setup/Explore/Debrief template, guided by TORCH](https://club.ministryoftesting.com/t/examples-of-time-boxed-note-taking-sessions/12760) 63 | -------------------------------------------------------------------------------- /docs/roles/index.md: -------------------------------------------------------------------------------- 1 | # Tester roles 2 | 3 | 4 | -------------------------------------------------------------------------------- /docs/roles/lead-tester.md: -------------------------------------------------------------------------------- 1 | # Lead tester 2 | -------------------------------------------------------------------------------- /docs/roles/manual-tester.md: -------------------------------------------------------------------------------- 1 | # Manual tester 2 | -------------------------------------------------------------------------------- /docs/roles/mentor.md: -------------------------------------------------------------------------------- 1 | # Mentor 2 | -------------------------------------------------------------------------------- /docs/roles/recruiter.md: -------------------------------------------------------------------------------- 1 | # Recruiter 2 | -------------------------------------------------------------------------------- /docs/roles/technical-tester.md: -------------------------------------------------------------------------------- 1 | # Technical tester 2 | -------------------------------------------------------------------------------- /docs/toolbox/charters.md: -------------------------------------------------------------------------------- 1 | # Charters 2 | 3 | > **tl;dr** A charter summarises your testing goal and guides your exploration. 4 | 5 | ## Theory 6 | 7 | It works like a map: 8 | 9 | - If you show your map to someone, they quickly understand where you want to go. 10 | - If you wander too much and find yourself lost, your map focuses your mind by showing which paths lead you away from your initial goal and which paths lead you there. 11 | 12 | **Exploring software** has much in common with exploring territory. Along the way you will make many discoveries (e.g. information, bugs). To make your journey easier you can use tools. Remember, "the most important tool is the one between your ears". As an [exploratory tester](/roles/exploratory-tester.md) you will conduct exploratory testing sessions, and charters can guide your way. 13 | 14 | You can write your charters to be precise or broad, as you will see in the practice section below. Nevertheless, for a charter to be useful it should contain enough detail for anyone to understand three things. As a [mnemonic](/toolbox/mnemonics.md) you can remember the initials `TRI` — because a charter summarises what you are **TR**y**I**ng to accomplish: 15 | 16 | - **Target**: What is the name of the area you will explore? 17 | - **Resource**: What tools will you use on your exploration? 18 | - **Information**: What do you hope to achieve/discover in the end? 19 | 20 | Different charters lead to different types of exploration. You can view charters as a reminder to focus on a particular kind of information or risk while exploring. 21 | 22 | Charters are handy tools during all phases of a testing session. _Before_ an exploratory session they structure your thinking and goals. _During_ the session they guide you and prevent deviations. _After_ the session they summarise what you did (free documentation).
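For instance, here is a made-up charter with its `TRI` parts labelled:

> Explore **the checkout page** (target) with **a screen reader** (resource) to discover **accessibility issues** (information).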
23 | 24 | ## Practice 25 | 26 | A charter is a **small sentence** that summarises your focus for a particular exploratory testing session. Each charter should be motivated by a risk that you identified in a previous session or a need to discover more about a particular area of your product. 27 | 28 | Here are a few examples of… 29 | 30 | - Targets: a feature, a requirement, a screen. 31 | - Resources: a tool, a data set, a technique, a configuration, another feature… anything you can use. 32 | - Information: bugs, (unexpected) behaviours, dependencies, usability, compliance. 33 | 34 | As mentioned, charters can be precise or broad. If you are testing a specific area of your product for the first time, maybe you don't have enough knowledge to write a detailed charter. In that case your testing charter might look like this: 35 | 36 | > My mission is to test **the user interface** to discover **accessibility issues**. 37 | 38 | As you continue to test other parts of your product, you start to narrow down what still needs further testing. This time the charter for your next test session can be more focused and detailed: 39 | 40 | > Explore **the login form** using **different credentials** to discover **how secure the form is**. 41 | 42 | In this last charter you clearly specify your target (the login form) and what your goal is (to discover how secure the form is). You also have a strategy on how you will explore your target to get the information you want (using different credentials). 43 | 44 | Notice that "different credentials" is still a bit vague; however, we want to keep charters **concise and jargon-free**. That's enough for any non-technical reader to understand the type of testing you carried out in your session. 45 | 46 | A good charter offers direction without detailing actions too much; otherwise it might turn into a test case. If you make it too broad, your goal is not clear and during your exploratory session you won't know when to stop. 47 | 48 | The next time you need to come up with testing charters you can use templates to speed up the process. These **charter templates** give you a framework and you only need to fill in the gaps. Based on the previous examples, we can design these two templates: 49 | 50 | > Explore **target** 51 | > 52 | > With **resource** 53 | > 54 | > To discover **information** 55 | 56 | These templates are tools, not rules. You can use a different format: 57 | 58 | > My mission is to test **target** 59 | > 60 | > To discover **information** 61 | 62 | Feel free to create your own templates and don't let them limit your creativity. 63 | 64 | ## Teachers 65 | 66 | - [Elisabeth Hendrickson](https://twitter.com/testobsessed) 67 | 68 | ## Sources 69 | 70 | - [Software Testing Clinic's 99 seconds intro](https://dojo.ministryoftesting.com/lessons/99-second-introduction-to-charters) 71 | - [Explore It!](https://amzn.to/2OucmPY) 72 | -------------------------------------------------------------------------------- /docs/toolbox/dev-methodologies.md: -------------------------------------------------------------------------------- 1 | # Development methodologies 2 | 3 | > **tl;dr** TDD, ATDD and BDD are different development practices that favour quality. 5 | 5 | ## Theory 6 | 7 | None of these methodologies is "testing" by itself. They are development practices that foster quality, which in turn makes testing easier. 8 | 9 | ### TDD: (Unit) Test Driven Development 10 | 11 | > Write unit tests before writing the code that makes those tests pass.
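To make that idea concrete, here is a minimal JavaScript sketch in the style of the [Jest](https://jestjs.io/) test runner (the `sum` module is invented for illustration and does not exist when the test is first written):

```javascript
// sum.test.js (Red): written first, this test fails because sum.js does not exist yet.
const sum = require("./sum");

test("adds two numbers", () => {
  expect(sum(2, 3)).toBe(5);
});

// Green: the simplest sum.js that makes the test pass is
//   module.exports = (a, b) => a + b;
// Refactor: improve that code while the test keeps guarding its behaviour.
```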
12 | 13 | The TDD cycle can be summarised as "Red-Green-Refactor". First, you write a set of [unit tests](/types/test-types.md) that describe the code behaviour that you want to implement. If you run those tests, they will fail (red), because that behaviour is not yet implemented. Second, you write the simplest code required to make those tests pass (green). If you wanted, you could stop here. Usually developers go a step further and improve that code to be more resilient and maintainable (refactor). 14 | 15 | Unit tests tell you whether the code does what the developer expected it to do, at the method level, in isolation from the rest of the codebase. 16 | 17 | ### ATDD: Acceptance Test Driven Development 18 | 19 | > Write acceptance tests before writing the code that makes those tests pass. 20 | 21 | The ATDD cycle follows the same "Red-Green-Refactor" from TDD. The difference is the type of test that is created in the "Red" step. In fact, the two methodologies can be used together, as [Robert C. Martin](https://sites.google.com/site/unclebobconsultingllc/tdd-with-acceptance-tests-and-unit-tests) illustrates: "a developer runs the [acceptance tests](/types/test-types.md) and notices what fails; then they write unit tests that force them to write the code that will make some small portion of the acceptance tests pass; they keep running the acceptance tests to see how much is working as expected, and they keep adding unit tests and code until all the acceptance tests pass". 22 | 23 | Acceptance tests tell you whether the code does what the product/client expected it to do, at the system level, in collaboration with the rest of the codebase. 24 | 25 | ### OID: Outside-In Development 26 | 27 | > OID works by using automation tools combined with scenarios from a collaborative session to create a guide for Developers; this results in ensuring Developers develop what the business wants. 28 | > 29 | > — [Mark Winteringham](https://www.mwtestconsultancy.co.uk/bdd-testing-part-3/) 30 | > 31 | > (…) people started working _outside-in_, from the User Interface (UI) through which users experienced the system's behaviour, to the controllers, models, microservices (API), classes, etc., until they finally had working software that mattered to the stakeholders (…) 32 | > 33 | > — [Liz Keogh](https://lizkeogh.com/2011/06/27/atdd-vs-bdd-and-a-potted-history-of-some-related-stuff/) 34 | 35 | OID starts out similar to ATDD. Both methodologies start by specifying the behaviour of your system as seen from the outside (see [black box testing](/types/test-types.md)). These specifications tell developers what their code needs to accomplish. 36 | 37 | There's a small difference though. OID is not as prescriptive about test automation as ATDD is. Listen to [Mark Winteringham](https://www.mwtestconsultancy.co.uk/bdd-testing-part-3/): "The assumption from Testers is that because OID uses tools that are typically related to automated testing that must mean OID is automated testing. OID helps Developers design good code and deliver what the business really wants. Not deliver testing." 38 | 39 | OID is less about technology and testing and more about process and collaboration; it was an [evolutionary step](https://dannorth.net/whats-in-a-story/) towards behaviour driven development. 40 | 41 | ### BDD: Behaviour Driven Development 42 | 43 | > BDD in a nutshell: Using examples at multiple levels to create a shared understanding and reduce uncertainty to deliver software that matters.
44 | > 45 | > — Dan North, creator of BDD 46 | 47 | BDD is an OID methodology. It starts at the outside by identifying business outcomes, and then drills down into the features that will achieve those outcomes. It focuses heavily on conversations between different roles, using examples to clarify requirements, and describing those requirements in a language readable by all participants. 48 | 49 | > If you're not having conversations, you're not doing BDD. Order of importance: 50 | > having conversations > capturing conversations > automating conversations 51 | > 52 | > — [Liz Keogh](https://lizkeogh.com/2014/01/22/using-bdd-with-legacy-systems/) 53 | 54 | Each feature is captured as a **story**. Stories are descriptions of [requirements](/concepts/requirements.md) and their business benefits. They use a language agreed and understood by everyone — product owner, developer and tester. It includes a set of acceptance criteria, a list of statements that must be true to declare the story "done". 55 | 56 | > This will likely be an iterative process. The stakeholder will have an idea of what they want but will not know how much work will be involved. With the help of the technical and testing experts, they will understand the cost/benefit of each scenario and make a judgement about whether they want it. 57 | > 58 | > — [Dan North](https://dannorth.net/whats-in-a-story/) 59 | 60 | To describe a story we need words. If we use words from a natural language (like English) then anyone will be able to read it, but without some structure everyone's writing style would affect the readability of the story. To avoid that, some rules were introduced and that's how the **Gherkin language** was born, also known as the Given-When-Then syntax. 61 | 62 | > If you look up anything around BDD, you're likely to find conversation, collaboration, scenarios and examples at its core, together with suggestions for how to automate them. If you look further, you'll find Three Amigos and the Gherkin syntax and Cucumber (…) and a host of other tools. 63 | > 64 | > — [Liz Keogh](https://lizkeogh.com/2015/03/27/what-is-bdd/) 65 | 66 | Using this common language, it's time to gather the team to write these stories together. The purpose is to uncover more information and refine the idea, discovering assumptions and ambiguities. 67 | 68 | > We involve members from testing, development, and business in an informal meeting that is more typically known as "[Three amigos](https://www.stickyminds.com/sites/default/files/magazine/file/2013/3971888.pdf)" to discuss and question what we plan to build. The goal is to dispel any incorrect assumptions (…) and ignorance we have around what we want to deliver. 69 | > 70 | > — [Mark Winteringham](https://www.mwtestconsultancy.co.uk/bdd-testing-part-2/) 71 | > 72 | > The output of these conversations is our feature files, with scenarios that provide examples of the features, based on the information that we have uncovered throughout the collaborative conversations. 73 | > 74 | > — [Dan Ashby](https://danashby.co.uk/2017/02/03/bdd-and-the-real-primary-purpose-of-feature-files/) 75 | 76 | ## Practice 77 | 78 | ### TDD 79 | 80 | For every method or class you need to implement, repeat the "Red-Green-Refactor" cycle: 81 | 82 | 1. Decide what code behaviour you want to implement; 83 | 2. Write a set of unit tests that assert that behaviour; 84 | 3. Run those tests — they should fail; 85 | 4. Write enough code to make them pass; 86 | 5. Run those tests — they should pass; 87 | 6. 
Refactor the code, and repeat steps 4-5. 88 | 89 | These unit tests usually cover happy and sad paths (see [Test types: Unit tests](/types/test-types.md)). 90 | 91 | ### ATDD 92 | 93 | For every feature or acceptance criterion, repeat this cycle: 94 | 95 | 1. Write a list of acceptance criteria to consider a feature done; 96 | 2. Automate those criteria into acceptance tests; 97 | 3. Run those tests — they should fail; 98 | 4. Decide what code changes are necessary to pass those tests; 99 | - (optional) For each code change, use the "Red-Green-Refactor" cycle; 100 | 5. Run those tests — they should pass. 101 | 102 | These acceptance tests usually cover just the happy path (see [Test types: Acceptance tests](/types/test-types.md)). 103 | 104 | ### BDD 105 | 106 | For every feature: 107 | 108 | 1. Gather different team roles to discuss the feature, aka. "three amigos"; 109 | 2. Capture the feature into a story using scenarios, aka. [specification by example](/notebook/specification-by-example.md); 110 | 3. Write code to fulfil the acceptance criteria of that story; 111 | 4. (optional) Write code to assert those criteria, aka. executable specification; 112 | 5. (optional) Write code to merge all features into a single file, aka. living documentation. 113 | 114 | #### Three amigos 115 | 116 | As [Liz Keogh](https://lizkeogh.com/2011/03/04/step-away-from-the-tools/) puts it: "There are things about your domain that you don't know or you've misunderstood; by talking through examples in groups, the chances of uncovering these gaps is increased." 117 | 118 | Your role in these sessions is to generate questions and ensure the conversation stays inside the scope of the feature. It's also a good opportunity to evaluate the [testability](/concepts/testability) of that feature. 119 | 120 | > A tester using BDD will be able to ask questions, collect information and help identify risks to inform other testing activities. Questions are your best tool for collaboration sessions: 121 | > 122 | > - "What, Who, Where, When, Why" - see [**`WWWWWHEK`** mnemonic](/toolbox/mnemonics) 123 | > - "Maybe this is a stupid question but…" - Those questions identify a lot of assumptions 124 | > - "So just to confirm…" - Rephrasing with your own words validates your understanding 125 | > 126 | > — [Mark Winteringham](https://www.mwtestconsultancy.co.uk/bdd-testing-part-2/) 127 | 128 | #### Specification By Example (SBE) 129 | 130 | Detailing a feature into a story using scenarios is hard. SBE is an old concept but it was recently popularised by [Gojko Adzic](https://gojko.net/books/specification-by-example/)'s book. The goal of these specifications is to "improve quality, reduce rework and collaborate better". 131 | 132 | [Matt Wynne](https://cucumber.io/blog/bdd/example-mapping-introduction/) then proposed a simplified implementation of SBE called Example Mapping. 133 | 134 | > A low-tech method for making this conversation (three amigos) short and powerfully productive. In short, it uses different coloured post-it notes as visual aids to help keep track of rules (acceptance-criteria), examples (Gherkin scenarios) and questions. 135 | 136 | For a detailed guide on how to use this technique in practice, continue to this [notebook page](/notebook/specification-by-example).
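To give a flavour of Example Mapping before you read that guide, the coloured cards for a feature might capture something like this (the story, rule, examples and question are invented for illustration):

```text
Story (yellow):    Free shipping for loyal customers
Rule (blue):       Orders above 50 EUR ship for free
Example (green):   A cart of 49.99 EUR pays the shipping fee
Example (green):   A cart of 50.00 EUR ships for free
Question (red):    Does the threshold apply before or after discounts?
```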
137 | 138 | #### Story 139 | 140 | ```gherkin 141 | Title (one line describing the story) 142 | 143 | # Narrative # 144 | 145 | As a [role] 146 | I want [feature] 147 | So that [benefit] 148 | 149 | # Acceptance Criteria # 150 | 151 | Scenario: Title 152 | Given [context] 153 | When [action/event] 154 | Then [result] 155 | ``` 156 | 157 | > - The title should describe an activity 158 | > - The narrative should include a Role, a Feature and a Benefit 159 | > - The scenario title should say what's different 160 | > - The scenario should be described in terms of Context, Actions and Results 161 | > - The given should define all of, and no more than, the required context 162 | > - The action should describe the feature 163 | > 164 | > — [Dan North](https://dannorth.net/whats-in-a-story/) 165 | 166 | #### Living documentation (BDD) 167 | 168 | The best-known tool that reads Gherkin and executes test code is [Cucumber](https://cucumber.io/). If your team decides to write automated checks for each scenario (i.e. executable specification) then you can go a step further and achieve living documentation. 169 | 170 | If you automated your scenarios, most likely you have a collection of `*.feature` files. You can find (or build) a tool that reads these files and automatically generates web pages (`*.html` files). 171 | 172 | The end goal is to have an automated process that reads your scenarios, transforms them into clean web pages, and publishes them online for any team member to access. That means you will have always up-to-date documentation of the current functionality of your software — that's the origin of the term _living_ documentation. 173 | 174 | #### All together now! 175 | 176 | > There are teams ignoring the collaborative side of BDD, focusing too much on using a Gherkin syntax as means to build test cases and (…) focus on automating test coverage. It's important to remember that Gherkin is for development guidance, not test coverage. 177 | > 178 | > — [Mark Winteringham](https://www.mwtestconsultancy.co.uk/bdd-testing-part-4/) 179 | 180 | [Alister Scott](https://www.thoughtworks.com/insights/blog/specification-example) wrote a blog post about a fictional team, illustrating their journey from ATDD to BDD, highlighting the difficulties along the way. They start with automated acceptance checks (ATDD), then convert them to Gherkin scenarios (BDD), and finally end with living documentation (BDD). 181 | 182 | He highlights several requirements of successful BDD; here are a few: 183 | 184 | > - Abstract: the scenarios should be high-level enough to hide details and implementations; 185 | > - Language: terminology should be consistent to ensure a shared understanding; 186 | > - Flows: only a few (1-3) end-to-end flows, not a combination of every decision point. 187 | 188 | ## Teachers 189 | 190 | - [Alister Scott](https://watirmelon.blog/) 191 | - [Dan Ashby](https://danashby.co.uk/) 192 | - [Dan North](https://dannorth.net/) (author of BDD) 193 | - [Gáspár Nagy](http://gasparnagy.com/) 194 | - [Liz Keogh](https://lizkeogh.com) 195 | - [Mark Winteringham](https://www.mwtestconsultancy.co.uk/) 196 | 197 | ## Sources 198 | 199 | - [ATDD vs.
BDD, and a potted history of some related stuff](https://lizkeogh.com/2011/06/27/atdd-vs-bdd-and-a-potted-history-of-some-related-stuff/) 200 | - [BDD and the real primary purpose of feature files](https://danashby.co.uk/2017/02/03/bdd-and-the-real-primary-purpose-of-feature-files/) 201 | - [Introducing Example Mapping](https://cucumber.io/blog/bdd/example-mapping-introduction/) 202 | - [Is BDD testing? (part 2)](https://www.mwtestconsultancy.co.uk/bdd-testing-part-2/) 203 | - [Is BDD testing? (part 3)](https://www.mwtestconsultancy.co.uk/bdd-testing-part-3/) 204 | - [Is BDD testing? (part 4)](https://www.mwtestconsultancy.co.uk/bdd-testing-part-4/) 205 | - [Specification by Example (book)](https://gojko.net/books/specification-by-example/) 206 | - [Specification by Example (example)](https://www.thoughtworks.com/insights/blog/specification-example) 207 | - [Step Away from the Tools](https://lizkeogh.com/2011/03/04/step-away-from-the-tools/) 208 | - [TDD with Acceptance Tests and Unit Tests](https://sites.google.com/site/unclebobconsultingllc/tdd-with-acceptance-tests-and-unit-tests) 209 | - [The Three Amigos: all for one, one for all](https://www.stickyminds.com/sites/default/files/magazine/file/2013/3971888.pdf) 210 | - [Using BDD with Legacy Systems](https://lizkeogh.com/2014/01/22/using-bdd-with-legacy-systems/) 211 | - [What is BDD?](https://lizkeogh.com/2015/03/27/what-is-bdd/) 212 | - [What's in a story?](https://dannorth.net/whats-in-a-story/) 213 | - [BDD in 2020](https://alisterbscott.com/2020/05/28/bdd-in-2020/) 214 | -------------------------------------------------------------------------------- /docs/toolbox/framework/cypress.md: -------------------------------------------------------------------------------- 1 | # Cypress 2 | 3 | > Fast and reliable testing for anything that runs in a browser. It uses Javascript (and not Selenium) to make setting up, writing, running and debugging tests easy — for QAs and developers. 4 | > 5 | > — [Official website](https://www.cypress.io/how-it-works/) 6 | 7 | ## Code 8 | 9 | Example of automation at [GitHub](https://github.com/dialex/start-testing/tree/main/code/framework/cypress). 10 | 11 | ## Review 12 | 13 | | Category | Opinion | Score | 14 | | ----------------- | ------------------------------------------------------------ | :-------: | 15 | | _Use cases_ | Automate end-to-end (E2E) tests using the UI or the client-side Javascript. It also supports API testing and mocks. | 🥇 | 16 | | _Learning curve_ | Cypress has little setup and comes with intuitive methods and assertions, so you will be writing your first tests in no time. When you open Cypress, you can watch the test execution step-by-step in the browser, pause it, even go back in time! | 🥇 | 17 | | _Language_ | Tests are written in Javascript, thus some basic knowledge is required to code and understand the tests. You will only take full advantage of this powerful framework if you are proficient with Javascript though. | 🥈 | 18 | | _Ecosystem_ | Javascript. I used VS Code to write tests and (after a simple configuration) its IntelliSense made me very productive. | 🥇 | 19 | | _Readability_ | A non-tech person can only understand the title of each test. E2E tests easily become hard to read even for devs. There are unofficial libs to support Gherkin. By default there's no test report file, only an ASCII output, but you can add any [Mocha](https://docs.cypress.io/guides/tooling/reporters.html#Custom-Reporters) test reporter.
| 🥈 | 20 | | _Extensibility_ | You can create your own Cypress Commands (not picked up by IntelliSense) or regular Javascript functions. PageObjects are a nightmare (their [recommendation](https://docs.cypress.io/faq/questions/using-cypress-faq.html#Can-I-use-the-Page-Object-pattern) isn't better). There are several Cypress plugins (but they offer [functionality that should be built-in](https://github.com/cypress-io/cypress/issues/1865#issuecomment-484897559)). It's a pity that parallel test execution is a paid feature. | 🥉 | 21 | | _Maintainability_ | Debugging is good (pause, go back in time, DOM inspection), though I longed for step-by-step execution. IntelliSense usually guides your coding. Cypress commands run asynchronously and that leads to issues (e.g. race conditions) and limitations (e.g. can't mix sync and async code). | 🥈 | 22 | | _Documentation_ | The online doc is abundant and comprehensive. It covers how to use Cypress, including some [recipes/examples](https://github.com/cypress-io/cypress-example-recipes#application-actions), but also explains some test concepts (e.g. when to use test mocks, anti-patterns, etc.). | 🥇 | 23 | | **VERDICT** | Perfect for a Javascript project with an experienced team. | **4.5/5** | 24 | -------------------------------------------------------------------------------- /docs/toolbox/framework/karate.md: -------------------------------------------------------------------------------- 1 | # Karate 2 | 3 | > Karate is the only open-source tool to combine API test-automation, mocks, performance-testing and even UI automation into a single, unified framework. You don't have to compile (Java) code. Just write tests in a readable syntax. 4 | > 5 | > — [Official website](https://intuit.github.io/karate/) 6 | 7 | ## Code 8 | 9 | Example of automation at [GitHub](https://github.com/dialex/start-testing/tree/main/code/framework/karate). 10 | 11 | ## Review 12 | 13 | | Category | Opinion | Score | 14 | | ----------------- | ------- | :------: | 15 | | _Use cases_ | Automate API tests. It also supports performance and end-to-end (E2E) tests. Mobile testing is on the roadmap. | 🥇 | 16 | | _Learning curve_ | Steep. It's easy to understand how it works, but it lacks an official step-by-step tutorial where each main DSL keyword is introduced, one at a time. It feels like a framework made by and for "hardcore developers". The purpose of using Gherkin is to make the tools accessible to non-devs, yet I never felt productive or in control. | 🥈 | 17 | | _Language_ | Tests are written in [Karate's DSL](https://hackernoon.com/yes-karate-is-not-true-bdd-698bf4a9be39). It uses the syntax of Gherkin and adds some custom keywords and operators. It also supports calls to Java and JavaScript, outside the Given-When-Then structure. | 🥇 | 18 | | _Ecosystem_ | The [VS Code extension](https://marketplace.visualstudio.com/items?itemName=kirkslota.karate-runner) didn't work at all (e.g. no autocomplete, no debugger, no run test button). In terms of community… there's not much since it's a recent tool. The author is an [enthusiastic](https://twitter.com/KarateDSL/status/1167533484560142336) salesperson which led to [some bad PR](https://twitter.com/jarbon/status/1136589061605416961), result of the "my tool is amazing, if you disagree you're against me" attitude. He's very active on Stack Overflow and GitHub though, replies within hours! | 🥉 | 19 | | _Readability_ | It's easier to read than to know what to write.
Without autocomplete or good documentation it becomes hard to know how to use the Karate DSL to achieve what you want to do. Your tests use Gherkin's structure, but you have to mix programming syntax too (e.g. operators, selectors). This [sums up the downsides](https://club.ministryoftesting.com/t/karate-for-test-automation-what-is-your-experience/39336/2) of natural (e.g. no refactoring) and programming (e.g. learning curve) languages. The CLI output is very verbose (without colours or whitespace) so it's hard to read. The HTML report doesn't clear results from previous test runs. Oh, and the DSL doesn't allow inline comments. | 😭 | 20 | | _Extensibility_ | You don't need plugins, since it follows the "batteries included" philosophy. Running tests in parallel is very easy. It includes Mocking, [code coverage](https://github.com/intuit/karate/tree/master/karate-demo#code-coverage-using-jacoco) and HTML reports (but it doesn't reset results between runs, even when using `--clean`, which makes the report useless). | 🥈 | 21 | | _Maintainability_ | The [_big difference_](https://intuit.github.io/karate/#cucumber-vs-karate) from Cucumber is that you _don't_ need to write extra "step definitions" (but you end up mixing Gherkin with programming). You can't use PageObjects, but you can centralise selectors in a `locators.json` to achieve DRY. I could not debug at all; none of the features [advertised here](https://twitter.com/KarateDSL/status/1167533484560142336) worked for me (Cypress, even [Selenium](https://hackernoon.com/the-world-needs-an-alternative-to-selenium-so-we-built-one-zrk3j3nyr), does a better job). On top of that, I got [incoherent test results on UI automation](https://stackoverflow.com/questions/62308044/karate-ui-automation-test-results-are-not-coherent). | 😭 | 22 | | _Documentation_ | Documentation is a single README file. It's verbose, exhausting for beginners, and hard to navigate to find what you are looking for. It feels like the official docs were written by the author but never revised from the perspective of a beginner (e.g. the provided quick start example fails; it can't be copied because it's an image; it was faster to skim blogs for examples and reverse-engineer the bits that I needed). | 🥉 | 23 | | **VERDICT** | Useful for [API testing](https://docs.google.com/document/d/1ETTrdMVcBXaPjdKY-_67zCWBsi2Ctc5DIQUIfr02H7A/edit), because it uses a generic language and can be executed as a standalone tool. But don't be fooled, the target audience is developers and it's not an alternative for UI/E2E tests. | **3/5** | 24 | -------------------------------------------------------------------------------- /docs/toolbox/framework/playwright.md: -------------------------------------------------------------------------------- 1 | # Playwright 2 | 3 | > Playwright enables end-to-end testing. Test modern single page apps, across all modern browsers, using your preferred language (JS, TS, Java, C#, Python). 4 | > 5 | > — [Official website](https://playwright.dev/) 6 | 7 | ## Code 8 | 9 | Example of automation at [GitHub](https://github.com/dialex/start-testing/tree/main/code/framework/playwright). 10 | 11 | ## Review 12 | 13 | | Category | Opinion | Score | 14 | | ----------------- | ------- | :------: | 15 | | _Use cases_ | Automate end-to-end (E2E) tests using the UI. It also supports mocks. | 🥈 | 16 | | _Learning curve_ | Most test frameworks behave similarly, so for me it's just a matter of learning their syntax.
The official doc was not sufficient to get started, so while I was coding I had the reference API by my side. When I was not sure how to do something, I simply enabled the "record mode" and let Playwright generate the test code for my actions. | 🥈 | 17 | | _Language_ | Lots of flexibility, as you can choose from five languages to write your tests (personally, I picked JavaScript and VS Code). Since the API is very low level, more than basic knowledge is required to code and understand the tests. | 🥇 | 18 | | _Ecosystem_ | My IDE autocomplete was limited. You can debug your test code using [breakpoints in VS Code](https://code.visualstudio.com/docs/nodejs/nodejs-debugging#_javascript-debug-terminal)… but you need multiple watches, as you can't evaluate expressions. Part of Playwright's pitch is cross-browser testing. Indeed, it's super easy to enable but super flaky to use. [In my case](https://github.com/playwright-community/jest-playwright/issues/614), Firefox was (mostly) fine, Chrome returned page errors (which I had to ignore) and Safari was 3x slower (when it didn't fail straight away) – both issues were false positives. | 😭 | 19 | | _Readability_ | Verbose and an eyesore. A simple assertion takes [three lines](https://github.com/playwright-community/expect-playwright#why-do-i-need-it) — thankfully [expect-playwright](https://github.com/playwright-community/expect-playwright#api-documentation) reduces it to one. Something concise and intuitive in Cypress (`page.get.first`) is obscure in Playwright (`page.$$`). The test code reads like back-end code, with `async` and `await` keywords on every line. Sometimes you need to explicitly [wait for page loads](https://playwright.dev/docs/api/class-page#page-wait-for-navigation) (are we back to the Selenium days?). | 😭 | 20 | | _Extensibility_ | You can run tests with its own runner, [Playwright Test](https://playwright.dev/docs/intro#installation), or you can use [Jest](https://github.com/playwright-community/jest-playwright) — developers will be familiar with its syntax and the test results report in the CLI is a lot better. It supports the [PageObject Model](https://playwright.dev/docs/pom). | 🥉 | 21 | | _Maintainability_ | Hard to debug. When the `toHaveText()` assertion fails, it outputs the whole page HTML in the error log, which is a nightmare to review. [Slower than expected](https://blog.checklyhq.com/cypress-vs-selenium-vs-playwright-vs-puppeteer-speed-comparison/); if the tests ran in parallel, I didn't even notice it. | 🥉 | 22 | | _Documentation_ | I started with the [official doc](https://playwright.dev/docs/intro) and an [online course](https://testautomationu.applitools.com/js-playwright-tutorial). It's the typical "it works and looks fine" until you try a real-world scenario. I had to follow [community](https://applitools.com/blog/playing-with-playwright/) [guides](https://www.eliostruyf.com/utilize-playwright-jest-cross-browser-e2e-test-solutions/) to unblock myself. There are also some [official tutorials](https://try.playwright.tech/). On the bright side, the doc is complete and easy to search. | 🥈 | 23 | | **VERDICT** | I had a bad time and, given the effort, I was not proud of what I achieved. Unproductive. Unreliable.
| **1/5** | 24 | -------------------------------------------------------------------------------- /docs/toolbox/framework/robot.md: -------------------------------------------------------------------------------- 1 | # Robot Framework 2 | 3 | > Robot Framework is a test automation framework for acceptance testing (…) it utilises the keyword-driven testing approach. Its testing capabilities can be extended by Python test libraries. 4 | > 5 | > — [Official website](http://robotframework.org/) 6 | 7 | ## Code 8 | 9 | Example of automation at [GitHub](https://github.com/dialex/start-testing/tree/main/code/framework/robot). 10 | 11 | ## Review 12 | 13 | | Category | Opinion | Score | 14 | | -------- | ------- | :---: | 15 | | _Use cases_ | Automate end-to-end tests using the UI. You can also use it quite well for contract or API tests (though not as explicit as [RestAssured](http://rest-assured.io/)). | 🥈 | 16 | | _Learning curve_ | You will quickly master the Robot syntax, as it's close to plain English. The framework "just works" and you don't need to know much about it. | 🥇 | 17 | | _Language_ | Tests are written in Robot, which also supports Gherkin. Even though it's easy, it's yet another syntax your team needs to learn. | 🥈 | 18 | | _Ecosystem_ | Python. [Enough libraries](https://github.com/fkromer/awesome-robotframework/blob/master/README.md) for most use cases. PyCharm was not a helpful IDE, so I used a regular Python text editor like VS Code. | 🥈 | 19 | | _Readability_ | The keyword-driven approach makes your tests easy to understand. The test report file gives you an overview of the results, yet allows you to drill down to details. | 🥇 | 20 | | _Extensibility_ | You can create your own keywords to increase the abstraction, or compose them with existing keywords. Not easy to add custom code. | 🥈 | 21 | | _Maintainability_ | Refactoring keeps the code base understandable, although without a proper IDE it gets cumbersome. Debugging is limited. | 🥈 | 22 | | _Documentation_ | The online doc is sufficient and clear. There are enough examples online. There are also tutorials and courses. | 🥈 | 23 | | **VERDICT** | Good for small or simple projects; otherwise it has limitations. | **3.5/5** | 24 | -------------------------------------------------------------------------------- /docs/toolbox/heuristics.md: -------------------------------------------------------------------------------- 1 | # Heuristics 2 | 3 | > **tl;dr** Heuristics are shortcuts to make decisions or pick what to test next. 4 | 5 | ## Theory 6 | 7 | If [mnemonics](/toolbox/mnemonics) act as "memory shortcuts", then heuristics are "decision shortcuts". These mechanisms allow people to function without spending too much effort thinking about their next action. 8 | 9 | > We use heuristics under conditions of uncertainty (…) to rapidly solve problems or make decisions. When you consider the number of decisions people make every day, it makes sense for our brains to use shortcuts to help us quickly assess the different options and decide. 10 | > 11 | > — [Richard Bradshaw and Sarah Deery](https://www.ministryoftesting.com/dojo/lessons/software-testing-heuristics-mind-the-gap) 12 | 13 | Expressions like "rule of thumb", "educated guess", or "intuition" are all examples of humans using heuristics.
So consider this rule of thumb, one that you might recall from your student years: _"I don't know the contents of the next exam, but the teacher already mentioned this specific subject three times, so it must be important."_ 14 | 15 | This example illustrates two key limitations of heuristics: 16 | 17 | - **All heuristics are fallible.** They simplify our context by assuming what is uncertain and ignoring what is contradictory or irrelevant. Given this incomplete context our decisions will be fallible, but there are situations where acting is more important than precision — and that's when heuristics are useful. 18 | - **All heuristics can turn into biases.** Prolonged usage of the same heuristics has a negative impact on you and your testing. Without awareness of bias, you will eventually miss or misinterpret information, creating gaps in your testing. 19 | 20 | > Despite their fallible nature and the potential biases they cause, heuristics are very useful (…) to find solutions that are "good enough" (…) in scenarios where it's impractical to find the optimal solution to a problem. 21 | > 22 | > — [Richard Bradshaw and Sarah Deery](https://www.ministryoftesting.com/dojo/lessons/software-testing-heuristics-mind-the-gap) 23 | > 24 | > Heuristics provide patterns that can be useful in _some_ situations, _some_ times. (…) It's useful to treat heuristics with a certain amount of distrust. 25 | > 26 | > — [Anne-Marie Charrett](https://mavericktester.com/2019/12/31/heuristics-sfdipot/) 27 | 28 | As with any other tool, it's important that you understand the advantages and limitations of heuristics, so that you can wisely choose when and which heuristics to apply in your context. 29 | 30 | > Reliance on an oracle can lead you to the wrong conclusion. 31 | > A decision rule that is useful but not always correct is called a heuristic. 32 | > 33 | > — [Cem Kaner](http://kaner.com/?p=190) 34 | 35 | Oracles are considered heuristics; however, not all heuristics are oracles: the [**`FEW HICCUPPS`**](/toolbox/mnemonics.md) heuristic is an oracle because it tells you how to decide if something is right or wrong; the Goldilocks heuristic is not, because it only gives you hints about what to test. 36 | 37 | > When I test a software application there are a number of things that I know are worth trying. These are my test heuristics. Heuristics are simply experience-based techniques for problem solving and discovery. 38 | > 39 | > — [Katrina Clokie](https://katrinatester.blogspot.com/2014/09/heuristics-and-oracles.html) 40 | 41 | When you have doubts about what to test next, there are a number of heuristics you can use to generate new test ideas. With time and experience you will develop your own set of test heuristics. 42 | 43 | ## Practice 44 | 45 | You will frequently come across heuristics in the form of checklists, cheat sheets, mnemonics, oracles or models. If they serve as cognitive shortcuts to solve problems or make decisions, they're heuristics. 46 | 47 | > Once you learn about heuristics, it's time to practice them in different contexts. (…) When using heuristics you should reflect what worked, what didn't and why. If a heuristic is not working for you, try another, modify it or make your own. 48 | > 49 | > — [Richard Bradshaw and Sarah Deery](https://www.ministryoftesting.com/dojo/lessons/software-testing-heuristics-mind-the-gap) 50 | 51 | ### 💡 Test ideas 52 | 53 | There are many heuristics you can use to generate test ideas.
[Elisabeth Hendrickson](http://testobsessed.com/wp-content/uploads/2011/04/testheuristicscheatsheetv1.pdf) compiled a cheat sheet with the most common ones. One of the simplest is the Goldilocks heuristic (named after the [bedtime story](https://en.wikipedia.org/wiki/Goldilocks_and_the_Three_Bears)), which focuses on the concept of "too big", "too small", "just right". For more testing opportunities, check this list from [Erik Brickarp](http://erik.brickarp.se/2016/08/how-to-come-up-with-test-ideas.html). 54 | 55 | > Let's say you want to test a new field that collects the age of a user. Inspired by the Goldilocks heuristic, you can observe the behaviour of that field when you type a value that is too big (`999`), too small (`-1`), and just right (`30`). 56 | 57 | [James Bach](https://www.satisfice.com/download/heuristic-test-strategy-model)'s Heuristic Test Strategy Model (HTSM) contains more tips on how to explore your product ([**`SFDIPOT`**](/toolbox/mnemonics?id=⛺%ef%b8%8f-exploration)) and its non-functional properties ([**`CRUCSPIC STMP`**](/toolbox/mnemonics?id=%f0%9f%92%a1-product)). 58 | 59 | This presentation from [Karen Johnson](http://karennicolejohnson.com/wp-content/uploads/2012/11/KNJohnson-2012-heuristics-mnemonics.pdf) demonstrates how you can use heuristics like [**`RCRCRC`**](/toolbox/mnemonics?id=🤖-automation) (ideas about what to check on regression testing) or [**`FEW HICCUPPS`**](/toolbox/mnemonics?id=⛺%ef%b8%8f-exploration) (oracles focused on consistency) in practice. To discover other mnemonics, check this [cheat sheet](/toolbox/mnemonics). 60 | 61 | Given that the time you have to test is limited, you might want to prioritise your testing by "finding important problems first" and "maximising diversity". These and [other heuristics](https://thelifeofoneman.com/the-main-test-heuristics-to-consider) allow you to focus on using different techniques to reveal different types of critical problems. 62 | 63 | If you like learning while having fun, [Lena Pejgan](https://testing.pejgan.se/2020/04/30/would-heu-risk-it-part-32-wrap-up-time/) created a card game called "Would Heu-risk it?". It contains a total of 30 tools (things testers use to increase the value of their testing), traps (common mistakes and anti-patterns) and weapons (pieces of wisdom gained from experience). 64 | 65 | ### 🕶 Biases 66 | 67 | > Bias is an irrational judgement or subconscious inference made from (historical) data available to us. 68 | > 69 | > In testing, biases cause you to miss or focus too much on a specific behaviour or data. 70 | > 71 | > — [99 second intro to biases in testing](https://www.ministryoftesting.com/dojo/lessons/99-second-introduction-to-biases-in-testing) 72 | 73 | For example, when you miss something because you are too focused on another thing, that's a form of bias called "inattentional blindness". To see this in practice, put yourself to the test with ["The Monkey Business Illusion"](https://youtu.be/IGQmdoK_ZfY). 74 | 75 | There are many more biases that limit or weaken your testing. When you are conscious of these biases, you can minimise their negative impact. Otherwise, biases create gaps in your testing which give bugs an opportunity to go unnoticed until it's too late. 76 | 77 | In order to counter the bias effect of heuristics, [Anne-Marie Charrett](https://mavericktester.com/2018/03/20/2018-3-20-how-to-avoid-being-fooled-in-software-testing/) recommends that you: 78 | 79 | - Diversify your actions — e.g.
try a smaller resolution, use keyboard shortcuts 80 | - Diversify your test data — e.g. pick a different user, generate random data 81 | - Diversify your oracles — e.g. show what you found to a different stakeholder 82 | - Diversify who is doing the testing — e.g. rotate perspectives and expectations 83 | - Diversify your test environment — e.g. use a different machine or OS, test in production 84 | 85 | [Katrina Clokie](https://katrinatester.blogspot.com/2018/05/9-quick-ideas-for-flexible-testing.html) has a few additional suggestions: 86 | 87 | - Change the order of your test approach to break a routine 88 | - Seek test ideas from non-testers outside your agile team (e.g. UX, Ops) 89 | - Pair with a tester in another team to see a different test approach first-hand 90 | - Experiment with a tool that you haven't tried before 91 | - Ask for constructive feedback about your testing 92 | 93 | [Alan Richardson](https://www.eviltester.com/2017/05/quaere-heuristics-mnemonics-and-acronyms.html) challenges the amount of testing you do with the following three questions. He also suggests a few words to fill in the blanks: Questioning, Usage, Analysis, Exploration, Reasoning, Experimentation ([`QUAERE`](https://www.eviltester.com/2017/05/quaere-heuristics-mnemonics-and-acronyms.html)). 94 | 95 | - "Have I performed enough \_\_\_\_?" 96 | - "Has my \_\_\_\_ been good enough?" 97 | - "Did my \_\_\_\_ cover everything it could?" 98 | 99 | [Tom Bartel](https://www.tombartel.me/blog/are-you-suffering-from-curse-of-knowledge/) describes a bias he calls the "curse of knowledge": 100 | 101 | > The knowledge that you have gathered becomes natural to you. You become "unconsciously competent", so you have a harder time explaining it to somebody else. A warning sign is when you start your sentences with _"As we all know, …"_ or _"I probably don't need to explain that…"_ 102 | > 103 | > This is how you create unsafe environments. 104 | 105 | [Buster Benson](https://medium.com/better-humans/cognitive-bias-cheat-sheet-55a472476b18#.486tj1s6j) did an amazing job at collecting, explaining and summarising the most common biases that affect us. Here's a brief (or [visual](https://upload.wikimedia.org/wikipedia/commons/a/a4/The_Cognitive_Bias_Codex_-_180%2B_biases%2C_designed_by_John_Manoogian_III_%28jm3%29.png)) summary of the four groups of biases. 106 | 107 | > - **Too much information** — Our brain uses a few tricks to pick out the bits of info that are most likely going to be useful in some way. 108 | > - We are drawn to details that confirm our own existing beliefs. 109 | > - We notice flaws in others more easily than flaws in ourselves. 110 | > - Repetition, changes in patterns, funny, or bizarre things grab our attention. 111 | > - **Not enough meaning** — The world is very confusing. We connect the dots, fill in the gaps with stuff we already think we know. 112 | > - We find stories and patterns even in sparse data. 113 | > - We imagine things and people we're familiar with as better. 114 | > - We think we know what others are thinking. 115 | > - **Need to act fast** — We're constrained by time and information, yet we can't let that paralyse us. 116 | > - We favour the immediate, relatable thing in front of us over the delayed and distant. 117 | > - We're motivated to complete things that we've already invested time and energy in. 118 | > - We prefer to preserve our status in a group, and to avoid irreversible decisions. 119 | > - **Not enough memory** — We keep what is most likely to prove useful in the future.
120 | > - We discard specifics to form generalities. 121 | > - We store memories differently based on how they were experienced. 122 | 123 | ## Teachers 124 | 125 | - [Alan Richardson](https://www.eviltester.com/) 126 | - [Dan Ashby](https://danashby.co.uk/) 127 | - [James Bach](https://www.satisfice.com/) 128 | - [Lena Pejgan](https://testing.pejgan.se/) 129 | - [Richard Bradshaw](https://thefriendlytester.co.uk/) 130 | 131 | ## Sources 132 | 133 | - [Software Testing Heuristics: Mind The Gap!](https://www.ministryoftesting.com/dojo/lessons/software-testing-heuristics-mind-the-gap) 134 | - [Heuristics and Oracles](https://katrinatester.blogspot.com/2014/09/heuristics-and-oracles.html) 135 | - [Heuristic Test Strategy Model](https://www.satisfice.com/download/heuristic-test-strategy-model) 136 | - [Software Testing Heuristics & Mnemonics](http://karennicolejohnson.com/wp-content/uploads/2012/11/KNJohnson-2012-heuristics-mnemonics.pdf) 137 | - [99 Second Introduction to Biases in Testing](https://www.ministryoftesting.com/dojo/lessons/99-second-introduction-to-biases-in-testing) 138 | - [How to avoid being fooled in software testing](https://mavericktester.com/2018/03/20/2018-3-20-how-to-avoid-being-fooled-in-software-testing/) 139 | - [Quaere, Heuristics, Mnemonics, and Acronyms](https://www.eviltester.com/2017/05/quaere-heuristics-mnemonics-and-acronyms.html) 140 | - [Are You Suffering From the Curse of Knowledge?](https://www.tombartel.me/blog/are-you-suffering-from-curse-of-knowledge/) 141 | - [Cognitive bias cheat sheet](https://medium.com/better-humans/cognitive-bias-cheat-sheet-55a472476b18#.486tj1s6j) 142 | - [How to come up with test ideas](http://erik.brickarp.se/2016/08/how-to-come-up-with-test-ideas.html) 143 | - [9 quick ideas for flexible testing](https://katrinatester.blogspot.com/2018/05/9-quick-ideas-for-flexible-testing.html) 144 | -------------------------------------------------------------------------------- /docs/toolbox/mnemonics.md: -------------------------------------------------------------------------------- 1 | # Mnemonics 2 | 3 | > **tl;dr** Mnemonics are memory shortcuts to retrieve information. 4 | 5 | ## Theory 6 | 7 | > A mnemonic is a pattern of letters, ideas, or associations which helps remember something else. It is a learning technique that aids information retention and retrieval. — [Wikipedia](https://en.wikipedia.org/wiki/Mnemonic) 8 | 9 | Your brain can store a lot of information, but sometimes it's hard to recall a specific detail. Maybe you don't use that information too often or maybe it's not intuitive. Mnemonics are **[memory shortcuts](https://www.mindtools.com/memory.html)** that allow you to retrieve the same information with less effort. 10 | 11 | > Example: You can memorise how many days each month has, or you can use the knuckles of your hand to instantly know the answer. In the latter case, you are using a shortcut to retrieve the same information — that's a mnemonic. 12 | > 13 | > Example: You are learning how to test an online form, and you are told that it's a good idea to try a value that is too small, too big and just right. You can memorise that information, but that will cost your brain some effort. Then you notice that pattern is similar to the [Story of Goldilocks and the Three Bears](https://en.wikipedia.org/wiki/Goldilocks_and_the_Three_Bears). So you just call it "the Goldilocks test" — that's a mnemonic. 
14 | 15 | Some mnemonics will give you the full information you are looking for; others will give you references to where you can get the full information. The first example gives you the exact information you want (number of days of a month). The second example doesn't give you the answer but uses knowledge you already have to make it easier to recall the exact information you want (aka. association of ideas). 16 | 17 | > Example: You finished an exploratory session and now you want to summarise your findings. Usually you describe the Past (what you explored), the Results (what you discovered), the Obstacles (what prevented better testing), the Outlooks (what should be tested next), and the Feelings (how you felt while using the product under test). 18 | > That's a lot of things to remember! But if you take the first letter of every word, you get `PROOF` — that's an acronym. "After I explore, I need to bring… `PROOF`" — that's a mnemonic. 19 | 20 | Most of the testing mnemonics look like this last example. They are acronyms that condense vast amounts of information into a word (`PROOF`), a sentence (`FEW HICCUPPS`), or something in between (`SFDIPOT`). 21 | 22 | These mnemonics don't give you the exact information you want, but they give you a clue about what you are trying to remember. And even if you forget the words that make the acronym, you can search for the acronym and quickly get the detailed information you need — almost like the glossary of a book. 23 | 24 | Use the mnemonic technique to squeeze and shape any piece of information into a format that is "brain friendly". Below is a list of popular mnemonics used by the testing community. Feel free to [customise them to your needs](https://www.ministryoftesting.com/dojo/lessons/model-fatigue-and-how-to-break-it-john-stevenson) or develop your own. 25 | 26 | ## Practice 27 | 28 | ### 💡 Product 29 | 30 | - **When using [Specification By Example](/notebook/specification-by-example.md) to write features and scenarios: `OOPSI`** by [Jenny Martin](https://jennyjmar.com/2016/04/16/bdd-discovery-and-oopsi/) 31 | - Outcomes: what you expect to achieve (e.g. _As a … I want to … So that …_) 32 | - Outputs: the outputs that deliver what you want to achieve 33 | - Processes: the activities performed to generate those outputs 34 | - Scenarios: step by step descriptions of those activities (aka. test cases) 35 | - Inputs: data used by scenarios to perform an activity (aka. test data) 36 | - **When writing a user story: `INVEST`** by [Bill Wake](https://xp123.com/articles/invest-in-good-stories-and-smart-tasks/) 37 | - Independent: should be ready to start without waiting for another story 38 | - Negotiable: should capture the goal and leave the implementation for debate 39 | - Valuable: should deliver value to some stakeholder 40 | - Estimable: should have an effort amount, so that it can be prioritised 41 | - Small: should be achievable in one to three days 42 | - Testable: otherwise how do you know you're done? 43 | - **When detailing _functional_ requirements: `W5H` or `WWWWWHEK`** by [Darren McMillan](http://www.bettertesting.co.uk/content/?p=857) 44 | - What is this for? 45 | - Who is this for? 46 | - When is it needed? When will it be done? 47 | - Where does it fit? Where is it being done? 48 | - Why is it being done? 49 | - How is it being done? 50 | - What additional questions do I have based on my knowledge? (of this or related products) 51 | - What additional questions do I have based on my experience?
(of this or related products) 52 | - **When detailing _non-functional_ requirements: `CRUCSPIC STMP`** adapted by [Henrik Emilsson](http://thetesteye.com/posters/TheTestEye_SoftwareQualityCharacteristics.pdf) 53 | - Capability: completeness, accuracy, efficiency, concurrency, extensibility 54 | - Reliability: stability, robustness, recoverability, safety, integrity, trustworthiness 55 | - Usability: learnability, operability, control, clarity, consistency, accessibility 56 | - Charisma: uniqueness, satisfaction, professionalism, curiosity, hype, attitude, story 57 | - Security: authentication, authorization, privacy, compliance, invulnerability, piracy 58 | - Performance: capacity, responsiveness, availability, endurance, scalability, consumption 59 | - IT-bility: requirements, upgrades, uninstallation, configuration, deployability 60 | - Compatibility: hardware, OS, backward, forward, standards, sustainability 61 | - Supportability: analytics, troubleshooting, debugging, versatility 62 | - Testability: traceability, controllability, observability, isolate-ability, automation, information 63 | - Maintainability: flexibility, extensibility, simplicity, readability, modularity, refactor-ability 64 | - Portability: localisation, internationalization, adaptability, reusability 65 | - **When prioritising requirements for your product: `NoNeLaNe`** adapted by [Alister Scott](https://watirmelon.blog/2019/10/14/now-next-later-never-improving-moscow/) 66 | - Now (Must have) 67 | - Next (Should have) 68 | - Later (Could have) 69 | - Never (Won't have) 70 | 71 | ### 🐞 Bugs 72 | 73 | - **When creating mechanisms to detect errors: `FAILURE`** by [Ben Simo](https://www.questioningsoftware.com/2007/08/failure-usability.html) 74 | - Functional: Do we detect errors automatically? Can users report errors? 75 | - Appropriate: Are errors reported instantly, to the right audience? Do we have false errors? 76 | - Impact: Does it fail as soon as possible? Is the user flow blocked? What do users lose? 77 | - Log: Are logs accessible, detailed, searchable? 78 | - UI: Is the error message understandable by users? 79 | - Recovery: Does the message help users recover from the error? Can they contact support? 80 | - Emotions: What does a user feel when an error occurs? Does the error message ease or worsen that pain? 81 | - **When a bug is found: `REACT`** by [Brendan Connolly](http://www.brendanconnolly.net/react-to-bugs/) (adapted from [**`RIMGEA`**](https://www.kenst.com/2018/02/how-to-write-a-good-bug-report-use-rimgen/)) 82 | - Reproduce 83 | - Explore 84 | - Analyse 85 | - Communicate 86 | - Triage 87 | 88 | ### 🤖 Automation 89 | 90 | - **Anatomy of a test case: `SEARCH`** 91 | - Setup 92 | - Execute 93 | - Analyse 94 | - Report 95 | - Clean 96 | - Home 97 | - **When writing unit tests: `SIPIFFI`** adapted by Diogo Nunes 98 | - Small: covers a small piece of behaviour 99 | - Isolated: doesn't affect other tests 100 | - Precise: when it fails, you know exactly what's wrong 101 | - Intense: covers all relevant execution flows 102 | - Fast: runs under 500 ms 103 | - Idempotent: can be run multiple times (no side effects) 104 | - Frequently run: the sooner it fails, the sooner we fix it 105 | - **When writing automated tests: `SACRED`** by [Richard Bradshaw](https://youtu.be/z9m_yZMswOQ?t=56) 106 | - State: manage the environment where tests will run (e.g. configs, feature flags, data) 107 | - Actions: specify which steps to automate (e.g. 
click page element, call API, mock) 108 | - Codified [oracle](/toolbox/oracles.md): conditions that will decide whether the observed behaviour was expected (e.g. assertions) 109 | - Reporting: display and share the test results (e.g. logs, reports, notifications) 110 | - Execution: decide where and how the tests will run (e.g. local, CI, staging, production) 111 | - Deterministic: strive to have coherent and reliable test results (e.g. reduce flakiness) 112 | - **When prioritising regression tests: `RCRCRC`** by [Karen Johnson](https://testandcode.com/38) 113 | - Recent: new feature or areas of code 114 | - Core: essential features 115 | - Risk: important features but used less frequently 116 | - Configuration: code dependent on environment settings 117 | - Repaired: tests that reproduce bugs or features recently fixed 118 | - Chronic: features that frequently break 119 | - **When assessing the quality of an automated test: `PARMesan`** by [Matt Barbour](https://testguild.com/avoid-zombie-test-automation-essential-survival-guide/) 120 | - Performant: test runs as fast as possible (e.g. explicit waits, parallelism) 121 | - Accurate: test failure precisely identifies the root cause (i.e. no false positives) 122 | - Repeatable: test can be executed multiple times and produce the same result 123 | - Meaningful: test validates an important feature or capability 124 |
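To make these more concrete, here is a minimal sketch of a `SIPIFFI`-style unit test, written in TypeScript with Jest syntax. The `applyDiscount` function is hypothetical; the point is how each comment maps a quality from the mnemonic to a concrete choice. Notice that the test bodies also follow the first phases of `SEARCH` (setup, execute, analyse).

```ts
// A minimal sketch of SIPIFFI qualities in a unit test (Jest syntax).
// `applyDiscount` is a hypothetical function under test.
function applyDiscount(price: number, percent: number): number {
  if (percent < 0 || percent > 100) throw new RangeError("percent must be 0-100");
  return price - (price * percent) / 100;
}

describe("applyDiscount", () => {
  // Small and Precise: one behaviour per test, so a failure points at one cause.
  it("subtracts the given percentage from the price", () => {
    expect(applyDiscount(200, 10)).toBe(180); // Fast: pure function, no I/O
  });

  // Intense: edge cases are covered, not just the happy path.
  it("rejects percentages outside 0-100", () => {
    expect(() => applyDiscount(200, 101)).toThrow(RangeError);
  });

  // Isolated and Idempotent: no shared state, so order and repetition don't matter.
  it("leaves the price untouched for a 0% discount", () => {
    expect(applyDiscount(200, 0)).toBe(200);
  });
});
```

And here is a sketch of how `SACRED` maps onto a browser test. It uses Playwright's real API, but the application URL and selectors are invented:

```ts
import { test, expect } from "@playwright/test";

// State: the environment is set up explicitly instead of assumed.
const BASE_URL = process.env.BASE_URL ?? "http://localhost:3000"; // hypothetical app

test("searching shows matching products", async ({ page }) => {
  // Actions: the user steps we chose to automate.
  await page.goto(BASE_URL);
  await page.fill("#search", "mnemonic"); // hypothetical selectors
  await page.click("#search-button");

  // Codified oracle: the assertion that decides pass or fail.
  await expect(page.locator(".result").first()).toContainText("mnemonic");

  // Deterministic: web-first assertions auto-wait, which reduces flakiness.
  // Reporting and Execution live outside the test code: Playwright's reports,
  // and wherever you choose to run the suite (locally or in CI).
});
```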
125 | ### ⛺️ Exploration 126 | 127 | - **When thinking about which areas you can test: `SFDIPOT`** by [James Bach](https://www.satisfice.com/download/heuristic-test-strategy-model) 128 | - Structure: everything that makes up the product (software and hardware) 129 | - Function: everything that the product does 130 | - Data: everything that the product processes (in/out, quantity, quality, state, lifecycle) 131 | - Interfaces: everything used to interact with the product (UI, API, bot, import) 132 | - Platform: everything external on which the product depends (libs, 3rd parties, tools, hardware) 133 | - Operations: how the product will be used (personas, environment, happy path, edge cases) 134 | - Time: any relationship between the product and time (speed, concurrency, frequency, delays) 135 | - **When looking for test [oracles](/toolbox/oracles.md): `FEW HICCUPPS`** adapted by [Michael Bolton](https://www.developsense.com/blog/2012/07/few-hiccupps/) 136 | - Familiarity: should be free of issues we have seen before (in this or similar products) 137 | - Explainability: should be intuitive and easy to understand or explain 138 | - World: should be aligned with our knowledge of the world 139 | - History: should behave like it used to 140 | - Image: should match the company's brand (values, reputation, message) 141 | - Comparable: should behave like similar _external_ products 142 | - Claims: should behave like advertised by sales/marketing 143 | - User: should fulfil users' desires and expectations 144 | - Product: should behave like other _internal_ products 145 | - Purpose: should fulfil the company's expectations 146 | - Standards: should comply with applicable laws and regulations 147 | - **When starting an exploratory testing session: `TORCH`** by [Simon Tomes](https://docs.google.com/document/d/1rKYmujVhUlNgfeYIBot12Z8E7S0Y_Z4pk5pefK7xO3g/edit) 148 | - Time: How much time will you put aside for this session? 149 | - Oracles: Who or what will help you act on the information you discover? (see [oracles](/toolbox/oracles.md)) 150 | - Risks: What do you want to learn about? (see [charters](/toolbox/charters.md)) 151 | - Consider: What questions do you want to answer? (see **`WWWWWHEK`** mnemonic) 152 | - Heuristics: What triggers will guide your session? (see [heuristics](/toolbox/heuristics.md)) 153 | - **When ending an exploratory testing session: `PROOF`** by [Jon Bach](https://jonbox.wordpress.com/) 154 | - Past: what was explored during the session (e.g. charter) 155 | - Results: what was discovered 156 | - Obstacles: what prevented better testing 157 | - Outlooks: which product areas should be explored next 158 | - Feelings: what emotions were felt while using the product 159 | - **When exploring APIs: `ICE OVER MAD`** by [Ash Winter](https://testingisbelieving.blogspot.com/2013/11/johnny-mnemonic-iceovermad.html) 160 | - Integration: how will consumers use this service? 161 | - Consumers: who will use it — humans or machines? 162 | - Endpoints: how many endpoints and how are they accessed? 163 | - Operations: what is the purpose of the service? 164 | - Volume: what is the expected usage size and pattern? 165 | - Errors: does it handle and log failures? 166 | - RESTful: is it? 167 | - Modularity: how does each part of the service work together? 168 | - Authentication: sessions, privacy, encryption, permissions, etc. 169 | - Definitions: what is the contract of the service? 170 | - **When exploring usability: `CAN I USE THIS`** by [David Greenlees](https://martialtester.files.wordpress.com/2013/11/bsm_can-i-use-this-nov-2013.pdf) 171 | - Comparable products 172 | - Accessibility 173 | - Navigation 174 | - Intuitive 175 | - Users 176 | - Standards 177 | - Emotional response 178 | - Training (drunk test) 179 | - Heuristics (e.g. Jakob Nielsen's top 10) 180 | - Instructions 181 | - Satisfaction 182 | 183 | ### 🔎 Testability 184 | 185 | - **When assessing your product's [testability](/concepts/testability.md): `SOCKS`** by [Adam Knight](https://www.a-sisyphean-task.com/2012/07/putting-your-testability-socks-on.html) 186 | - Simplicity: you should reduce complexity, so that your product is easy to test and maintain 187 | - Observability: you should be able to monitor what the product did, when and how 188 | - Control: you should be able to influence the state of the product to reproduce scenarios at will 189 | - Knowledge: you should have access to information (and [oracles](/toolbox/oracles.md)) about the product under test 190 | - Stability: you should have a stable environment and product to perform your testing 191 | - **When looking for ways to improve your product's [testability](/concepts/testability.md): `VIPS`** by [James Bach](https://www.satisfice.com/download/heuristics-of-software-testability) 192 | - Value: changing the quality standard or our knowledge of it 193 | - Intrinsic (aka. product): changing the system itself 194 | - Project: changing the conditions under which we test 195 | - Subjective (aka.
tester): changing the tester or the test process 196 | - **When expanding your knowledge on testing: `SACKED SCOWS`** by [James Bach](https://testsidestory.com/2010/03/16/a-lesson-learned-from-james-bach/) 197 | - Scouting obsessively: _I discover the sources and tools I will need_ 198 | - Authentic problems: _engage my mind_ 199 | - Cognitive savvy: _means working with the rhythms of my mind_ 200 | - Knowledge attracts knowledge: _the more I know, the easier I learn_ 201 | - Experimentation: _makes learning vivid and direct_ 202 | - Disposable time: _lets me try new things without regrets_ 203 | - Stories: _are how I make sense of things_ 204 | - Contrasting ideas: _leads to better ideas_ 205 | - Other minds: _exercise my thinking and identify my flaws_ 206 | - Words and pictures: _make a home for my thoughts_ 207 | - Systems thinking: _helps me tame complexity_ 208 | 209 | ### 👥 Team 210 | 211 | - **When starting a new project or joining a new team: `MIDTESTD`** by [James Bach](https://www.satisfice.com/download/heuristic-test-strategy-model) 212 | - Mission: what must you do to satisfy your client? 213 | - Information: what context about the product do you need for testing? 214 | - Developer relations: how can you get to know the developers? 215 | - Test team: who will perform or support testing? 216 | - Equipment: which hardware, software, docs or tools do you need for testing? 217 | - Schedule: when and how will team members align and share with each other? 218 | - Test items: which parts of the product should be tested? 219 | - Deliverables: which observable artefacts are expected to come out of testing? 220 | - **When designing a test strategy for a new project: `SADDEST PPIRATEE`** by [Diogo Nunes](https://www.diogonunes.com/blog/test-strategy-saddest-ppiratee-mnemonic/) (adapted from **[`GRATEDD SCRIPTS`](http://www.software-testing.com.au/blog/2009/07/21/thinking-about-test-strategy-a-mnemonic-device/)**) 221 | - Scope 222 | - Approach 223 | - Dependencies 224 | - Data 225 | - Environments 226 | - Stakeholders 227 | - Time 228 | - Product 229 | - Prioritisation 230 | - Information 231 | - Risks 232 | - Architecture 233 | - Technologies 234 | - Experience 235 | - Emotions 236 | 237 | ## Sources 238 | 239 | - [CAN I USE THIS](https://martialtester.files.wordpress.com/2013/11/bsm_can-i-use-this-nov-2013.pdf) 240 | - [CRUSSPIC STMPL](http://thetesteye.com/posters/TheTestEye_SoftwareQualityCharacteristics.pdf) 241 | - [FAILURE](https://www.questioningsoftware.com/2007/08/failure-usability.html) 242 | - [FEW HICCUPPS](https://www.developsense.com/blog/2010/05/transpection-transpected/) 243 | - [ICEOVERMAD](https://testingisbelieving.blogspot.com/2013/11/johnny-mnemonic-iceovermad.html) 244 | - [INVEST](https://xp123.com/articles/invest-in-good-stories-and-smart-tasks/) 245 | - [MIDTESTD](https://www.satisfice.com/download/heuristic-test-strategy-model) 246 | - [NoNeLaNe](https://watirmelon.blog/2019/10/14/now-next-later-never-improving-moscow/) 247 | - [OOPSI](https://jennyjmar.com/2016/04/16/bdd-discovery-and-oopsi/) 248 | - [PARM](https://testguild.com/avoid-zombie-test-automation-essential-survival-guide/) 249 | - [PROOF](https://jonbox.wordpress.com/) 250 | - [RCRCRC](https://testandcode.com/38) 251 | - [REACT](http://www.brendanconnolly.net/react-to-bugs/) 252 | - [RIMGEA](https://www.kenst.com/2018/02/how-to-write-a-good-bug-report-use-rimgen/) 253 | - [SACKED SCOWS](https://testsidestory.com/2010/03/16/a-lesson-learned-from-james-bach/) 254 | - 
[SACRED](https://youtu.be/z9m_yZMswOQ?t=56) 255 | - [SFDIPOT](https://www.satisfice.com/download/heuristic-test-strategy-model) 256 | - [SOCKS](https://www.a-sisyphean-task.com/2012/07/putting-your-testability-socks-on.html) 257 | - [TORCH](https://docs.google.com/document/d/1rKYmujVhUlNgfeYIBot12Z8E7S0Y_Z4pk5pefK7xO3g/edit) 258 | - [VIPS](https://www.satisfice.com/download/heuristics-of-software-testability) 259 | - [WWWWWHEK](http://www.bettertesting.co.uk/content/?p=857) 260 | -------------------------------------------------------------------------------- /docs/toolbox/note-taking.md: -------------------------------------------------------------------------------- 1 | # Note-taking 2 | 3 | > **tl;dr** Your notes structure your thinking, expand your memory and share information across the team. 4 | 5 | ## Theory 6 | 7 | We testers discover, gather and share information. 8 | 9 | Keeping that information only inside your head is both ephemeral and ineffective. What you discover is of little use if you cannot communicate that information to the right audience, in a clear and engaging format. 10 | 11 | When we recall things we often miss important details or we remember them in a way that reinforces our own thinking ([confirmation bias](/toolbox/biases.md)). Writing notes frequently during your testing sessions reduces this risk. 12 | 13 | Moreover, note-taking is a skill that greatly improves your exploratory testing by giving it structure. Good exploratory testing debriefs are insightful, searchable and shareable. 14 | 15 | Find your own way of supporting your memory and your ability to explain what you tested. 16 | 17 | ## Practice 18 | 19 | There are many tools and processes at your disposal. Alan Richardson lists a few in this [article](https://www.eviltester.com/2013/09/10-experiments-to-improve-your.html): 20 | 21 | - Memory 22 | - Pen and paper 23 | - Text editor + screenshot tool 24 | - Spreadsheet 25 | - Record the screen + talk as you test 26 | - Tool optimised for Exploratory Testing _(e.g. [Rapid Reporter](http://testing.gershon.info/reporter/))_ 27 | - Diagram 28 | - Mind map 29 | 30 | As a starting point, you have the [Cornell method](https://youtu.be/cfaZVfQgt0U?t=158), which requires just pen and paper. Draw a vertical line that creates a sidebar one third the width of the page. Use the bigger area to take notes and the sidebar to extract key findings from those notes. At the bottom of the page you can write a summary of the whole sheet. This is the _"vanilla"_ flavour of the method — you are encouraged to add your own _"toppings"_ to it! For example, Richard Bradshaw likes to [tag key findings with letters and icons](https://youtu.be/H0rFIQe4Chs). 31 | 32 | ### 👩‍💼👨‍💼 Meetings 33 | 34 | You might find yourself in a meeting on behalf of your team. This often happens in meetings where your team doesn't make decisions, but needs to give information or take questions. You save them time and they expect a summary of the meeting. When taking notes in meetings focus on: 35 | 36 | - **Facts** 37 | - _e.g. Ann was chosen as spokesperson for the design team_ 38 | - **Issues** 39 | - _e.g. Devs are blocked waiting for design mockups_ 40 | - **Decisions** 41 | - _e.g. Devs will focus on Epic 12 while they wait for 34's designs_ 42 | - **To do** 43 | - _e.g. Ask Ann: What's the progress on mockups? Is there a draft devs can use in the meantime?_ 44 | - **To clarify** 45 | - _e.g. When are regression tests executed?
Ask Gary or `#regressions` Slack channel_ 46 | 47 | Note-taking is also useful to reduce rambling during meetings. Share your screen while you're taking notes and use the structure above to guide the conversation. This helps everyone visualise the topics of discussion and at the end of the meeting you'll have a free summary. 48 | 49 | > Tip: This [website](https://shoulditbeameeting.com/#/) lets you quickly decide if you really need that meeting. You can print the [flowchart](https://dannyroosevelt.com/ratethatmeeting-assets/should-it-be-a-meeting-flowchart.pdf) too. If your team has trouble ending meetings on time, you can use this [website](https://producthunt.com/posts/costie) to show how much that meeting is currently costing. 50 | 51 | ### 🧭 Exploratory sessions 52 | 53 | [Exploratory testing](/roles/exploratory-tester.md) is composed of three main phases: setup, explore, debrief. If you apply your note-taking skills during the explore phase, the debrief becomes much easier and more insightful. 54 | 55 | > As I explore I take notes like I'm creating a story for someone else to read, taking them through my process and thoughts. Sometimes it feels more natural to _tell_ what you're doing. The _show_ comes alive with screenshots, GIFs and video. 56 | > 57 | > — [Simon Tomes](https://www.qeek.co/blog/a-handy-note-taking-template-for-exploratory-testing) 58 | 59 | Simon Tomes shared [a template](https://docs.google.com/document/d/1rKYmujVhUlNgfeYIBot12Z8E7S0Y_Z4pk5pefK7xO3g) that provides structure and triggers for your exploratory testing sessions. As always, try it and tweak it to your taste. That document ([real example](https://docs.google.com/document/d/1tLovWr2aehnq-JRj1qzAXPv-7r8PdBSdx1uk-X5sr3c)) gives the reader not only a summary but also the details of the session. 60 | 61 | While you explore, take "[`KIIQ`](/toolbox/mnemonics.md)" notes, focusing on four subjects: 62 | 63 | - **K**udos: what you enjoyed doing or seeing in your exploration 64 | - **I**deas: what you think could be done differently 65 | - **I**ssues: what is wrong and needs to be fixed or discussed 66 | - **Q**uestions: missing information or context that needs follow-up 67 | 68 | ### 👀 User testing 69 | 70 | When performing a user testing session it's helpful to have one facilitator and one observer. The observer focuses on taking notes, while the facilitator guides the user through the exercise and clarifies any doubts. 71 | 72 | During the session, take notes that cover: 73 | 74 | - **Quotes** from participants — including praise, criticism, doubts and suggestions 75 | - **Issues** identified (and ideas on how to fix them) 76 | - Timestamps of relevant moments (in case you're recording the session) 77 | 78 | Tag user quotes with feelings, such as `#frustrated` `#surprised` `#pleased`. 79 | 80 | Use shorthand or even sketches to be faster or more accurate. After a few sessions, patterns tend to emerge, so take more notes in the first few sessions and use the following ones to collect evidence for recurring issues. 81 | 82 | When you have finished all your sessions, write a single document that summarises what you learned. Include what your users felt, where they struggled, what they enjoyed, what they longed for, and list any bugs or issues found during those sessions.
83 | 84 | ## Teachers 85 | 86 | - [Alan Richardson](https://www.eviltester.com/) 87 | - [Simon Tomes](https://www.qeek.co/author/VjuuLCMAACkH2iwj/simon-tomes) 88 | 89 | ## Sources 90 | 91 | - [Explaining Exploratory Testing Relies On Good Notes](http://thesocialtester.co.uk/explaining-exploratory-testing-relies-on-good-notes/) 92 | - [Session-Based Test Management](https://www.satisfice.com/download/session-based-test-management) 93 | - [Three Digestible Diagrams to Describe Exploratory Testing](https://dojo.ministryoftesting.com/dojo/lessons/three-digestible-diagrams-to-describe-exploratory-testing) 94 | - [A Handy Note-taking Template for Exploratory Testing](https://www.qeek.co/blog/a-handy-note-taking-template-for-exploratory-testing) 95 | - [10 Experiments to Improve Your Exploratory Testing Note Taking](https://club.ministryoftesting.com/t/10-days-of-note-taking-experimentation-start-mon-23rd/16981/20) 96 | -------------------------------------------------------------------------------- /docs/toolbox/oracles.md: -------------------------------------------------------------------------------- 1 | # Oracles 2 | 3 | > **tl;dr** An oracle is a source of knowledge that improves your testing. 4 | 5 | ## Theory 6 | 7 | There are a number of ways in which you can determine that you have discovered a defect in a software application. Those are your test oracles. Oracles are the mechanism by which you **recognise a problem**. They help you discover the real reason why you think there is a problem. 8 | 9 | Knowing your oracles means that you can **objectively explain** to developers and business stakeholders why the users of the software will agree that you have found a bug. This makes your [bug advocacy](/roles/bug-hunter.md) more effective. 10 | 11 | In **oracle-based testing**, you compare the behaviour of the program under test to the behaviour of a source you consider accurate (an oracle). You constantly look for answers to: Is this behaviour correct? Is this what the user expects? A tester who is familiar with the type of product under test will have no problem making these evaluations. However, a newcomer needs a reference — an oracle. 12 | 13 | Some oracles lead you to other oracles, which allow you to extend your initial test strategy into areas you haven't thought about before. Keep in mind that each oracle **focuses on a specific perspective**. If you limit yourself to a single oracle, it might bias your testing by giving you a narrow view of your context. That is why oracles are [heuristics](/toolbox/heuristics.md) — they are useful tools that help us make decisions, but sometimes they point us to the wrong decision. 14 | 15 | ## Practice 16 | 17 | Oracles come in many shapes; here are a few examples: 18 | 19 | - a document that gives specific correct outputs for given inputs; 20 | - an algorithm that a human can use to calculate correct outputs for given inputs; 21 | - another program, which takes the same input and produces the same output (for comparison); 22 | - a human domain expert, who can look at the output and tell whether it is correct; 23 | - or any other way of telling that a given output is correct. 24 |
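The "another program" oracle, for example, can be automated: feed the same inputs to the implementation under test and to a source you trust, then flag any disagreement. Here is a minimal sketch in TypeScript, where `fastSort` stands in for a hypothetical optimised rewrite being checked against the language's built-in sort:

```ts
// Differential testing: a trusted implementation acts as the oracle.
// `fastSort` is a hypothetical rewrite under test; here it is a stand-in.
function fastSort(xs: number[]): number[] {
  return [...xs].sort((a, b) => a - b);
}

// The oracle we trust: the built-in sort.
function referenceSort(xs: number[]): number[] {
  return [...xs].sort((a, b) => a - b);
}

for (let run = 0; run < 1000; run++) {
  const input = Array.from({ length: 20 }, () => Math.floor(Math.random() * 100));
  const actual = JSON.stringify(fastSort(input));
  const expected = JSON.stringify(referenceSort(input));
  if (actual !== expected) {
    throw new Error(`Oracle disagreement for input ${JSON.stringify(input)}`);
  }
}
console.log("fastSort agreed with the oracle on 1000 random inputs");
```

Note that when the two disagree, this oracle only tells you that the outputs differ, not which side is wrong; that judgement is still yours.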
25 | Use the [mnemonic](/toolbox/mnemonics.md) `FEW HICCUPPS` to remember these oracles: 26 | 27 | - **Familiarity**: Is it free of common/past bugs? 28 | - _e.g. the product exhibits behaviour X which was marked as a bug on previous releases_ 29 | - **Explainability**: Is it intuitive or do you need help? Is it clear or do you need an explanation? 30 | - _e.g. our beta testers were not sure what button X would do if clicked_ 31 | - **World**: How does it plug into what already exists in the world? 32 | - _e.g. our product can be used as a tool to carry out DoS attacks on other websites_ 33 | - **History**: Is the new version coherent with the older ones? 34 | - _e.g. feature X existed in the previous version and now it's gone_ 35 | - **Image**: Does it match the brand? Does it affect the reputation? 36 | - _e.g. colours don't match the company guidelines_ 37 | - _e.g. the choice of words/images might sound aggressive for culture X_ 38 | - **Compare**: How does it compare with competitors? 39 | - _e.g. players in our market solve X by doing Y and there's no added value in doing it differently_ 40 | - **Claims**: Does the product match what the sales/marketing says about it? 41 | - _e.g. our latest ad claims our product does X but it doesn't_ 42 | - **Users**: Does it meet the desires and expectations of the end-users? 43 | - _e.g. users of this market expect this screen to be customisable_ 44 | - **Product**: How does it compare with internal products? 45 | - _e.g. our product A has a sidebar but our product B uses a top navbar to accomplish the same_ 46 | - **Purpose**: Does it meet the desires and expectations of the company? 47 | - _e.g. feature A doesn't meet the requirements given by our Product Owner_ 48 | - **Standards**: Does it comply with (external) laws and regulations? 49 | - _e.g. this page scored too low on W3C's accessibility test_ 50 | 51 | This [link](http://www.testingeducation.org/k04/examples/obas05s.html) shows how to apply the "comparable products" oracle to test the copy-paste feature in different software. 52 | 53 | ## Teachers 54 | 55 | - [Michael Bolton](http://www.developsense.com/blog/) 56 | 57 | ## Sources 58 | 59 | - [Software Testing Clinic's 99 seconds intro](https://dojo.ministryoftesting.com/lessons/99-second-introduction-to-oracles) 60 | - [A Course in Black Box Software Testing](http://www.testingeducation.org/k04/OracleExamples.htm) 61 | - [Heuristics and Oracles](https://katrinatester.blogspot.pt/2014/09/heuristics-and-oracles.html) 62 | - [The Oracle Problem and the Teaching of Software Testing](http://kaner.com/?p=190) 63 | - [What is a test oracle, and what is it used for?](https://stackoverflow.com/a/23971174/675577) 64 | -------------------------------------------------------------------------------- /docs/toolbox/test-strategy.md: -------------------------------------------------------------------------------- 1 | # Test strategy 2 | 3 | > **tl;dr** Describes how your team will test and report any discoveries. 4 | 5 | ## Theory 6 | 7 | The test strategy is defined at the start of the project and it can be revisited and reviewed throughout the project. It's typically created to answer the five Ws (see [**`W5H`** mnemonic](/toolbox/mnemonics.md)).
8 | 9 | The strategy is usually driven by the testers, yet it should always include feedback from all team members: 10 | 11 | - _Software Testers_ want to know what they might be testing over the course of the project 12 | - _Software Developers_ want to know how they can increase [testability](/concepts/testability.md) 13 | - _Project Managers_ want to understand how you'll test, so they can be confident in making a decision to release 14 | - _Product Owners_ can tell you how the product is meant to be used 15 | - _Customer Support_ can tell you how the customer uses the system, and the kind of problems they encounter 16 | - _Sales_ can tell you which products are the most popular 17 | 18 | > A good test strategy is a rich collage of different processes, techniques and tools. 19 | > 20 | > — [Mark Winteringham](https://www.mwtestconsultancy.co.uk/bdd-testing-part-4/) 21 | > 22 | > The test strategy is the set of ideas that guide your test process to fulfil your (testing) mission. 23 | > 24 | > — ["The life of one man"](https://thelifeofoneman.com/the-testing-strategy) 25 | 26 | This strategy reveals the way tests will be designed and executed to foster quality. To do so, it needs to answer several questions, like what parts of the product will be tested, what test techniques will be used, who will be involved, and how long it will take. The number of questions and the detail of the answers depend on your project's [requirements](/concepts/requirements.md), so you will have to choose between a formal, detailed document and a simple, abridged alternative. 27 | 28 | > Every time I look at any of the dozens of test plans my teams have written, I see dead test plans. This begs the question: if a plan isn't worth bothering to update, is it worth creating in the first place? 29 | > 30 | > — [James Whittaker](https://testing.googleblog.com/2011/09/10-minute-test-plan.html) 31 | 32 | Documents are for communicating information between people. Lacking a test strategy document doesn't mean a strategy doesn't exist. Your test strategy could be a single page/slide, a mind map, or a simple checklist. Use what fits your client's needs best. 33 | 34 | First discover **who** needs to read this document, and then ask **what** they want to know and what level of detail they expect. In a regulated or controlled environment, such a document could be mandatory, maybe even a legal requirement. 35 | 36 | > Search for 'How to write a test plan' on the internet and there are all sorts of templates. Templates are often too generic or too specific and quickly become outdated. Once it's written, it's quite common to find out that almost no-one has actually read it. 37 | > 38 | > Instead, focus on selecting the best content when writing a test plan. For everything you decide to add, ask yourself: Does the reader need to know this? Is this relevant information? What else could I be doing with this time? 39 | > 40 | > — [Anthony Vallone (Google Testing Blog)](https://testing.googleblog.com/2016/06/the-inquiry-method-for-test-planning.html) and [Claire Reckless](https://www.ministryoftesting.com/dojo/lessons/the-one-page-test-plan) 41 | 42 | Keep in mind that your strategy might evolve over time. 43 | 44 | > An emergent strategy (…) emerges as assumptions get tested, constraints become concrete and context changes. This happens when you begin testing and discover a whole new set of risks and change direction to accommodate that.
When working in complex, uncertain environments, when there's more questions than answers, it makes sense for teams to be aware of the emergent nature of a software testing strategy. 45 | > 46 | > — [Anne-Marie Charrett](https://mavericktester.com/2020/01/13/emergent-strategy/) 47 | 48 | As with [exploratory testing](/roles/exploratory-tester.md), the plan is just a foundation; you can adapt as you learn more about your context. In fact, specifying a test strategy is an effective exercise to collect information about your context, because of all the answers it demands from you. And since you have to write down that information, it's a way to organise your thoughts. 49 | 50 | > Using a test plan as a mechanism to seek answers, to drive information consensus, and to prepare yourself, can make it worthwhile. Make it valuable for you and your stakeholders. And if you can't, don't be afraid to get rid of it. 51 | > 52 | > — [Richard C Paterson](https://dojo.ministryoftesting.com/lessons/how-to-write-a-software-test-plan) 53 | 54 | ## Practice 55 | 56 | > Creating a test plan is complex. An ideal test plan is (…) a balance of implementation cost, maintenance cost, benefit, and risk. 57 | > 58 | > — [Anthony Vallone (Google Testing Blog)](https://testing.googleblog.com/2016/06/the-inquiry-method-for-test-planning.html) 59 | 60 | [James Bach](https://www.satisfice.com/download/test-plan-evaluation-model) proposes a list of characteristics of a good test strategy. Keep these criteria in mind while you develop your strategy. You can also revisit them when you're finished, to check if you can improve your test strategy in any area: 61 | 62 | - _Usefulness:_ Will the test strategy effectively serve its purpose? 63 | - _Accuracy:_ Is it accurate with respect to any statements or facts? 64 | - _Efficiency:_ Does it make efficient use of available resources? 65 | - _Adaptability:_ Can it accommodate changes and unpredictability in the project? 66 | - _Feasibility:_ Does the organisation have the capabilities to implement this strategy? 67 | - _Clarity:_ Is it coherent and unambiguous? 68 | - _Usability:_ Is the test strategy document concise, searchable and shareable? 69 | - _Compliance:_ Does it meet externally imposed requirements? 70 | 71 | A good starting point is the Heuristic Test Strategy Model, also from [James Bach](https://www.satisfice.com/download/heuristic-test-strategy-model). According to that model, testing is influenced by the project environment, quality criteria, and test techniques. [Dan Ashby](https://danashby.co.uk/2017/12/13/a-new-model-for-test-strategies/) then simplified that model into this intuitive diagram. 72 | 73 | ![DanAshbyHTSM](../_media/articles/modified-htsm1.png) 74 | 75 | This visual representation of a possible test strategy is useful to illustrate how different testing activities transform data into information into quality. 76 | 77 | Now that you have an overview of a possible test strategy, it's time to fill in the details, and an effective way to do so is by asking questions. [Simon Knight](https://sjpknight.com/post/test-planning-simplified/) suggests that you keep it simple at first, using [W5H](/toolbox/mnemonics.md): 78 | 79 | > - _Why_ does it need testing? – The reason to spend time/people testing specific areas. Also risks. 80 | > - _What_ will (and won't) be tested? – The scope of your testing. 81 | > - _How_ will it be tested? – The technical details (tools, envs, data, automation, etc.) 82 | > - _Who_ will test it? – Who's responsible for what.
83 | > - _Where_ can we get information about what we are testing? – see [Oracles](/toolbox/oracles.md). 84 | > - _When_ will testing start and finish? – How do we know it's complete? 85 | 86 | Those questions cover the most critical aspects of any strategy to test your product. You can use them to generate more questions and detail your strategy even more. As the number of questions increases, we should group them into categories. [Diogo Nunes](https://www.diogonunes.com/blog/test-strategy-saddest-ppiratee-mnemonic) took [Jared Quinert](http://www.software-testing.com.au/blog/2009/07/21/thinking-about-test-strategy-a-mnemonic-device/)'s [**`GRATEDD SCRIPTS`**](/toolbox/mnemonics.md) mnemonic and further expanded it with questions from [Erik Brickarp](http://erik.brickarp.se/2016/11/test-plan-questions.html) and [Michael Bolton](https://www.developsense.com/blog/2010/11/context-free-questions-for-testing/). The result was this mind map, and a new mnemonic called [**`SADDEST PPIRATEE`**](/toolbox/mnemonics.md): 87 | 88 | ![TestStrategy-SADDEST-PPIRATEE](../_media/articles/Test-Strategy-Overview-(SADDEST-PPIRATEE).png) 89 | 90 | > **Scope** 91 | > 92 | > - How would you describe the success of this project? 93 | > - What are we expected to deliver? 94 | > - What are we expected to achieve? 95 | > - (extra) 96 | > - How flexible is the scope? Can we descope if needed? 97 | > - What are the functional requirements? 98 | > - What are the non-functional requirements? 99 | > 100 | > **Product** 101 | > 102 | > - What problem are we solving? For whom? 103 | > - What should our solution do? Scenarios? 104 | > - (extra) 105 | > - What should our solution never do? 106 | > - Are there alternatives or competitors to our product? 107 | > - How are we expected to be different from the alternatives? Or the same? 108 | > - How are users expected to interact with our product? 109 | > - Which platforms (e.g. OS, browser) should we support? 110 | > 111 | > **Stakeholders** 112 | > 113 | > - Who has a stake in our success? And failure? 114 | > - Who is our client? (e.g. who pays for the product) 115 | > - Who is our user? (e.g. who uses the product) 116 | > - (extra) 117 | > - Any other stakeholders? 118 | > - What are their expectations? And concerns? 119 | > - Who is our team? What are their roles? 120 | > - Who can we trust? Who should we avoid? 121 | > 122 | > **Risks** 123 | > 124 | > - What would threaten our success? 125 | > - What is likely to change? 126 | > - What is still unknown? 127 | > - (extra) 128 | > - Do we foresee any obstacles or pain points? 129 | > - How do we continuously verify we're on target? 130 | > - Do we have any concerns or fears? 131 | > - What's the worst thing that could happen? How can we avoid that? 132 | > 133 | > **Dependencies** 134 | > 135 | > - Is our delivery influenced by someone/thing outside our team? 136 | > - Do we need to cooperate with other teams? When, how and why? 137 | > - (extra) 138 | > - Do we have to comply with rules/regulations? 139 | > 140 | > **Approach** 141 | > 142 | > - How will we work together? (e.g. scrum, kanban) 143 | > - How will we develop our product? (e.g. pairing, TDD) 144 | > - What would a typical day look like? 145 | > - (extra) 146 | > - What is our definition of done? 147 | > - How would we recognise a bug? (e.g. oracle) 148 | > - How should we react when we find a bug? 149 | > - How do we make decisions and resolve conflicts? 150 | > - How can we split testing among the team? 151 | > - How do we handle onboarding? And handover?
152 | > - Any regulations or rules that influence or limit the way we work? 153 | > 154 | > **Prioritisation** 155 | > 156 | > - Who will set priorities? 157 | > - Who reviews/approves our delivery? 158 | > - Who perceives the quality of our delivery? 159 | > - (extra) 160 | > - Quality, Cost, Time: pick two 161 | > - What other values are paramount? 162 | > 163 | > **Time** 164 | > 165 | > - Any important dates? 166 | > - Any recurring events or ceremonies? 167 | > - (extra) 168 | > - How much time do we have to deliver? 169 | > - What happens if we miss a deadline? 170 | > 171 | > **Architecture** 172 | > 173 | > - Can you draw the main components of our system? 174 | > - How do they interact? 175 | > 176 | > **Technologies** 177 | > 178 | > - Are we expected to use any specific tools/languages? 179 | > - Which tools do we want to use to develop? And test? And deliver? And communicate? 180 | > - (extra) 181 | > - What is the technological landscape where our product will live? 182 | > - What tools are we expected to build? 183 | > - What equipment and tools are available to support our testing? 184 | > - Do we have enough resources to meet the expectations? 185 | > - Should we use open-source? Can we pay for SaaS? 186 | > 187 | > **Environments** 188 | > 189 | > - How many do we need? For what? 190 | > - Who will manage them? Who has access? 191 | > - (extra) 192 | > - What should change to increase testability? 193 | > - What should change to speed up feedback? 194 | > - How can we create/update test data? 195 | > 196 | > **Data** 197 | > 198 | > - Which metrics are relevant to us? 199 | > - (extra) 200 | > - What data should we collect about our product? 201 | > - What data should we collect about our approach? 202 | > - How do we display that data? And make it visible? 203 | > - Should we be notified when thresholds are crossed? 204 | > 205 | > **Information** 206 | > 207 | > - What is meaningful to test? 208 | > - What questions should our testing answer? 209 | > - How should those answers be reported? To whom? 210 | > - (extra) 211 | > - What do we need to learn more about? 212 | > - Where can we get information about X? 213 | > Who do we contact? 214 | > - Where do we share knowledge? How? 215 | > - How do we provide feedback to each other? 216 | > - How do we track and visualise our testing? 217 | > 218 | > **Experience** 219 | > 220 | > - Have we ever worked in a similar context? 221 | > - What skills/experience can be found in the team? 222 | > - (extra) 223 | > - Are we lacking any skills critical to our success? 224 | > - Who else knows something about this, inside our organisation? 225 | > - Who are the experts, even if outside our organisation? 226 | > - Which tools and techniques are useful in our context? 227 | > 228 | > **Emotions** 229 | > 230 | > - How do you feel about our product? 231 | > - What do users feel and say about it? 232 | 233 | If you're not sure which areas should be tested, you can do a risk mapping exercise. [Sam Connelly](https://bughuntersam.com/visual-risk-ui-automation-framework/) explains step by step how you can make one. [Diogo Nunes](https://www.diogonunes.com/blog/risk-mapping-prioritisation/) later adapted that exercise for remote teams.
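If you want a rough starting point before running such an exercise with the team, the classic "likelihood × impact" scoring takes only a few lines. This is a toy sketch in TypeScript (the features and numbers are invented), not the actual exercise from those articles:

```ts
// Toy risk scoring: rate likelihood and impact on a 1-5 scale,
// then test the highest-scoring areas first. All data is invented.
type Risk = { feature: string; likelihood: number; impact: number };

const risks: Risk[] = [
  { feature: "checkout", likelihood: 3, impact: 5 },
  { feature: "search", likelihood: 4, impact: 3 },
  { feature: "profile page", likelihood: 2, impact: 2 },
];

const prioritised = risks
  .map((r) => ({ ...r, score: r.likelihood * r.impact }))
  .sort((a, b) => b.score - a.score);

for (const r of prioritised) {
  console.log(`${r.score}\t${r.feature}`);
}
// Prints: 15 checkout, 12 search, 4 profile page
```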
234 | 235 | ## Teachers 236 | 237 | - [Dan Ashby](https://danashby.co.uk/) 238 | - [James Bach](https://www.satisfice.com/) 239 | - [Michael Bolton](https://www.developsense.com) 240 | 241 | ## Sources 242 | 243 | - [Test Plan Evaluation Model](https://www.satisfice.com/download/test-plan-evaluation-model) 244 | - [When the rubber hits the road](https://mavericktester.com/2020/01/13/emergent-strategy/) 245 | - [The One Page Test Plan](https://www.ministryoftesting.com/dojo/lessons/the-one-page-test-plan) 246 | - [How To Write A Software Test Plan](https://dojo.ministryoftesting.com/lessons/how-to-write-a-software-test-plan) 247 | - [An update to the Heuristic Test Strategy Model](https://danashby.co.uk/2017/12/13/a-new-model-for-test-strategies/) 248 | - [Thinking about Test Strategy – A mnemonic device](http://www.software-testing.com.au/blog/2009/07/21/thinking-about-test-strategy-a-mnemonic-device/) 249 | - [Test plan questions](http://erik.brickarp.se/2016/11/test-plan-questions.html) 250 | - [Context-Free Questions for Testing](https://www.developsense.com/blog/2010/11/context-free-questions-for-testing/) 251 | - [The Inquiry Method for Test Planning](https://testing.googleblog.com/2016/06/the-inquiry-method-for-test-planning.html) 252 | - [Five Factor Testing](https://madeintandem.com/blog/five-factor-testing/) 253 | - [A Mobile App Test Strategy](https://bughuntersam.com/a-mobile-app-test-strategy/) 254 | - [That elusive Test Strategy](https://bughuntersam.com/that-elusive-test-strategy/) 255 | - [Test Planning Simplified](https://sjpknight.com/post/test-planning-simplified) 256 | -------------------------------------------------------------------------------- /docs/types/test-pyramid.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # The test pyramid model 4 | 5 | > **tl;dr** A visual metaphor that groups tests into layers and recommends how many tests should exist in each of these layers. 6 | 7 | ## Theory 8 | 9 | The concept of a test pyramid was initially introduced by [Mike Cohn](https://amzn.to/3W7LLKz). It is a visual metaphor that groups tests into layers and recommends how many tests we should have in each of these layers. 10 | 11 | ![original test pyramid](../_media/articles/test-pyramid.png) 12 | 13 | This visual model conveys several messages: 14 | 15 | - You should have multiple test types or layers 16 | - You should have more [unit tests](/types/test-types.md) than service tests, and more of those than UI tests 17 | - Tests at the base of the pyramid (unit) are faster, cheaper and more precise 18 | - Tests at the top of the pyramid (UI) are slower, more expensive and more realistic 19 | 20 | Unit tests ensure a small and specific unit of code works as intended in isolation. That's why they are cheap and fast to run. These units of code then interact with other parts of the system to provide services. The behaviour of those services is tested by the service tests. Finally, a human needs to interact with those services and that is done through a User Interface (UI). The UI tests check that the system as a whole does what the user expects, and that's why those tests are the slowest to run and most expensive to debug. 21 |
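To see what these layers look like in code, here is a minimal sketch of the same behaviour exercised at the bottom and at the top of the pyramid. The module, page and selectors are hypothetical; the unit test uses Jest syntax and the UI test uses Playwright:

```ts
// pricing.test.ts (unit layer: runs in milliseconds, pinpoints the bug)
import { totalWithVat } from "./pricing"; // hypothetical module under test

it("adds 20% VAT to the net total", () => {
  expect(totalWithVat(100)).toBe(120);
});

// cart.spec.ts (UI layer: runs in seconds, exercises the whole stack)
import { test, expect as uiExpect } from "@playwright/test";

test("cart shows the VAT-inclusive total", async ({ page }) => {
  await page.goto("https://shop.example/cart"); // hypothetical URL and selectors
  await page.fill("#quantity", "1");
  await uiExpect(page.locator("#total")).toHaveText("€120.00");
});
```

If the UI test fails while the unit test still passes, the problem lives in the wiring between the layers, which is exactly the kind of signal this model is meant to give you.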
22 | > The test pyramid is a way of thinking about how different kinds of automated tests should be used to create a balanced portfolio 23 | > 24 | > — [Martin Fowler](https://martinfowler.com/bliki/TestPyramid.html) 25 | 26 | Since it was introduced in 2009, new test types were developed. The "service tests" are nowadays called "integration tests" and may include [API testing](/types/test-types.md) and [Contract testing](/types/test-types.md). The "UI tests" are nowadays split between [frontend testing](/types/test-types.md) and [end-to-end testing](/types/test-types.md). 27 | 28 | The simplicity and usefulness of this model made the test pyramid very popular, and many engineers and testers still use it as a reference today. 29 | 30 | > "All models are wrong, some are useful" 31 | > 32 | > — [George Box](https://en.wikipedia.org/wiki/George_E._P._Box) 33 | 34 | Despite its popularity, it is not without flaws. Some people say the model is overly simplistic because it doesn't include all testing done for a project. Others say it is biased towards automation, like Dan Ashby who calls it ["the automation triangle"](https://danashby.co.uk/2018/05/03/a-better-testing-pyramid/). 35 | 36 | ### The testing trophy 37 | 38 | > I want to be confident that my code satisfies the requirements and I'll use a mix of the different testing strategies to accomplish that goal. 39 | > 40 | > — [Kent Dodds](https://kentcdodds.com/blog/static-vs-unit-vs-integration-vs-e2e-tests) 41 | 42 | Almost [10 years later](https://twitter.com/kentcdodds/status/960723172591992832?lang=en), Kent Dodds published his modern take on the test pyramid and called it the testing trophy. It's an overall improvement over the previous model in [several aspects](https://kentcdodds.com/blog/static-vs-unit-vs-integration-vs-e2e-tests). 43 | 44 | ![Test trophy](../_media/articles/test-pyramid-trophy.png) 45 | 46 | Here's a summary, paraphrasing the author: 47 | 48 | - **Purpose.** Why do _you_ write tests? Confidence. I want to be confident that the code I'm writing won't break the app. I want to get the most confidence out of the tests I write, and I'm aware of the trade-offs I'm making when testing. 49 | - **Static tests**. Linters and other static analysis tools scan your code for typos, type errors, common mistakes, potential bugs. 50 | - **Unit tests**: Verify that individual, isolated parts work as expected. 51 | - **Integration tests**: Verify that several units work together in harmony. 52 | - **End-to-End tests**: Simulate the user interacting with the app and verify that it functions correctly. 53 | - **Flexible.** The size of each test layer may differ based on what your team values. The proportions are not meant to be taken as rigid rules. It also depends on how easy it is to test your app with the tools available. 54 | 55 | > Every level comes with its own trade-offs. An E2E test has more points of failure making it often harder to track down the problem, but it also means that your test is giving you more confidence. 56 | > 57 | > — [Kent Dodds](https://kentcdodds.com/blog/static-vs-unit-vs-integration-vs-e2e-tests) 58 | 59 | ## Practice 60 | 61 | As with any tool, "keep the best and discard the rest". 62 | 63 | Think of the test pyramid as a [heuristic](/toolbox/heuristics.md) to trigger your thinking, a starting point — it should not be blindly pursued as perfection or the end goal. It teaches you four valuable lessons: 64 | 65 | 1. Write tests with different granularity 66 | 2. High-level tests should be realistic (and you pay for it) 67 | 3. Low-level tests should be fast and precise 68 | 4. If a higher-level test fails without a lower-level test failing too, you need to write a lower-level test 69 | 70 | That last one needs a bit more explanation.
Tests at the top of the pyramid exercise multiple parts of the system at the same time. If one of those tests fails, it tells you _"there's a problem with X"_, but without a lower-level test you will not know the cause of the problem. Writing that lower-level test pinpoints the cause, and it will also serve as a good [regression test](/types/test-types.md) for the future. 71 | 72 | It also keeps your test suite fast. If you have tested all conditions on a lower-level test, the extra confidence you get from a higher-level test is small. Redundant tests make your development process more costly because you need to change more tests when you change the behaviour of your code. 74 | > The more your tests resemble the way your software is used, the more confidence they can give you. 75 | > 76 | > — [Kent Dodds](https://twitter.com/kentcdodds/status/977018512689455106) 77 | 78 | The "pyramid" concept also suggests an analogy to construction. Each test layer is a different material used in the construction. A building made of a single material is not as strong as one that uses multiple specialised materials, each contributing differently to the stability of the building. 79 | 80 | If you prefer cheese to construction, here's another analogy. Swiss cheese slices have holes. We want to create a surface you can't see through, thus we layer one slice on top of the other. Individually, slices have holes (limitations) and don't cover the whole surface, but together the surface of one layer covers the holes of the layer below! 81 | 82 | And that's what we want to achieve with our testing layers. Individually each layer has coverage gaps, but all together we efficiently maximise coverage. "The right tool for the right job". "Divide and conquer". 83 | 84 | ![Rosie's test pyramid with circles](../_media/articles/test-pyramid-rosie-circles.png) 85 | 86 | With time, many more models were created besides the original pyramid. Nowadays you have many flavours to pick from, just like ice creams. Speaking of which: 87 | 88 | - [the ice cream cone](https://alisterbscott.com/kb/testing-pyramids/) (or [cupcake](https://www.thoughtworks.com/insights/blog/introducing-software-testing-cupcake-anti-pattern)) 89 | - [the trophy](https://kentcdodds.com/blog/the-testing-trophy-and-testing-classifications) 90 | - [the box of rocks](https://gerg.dev/2018/05/testing-is-like-a-box-of-rocks/) 91 | - [the honeycomb](https://medium.com/@fistsOfReason/testing-is-good-pyramids-are-bad-ice-cream-cones-are-the-worst-ad94b9b2f05f) 92 | - [the planet](https://www.dropbox.com/s/ahnmtqt4c5l1qv7/round%20earth.pdf?dl=0) 93 | - [the snowman](https://angryweasel.com/blog/the-test-automation-snowman) 94 | - [the wheel](https://www.ministryoftesting.com/dojo/lessons/an-introduction-to-the-automation-test-wheel) 95 | - [the filters](https://infiniteundo.com/post/158179632683/abandoning-the-pyramid-of-testing-in-favor-of-a) 96 | - and [many more](http://www.testingreferences.com/here_be_pyramids.php) 97 | 98 | ![Ice cream cone anti-pattern](../_media/articles/test-pyramid-icecream.png) 99 | 100 | What they all have in common is that their authors are trying to convey a message visually. Use whatever best illustrates your narrative or create your own. 101 | 102 | ## Teachers 103 | 104 | - [Alister Scott](https://alisterbscott.com/kb/testing-pyramids/) 105 | - [Kent C.
Dodds](https://kentcdodds.com/) 106 | - [Martin Fowler](https://martinfowler.com/bliki/TestPyramid.html) 107 | - [Mike Cohn](https://www.mountaingoatsoftware.com/blog) 108 | 109 | ## Sources 110 | 111 | - [The Practical Test Pyramid](https://martinfowler.com/articles/practical-test-pyramid.html) 112 | - [Just Say No to More End-to-End Tests](https://testing.googleblog.com/2015/04/just-say-no-to-more-end-to-end-tests.html) 113 | - [Static vs Unit vs Integration vs E2E Testing for Frontend Apps](https://kentcdodds.com/blog/static-vs-unit-vs-integration-vs-e2e-tests) 114 | - [Balancing the Test Automation Pyramid](https://medium.com/lydtech-consulting/balancing-the-test-automation-pyramid-30cf9c8d8a3c) 115 | - [How to test your mobile apps efficiently? A five-level pyramid testing strategy](https://medium.com/@BIT_OFIT/how-to-test-efficiently-your-mobile-apps-68be944331ee) 116 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "start-testing", 3 | "version": "0.3.2", 4 | "description": "A crowdsourced testing course, written by testers for testers wannabes.", 5 | "homepage": "https://github.com/dialex/start-testing", 6 | "bugs": { 7 | "url": "https://github.com/dialex/start-testing/issues" 8 | }, 9 | "repository": { 10 | "type": "git", 11 | "url": "git+https://github.com/dialex/start-testing.git" 12 | }, 13 | "license": "SEE LICENSE IN LICENSE.txt", 14 | "author": "Diogo Nunes (https://diogonunes.com/)", 15 | "scripts": { 16 | "bump:patch": "npm version --no-commit-hooks --no-git-tag-version patch", 17 | "bump:time": "open -t ./time-counter.txt", 18 | "c": "npm run commit", 19 | "commit": "npx git-cz", 20 | "ci:pipeline": "npm-run-all lint:markdown lint:spelling", 21 | "deps:clean": "rm -f package-lock.json && rm -rf node_modules", 22 | "deps:install": "npm ci", 23 | "deps:up": "npx npm-check-updates -u && npm audit fix", 24 | "fix": "npm-run-all fix:*", 25 | "fix:spelling": "mdspell 'docs/**/*.md' --en-gb --ignore-numbers --ignore-acronyms", 26 | "fix:ellipsis": "find docs -type f -name '*.md' | xargs sed -i '' 's/\\.\\.\\./…/g'", 27 | "fix:emdash": "find docs -type f -name '*.md' | xargs sed -i '' 's/ -- / — /g'", 28 | "fix:single-quote": "find docs -type f -name '*.md' | xargs sed -i '' \"s/’/'/g\"", 29 | "fix:double-quote-left": "find docs -type f -name '*.md' | xargs sed -i '' 's/“/\"/g'", 30 | "fix:double-quote-right": "find docs -type f -name '*.md' | xargs sed -i '' 's/”/\"/g'", 31 | "lint": "npm-run-all lint:*", 32 | "lint:markdown": "markdownlint docs --config docs/.markdownlint.json --ignore docs/_coverpage.md", 33 | "lint:spelling": "mdspell 'docs/**/*.md' -r --en-gb --ignore-numbers --ignore-acronyms", 34 | "lint:broken-links": "remark -u validate-links docs --ignore-path .remarkignore --frail", 35 | "pr:prepare": "npm-run-all fix lint", 36 | "pr:preview": "npm-run-all --parallel serve preview", 37 | "pr:ready": "npm run bump:patch && npm run bump:time && git commit -m 'chore: bump up version' .", 38 | "preview": "echo \"Opening a preview on your browser now.\" && sleep 1 && open http://localhost:3000/#/", 39 | "serve": "docsify serve docs", 40 | "test": "echo 'Aborting: no tests found. 
(see https://github.com/dialex/start-testing/issues/70)' && exit 1" 41 | }, 42 | "config": { 43 | "commitizen": { 44 | "path": "./node_modules/cz-emoji" 45 | }, 46 | "cz-emoji": { 47 | "types": [ 48 | { 49 | "name": "chore", 50 | "description": "Small and boring change required for something else", 51 | "emoji": "🤖", 52 | "code": "🤖" 53 | }, 54 | { 55 | "name": "diverge", 56 | "description": "Add new information", 57 | "emoji": "🔀️", 58 | "code": "🔀️" 59 | }, 60 | { 61 | "name": "converge", 62 | "description": "Summarise current information", 63 | "emoji": "♻️", 64 | "code": "♻️" 65 | }, 66 | { 67 | "name": "format", 68 | "description": "Improve lint or style", 69 | "emoji": "🎨", 70 | "code": "🎨" 71 | }, 72 | { 73 | "name": "release", 74 | "description": "Create a release commit", 75 | "emoji": "🚚", 76 | "code": "🚚" 77 | }, 78 | { 79 | "name": "fix", 80 | "description": "Fix a bug", 81 | "emoji": "🐞", 82 | "code": "🐞" 83 | }, 84 | { 85 | "name": "feat", 86 | "description": "Introduce a new feature", 87 | "emoji": "⭐️", 88 | "code": "⭐️" 89 | }, 90 | { 91 | "name": "ci", 92 | "description": "Change CI or build process", 93 | "emoji": "🛠", 94 | "code": "🛠" 95 | }, 96 | { 97 | "name": "doc", 98 | "description": "Update documentation", 99 | "emoji": "📘", 100 | "code": "📘" 101 | } 102 | ] 103 | } 104 | }, 105 | "devDependencies": { 106 | "markdown-spellcheck": "^1.3.1", 107 | "markdownlint-cli": "^0.37.0", 108 | "npm-run-all": "^4.1.5", 109 | "remark-cli": "^12.0.0", 110 | "remark-lint": "^9.1.2", 111 | "remark-validate-links": "^13.0.0" 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /time-counter.txt: -------------------------------------------------------------------------------- 1 | # minutes 2 | 3 | 100 4 | 115 5 | 90 6 | 60 7 | 60 8 | 120 9 | 70 10 | 100 11 | 140 12 | 1680 13 | 120 14 | 1000 15 | 260 16 | 120 17 | 30 18 | 60 19 | 550 20 | 140 21 | 40 22 | 170 23 | 54: 60 24 | 85: 300 25 | 51: 410 26 | 5: 535 27 | 4: 510 28 | 15: 535 29 | 7: 800+80 30 | 12: 1100 31 | 102: 1000 32 | 6: 430 33 | 13: 1120 34 | --------------------------------------------------------------------------------