├── static
├── icon.png
└── img
│ ├── preview.png
│ ├── preview-dark.png
│ ├── sflc.svg
│ ├── logo-dark.svg
│ └── logo-light.svg
├── vue-shim.d.ts
├── .gitignore
├── content
├── settings.json
└── en
│ ├── index.md
│ ├── getting-started
│ ├── values.md
│ ├── what-is.md
│ ├── why-important.md
│ └── how-to.md
│ ├── feedback-loops
│ ├── overview.md
│ ├── day-in-the-life.md
│ └── developer-experience.md
│ ├── resources.md
│ ├── time
│ ├── overview.md
│ ├── interruptions-fragmentation.md
│ └── improving-flow.md
│ └── building
│ ├── reviewing.md
│ ├── overview.md
│ └── ci.md
├── tailwind.config.js
├── nuxt.config.js
├── package.json
├── tsconfig.json
├── README.md
├── CODE_OF_CONDUCT.md
├── components
└── app
│ └── AppHeader.vue
└── LICENSE.md
/static/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/node/ee-handbook/main/static/icon.png
--------------------------------------------------------------------------------
/static/img/preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/node/ee-handbook/main/static/img/preview.png
--------------------------------------------------------------------------------
/vue-shim.d.ts:
--------------------------------------------------------------------------------
1 | declare module "*.vue" {
2 | import Vue from 'vue'
3 | export default Vue
4 | }
--------------------------------------------------------------------------------
/static/img/preview-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/node/ee-handbook/main/static/img/preview-dark.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Nuxt dev/build outputs
2 | .output
3 | .nuxt
4 | sw.js
5 | dist
6 | # Node dependencies
7 | node_modules
8 | # System files
9 | *.log
10 |
--------------------------------------------------------------------------------
/content/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Engineering Effectiveness Handbook",
3 | "url": "https://www.okayhq.com/handbook",
4 | "logo": {
5 | "light": "img/logo-light.svg",
6 | "dark": "img/logo-dark.svg"
7 | },
8 | "github": "OkayHQ/ee-handbook",
9 | "twitter": "@OkayHQ",
10 | "defaultDir": ""
11 | }
--------------------------------------------------------------------------------
/tailwind.config.js:
--------------------------------------------------------------------------------
1 | const defaultTheme = require('tailwindcss/defaultTheme')
2 |
3 | module.exports = {
4 | theme: {
5 | extend: {
6 | colors: {
7 | primary: {
8 | 100: '#ceddff',
9 | 500: '#306eff',
10 | 900: '#ceddff'
11 | },
12 | gray: {
13 | ...defaultTheme.colors.gray,
14 | 900: '#12263f'
15 | }
16 | },
17 | spacing: {
18 | 2: '0.125rem'
19 | }
20 | }
21 | }
22 | }
--------------------------------------------------------------------------------
/nuxt.config.js:
--------------------------------------------------------------------------------
1 | import theme from '@nuxt/content-theme-docs'
2 |
3 | export default theme({
4 | buildModules: ['@nuxt/typescript-build', '@nuxtjs/google-analytics'],
5 | router: {
6 | base: '/handbook/'
7 | },
8 | i18n: {
9 | baseUrl: '/handbook'
10 | },
11 | build: {
12 | transpile: [/^vue-github-button/]
13 | },
14 | env: {
15 | eventsEndpoint: process.env.EVENTS_ENDPOINT
16 | },
17 | googleAnalytics: {
18 | id: process.env.GOOGLE_ANALYTICS_ID
19 | }
20 | })
21 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ee-handbook",
3 | "version": "1.0.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "nuxt",
7 | "build": "nuxt build",
8 | "start": "nuxt start",
9 | "generate": "nuxt generate"
10 | },
11 | "dependencies": {
12 | "@nuxt/content-theme-docs": "^0.11.0",
13 | "axios": "^0.26.1",
14 | "nuxt": "^2.15.8",
15 | "vue-github-button": "^1.3.0"
16 | },
17 | "devDependencies": {
18 | "@nuxt/types": "^2.15.8",
19 | "@nuxt/typescript-build": "^2.1.0",
20 | "@nuxtjs/google-analytics": "^2.4.0"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES2018",
4 | "module": "ESNext",
5 | "moduleResolution": "Node",
6 | "lib": [
7 | "ESNext",
8 | "ESNext.AsyncIterable",
9 | "DOM"
10 | ],
11 | "esModuleInterop": true,
12 | "allowJs": true,
13 | "sourceMap": true,
14 | "strict": true,
15 | "noEmit": true,
16 | "baseUrl": ".",
17 | "paths": {
18 | "~/*": [
19 | "./*"
20 | ],
21 | "@/*": [
22 | "./*"
23 | ]
24 | },
25 | "types": [
26 | "@types/node",
27 | "@nuxt/types"
28 | ]
29 | },
30 | "exclude": [
31 | "node_modules"
32 | ]
33 | }
--------------------------------------------------------------------------------
/content/en/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Introduction
3 | description: ''
4 | position: 1
5 | category: ''
6 | ---
7 | Welcome to the Engineering Effectiveness Handbook! We are the team of [engineering leaders](https://www.okayhq.com/about) behind [Okay](https://www.okayhq.com) and the authors of the first version of this handbook.
8 |
9 | After talking to hundreds of engineering leaders, we know 2 things:
10 | * the vast majority of engineering teams want to become more effective
11 | * but not everyone knows where to start
12 |
13 | This handbook intends to fill that gap. Our goal is to build the **first practical and actionable guide to engineering effectiveness** for engineering leaders.
14 |
15 | We decided to give this work to the community under CC-BY-SA and we welcome contributions, which you can provide in three main ways:
16 | * adding links to our [curated list of engineering effectiveness resources](resources)
17 | * contributing a chapter or an entire section to the handbook
18 | * improving an existing part of the handbook
19 |
20 | We hope you'll enjoy reading this handbook!
21 |
22 |
--------------------------------------------------------------------------------
/content/en/getting-started/values.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Values and Assumptions
3 | description: ''
4 | position: 3
5 | category: Getting Started
6 | ---
7 | To help the reader understand where we are coming from, we want to clarify several assumptions, or values, that form the background of everything else in the handbook:
8 |
9 | ## Measurement is good
10 | Engineering productivity can [indeed be measured](https://www.okayhq.com/blog/engineering-productivity-can-be-measured), as long as it is mainly focused on understanding bottlenecks and blockers, instead of measures of output like lines of code.
11 |
12 | ## No surveillance
13 | Engineering metrics should never be part of the performance evaluation process. In particular, trying to measure individual contributors’ performance is not only a futile exercise, but it creates a climate of surveillance that is actively hurting productivity.
14 |
15 | ## Transparency
16 | While engineering productivity is more of a manager’s concern, we believe that the data should be freely available and discussed transparently, regardless of levels or titles. In our experience, real change only happens when everyone has access to the same information.
17 |
18 | ## Managerial accountability and courage
19 | Managers should be held accountable for building productive teams. In particular, managers should both provide a system to measure productivity and lead the changes that need to happen. In some cases, changes may be complex to implement and require courage to push through the status quo.
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | Welcome to the [Engineering Effectiveness Handbook](https://www.okayhq.com/handbook/)! We are the team of [engineering leaders](https://www.okayhq.com/about) behind [Okay](https://www.okayhq.com) and the authors of the first version of this handbook.
4 |
5 | After talking to hundreds of engineering leaders, we know 2 things:
6 | * the vast majority of engineering teams want to become more effective
7 | * but not everyone knows where to start
8 |
9 | This handbook intends to fill that gap. Our goal is to build the **first practical and actionable guide to engineering effectiveness**.
10 |
11 | We decided to give this work to the community under CC-BY-SA and we welcome contributions, which you can provide in three main ways:
12 | * adding links to our [curated list of engineering effectiveness resources](./content/en/resources.md)
13 | * contributing a chapter or an entire section to the handbook
14 | * improving an existing part of the handbook
15 |
16 | All content is in [Markdown](https://www.markdownguide.org/getting-started/#what-is-markdown) files under the [📁 content](./content/en) folder.
17 |
18 | We hope you'll enjoy reading this handbook!
19 |
20 |
21 | # Run Locally
22 |
23 | ## Setup
24 |
25 | Install dependencies:
26 |
27 | ```bash
28 | yarn install
29 | ```
30 |
31 | ## Development
32 |
33 | ```bash
34 | yarn dev
35 | ```
36 |
37 | ## Static Generation
38 |
39 | This will create the `dist/` directory for publishing to static hosting:
40 |
41 | ```bash
42 | yarn generate
43 | ```
44 |
45 | To preview the static generated app, run `yarn start`
46 |
47 | For detailed explanation on how things work, checkout [nuxt/content](https://content.nuxtjs.org) and [@nuxt/content theme docs](https://content.nuxtjs.org/themes-docs).
48 |
--------------------------------------------------------------------------------
/content/en/feedback-loops/overview.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Overview
3 | description: ''
4 | position: 40
5 | category: Managing feedback loops
6 | ---
7 | Feedback loops are the **set of tasks that an engineer does repeatedly** to deliver software. Examples of feedback loops include everything from how long an IDE takes to do mini-tasks like code autocomplete to larger tasks like how long it takes an engineer to launch a new service in production.
8 |
9 | How well these loops perform **creates the foundation for how effective your organization can be**. A feedback loop taking longer or being more painful to perform has the impact of both reducing the frequency of times it’s used as well as slowing down delivery of software.
10 |
11 | For example, if running unit tests locally on a developer laptop takes 30 minutes instead of 5 seconds, developers are less likely to write new tests and to run tests locally. In this case, the impact of the poorly performing feedback loop is **slower coding and lower quality code** in the long term.
12 |
13 | ## Factors that matter for Feedback Loops:
14 | - **Frequency**: Some activities are performed by developers 100s of times a day (IDE autocomplete), and others are typically performed less than once a year (onboard to a new team).
15 | - **Latency**: How long feedback loops take can vary widely — some are measured in milliseconds or seconds, while others take on the order of weeks
16 | - **Usability**: How easy it is to perform the activity
17 | - **Purpose**: Some loops exist for the purpose of improving code quality (code reviews) while others exist to enable you to write and ship code (deploying a new version of a service)
18 |
19 | Calculating the cost of a feedback loop is not as simple as multiplying the frequency times the latency. The cost of latency typically follows step function increases:
20 |
21 | - **Less than 5 seconds** - Stay on the same screen and wait
22 | - **5 seconds - 1 minute** - Do a quick Slack check and come back (return in 5 minutes)
23 | - **1 minute - 10 minutes** - Go grab coffee (return in 15 minutes)
24 | - **10 minutes - 1 hour** - Come back to it sometime later (return in ~ 4 hours)
25 | - **\> 1 hour** - Check in next day
26 |
27 | Seemingly small changes in latency can lead to cascading downstream effects to the iteration velocity for an engineer, and for the team as a whole. On the flip side, a huge investment in a loop may not yield a positive return on investment until it materially breaks through one of those thresholds.
--------------------------------------------------------------------------------
/content/en/resources.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Resources
3 | description: ''
4 | position: 50
5 | category: Going further
6 | ---
7 | This is a collection of online and offline resources related to Engineering Effectiveness.
8 | ## Slack Community
9 | Join our Slack Community about [Engineering Analytics](https://join.slack.com/t/eng-analytics/shared_invite/zt-1i8emziud-nmGDL~h2bNrZMyu9idzqNg)
10 | ## Articles
11 | * [Maximizing Developer Effectiveness](https://martinfowler.com/articles/developer-effectiveness.html): an article focused on the notion of developer feedback loops
12 | * [The SPACE of Developer Productivity](https://queue.acm.org/detail.cfm?id=3454124): reference article for the SPACE framework
13 | * [State of Devops 2019](https://services.google.com/fh/files/misc/state-of-devops-2019.pdf): research behind the DORA metrics
14 | * [What is Developer Experience?](https://redmonk.com/jgovernor/2022/02/21/what-is-developer-experience-a-roundup-of-links-and-goodness/): roundup of tactics to improve developer experience
15 |
16 | ## Books
17 | * [Accelerate: The Science of Lean Software and DevOps: Building and Scaling High Performing Technology Organizations](https://www.amazon.com/Accelerate-Software-Performing-Technology-Organizations/dp/1942788339): by the researchers behind the DORA Metrics
18 | * [The Goal: A Process of Ongoing Improvement](https://www.amazon.com/Goal-Process-Ongoing-Improvement/dp/0884271951): a book about the theory of constraints in the context of manufacturing, which can apply to some aspects of the software development life cycle.
19 | * [The Phoenix Project](https://www.amazon.com/Phoenix-Project-DevOps-Helping-Business/dp/1942788290/): a novel about how devops transformations can take inspiration from manufacturing.
20 | * [The Unicorn Project](https://www.amazon.com/Unicorn-Project-Developers-Disruption-Thriving/dp/1942788762/): a novel about how to transform a low effectiveness engineering team.
21 | * [Team Topologies](https://teamtopologies.com/book): a practical, step-by-step, adaptive model for organizational design and team interaction.
22 |
23 | ## Podcasts
24 | * [Why are there so many bad bosses](https://open.spotify.com/episode/2g0BmsofzZQGVWSHRAuf4V?si=5eadab637a584f71): a Freakonomics episode about the relationship between productivity, engagement, and having a bad/good boss
25 |
26 | ## Tools
27 | * [Google Four Keys](https://github.com/GoogleCloudPlatform/fourkeys): an open-source project by Google to compute DORA Metrics
28 | * [Devops Capabilities Guide](https://cloud.google.com/architecture/devops/capabilities): Actionable guide to improve DORA Metrics
--------------------------------------------------------------------------------
/content/en/feedback-loops/day-in-the-life.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Day in the life of an engineer
3 | description: ''
4 | position: 41
5 | category: Managing feedback loops
6 | ---
7 |
8 | To fully internalize feedback loops, it is useful to imagine the day in the life of an engineer in different cultures. Inefficiencies in feedback loops stack on top of each other and can lead to hours and days where an engineer or team can't make any forward progress on their plan.
9 |
10 | ## Poor effectiveness culture
11 |
12 | Imagine a world where a typical day for an engineer looks like this:
13 |
14 | - 12am-8am: Paged after hours and lost a few hours of sleep responding to incidents
15 | - 9am: Wake up and go to stand up
16 | - 9:30AM - 12PM: Review new P0/P1 Bugs and realize they were assigned to your team but belong to others
17 | - Skip lunch
18 | - 12PM - 2PM: Two back-to-back 1 hour interviews
19 | - 2PM - 3PM: Frontend council meeting
20 | - 3-4PM: Catch up on slack updates + remind team members about PR code reviews you submitted last week that aren't reviewed
21 | - 4PM: Take a look at the backlog, but too worn out, hungry and tired from the day to start on anything meaningful
22 |
23 |
24 | ## High effectiveness culture
25 |
26 | Now, imagine a world where the typical day for an engineer looks like this:
27 |
28 | - 9AM: Standup
29 | - 9:15AM: Take on a meaty backlog item and spend 3 hours working on it. Submit it for code review
30 | - 12:15PM: Grab lunch
31 | - 1PM: Respond to morning code review requests
32 | - 2PM: Address code review comments, Merge and deploy change
33 | - 3-5PM: Start on design doc for new feature (2 hours)
34 |
35 |
36 | Engineers in highly effective cultures not only have more time, they are also more motivated and engaged, which leads to building better software.
37 |
38 | ## What's at stake
39 |
40 | There are many consequences to a low effectiveness culture where the feedback loops have not been managed.
41 |
42 | - **Poor developer experience**: Morale will suffer as engineers get more and more frustrated with not being able to do their job
43 | - **Lower quality code**: Engineers will run and write tests less often because they are hard to create or take too long to run
44 | - **Slower delivery**: The accumulation of delays on the things developers have to do regularly stack up and result in slower progress
45 | - **Difficulties hiring top talent**: Over time, the top engineers will move on, your engineering brand will suffer, and it will get harder to convince top talent to join as they will anticipate pain points around your culture.
46 | - **Attrition**: Eventually, engineers will leave the company out of frustration. They wanted to deliver high quality code and have an impact on customers but too many things got in their way.
--------------------------------------------------------------------------------
/content/en/time/overview.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Overview
3 | description: ''
4 | position: 20
5 | category: Managing time
6 | ---
7 |
8 | Time is arguably the **most important input to manage to create high performing software teams**. To deliver successfully, a software team needs to correctly leverage time, scope and people.
9 |
10 | - **Time** is the total time your team spends on a problem.
11 | - **Scope** is the functionality of the completed work.
12 | - **People** are a combination of the number of people and the skills they bring to the table.
13 |
14 | What teams do with their time is the primary factor in determining its output. Even if you hire the best engineers, if they spend all their time on the wrong things, or they’re constantly interrupted while trying to do the right things, the team won’t succeed.
15 |
16 | Because software engineering is a creative task, **total time spent is not enough to ensure efficiency or success**. Fragmented chunks of 5 minutes may mean that the project never gets done. For most engineers, that’s not even enough time to make meaningful progress on a simple bug fix.
17 |
18 | Software projects typically require engineers to have several concepts and context in their memory to make forward progress. A task might require you to read a ticket with requirements, look at log output, read related code before you're able to start making changes. Any interruption causes an expensive context switch that forces you to start this whole process over.
19 |
20 | Flow state or being *in the zone* means having no worries about imminent interruptions - you’re fully in the moment and at your most productive. Fragmentation and interruptions kill flow state because context switching is so expensive for engineers. This state is typically where the most work gets done and it’s when the hardest problems can get solved.
21 |
22 | Because it takes time to get enough context to make forward progress on a task, we’ve found that the **minimum unit of time that allows for flow state is 2 hours**. Two hours of uninterrupted time is called Maker Time. It is certainly possible to achieve flow state in shorter periods of time but that’s typically an exception. Periods longer than 2 hours can also lead to higher quality flow states.
23 |
24 | Fragmentation is when you have a poorly organized calendar or work schedule where you’re not able to create long periods of open space to do work.
25 |
26 | > A day where you have eight 30 minute meetings spread equally once an hour is going to be significantly less productive than a day where those 8 meetings happen from 8am-12pm and you have the rest of the afternoon to do focused work. Maximizing the percentage of Maker Time in a week is an important priority for software teams.
27 |
28 | We’ve found that **the best teams are able to achieve 70% or higher maker time** during a work week for their engineers.
29 |
30 | As a manager, it can be hard to remember what it was like to need primarily Maker Time to succeed because a manager’s role can be significantly more interrupt-driven. Paul Graham summarizes the differences between [manager and maker schedules](http://www.paulgraham.com/makersschedule.html). To be most effective engineers require large blocks of uninterrupted time and for them any interruptions are disproportionately costly.
--------------------------------------------------------------------------------
/content/en/time/interruptions-fragmentation.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Causes of Context Switching
3 | description: ''
4 | position: 21
5 | category: Managing time
6 | ---
7 |
8 | Context switching is very expensive and disruptive for engineers. There are two main types of context switching an engineer experiences regularly: **interruptions and calendar fragmentation**.
9 |
10 | Interruptions are typically unplanned or unexpected triggers that cause an engineer to turn their attention to another task which breaks their flow.
11 |
12 | Calendar fragmentation is when the required scheduled activities an engineer needs to attend are not organized efficiently on their calendar.
13 |
14 | There are many sources of interruptions and calendar fragmentation that engineers deal with on a regular basis.
15 |
16 | ## Interruptions
17 | - **Pages**: Incidents can happen at any time and require team members to step in. Even false alerts can distract them from the task that they were doing.
18 | - **Slack/Email/Tap on the shoulder/Etc**: Communication during the day can lead to interruptions, especially if the expectation is to be immediately responsive to a message.
19 | - **Slow tooling**: If the build, test or other tools they need to accomplish a task are too slow, engineers will generally use the time to do something else, which could take them out of the focused flow state and cost an expensive and potentially longer than intended context switch.
20 |
21 | ## Calendar Fragmentation
22 | - **High Meeting Load / Meeting culture**: This is typically one of the biggest causes of low maker time for engineers. There are many reasons why a high meeting culture develops: low accountability environments, unclear decision making processes, lack of trust, high politics, poor meeting hygiene and many other factors can contribute to higher background meeting load.
23 | - **Interview load**: High interview load can be caused by unbalanced interviews where not enough people are trained or people are not fairly assigned to interviews. This can also be a deliberate cost of a fast growing team.
24 | - **Lack of deliberate scheduling**: As described in the example above, the same total meeting time can be organized in different ways that significantly impacts the total available Maker Time for a person.
25 |
26 | Carefully minimizing these two types of context switching categories for your team is an extremely high leverage investment. If you make a meaningful dent, your team will notice and you will have more and more impact. If you don’t, they will give you feedback and eventually either burn out or leave.
27 |
28 | ## Example Metrics
29 | *Metrics are useful for trending at the team or organization level*
30 | |Metric|When to use|
31 | |---|---|
32 | |Maker Time Percentage| What percent of an engineer or team's time consists of uninterrupted 2 hour or longer blocks of time|
33 | |Friction Time Percentage|What percent of an engineer or team's time is made up of < 2 hour gaps between meetings that are hard to utilize|
34 | |Meeting Time Percentage|What percent of an engineer or team's time is made up of meetings|
35 | |Interview distribution|How much time is each engineer spending on interviews|
36 | |After-hour/Working hours Pages|How many pages is the team acting on both during and outside of working hours|
37 | |Slack support/on-call channel activity|How many requests are coming in on the team's on-call or support channel|
38 |
--------------------------------------------------------------------------------
/content/en/getting-started/what-is.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: What is Engineering Effectiveness?
3 | description: ''
4 | position: 1
5 | category: Getting Started
6 | ---
7 | Software Engineering is hard to reduce to well-known patterns of management.
8 |
9 | Most industries have followed recognizable evolutions: from nascent technologies to empirical methods of management to fully scalable operations. The reason this happens over and over again is the pursuit of economic growth, which is inherently tied to making profits.
10 |
11 | Software companies are subject to the same economic forces and they are currently transitioning from empirical notions of productivity to a more scientific approach. This is mainly driven by the fact that almost every company is becoming a software company.
12 |
13 | **There remains a challenge though: how do we define productivity in software?**
14 | ## Software is different
15 | In the classic sense, productivity is the ratio of outputs to inputs: people try to make and sell more of the same thing at a cheaper cost to themselves. However, measuring the output of a development organization is *notoriously hard*.
16 |
17 | For example, the idea of measuring lines of code as a productivity proxy has long been identified as a dead-end. Attempts such as measuring agile sprint points or “number of tickets closed” have never really taken hold. Any engineer knows intuitively why this approach has failed:
18 | - **Computer science relies on symbolic representations.** The number of symbols used to describe a complex algorithm bears little correlation to the value it creates.
19 | - **Software engineering is a social activity** involving many engineers and teams. Human beings don’t like being measured as if they were widgets in a factory, and employers are not in a position to ignore these feelings because of the general scarcity of software engineers.
20 |
21 | As former engineers ourselves, we also think it is counter-productive to try and measure the output of engineers in this way.
22 |
23 | ## Focusing on inputs
24 | This difficulty has led some engineering leaders to give up on trying to understand productivity:
25 |
26 | - In some cases, productivity is deliberately ignored or replaced with qualitative frameworks such as “OKR completion rate”.
27 | - In other cases, “working on the right things” is presented as being superior to improving productivity, which is a false dichotomy.
28 |
29 | The unfortunate consequence is that such organizations unfairly reward story-telling: missing deadlines or moving more slowly than the competition can always be explained away. As a result, the organization becomes more political and the best employees leave for greener pastures.
30 |
31 | **This handbook instead focuses on the other side of the productivity equation: inputs.** Taking into account the realities of software development, we define inputs as anything that affects the workflows of engineers:
32 | * How much available time do individuals have to think and code?
33 | * How good is the suite of developer tools?
34 | * How long does it take to get code reviewed by a peer?
35 | * How easy is it to access documentation?
36 | * How often do engineers receive pages outside of business hours, interrupting their sleep or family life?
37 |
38 | In software engineering, inputs are easier to measure and more actionable than outputs - they should be the focus of productivity. To further illustrate this point, let's look at side-by-side examples:
39 |
40 | |Measure ... (INPUT METRIC)|Don't measure ... (OUTPUT METRIC)|
41 | |--|--|
42 | |The time it takes to do code reviews|The throughput of pull requests|
43 | |The amount of interruptions affecting engineers|How many hours engineers have been working|
44 | |The amount of unplanned work affecting a sprint|Sprint points burned by the team|
45 |
46 |
47 |
48 | In the rest of this handbook, we will thus define engineering effectiveness as **the optimization of inputs, i.e., the removal of bottlenecks and inefficiencies in the life of an engineering team.**
49 |
50 | For simplicity, we will also use the words productivity and effectiveness interchangeably.
51 |
52 |
53 |
54 |
--------------------------------------------------------------------------------
/content/en/time/improving-flow.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Improving Flow
3 | description: ''
4 | position: 22
5 | category: Managing time
6 | ---
7 |
8 | Meeting load, Interviews and Page Load are three of the most common disruptors to flow for engineering teams. They are required activities for well-functioning teams but require careful management to stay efficient. Left unchecked these activities tend to take up increasing amounts of time and become more costly for the productivity of your team.
9 |
10 | ## Best practices to reduce fragmentation and meeting load
11 |
12 | - **Align 1/1s to the team's schedule instead of the manager's:** As a manager, there's always the temptation to batch your 1/1s into chunks so that they work well for your schedule. 1/1s can be a big source of calendar fragmentation on a given day if they’re not carefully scheduled. They also typically happen every week so getting these right can have recurring benefits.
13 |
14 | - **Increase team awareness and autonomy around managing meetings:** Sharing data with teams about the cost of meetings and giving them the support and autonomy to move, change, or cancel meetings is critical to a healthy meeting culture that encourages Maker Time. Once the team understands the cost of context switching, and has your support to improve it, they will take action.
15 |
16 | - **Optimize ad-hoc meeting scheduling:** Typically, 20% of meetings are scheduled last minute (a few days or less in advance) and many of them are organized by the team itself. The best time to schedule meetings is adjacent to existing meetings (or at the beginning/end of a day). This minimizes the chance that it will break a Maker time block for one of the attendees.
17 |
18 | ## Best practices to reduce Page Load
19 |
20 | - **Assess the signal-noise ratio of every alert your team receives.** Cut the noisy alerts aggressively. For those teams that are in central parts of the stack (e.g. database), you might need to create much larger rotations and train more people. The volume of pages might be naturally higher due to the sheer scale of these services.
21 |
22 | - **Prioritize solving the root cause behind the instability of your services.** Quantifying the page load of the team both in terms of work interruptions and work-life balance will make it easier to prioritize root cause investments. By slowing down to fix root causes your team will move faster and more sustainably in the long term.
23 |
24 | - **Establish goals around a maximum number of pages / week for team members.** Make yourself accountable to meet these goals, not the engineers.
25 |
26 | ## Best practices to reduce Interview Load
27 |
28 | - Make sure every interview question has **enough trained interviewers**.
29 |
30 | - Make **interview training** part of your long-term onboarding program (typically 3-6 months after joining).
31 |
32 | - Establish an **upper bound of interviews/week** per person (usually 2 or 3 max.).
33 |
34 | - Include **interview participation** in performance reviews.
35 |
36 |
37 | ## Identifying other sources of context switching
38 |
39 | High meeting load, interviews and paging are some of the most common causes of context switching that affect software teams today. However, there are many other types of interruptions out there, so the ability to determine the highest leverage problem affecting your team is important. Here’s a framework you can use to discover what to focus on:
40 |
41 | - **Collect the data**: Metrics, Surveys, 1/1s, Sprint retrospectives, exit interviews are all great ways to collect data on what factors are most impacting your team’s ability to spend time in an effective way
42 |
43 | - **Involve the team** in exploring the solutions that will have the most impact
44 |
45 | - **Set a goal** that you and your team take on to improve this problem.
46 |
47 | - Rinse and Repeat
48 |
49 | Time is a scarce resource and there will always be headwinds that will chip away at your team’s time. If you hire a new engineer, time will be needed to train them and get them up to speed, coordination costs will also increase. If an engineer leaves, on-call/interview load is shared among fewer people. Tooling tends to slow down as the software stack gets larger and more complex.
--------------------------------------------------------------------------------
/content/en/getting-started/why-important.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Why is Engineering Effectiveness important?
3 | description: ''
4 | position: 2
5 | category: Getting Started
6 | ---
7 | The importance of engineering effectiveness is best understood through the *lack of it*, which we will try to explain here. Engineering leaders manage many competing priorities, among which:
8 | - delivering features
9 | - hiring towards ambitious growth goals
10 | - managing rising infrastructure costs
11 |
12 | The speed at which tech companies move can also make it easy to fall into a reactive mode of management, or “fighting fires”. Put another way: the focus is on the *what* - what project to start, what technology to build or buy - at the detriment of the *how* - the construction of a resilient “engine” for the engineering team.
13 |
14 | When this mode of operations becomes the cultural norm inside the organization, dysfunctions appear quickly. Let’s illustrate this from the points of view of various actors.
15 |
16 | ## From the point of view of other executives and the board of directors
17 |
18 | When an engineering team is not productive, outside actors notice it through the **lack of output** and in the form of **catastrophic events**.
19 |
20 | *Lack of output* usually creates a sentiment that “engineering is a black box”, with executives or board members trying to get more directly involved. The CEO might increase the frequency of engineering-focused meetings, ask for metrics and for more detailed reports. This problem usually happens in the growth phase of the company, which is exactly when engineering productivity should be taken seriously.
21 |
22 | *Catastrophic events* are often major production incidents (and subsequent code freezes) or unexpected attrition. We have seen situations where entire sub-departments quit on a single day and code freezes that have lasted for weeks at a time. When several of these signs appear in a short period of time, it indicates an unstable organization, where there is no coherent system or “engine” by which the inputs are transformed into successful projects. Or there might be some system, but not enough attention paid to how productive the system is.
23 |
24 | ## From the point of view of developers
25 |
26 | As we wrote above, developer experience - as in the many workflows and social interactions needed to create working code - is the core input of an engineering team. Working on an unproductive team is painful: pushing new features takes forever, the tooling is inadequate, dubious meetings and on-call pages turn into a never-ending stream of interruptions.
27 |
28 | The [learned helplessness](https://www.okayhq.com/blog/status-quo-is-so-hard-to-change-in-engineering-teams) that permeates the daily experience leads to disengagement, which leads to leaving the company. High performers are disproportionately affected by this type of issue and will self-select out of unproductive organizations.
29 |
30 | In our experience, a **productive team is almost always an engaged team, and vice-versa** - the key, again, is to focus on building an efficient engine free of bottlenecks and annoyances.
31 |
32 | ## From the point of view of the leader themselves
33 |
34 | As former engineering leaders, we’ve had our fair share of awkward moments where we had to acknowledge we could have done better. The dominant emotion in these cases is the impression of **getting blind-sided** by what is happening.
35 |
36 | For example, you may think that you have implemented efficient DevOps practices with healthy on-call rotations for your new service-oriented architecture. One day, you find out through a series of one-on-ones that one of your teams has been woken up at night, every night, for the past 2 weeks because of noisy alerts, with one team member dangerously close to burning out.
37 |
38 | This emotion is actually rooted in an objective loss of control, as it usually happens when the company is scaling fast. What worked with 10 direct reports - anecdotal evidence through one-on-ones and daily stand-ups - just won’t work with an organization of 70.
39 |
40 | This is when building a system of signals and metrics to replace the anecdotal evidence becomes critical.
41 |
42 | To conclude, measuring and improving engineering effectiveness is a precondition to building a healthy engineering team: attracting talent, building an efficient system where they can produce their best work, and retaining that talent. The score - i.e. the features and projects - will take care of itself, as long as you pay attention to removing bottlenecks faced by your team.
43 |
44 |
45 |
46 |
47 |
--------------------------------------------------------------------------------
/content/en/feedback-loops/developer-experience.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Improving Developer Experience
3 | description: ''
4 | position: 42
5 | category: Managing feedback loops
6 | ---
7 |
8 | Understanding which aspect of an engineer's life feels most painful is an important input in determining which feedback loops to investigate and address. The approach is very different when the CS team is overloaded with customer bug reports versus when the engineering team is frustrated that it can't ship code fast enough.
9 |
10 | There are three main types of feedback loops:
11 |
12 | * **Quality** - all the flows that engineers use to make sure they are building code that meets the requirements of their customers
13 | * **Delivering code** - all the flows engineers use while writing and modifying code.
14 | * **Process** - all the checklists or meetings engineers are required to attend as part of their job responsibilities
15 |
16 | Each category has its own set of feedback loops that can be evaluated and optimized to improve the effectiveness of the team. Below are examples of common loops across each category.
17 |
18 | ## Quality Feedback Loops
19 | |Loop|Definition|
20 | |---|---|
21 | |Writing a new test| The ability for engineers to write and run tests to make sure their code works (unit, integration, end to end, etc)|
22 | |Code reviews| Code reviews are a mechanism for engineers to review each other's code for a variety of concerns (quality, security, design, etc)|
23 | |Responding to page| How effectively an engineer is able to troubleshoot a service|
24 |
25 | ## Delivering Code Feedback Loops
26 | |Loop|Definition|
27 | |---|---|
28 | |Common IDE/CLI commands| The list of most commonly used IDE features and CLI commands (commit, ide autocomplete, linting, hot-reloading) |
29 | |Finding documentation| Ability for engineer to jump into code and understand what is happening well enough to start changing it|
30 | |Building a service|How long does it take to build a service|
31 | |Creating a new service|How long does it take an engineer to create a Hello World new service|
32 | |Deploying a change|Once a change is *ready*, how quickly can an engineer deploy it to customers|
33 |
34 | ## Process Feedback Loops
35 | |Loop|Definition|
36 | |---|---|
37 | |Postmortems| The system your team uses to make sure you learn from serious incidents and prevent them from happening again|
38 | |Agile/Scrum| The regular rhythm your team uses to plan, reflect and status on the work it produces|
39 | |Onboarding to a new team| What does a new hire or an engineer switching teams need to do to become productive|
40 | |On-call|What process does your team follow to address unplanned critical incidents that affect the team|
41 |
42 | ## Optimizing feedback loops
43 |
44 | Optimizing feedback loops to improve productivity typically follows its own loop:
45 | - Catalog all your existing feedback loops with an understanding of their frequency, latency, usability, and purpose.
46 | - Create a prioritization framework that weighs these factors in a way that is consistent with your values and culture.
47 | - Create an action plan to prioritize.
48 |
49 | Remember that sometimes, **the right choice can be to remove the loop altogether**. For example, if you have a centralized approval committee that requires every engineer to present a design for every new change, evaluating and measuring whether this process loop is accomplishing its purpose and worth the ROI is important.
50 |
51 | Loops are important to analyze and measure because many of them are frequently overlooked. It’s very tempting to ignore and delay indefinitely working on slow build times and continue to prioritize new features instead. Over time **too many poor loops can lead to the feeling of death by a thousand cuts**.
52 |
53 | Finally, it's important not to only rely on hard metrics. No matter how thorough and thoughtful you are with collecting metrics, it's critical to capture qualitative feedback to avoid blind spots or incorrect assumptions in your framework. This can be done through surveys, 1/1s, retrospectives and exit interviews.
54 |
55 | ## Creating a culture of optimizing feedback loops
56 |
57 | It’s easy for management to get disconnected from the pain induced by feedback loops because by definition, the more senior a manager is, the more removed they are from having to deal with loops, and the closer they are to the pressure of delivering new code.
58 |
59 | **Great managers are able to create systems to stay connected** to the pain and build a culture where feedback loops are continuously surfaced, prioritized and improved.
60 |
61 | ## Example Metrics
62 | *Metrics are useful for trending at the team or organization level*
63 | |Metric|When to use|
64 | |---|---|
65 | |Deploy Time| How long does it take to Deploy a change to production|
66 | |Tooling Latency| How long do developer laptop commands take to run for local build, test, etc|
67 | |Time to resolve incident| How long does it take to resolve an incident|
--------------------------------------------------------------------------------
/content/en/building/reviewing.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Reviewing
3 | description: ''
4 | position: 32
5 | category: Managing building activities
6 | ---
7 | Reviewing code is a human-intensive and deeply social part of the development process. Consequently, it is also one of the most time consuming and harder to optimize - you cannot “throw more raw power” at this problem.
8 |
9 | In most companies, the process goes like this:
10 | - a developer uploads a coherent set of changes (Commit, Pull Request) to a central place.
11 | - they ask one or more other developers to review the code for bugs and coding standards.
12 | - reviewers can issue comments and send the change back to the author.
13 | - this exchange may go back and forth, until reviewers approve the change.
14 | - the code is then checked into the main line.
15 |
16 | ## Common sources of lengthy code reviews
17 |
18 | - **Too many reviewers / diffuse ownership**: this situation happens when there are more than 2 or 3 reviewers assigned on a pull request, potentially through automated systems like automated “code ownership” with collective assignment. As a result, nobody really feels responsible for completing the review and the author waits for too long. A key metric to monitor for this case is the time it takes to do the first action on a review, from the point of view of the author.
19 | - **Unbalanced code review load**: a common situation is to see one or two engineers on a team doing the bulk of the code reviews for everyone. There is usually a good reason: these engineers happen to be more senior, or they are more tenured and know the codebase better. The side effect is that these reviewers become a bottleneck for the team and burn out under the load. The same problem can appear on a larger scale, with entire teams becoming bottlenecks for the rest of the organization - for example, a single DevOps team is supposed to review every infrastructure change. A key metric to monitor is the distribution of code review on a per reviewer basis, aggregate by individual or by sub-teams. This will enable you to quickly spot wild unbalances.
20 | - **Large changes**: when changes reach a certain size in terms of modified lines, reviewers get understandably less motivated to review them. In our experience working with many companies and codebases, we have seen large changes create a lower bound on review time in a fairly predictable manner. Our recommendation is to keep a pulse on very large changes by setting a threshold, and understand why they happen. As we shared above, we would not recommend having a blanket rule or maximum size, as it will likely backfire.
21 | - **Inherent complexity or disagreements**: this happens when a change generates a lot of back-and-forth between author and reviewers. It is not necessarily a bad thing, in the sense that it could indicate a well-functioning review process. A good rule of thumb in terms of what to measure is to imagine when a developer would openly complain (e.g. in a 1/1) that the code review is stuck. There is usually an upper bound in terms of number of comments or review loops where most of your engineers would say the change is in trouble - you should try and build a signal based on this.
22 | - **Interruptions**: finally, code reviews can also take a long time when reviewers keep getting interrupted by meetings, interviews, slack messages, oncall, etc. Please see the relevant sections in the handbook!
23 |
24 | ## Best practices to apply when measuring the code review process
25 |
26 | - We recommend choosing a clear point of view when building metrics: are you interested in unblocking the author, or do you want to measure the activity of reviewers? We favor metrics built from the point of view of the author - i.e. metrics that encapsulate the “wait time” of the author
27 | - Given the variety of practices around drafting and using remote branches, we usually start the clock not at review creation time, but when the review actually gets requested. This represents the moment that the author starts waiting.
28 | - Understand the timezones and working hours of participants (authors, reviewers) to avoid noisy metrics
29 |
30 | ## Example Metrics
31 | *Metrics are useful for trending at the team or organization level*
32 | |Metric|When to use|
33 | |---|---|
34 | |End-to-end Review Time|Understand how fast reviews get completed at the Team or Organization level|
35 | |Time to First / Nth Action|Useful as a breakdown of Review Time (for further analysis) or to catch ownership-related issues|
36 | |Distribution of Reviews by Reviewer|Understand whether some reviewers might be overloaded, creating a bottleneck for authors|
37 | |Count of back-and-forth interactions|Understand whether code reviews are surfacing issues that should have been caught earlier (design problem, deeper disagreements)|
38 |
39 | ## Example Lists
40 | *Lists are particularly useful at the team level or for first-line managers*
41 | |List|When to use|
42 | |---|---|
43 | |Reviews that have received no activity so far|Keep a pulse on reviews that are in a pure waiting state|
44 | |Stale PRs: PRs with activity but opened for more than X days|Catch reviews that may be delayed, investigate reasons why|
45 | |Reviews approved but not merged|Ensure that reviews move to the next step of the development life cycle|
46 |
--------------------------------------------------------------------------------
/content/en/getting-started/how-to.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: How to use this handbook?
3 | description: ''
4 | position: 4
5 | category: Getting Started
6 | ---
7 | ## Who is this for?
8 |
9 | We wrote this handbook for **engineering leaders - from first-line managers to CTOs**. To be more specific: while we hope every leader will find value reading it, we think it will best serve you if you are dealing with a median to high level of complexity. Here are examples of such situations:
10 | - *Growth complexity*: A co-founder and CTO leading a team of 30 engineers, with plans to double or triple that team size in the next 12 months
11 | - *Task complexity*: A director of engineering at a unicorn company, potentially leading a brand new “developer enablement” team and trying to measure the impact of tooling projects
12 | - *Scale complexity*: Any manager/director/VP leading or working in an organization of hundreds of engineers.
13 |
14 | In particular, we don’t think introducing metrics and focusing on engineering productivity is very useful for teams of 15 engineers or below (unless, of course, as part of a larger organization). In our experience, these teams and their leaders will be better off “doing things that don’t scale” instead of embarking on premature scaling initiatives.
15 |
16 | Finally, we hope individual contributors will enjoy reading this handbook and will promote our vision of engineering effectiveness based on identifying and removing bottlenecks. We firmly believe that it will help empower developers and that it will lead to more transparent and efficient engineering teams.
17 |
18 |
19 | ## The Explore - Measure - Monitor - Act loop
20 |
21 | If you are already familiar with Application Monitoring, Product Analytics or other intelligence tools, you can apply a similar workflow to engineering effectiveness. We suggest the following 4 steps:
22 | - *Explore*: this phase is all about gaining visibility and establishing baselines by exploring the data. You might already have a hunch as to what to look for, based on anecdotal evidence and discussions with your team. For example, you may know that having too many meetings is a problem, so you might want to focus the data exploration on that aspect. The next few chapters will give you more specific ideas on the types of metrics to try at this stage.
23 | - *Measure*: once you have identified a set of problems to go after, you will define metrics that represent these problems. For example, having too many meetings can be translated into “meeting hours per week per person”. It is important to choose metrics that are actionable and easy to explain. We recommend grouping metrics into dashboards, which act as the concrete representation of your goals.
24 | - *Monitor*: this next phase is about comparing the metric to the goals that you have set on a regular basis. For example, you might set a goal to reduce the number of meetings per person by 20% in the next quarter. This is the most critical phase, as an unmonitored metric won’t be able to create any change by itself.
25 | - *Act*: this phase is about taking action to improve effectiveness. In the same way that debugging systems depends on many factors, this final phase is the most context-dependent. In the case of meeting load, there are a variety of techniques you can apply such as “no meetings days”, developing a more asynchronous culture, etc. You might also find that you need to go back to exploration and sub-divide the problem into more specific metrics.
26 |
27 | ## Metrics, Lists, Alerts: when to use which
28 |
29 | In our experience, there are 3 main categories of signals that will be useful for engineering leaders: metrics, lists and alerts. Let's explain when to use which and who might benefit more from each type.
30 |
31 | ### Metrics
32 |
33 | Metrics are quantitative measurements of a signal, usually over a specific time period. For example, you might want to measure the average time it took to do code reviews over the past 6 months. They are useful for everyone from first-line managers to executives, and they form the building blocks of dashboards.
34 |
35 | Use metrics for:
36 | - high-level **trending and baselining**
37 | - building **reports** that you will monitor over time
38 | - doing some high-level **comparisons** across teams
39 |
40 | To ensure that metrics are actionable, it is also important to build relevant filters, grouping and drill-downs. Metrics should only be used at the individual level to measure blockers and interruptions; if there is a way to interpret the metric as a performance indicator, we'd highly recommend choosing a coarser grouping (team, organization).
41 |
42 | ### Lists
43 |
44 | Lists are tabular outputs of a query - like a metric but with no aggregation. For example, you might want to pull up a list of stale pull requests or a list of tickets associated with a particular team and project. They are particularly useful for first-line managers and team leads.
45 |
46 | Use lists for:
47 | - **real-time checks** on potential blockers (e.g. PRs that are not progressing)
48 | - **visualizing** the various stages of a pipeline in more detail
49 | - building quick **exports** to your spreadsheet software
50 |
51 | ### Alerts
52 |
53 | Alerts are notifications that are triggered when metrics meet a certain condition. For example, you might want to be alerted whenever a Pull Request has been open for more than N days. They are most useful for first-line managers and their teams (e.g. you could set up a shared Slack channel to receive these alerts)
54 |
55 | Use alerts for:
56 | - **avoiding getting blind-sided** by slowly degrading situations (e.g. a sub-team getting a lot of pages at night)
57 | - making **continuous improvements** based on goals (e.g. you want to complete code reviews within a max of N days)
58 |
59 |
60 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 |
2 | # Contributor Covenant Code of Conduct
3 |
4 | ## Our Pledge
5 |
6 | We as members, contributors, and leaders pledge to make participation in our
7 | community a harassment-free experience for everyone, regardless of age, body
8 | size, visible or invisible disability, ethnicity, sex characteristics, gender
9 | identity and expression, level of experience, education, socio-economic status,
10 | nationality, personal appearance, race, caste, color, religion, or sexual
11 | identity and orientation.
12 |
13 | We pledge to act and interact in ways that contribute to an open, welcoming,
14 | diverse, inclusive, and healthy community.
15 |
16 | ## Our Standards
17 |
18 | Examples of behavior that contributes to a positive environment for our
19 | community include:
20 |
21 | * Demonstrating empathy and kindness toward other people
22 | * Being respectful of differing opinions, viewpoints, and experiences
23 | * Giving and gracefully accepting constructive feedback
24 | * Accepting responsibility and apologizing to those affected by our mistakes,
25 | and learning from the experience
26 | * Focusing on what is best not just for us as individuals, but for the overall
27 | community
28 |
29 | Examples of unacceptable behavior include:
30 |
31 | * The use of sexualized language or imagery, and sexual attention or advances of
32 | any kind
33 | * Trolling, insulting or derogatory comments, and personal or political attacks
34 | * Public or private harassment
35 | * Publishing others' private information, such as a physical or email address,
36 | without their explicit permission
37 | * Other conduct which could reasonably be considered inappropriate in a
38 | professional setting
39 |
40 | ## Enforcement Responsibilities
41 |
42 | Community leaders are responsible for clarifying and enforcing our standards of
43 | acceptable behavior and will take appropriate and fair corrective action in
44 | response to any behavior that they deem inappropriate, threatening, offensive,
45 | or harmful.
46 |
47 | Community leaders have the right and responsibility to remove, edit, or reject
48 | comments, commits, code, wiki edits, issues, and other contributions that are
49 | not aligned to this Code of Conduct, and will communicate reasons for moderation
50 | decisions when appropriate.
51 |
52 | ## Scope
53 |
54 | This Code of Conduct applies within all community spaces, and also applies when
55 | an individual is officially representing the community in public spaces.
56 | Examples of representing our community include using an official e-mail address,
57 | posting via an official social media account, or acting as an appointed
58 | representative at an online or offline event.
59 |
60 | ## Enforcement
61 |
62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
63 | reported to the community leaders responsible for enforcement at handbook@okayhq.com.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series of
86 | actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or permanent
93 | ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within the
113 | community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.1, available at
119 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
120 |
121 | Community Impact Guidelines were inspired by
122 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
123 |
124 | For answers to common questions about this code of conduct, see the FAQ at
125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
126 | [https://www.contributor-covenant.org/translations][translations].
127 |
128 | [homepage]: https://www.contributor-covenant.org
129 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
130 | [Mozilla CoC]: https://github.com/mozilla/diversity
131 | [FAQ]: https://www.contributor-covenant.org/faq
132 | [translations]: https://www.contributor-covenant.org/translations
133 |
--------------------------------------------------------------------------------
/content/en/building/overview.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Overview
3 | description: ''
4 | position: 30
5 | category: Managing building activities
6 | ---
7 |
8 | In the previous chapter, we looked at the fundamental input to effective engineering teams - making time for complex, uninterrupted work. The most obvious way to use this time is to build stuff!
9 |
10 | This chapter will therefore guide you through how to manage and measure what we call *building* - all the activities that specifically go towards producing working code in the form of features for end-users, such as planning new features, writing code and deploying it.
11 | ## The software development life cycle (SDLC)
12 |
13 | There is a wealth of information online about the [Software Development Lifecycle](https://en.wikipedia.org/wiki/Systems_development_life_cycle). Consequently, there is some variation in terms of the various steps of the lifecycle. We do not take an opinionated approach on this; for pragmatic reasons, we will use these high-level steps in the rest of this chapter:
14 |
15 |
16 |
17 | ### Existing frameworks: DORA Metrics
18 |
19 | In 2016, the State of DevOps report started introducing 4 key metrics - the DORA Metrics, which were initially used to explain the difference in productivity between the hundreds of software organizations studied in the report. To this day, the DORA metrics remain the most researched metrics in this domain. The four metrics are:
20 | - Lead Time for Changes
21 | - Deployment Frequency
22 | - Change Failure Rate
23 | - Time to Restore Service
24 | Since there are many existing resources about DORA Metrics, we won't dedicate a section to them, but we will mention them in the relevant parts of the handbook. We recommend the book [Accelerate](https://www.amazon.com/Accelerate-Software-Performing-Technology-Organizations) for further reading.
25 |
26 | ### Existing frameworks: SPACE Framework
27 | The [SPACE framework](https://queue.acm.org/detail.cfm?id=3454124) is a recent addition to the DORA Metrics, which adopts a more holistic view on developer productivity. We see it as a complement to this handbook, as we share many underlying values and approaches with it.
28 |
29 | ## What not to measure and why
30 |
31 | As we briefly mentioned in the introductory chapter, it is critical to avoid using certain metrics that, while easy to measure, will be detrimental to team morale and will do almost nothing to improve productivity. The broad characteristics of a “bad” metric are any of the following:
32 |
33 | - It tries to measure the performance of individual engineers
34 | - It treats code as a series of characters instead of as an abstraction
35 | - It focuses on outputs
36 | - It is easy to game
37 | - It is “magical”, in the sense that it requires a lot of context to understand
38 |
39 | Here are some examples of such metrics, from most to least obvious:
40 |
41 | - *Number of lines of code written by a developer*: we hope that most readers will agree that this is the archetype of a bad engineering productivity metric. Fun fact: the Apollo 11 source code is roughly 40,000 lines of Assembly.
42 |
43 | - *Pull Request Throughput of a developer*: this could be considered an improvement over lines of code, in the sense that it deals with a logical unit of work as opposed to a series of characters. In practice, this metric will lead people to produce smaller, more frequent or even comment-only PRs, for the sake of meeting throughput goals. However, we do see this metric being useful at an organization level (dozens of engineers), to understand baselines of activity over time. It is, however, not very actionable.
44 |
45 | - *Pull Request Size*: since it is accepted that big PRs slow down the code review process and introduce more risk, we have seen some companies focus on setting a maximum PR size. This almost always backfires and we have even seen developers writing utility scripts to break down PRs automatically to avoid getting caught in the filter. The core problem here is to narrowly focus on the output instead of understanding why people may need to write large PRs in the first place.
46 |
47 | - *Code churn*: this metric tries to measure rework by comparing additions and deletions happening in various parts of the codebase. This is an example of a magical metric that is also based on unclear assumptions: rework can be good or bad depending on fairly complex interpretations of the context. It can lead to broken incentives, where developers would avoid tech-debt-ridden parts of the codebase in order to avoid accumulating “rework”.
48 |
49 | ### A little nuance: low-pass / high-pass filter on metrics
50 |
51 | For the sake of completeness, we want to mention that some of these metrics may become useful if you apply a mental filter that retains only the most extreme values.
52 |
53 | For example, if a particular team or developer hasn’t written any lines of code in the past quarter, there is very likely something going on. We would still refrain from immediately equating this to low productivity, but we would deep dive into these cases to understand the context. In that sense, these filtered metrics can be useful as “canaries in the coal mine”. That said, we doubt that most engineering leaders need metrics and dashboards to become aware of such extreme cases.
54 |
55 | ## What to measure instead
56 |
57 | Not surprisingly, the signals we favor display opposite features compared to the harmful metrics we describe above. The characteristics of a "good" metric are:
58 |
59 | - **It focuses on inputs**, in the form of bottlenecks, slow-downs and other inefficiencies at every step of the SDLC. This type of metric is more specific and actionable, and usually harder to misuse or "game". It also clearly puts the focus on improving the developer experience as opposed to measuring individual performance.
60 | - **Its definition is simple to understand**. For someone with "average context" about the situation at hand, it should be easy to intuitively understand how the metric is defined and what may cause it to move in different directions. Again, it optimizes for actionability and it reduces the potential for "interpretation battles".
61 | - **It supports drill-downs**. The metric should be easy to aggregate at various resolutions (team, department, organization) and it should support multiple ways to filter or group (e.g. by repository, by service, etc.). This is critical in terms of making it easy to debug problems and iterate on solutions.
--------------------------------------------------------------------------------
/components/app/AppHeader.vue:
--------------------------------------------------------------------------------
1 |
2 |
83 |
84 |
85 |
117 |
118 |
--------------------------------------------------------------------------------
/content/en/building/ci.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: CI/CD
3 | description: ''
4 | position: 33
5 | category: Managing building activities
6 | ---
7 | The last part of the development pipeline is to test and deploy working code: continuous integration and continuous deployment (CI/CD).
8 | ## Continuous Integration
9 | CI is one of the most instrumented parts of the Software Development Life Cycle. Jenkins, CircleCI and Buildkite are examples of well-known CI platforms. They usually work with the following abstractions:
10 | - a **CI job** is comprised of several automated tests that run on a dedicated CI cluster.
11 | - CI jobs can be grouped and ordered into a **CI pipeline**.
12 | - the CI system provides various **triggers** to start pipelines based on events (e.g. a pull request got opened).
13 | The 3 most common concerns with CI systems are:
14 | - are the tests **exhaustive** enough (ie avoiding false negatives)?
15 | - are the tests **non-flaky** (ie avoiding false positives)?
16 | - are the tests **running fast enough**?
17 |
18 | In this handbook, we'll focus mostly on understanding flaky and slow tests.
19 | ### Hardware bottlenecks
20 |
21 | Let's start with the obvious: CI is computationally intensive and represents a sizable budget for even mid-size organizations. There are many online resources covering CI optimization, so we'll briefly mention the most common cases.
22 |
23 | - **Undersized CI cluster**: as teams grow and add more tests and CI jobs, the CI cluster can become an overloaded shared resource. A common sign is that the time it takes to run a single CI pipeline becomes tightly correlated with the number of CI runs. This means that the system does not scale.
24 | - **Unstable machines**: on sufficiently large clusters, there will be misbehaving machines, which can create hardware-induced flakiness. We recommend having a process to identify and cordon off these problematic machines.
25 | - **Architecture issues**: this happens when the way tests, jobs and pipelines are written stops scaling. Before throwing more hardware at CI problems, it is useful to review the architecture for classic issues involving concurrency, resource consumption, etc.
26 |
27 | Since solving the technical challenges of CI is more tractable than, say, chasing flaky tests, there can be a temptation to focus solely on hardware solutions. It is still critical to understand the human impact of slow CI, as we'll see in the next 2 sections.
28 |
29 | ### Branch CI bottlenecks
30 | Branch CI happens when engineers test code on a branch, usually attached to a specific pull request.
31 |
32 | Branch CI is critical to optimize because it involves making humans wait: CI results are often the first pieces of feedback an author can get on their new pull request. As a result, we highly recommend building metrics **scoped on a per-branch or per-PR basis**, rather than job-based or pipeline-based metrics, which lack that human context. Ideally, you want to understand the **total amount of time humans are waiting on your CI system.**
33 |
34 | Here are bottlenecks to look out for with Branch CI:
35 |
36 | - **Branch CI taking a long time**: this is the most immediate signal you could look at, and it gives you a direct measurement of how long people are waiting for CI to complete. In our experience, if CI takes more than 5-10 min, it really becomes a background thread for engineers, which causes them to context-switch. Again, it is important to aggregate the total, wall-clock CI time of *every* job running on a PR to get an accurate sense of this wait time.
37 | - **PRs with a high number of CI runs**: in a typical system, you would expect CI to run once per new commit added to the PR. If these 2 numbers deviate from each other, it likely means that CI is being retried because of inherently flaky tests or because the engineer has decided to re-execute the CI job. The latter situation can also happen if engineers are using the CI system as their main testing ground. In this case, you could see many commits added to a PR, for the sole purpose of getting constant feedback on the code. This is not a bad practice per se, but it may indicate that engineers resort to this workaround because e.g. local tooling does not work well. In general, understanding why CI gets executed so frequently on a per PR basis will yield invaluable information regarding the developer experience.
38 | - **PRs with high failure rate**: Branch CI is expected to fail more often than main CI, since the code is being iterated on. However, keeping track of the failure trends can help with spotting a pattern affecting all your engineers. On a large team, the experience of constantly hitting failures with CI can quickly demoralize engineers and cause them to lose trust in the CI system.
39 |
40 |
41 | ### Main CI bottlenecks
42 | Main CI is the integration step of the life cycle: the main branch of the repo gets tested with all recently merged code before deployment.
43 | Contrary to Branch CI, Main CI is not related to particular pull requests or individuals. It will reveal emergent issues affecting the team collectively.
44 |
45 | Here are examples of such issues:
46 | - **Flakiness**: flakiness is the rate of false positives of the CI system. Since code has already been tested on the branch, flakiness on Main CI is a cleaner signal (barring unforeseen interactions between recently merged code). Flaky tests are particularly harmful because they are usually hard to classify between true and false positives. On a large CI pipeline, a few flaky tests can routinely fail the whole pipeline - this quickly becomes extremely costly. A typical technique is to execute every job N times and average the results, which unfortunately increases resource consumption and does not address root issues.
47 | - **Blocked deployments**: in a typical CI/CD scenario, we see many teams focusing on *keeping the build green* because a Main CI failure implies stopping the deployment train. It can be useful to understand the total amount of time that the build was broken in a given time period. This has a direct impact on deployment frequency and your lead time to deliver features.
48 |
49 | ## Deployments
50 |
51 | Deployment is the final part of the development life cycle: shipping features to customers. It is probably the least standardized part of the process:
52 | - some companies automate deployments (continuous deployment) while others decide to keep it manual
53 | - some companies rely on their CI system to deploy while others build custom deployment scripts or use their container orchestration platform
54 |
55 | We recommend building a set of metrics that match your own custom system and we'll just cover 2 common signals:
56 |
57 | - **Deployment Frequency**: this DORA metric represents how often your organization successfully ships releases to production. It gives a good high-level indication of the performance of your entire development life cycle. Most engineers also prefer working in environments where code is shipped at least once a day, so this metric impacts retention and hiring. Being an output metric, it is not readily actionable and we'd recommend focusing on long-term trends instead of micro-optimizations.
58 | - **Lead Time (Merged to Deploy or Main CI to Deploy)**: this represents how long it takes for code to go from one step to another. You can use various definitions of lead time depending on what you are trying to optimize for: the entire CI/CD experience or the speed of deployments. In organizations with a single release train, lead time and deployment frequency are actually closely connected. A typical failure scenario is to build release roadmaps that are based on a *desired or imagined* lead time and deployment frequency, which is why it is particularly important for engineering leaders to fully understand these metrics when interacting with their product, sales and other executive counterparts.
59 |
60 |
61 | ## Example Metrics
62 | *Metrics are useful for trending at the team or organization level*
63 | |Metric|When to use|
64 | |---|---|
65 | |Wall-clock CI Time on a PR|Understand how long humans wait on the CI system|
66 | |Failure Rate of CI per PR|Catch trends indicating a degradation of the CI system, e.g. due to overload|
67 | |Count of CI runs per PR|Understand why CI may potentially run too many times per PR|
68 | |Aggregate time that Main CI was blocked|Understand when and how your SDLC gets blocked because of CI|
69 | |Deployment Frequency (DORA)|Measure how often the organization ships features to end users|
70 | |Lead Time for Merged to Deploy (DORA-like)|Understand the performance of your entire CI/CD pipeline|
71 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | # Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
2 |
3 | Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
4 |
5 | ### Using Creative Commons Public Licenses
6 |
7 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.
8 |
9 | * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors).
10 |
11 | * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees).
12 |
13 | ## Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License
14 |
15 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
16 |
17 | ### Section 1 – Definitions.
18 |
19 | a. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
20 |
21 | b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
22 |
23 | c. __BY-NC-SA Compatible License__ means a license listed at [creativecommons.org/compatiblelicenses](http://creativecommons.org/compatiblelicenses), approved by Creative Commons as essentially the equivalent of this Public License.
24 |
25 | d. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
26 |
27 | e. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
28 |
29 | f. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
30 |
31 | g. __License Elements__ means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution, NonCommercial, and ShareAlike.
32 |
33 | h. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
34 |
35 | i. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
36 |
37 | j. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License.
38 |
39 | k. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange.
40 |
41 | l. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
42 |
43 | m. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
44 |
45 | n. __You__ means the individual or entity exercising the Licensed Rights under this Public License. __Your__ has a corresponding meaning.
46 |
47 | ### Section 2 – Scope.
48 |
49 | a. ___License grant.___
50 |
51 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
52 |
53 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and
54 |
55 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only.
56 |
57 | 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
58 |
59 | 3. __Term.__ The term of this Public License is specified in Section 6(a).
60 |
61 | 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
62 |
63 | 5. __Downstream recipients.__
64 |
65 | A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
66 |
67 | B. __Additional offer from the Licensor – Adapted Material.__ Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply.
68 |
69 | C. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
70 |
71 | 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
72 |
73 | b. ___Other rights.___
74 |
75 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
76 |
77 | 2. Patent and trademark rights are not licensed under this Public License.
78 |
79 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes.
80 |
81 | ### Section 3 – License Conditions.
82 |
83 | Your exercise of the Licensed Rights is expressly made subject to the following conditions.
84 |
85 | a. ___Attribution.___
86 |
87 | 1. If You Share the Licensed Material (including in modified form), You must:
88 |
89 | A. retain the following if it is supplied by the Licensor with the Licensed Material:
90 |
91 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
92 |
93 | ii. a copyright notice;
94 |
95 | iii. a notice that refers to this Public License;
96 |
97 | iv. a notice that refers to the disclaimer of warranties;
98 |
99 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
100 |
101 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
102 |
103 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
104 |
105 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
106 |
107 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
108 |
109 | b. ___ShareAlike.___
110 |
111 | In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply.
112 |
113 | 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-NC-SA Compatible License.
114 |
115 | 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material.
116 |
117 | 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply.
118 |
119 | ### Section 4 – Sui Generis Database Rights.
120 |
121 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
122 |
123 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only;
124 |
125 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and
126 |
127 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
128 |
129 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
130 |
131 | ### Section 5 – Disclaimer of Warranties and Limitation of Liability.
132 |
133 | a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__
134 |
135 | b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__
136 |
137 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
138 |
139 | ### Section 6 – Term and Termination.
140 |
141 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
142 |
143 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
144 |
145 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
146 |
147 | 2. upon express reinstatement by the Licensor.
148 |
149 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
150 |
151 | c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
152 |
153 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
154 |
155 | ### Section 7 – Other Terms and Conditions.
156 |
157 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
158 |
159 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
160 |
161 | ### Section 8 – Interpretation.
162 |
163 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
164 |
165 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
166 |
167 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
168 |
169 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
170 |
171 | > Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
172 | >
173 | > Creative Commons may be contacted at creativecommons.org
--------------------------------------------------------------------------------
/static/img/sflc.svg:
--------------------------------------------------------------------------------
1 |
17 |
--------------------------------------------------------------------------------
/static/img/logo-dark.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/static/img/logo-light.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------