32 | {node.frontmatter.date}
33 |
34 |
35 |
40 |
41 |
42 | );
43 | })}
44 |
45 | );
46 | };
47 |
48 | BlogIndex.propTypes = {
49 |   data: PropTypes.shape({
50 |     allMarkdownRemark: PropTypes.shape({
51 |       edges: PropTypes.arrayOf(PropTypes.shape({
52 |         node: PropTypes.shape({
53 |           excerpt: PropTypes.string.isRequired,
54 |           fields: PropTypes.shape({
55 |             slug: PropTypes.string.isRequired,
56 |           }).isRequired,
57 |           frontmatter: PropTypes.shape({
58 |             date: PropTypes.string.isRequired,
59 |             description: PropTypes.string.isRequired,
60 |             title: PropTypes.string.isRequired,
61 |           }).isRequired,
62 |         }).isRequired,
63 |       }).isRequired).isRequired,
64 |     }).isRequired,
65 |     site: PropTypes.shape({
66 |       siteMetadata: PropTypes.shape({
67 |         title: PropTypes.string.isRequired,
68 |       }).isRequired,
69 |     }).isRequired,
70 |   }).isRequired,
71 | };
72 |
73 | export default BlogIndex;
74 |
75 | export const pageQuery = graphql`
76 | query {
77 | site {
78 | siteMetadata {
79 | title
80 | description
81 | }
82 | }
83 | allMarkdownRemark(sort: { fields: [frontmatter___date], order: DESC }, filter: { fileAbsolutePath: { regex: "/blog/"}}) {
84 | edges {
85 | node {
86 | excerpt
87 | fields {
88 | slug
89 | }
90 | frontmatter {
91 | date(formatString: "MMMM DD, YYYY")
92 | title
93 | description
94 | }
95 | }
96 | }
97 | }
98 | }
99 | `;
100 |
--------------------------------------------------------------------------------
/gatsby-config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | siteMetadata: {
3 | title: 'Nans\' blogs',
4 | author: {
5 | name: 'Nans Dumortier',
6 | summary: 'who is a French Software Developer.',
7 | },
8 | description: 'This is actually more a notepad than anything else, you will discover the notes I take during some trainings, or while reading some books. Sometimes I write in English, sometimes in French, sometimes both at the same time, sorry about that ! (🇬🇧/🇫🇷)',
9 | siteUrl: 'http://nans-dumortier.com',
10 | social: {
11 | twitter: 'NansDumortier',
12 | linkedIn: 'nans-dumortier',
13 | devTo: 'nans',
14 | github: 'NansD',
15 | },
16 | },
17 | plugins: [
18 | {
19 | resolve: 'gatsby-source-filesystem',
20 | options: {
21 | path: `${__dirname}/content/blog`,
22 | name: 'blog',
23 | },
24 | },
25 | {
26 | resolve: 'gatsby-source-filesystem',
27 | options: {
28 | path: `${__dirname}/content/portfolio`,
29 | name: 'portfolio',
30 | },
31 | },
32 | {
33 | resolve: 'gatsby-source-filesystem',
34 | options: {
35 | path: `${__dirname}/content/assets`,
36 | name: 'assets',
37 | },
38 | },
39 | {
40 | resolve: 'gatsby-transformer-remark',
41 | options: {
42 | plugins: [
43 | {
44 | resolve: 'gatsby-remark-images',
45 | options: {
46 | maxWidth: 590,
47 | },
48 | },
49 | {
50 | resolve: 'gatsby-remark-responsive-iframe',
51 | options: {
52 | wrapperStyle: 'margin-bottom: 1.0725rem',
53 | },
54 | },
55 | 'gatsby-remark-prismjs',
56 | 'gatsby-remark-copy-linked-files',
57 | 'gatsby-remark-smartypants',
58 | ],
59 | },
60 | },
61 | 'gatsby-transformer-sharp',
62 | 'gatsby-plugin-sharp',
63 | {
64 | resolve: 'gatsby-plugin-google-analytics',
65 | options: {
66 | trackingId: 'UA-119867329-1',
67 | },
68 | },
69 | 'gatsby-plugin-feed',
70 | {
71 | resolve: 'gatsby-plugin-manifest',
72 | options: {
73 | name: 'Gatsby Starter Blog',
74 | short_name: 'GatsbyJS',
75 | start_url: '/',
76 | background_color: '#ffffff',
77 | theme_color: '#663399',
78 | display: 'minimal-ui',
79 | icon: 'content/assets/logo.png',
80 | },
81 | },
82 | 'gatsby-plugin-react-helmet',
83 | {
84 | resolve: 'gatsby-plugin-typography',
85 | options: {
86 | pathToConfigModule: 'src/utils/typography',
87 | },
88 | },
89 | {
90 | resolve: 'gatsby-plugin-s3',
91 | options: {
92 | bucketName: 'notes-taking-blog-bucket',
93 | protocol: 'https',
94 | hostname: 'www.nans-dumortier.com',
95 | },
96 | },
97 | 'gatsby-plugin-eslint',
98 | // this (optional) plugin enables Progressive Web App + Offline functionality
99 | // To learn more, visit: https://gatsby.dev/offline
100 | // `gatsby-plugin-offline`,
101 | ],
102 | };
103 |
--------------------------------------------------------------------------------
/content/blog/An elegant solution for memory leaks in React/An elegant solution for memory leaks in React.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: An elegant solution for memory leaks in React
3 | date: "2020-07-30T18:35:00.284Z"
4 | description: "I created a hook to solve a common problem in React in an elegant way"
5 | tags: ['article', 'react']
6 | ---
7 |
8 | ## 🔴 UPDATE
9 |
10 | This "solution" doesn't seem to *really* avoid leaks. Even AbortController doesn't seem to be the silver bullet against memory leaks 😰.
11 | Check out the [discussion in the comments](https://dev.to/nans/an-elegant-solution-for-memory-leaks-in-react-1hol)!
12 |
13 | ----
14 |
15 | When working with asynchronous calls, such as API calls, you might have encountered this error:
16 |
17 | `Can't perform a React state update on an unmounted component. This is a no-op, but it indicates a memory leak in your application. To fix, cancel all subscriptions and asynchronous tasks in a useEffect cleanup function.`
18 |
19 |
20 |
21 | ## A GIF is worth a thousand words ...
22 |
23 | 
24 |
25 | This is a small page that simulates some asynchronous logic on load, and then updates the view accordingly. Here, I unmount the component before the async work has finished, triggering the error. (I took this example from [this](https://stackoverflow.com/questions/56442582/react-hooks-cant-perform-a-react-state-update-on-an-unmounted-component/63176693#63176693) Stack Overflow post)
26 |
27 | This is caused by the following code:
28 |
29 | `````javascript
30 | function Example() {
31 |   const [text, setText] = useState("waiting...");
32 |
33 |   useEffect(() => {
34 |     simulateSlowNetworkRequest().then(() => {
35 |       setText("done!"); // ⚠️ what if the component is no longer mounted ?
36 |       // => Warning: Can't perform a React state update on an unmounted component.
37 |     });
38 |   }, []);
39 |
40 |   return <p>{text}</p>;
41 | }
42 | `````
43 |
44 | When running into that issue, I found multiple solutions; the most common one seems to be this:
45 |
46 | `````javascript
47 | function OtherExample() {
48 |   const [text, setText] = useState("waiting...");
49 |
50 |   useEffect(() => {
51 |     let isMounted = true; // 👈
52 |     simulateSlowNetworkRequest().then(() => {
53 |       if (isMounted) { // 👈
54 |         setText("done!"); // no more error
55 |       }
56 |     });
57 |     return () => {
58 |       isMounted = false; // 👈
59 |     };
60 |   }, []);
61 |
62 |   return <p>{text}</p>;
63 | }
64 | `````
65 |
66 | **But** it requires you to add quite a lot of stuff into your component, having to deal with that `isMounted` variable all over the place ...
67 |
68 | There are other interesting solutions, like making your [Promises cancellable](https://dev.to/viclafouch/cancel-properly-http-requests-in-react-hooks-and-avoid-memory-leaks-pd7).
69 |
70 | ## You told me there would be an elegant solution !
71 |
72 | I wasn't lying! The solution I came up with is a very simple [hook](https://www.npmjs.com/package/use-state-if-mounted). It works just like React's useState, but it basically checks if the component is mounted before updating the state !
73 |
74 | Here is an example of the refactored code :
75 |
76 | ```javascript
77 | function OtherExample() {
78 |   const [text, setText] = useStateIfMounted("waiting..."); // 👈
79 |
80 |   React.useEffect(() => {
81 |     simulateSlowNetworkRequest().then(() => {
82 |       setText("done!"); // no more error
83 |     });
84 |   }, [setText]);
85 |
86 |   return <p>{text}</p>;
87 | }
88 | ```
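If you're curious how such a hook can work: a ref tracks whether the component is still mounted, and a wrapped setter checks that ref before calling the real one. Here is a minimal sketch (my illustration, not the package's actual source):

```javascript
import { useState, useRef, useCallback, useEffect } from "react";

function useStateIfMounted(initialValue) {
  const isMountedRef = useRef(true);
  const [state, setState] = useState(initialValue);

  useEffect(() => {
    isMountedRef.current = true;
    return () => {
      isMountedRef.current = false; // cleanup runs on unmount
    };
  }, []);

  // Same API as a useState setter, but silently ignores calls
  // that happen after the component has unmounted
  const setStateIfMounted = useCallback((value) => {
    if (isMountedRef.current) {
      setState(value);
    }
  }, []);

  return [state, setStateIfMounted];
}
```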
89 |
90 | Here is the [CodeSandBox](https://codesandbox.io/s/use-local-variable-to-avoid-setstate-on-unmouted-component-6k08n?file=/src/index.js:459-695) if you wanna play around !
91 |
92 | ## TLDR
93 | Use the [useStateIfMounted](https://www.npmjs.com/package/use-state-if-mounted) hook, which will only update the state if your component is mounted! 🚀
94 |
95 |
96 | I hope this might be helpful, feel free to reach out to me in any case! 🤗
--------------------------------------------------------------------------------
/src/templates/blog-post.js:
--------------------------------------------------------------------------------
1 | /* eslint-disable react/no-danger */
2 | import React from 'react';
3 | import { Link, graphql } from 'gatsby';
4 |
5 | import PropTypes from 'prop-types';
6 | import Bio from '../components/bio/bio';
7 | import Layout from '../components/layout/layout';
8 | import SEO from '../components/seo/seo';
9 | import { rhythm, scale } from '../utils/typography';
10 |
11 | const BlogPostTemplate = ({ data, pageContext, location }) => {
12 | const post = data.markdownRemark;
13 | const siteTitle = data.site.siteMetadata.title;
14 | const { previous, next } = pageContext;
15 |
16 | return (
17 |
18 |
22 |
23 |
24 |
30 | {post.frontmatter.title}
31 |
32 |
39 | {post.frontmatter.date}
40 |
41 |
42 |
43 |
48 |
51 |
52 |
53 |
83 |
84 | );
85 | };
86 |
87 | export default BlogPostTemplate;
88 |
89 | export const pageQuery = graphql`
90 | query BlogPostBySlug($slug: String!) {
91 | site {
92 | siteMetadata {
93 | title
94 | }
95 | }
96 | markdownRemark(fields: { slug: { eq: $slug } }) {
97 | id
98 | excerpt(pruneLength: 160)
99 | html
100 | frontmatter {
101 | title
102 | date(formatString: "MMMM DD, YYYY")
103 | description
104 | }
105 | }
106 | }
107 | `;
108 |
109 | BlogPostTemplate.propTypes = {
110 |   data: PropTypes.shape({
111 |     markdownRemark: PropTypes.shape({
112 |       id: PropTypes.string.isRequired,
113 |       excerpt: PropTypes.string.isRequired,
114 |       html: PropTypes.string.isRequired,
115 |       frontmatter: PropTypes.shape({
116 |         date: PropTypes.string.isRequired,
117 |         description: PropTypes.string.isRequired,
118 |         title: PropTypes.string.isRequired,
119 |       }).isRequired,
120 |     }).isRequired,
121 |     site: PropTypes.shape({
122 |       siteMetadata: PropTypes.shape({
123 |         title: PropTypes.string.isRequired,
124 |       }).isRequired,
125 |     }).isRequired,
126 |   }).isRequired,
127 | };
133 |
--------------------------------------------------------------------------------
/content/blog/lambda-training/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Lambda training
3 | date: "2020-03-11T15:15:00.284Z"
4 | description: "Notes taken during AWS lambda training"
5 | tags: ['training', 'AWS']
6 | ---
7 | # Learn AWS Lambda and see real world example with serverless
8 |
9 | main documentation : https://serverless.com/
10 |
11 | ## What is lambda
12 | - functions
13 | - limited by time
14 | - run on demand
15 | - scaling is automated -> that's why we call it "Serverless"
16 |
17 | ## Pricing
18 | - pay per request
19 | - pay per compute time
20 |
21 | ## AWS Lambda integrations
22 | - API Gateway -> to build a REST API
23 | - Kinesis -> processing data in streams
24 | - DynamoDB -> Managed NoSQL database
25 | - ...
26 |
27 | ## Example : Thumbnail creation
28 | "Database Trigger-like" behavior for S3
29 | New image in S3 -> trigger -> AWS Lambda function creates a thumbnail and pushes it to S3
30 |
31 | /!\ the handler is the name of the function that will be called in lambda
32 | The instructor only did a "hello world".
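For reference, a minimal Python handler might look like this (an illustrative sketch, not the instructor's exact file):

```python
# handler.py, referenced from serverless.yml as `handler: handler.hello`
def hello(event, context):
    print("hello world")
    return {"statusCode": 200, "body": "hello world"}
```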
33 |
34 | ## The Serverless Framework
35 | Aims to ease the pain of creating, deploying, managing and debugging lambda functions.
36 | Integrates with CI/CD tools.
37 | CloudFormation support (templates for deploying).
38 |
39 | ```bash
40 | npm install -g serverless
41 | ```
42 |
43 | IAM => Create a serverless-admin (new user). Programmatic access, and give administrator access. (not best practice though ...)
44 |
45 | ```bash
46 | serverless config credentials --provider aws --key XXX --secret YYY --profile serverless-admin
47 | ```
48 |
49 | ## Hello world using the Serverless framework
50 | `serverless` or `sls` => list of commands.
51 |
52 | ```bash
53 | sls create --template aws-python --path hello-world-python
54 | ```
55 |
56 | In `serverless.yml`, add:
57 | ```yml
58 | profile: serverless-admin
59 | region: eu-west-3
60 | ```
61 |
62 | ```bash
63 | sls deploy -v
64 | ```
65 |
66 | The code is uploaded to S3, and then CloudFormation provisions the stack. An IAM role is created for us.
67 |
68 | ## Running a function from the command line
69 | ```bash
70 | sls invoke -f hello -l
71 | ```
72 |
73 | ## Updating the function
74 | change the `print` statement.
75 | ```bash
76 | sls deploy function -f hello
77 | ```
78 | => how to integrate that with a proper CI/CD system ?
79 |
80 | ## Fetching function logs
81 | ```bash
82 | sls logs -f hello -t
83 | ```
84 |
85 | lets you stream function logs as they happen!
86 |
87 | ## Remove the function
88 | ```bash
89 | sls remove
90 | ```
91 |
92 | ## add an IAM role statement to a function
93 | ```yaml
94 | iamRoleStatements:
95 |   - Effect: "Allow"
96 |     Action:
97 |       - "lambda:*" # actions use the service:action format
98 |     Resource:
99 |       - "*"
100 | ```
101 | ## VPC for lambda functions
102 | can launch lambdas in a VPC so that they can access EC2 instances or RDS instances.
103 | 
104 | can specify securityGroupIds and subnetIds in the serverless.yml file.
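Something like this (a sketch; the IDs are placeholders):

```yml
provider:
  vpc:
    securityGroupIds:
      - sg-xxxxxxxx
    subnetIds:
      - subnet-xxxxxxxx
      - subnet-yyyyyyyy
```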
105 |
106 | ## Real world example : Service to create S3 image thumbnails
107 |
108 | 
109 |
110 | - S3 events
111 | - plugins to deploy python dependencies (docker needed)
112 |
113 | Triggering a function from an s3 event :
114 | 
115 |
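In `serverless.yml`, the wiring looks roughly like this (bucket and handler names are placeholders):

```yml
functions:
  createThumbnail:
    handler: handler.create_thumbnail
    events:
      - s3:
          bucket: my-photos-bucket
          event: s3:ObjectCreated:*
```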
116 | plugin: `serverless-python-requirements`
117 | it installs what's listed in the requirements.txt file.
119 | Pillow => import PIL
120 |
121 | ## Real world example : REST API with API Gateway and DynamoDB
122 |
123 | 
124 |
125 | Lambda functions are stateless: they don't keep anything in memory between invocations, so they write to DynamoDB.
126 |
127 | package.json => like requirements.txt
128 |
129 | ```yaml
130 | environment:
131 |   DYNAMODB_TABLE: ${self:service}-${opt:stage, self:provider.stage}
132 | ```
133 |
134 | 
135 |
136 | 
137 |
138 | POST on /todos => calls the create function
139 | this is wired to API Gateway
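In `serverless.yml`, that looks roughly like this (the handler path is a placeholder):

```yml
functions:
  create:
    handler: todos/create.create
    events:
      - http:
          path: todos
          method: post
```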
140 |
141 | under `resources`, you can specify the CloudFormation template
142 |
143 | 
144 |
145 | 
146 |
147 | response should always have a statusCode and a body.
148 |
149 | 
150 |
151 | ## Personal investigation : creating a full stack app
152 | If you want to build an app that leverages that REST API built on Lambda, you can use aws-amplify: https://aws-amplify.github.io/docs/js/api.
153 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Nans' blog!
2 | 
3 |
4 |
5 |
13 |
14 | Kick off your project with this blog boilerplate. This starter ships with the main Gatsby configuration files you might need to get up and running blazing fast with the blazing fast app generator for React.
15 |
16 | _Have another more specific idea? You may want to check out our vibrant collection of [official and community-created starters](https://www.gatsbyjs.org/docs/gatsby-starters/)._
17 |
18 | ## 🚀 Quick start
19 |
20 | 1. **Create a Gatsby site.**
21 |
22 | Use the Gatsby CLI to create a new site, specifying the blog starter.
23 |
24 | ```shell
25 | # create a new Gatsby site using the blog starter
26 | gatsby new my-blog-starter https://github.com/gatsbyjs/gatsby-starter-blog
27 | ```
28 |
29 | 1. **Start developing.**
30 |
31 | Navigate into your new site’s directory and start it up.
32 |
33 | ```shell
34 | cd my-blog-starter/
35 | gatsby develop
36 | ```
37 |
38 | 1. **Open the source code and start editing!**
39 |
40 | Your site is now running at `http://localhost:8000`!
41 |
42 | _Note: You'll also see a second link: _`http://localhost:8000/___graphql`_. This is a tool you can use to experiment with querying your data. Learn more about using this tool in the [Gatsby tutorial](https://www.gatsbyjs.org/tutorial/part-five/#introducing-graphiql)._
43 |
44 | Open the `my-blog-starter` directory in your code editor of choice and edit `src/pages/index.js`. Save your changes and the browser will update in real time!
45 |
46 | ## 🧐 What's inside?
47 |
48 | A quick look at the top-level files and directories you'll see in a Gatsby project.
49 |
50 |     .
51 |     ├── node_modules
52 |     ├── src
53 |     ├── .gitignore
54 |     ├── .prettierrc
55 |     ├── gatsby-browser.js
56 |     ├── gatsby-config.js
57 |     ├── gatsby-node.js
58 |     ├── gatsby-ssr.js
59 |     ├── LICENSE
60 |     ├── package-lock.json
61 |     ├── package.json
62 |     └── README.md
63 |
64 | 1. **`/node_modules`**: This directory contains all of the modules of code that your project depends on (npm packages) that are automatically installed.
65 |
66 | 2. **`/src`**: This directory will contain all of the code related to what you will see on the front-end of your site (what you see in the browser) such as your site header or a page template. `src` is a convention for “source code”.
67 |
68 | 3. **`.gitignore`**: This file tells git which files it should not track / not maintain a version history for.
69 |
70 | 4. **`.prettierrc`**: This is a configuration file for [Prettier](https://prettier.io/). Prettier is a tool to help keep the formatting of your code consistent.
71 |
72 | 5. **`gatsby-browser.js`**: This file is where Gatsby expects to find any usage of the [Gatsby browser APIs](https://www.gatsbyjs.org/docs/browser-apis/) (if any). These allow customization/extension of default Gatsby settings affecting the browser.
73 |
74 | 6. **`gatsby-config.js`**: This is the main configuration file for a Gatsby site. This is where you can specify information about your site (metadata) like the site title and description, which Gatsby plugins you’d like to include, etc. (Check out the [config docs](https://www.gatsbyjs.org/docs/gatsby-config/) for more detail).
75 |
76 | 7. **`gatsby-node.js`**: This file is where Gatsby expects to find any usage of the [Gatsby Node APIs](https://www.gatsbyjs.org/docs/node-apis/) (if any). These allow customization/extension of default Gatsby settings affecting pieces of the site build process.
77 |
78 | 8. **`gatsby-ssr.js`**: This file is where Gatsby expects to find any usage of the [Gatsby server-side rendering APIs](https://www.gatsbyjs.org/docs/ssr-apis/) (if any). These allow customization of default Gatsby settings affecting server-side rendering.
79 |
80 | 9. **`LICENSE`**: Gatsby is licensed under the MIT license.
81 |
82 | 10. **`package-lock.json`** (See `package.json` below, first). This is an automatically generated file based on the exact versions of your npm dependencies that were installed for your project. **(You won’t change this file directly).**
83 |
84 | 11. **`package.json`**: A manifest file for Node.js projects, which includes things like metadata (the project’s name, author, etc). This manifest is how npm knows which packages to install for your project.
85 |
86 | 12. **`README.md`**: A text file containing useful reference information about your project.
87 |
88 | ## 🎓 Learning Gatsby
89 |
90 | Looking for more guidance? Full documentation for Gatsby lives [on the website](https://www.gatsbyjs.org/). Here are some places to start:
91 |
92 | - **For most developers, we recommend starting with our [in-depth tutorial for creating a site with Gatsby](https://www.gatsbyjs.org/tutorial/).** It starts with zero assumptions about your level of ability and walks through every step of the process.
93 |
94 | - **To dive straight into code samples, head [to our documentation](https://www.gatsbyjs.org/docs/).** In particular, check out the _Guides_, _API Reference_, and _Advanced Tutorials_ sections in the sidebar.
95 |
96 | ## 💫 Deploy
97 |
98 | [](https://app.netlify.com/start/deploy?repository=https://github.com/gatsbyjs/gatsby-starter-blog)
99 |
100 | [](https://zeit.co/import/project?template=https://github.com/gatsbyjs/gatsby-starter-blog)
101 |
102 |
103 |
--------------------------------------------------------------------------------
/content/blog/environmental-footprint/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Faced with the complexity of the environmental crisis, we need to make decision-making easier
3 | date: "2022-10-10T09:35:00.284Z"
4 | description: "Sharing a bit of my journey through environmental questions."
5 | tags: ['non-dev', 'environment', 'lca', 'acv', 'french 🇫🇷']
6 | ---
7 |
8 | The climate is breaking down, and that's no longer up for debate. We also have problems with the availability of fossil resources, the disruption of biodiversity, the availability of water ... It's starting to be a lot of information to digest.
9 |
10 | Once we've decided to act for environmental causes, where do we start? How can we prioritize our actions?
11 |
12 | Without claiming to fully answer these questions, I'm sharing a piece of my journey here.
13 |
14 | I will mostly take examples from the agri-food world, because that's the field I know best.
15 |
16 | ## We improve what we measure
17 |
18 | To check that we're doing something right, we generally take measurements and then adapt. That's how cruise control works: it measures the speed at a given instant, then asks the engine to adjust its pace based on what it measures.
19 |
20 | 
21 |
22 | > Diagram of the measurement and feedback principle of a cruise-control system
23 |
24 | Roughly speaking, it looks like that.
25 |
26 | In principle, we can try to control any quantity with this kind of system. That's how we can build a driverless metro, or even control the blades of a helicopter.
27 |
28 | After this little technological aside, let's come back to the subject at hand. Can we measure an environmental impact?
29 |
30 | Obviously, everyone knows the carbon footprint. Those most familiar with the notion even know the subtleties of "CO2 equivalent", which accounts for the different greenhouse gases.
31 |
32 | We can even get a feel for what it represents with simulators offered by ADEME, such as [Mon convertisseur CO2](https://datagir.ademe.fr/apps/mon-convertisseur-co2/).
33 |
34 | But if we only measure the carbon footprint, we forget everything else! Is it better to limit your carbon footprint but worsen your water footprint?
35 |
36 | 
37 | > Carbon footprint of food products
38 |
39 | 
40 | > Water footprint of food products
41 |
42 | For example, when we eat a walnut, the CO2 balance is extremely low (it's a crop that stores carbon), but the water footprint is very high.
43 |
44 | We need a quantity that would let us avoid **impact transfers**.
45 |
46 | ## How do we avoid impact transfers?
47 |
48 | The European Commission asked scientists to reach an agreement on the question, under the _Product Environmental Footprint_ project, PEF for short. The first step was to determine which indicators to measure. The European Commission settled on [16 indicators](https://doc.agribalyse.fr/documentation/methodologie-acv#les-indicateurs-acv-fournis-dans-agribalyse). Without listing them all, they include climate change, depletion of water resources, depletion of energy resources, land use, terrestrial/freshwater/marine eutrophication ...
49 |
50 | The PEF then proposes to combine these quantities into a "Single Score", expressed in points. It is obtained through a normalization step followed by a weighting step.
51 |
52 | Normalizing means bringing all the quantities to a single common unit (we don't compare CO2 with cubic meters of water!).
53 |
54 | Weighting means giving more or less importance to the different impacts. For the PEF, the weighting takes into account both the relative robustness of each indicator and the environmental stakes.
55 |
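To make the normalization and weighting idea concrete, here is a toy calculation. The numbers are made up for illustration and are not the official PEF factors:

```python
# Toy single-score computation: normalize each impact, then weight it
impacts = {"climate_change": 2.5,   # kg CO2 eq
           "water_use": 0.8,        # m3 water eq
           "land_use": 120.0}       # points

# Made-up normalization references and weights, for illustration only
normalization = {"climate_change": 8100.0, "water_use": 11500.0, "land_use": 820000.0}
weights = {"climate_change": 0.21, "water_use": 0.085, "land_use": 0.079}

single_score = sum(impacts[k] / normalization[k] * weights[k] for k in impacts)
print(f"Single score: {single_score * 1e6:.2f} micro-points")
```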
56 | 
57 | > Single Score for 1 kg of burritos.
58 |
59 |
60 | 
61 | > Overview of the detailed data
62 |
63 | Unfortunately, the PEF has limits. It lacks an indicator specific to biodiversity, and it also lacks an indicator for micro-plastic pollution ... But it is at least a methodology that has **scientific consensus** behind it.
64 |
65 | So how do we avoid impact transfers? We equip ourselves with a score that is as global as possible, and we tune our cruise control so that it drives the score down.
66 |
67 | ## How do we use the Single Score?
68 |
69 | To obtain this score, we carry out a **Life Cycle Assessment (LCA)** ("Analyse du Cycle de Vie", ACV, in French). The term refers to an evaluation method that analyzes every link in the value chain of a product or service.
70 |
71 | And we really do mean every link! It starts with the extraction of raw materials and goes all the way to the product's end of life, including waste treatment. When we do the LCA of a tomato, we start from the minerals that make up the tractor and go all the way to the consumer's compost. We also count the case where the tomato is thrown in the trash, and the case where the tomato was set aside at the farm because it was "off-calibre".
72 |
73 | 
74 | > Excerpt from the burrito LCA, from the factory to the supermarket
75 |
76 | Once we've modeled an existing product, we can start imagining **eco-design** scenarios. Going back to the cruise-control diagram, this is where we send the feedback signal.
77 |
78 | We can make a first attempt by trying to source everything locally: we remove all transport (yes, it's utopian, but let's do the exercise).
79 |
80 | Here is the result:
81 |
82 | 
83 | > Impact comparison: burrito vs. burrito without any transport
84 |
85 | We realize that even in a scenario of maximal transport reduction, we do not reduce the impacts significantly.
86 |
87 | We clearly identify the ground beef as a "hot spot" of our LCA. We even have something that looks like the [Pareto principle](https://fr.wikipedia.org/wiki/Principe_de_Pareto): 20% of the ingredients in my burrito cause 80% of the environmental impacts!
88 |
89 | Let's try modeling a chicken burrito:
90 |
91 | 
92 | > Impact comparison: beef vs. chicken burrito
93 |
94 | Now that's a much more satisfying impact reduction!
95 |
96 | ## Why isn't this methodology better known?
97 |
98 | The good news is that LCA will soon be very widely adopted. The French government is working on a mandatory [environmental labeling](https://www.ecologie.gouv.fr/laffichage-environnemental-des-produits-et-des-services-hors-alimentaire) law, which will consist of putting a score on the products and services that are sold.
99 |
100 | This score will be defined by the state, and will be based mainly on LCAs.
101 |
102 | 
103 | > Proposed illustration for the environmental label
104 |
105 | With that, we have the tools to help consumers choose better, and therefore to push professionals to design their products and services better.
106 |
107 | ## Sources
108 |
109 | Environmental impact data for various food products (bar-chart figures):
110 |
111 | The simplified LCA tool I'm building:
112 |
113 | The regulatory environmental labeling scheme:
114 |
115 | The database used for agri-food products:
116 |
--------------------------------------------------------------------------------
/content/blog/docker-training/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Docker training
3 | date: "2020-03-13T15:15:00.284Z"
4 | description: "Notes taken during Docker training"
5 | tags: ['training', 'devOps']
6 | ---
7 |
8 | # Docker Essentials: A developer introduction
9 | ## Lab 1 : Running a container
10 | ### What are Containers ?
11 | A group of processes, run in isolation. They run on a shared kernel.
12 | The isolation is provided by Linux **namespaces**.
13 |
14 | VM vs Container
15 | VMs are heavy and slow to start because they have a full OS.
16 | 
17 | containers can be directly run on top of virtual machines.
18 |
19 | ### What is Docker ?
20 | Docker is tooling to manage containers.
21 | *"build once, run anywhere"*
22 |
23 | ### Why containers are appealing
24 | - no more "Works on my machine"
25 | - lightweight and fast
26 | - better resource utilization
27 |
28 | ### Running a container
29 | ```bash
30 | docker container run -t ubuntu top
31 | ```
32 |
33 | The docker run command first starts a docker pull to download the Ubuntu image onto your host. After it is downloaded, it will start the container.
34 |
35 | Even though we are using the Ubuntu image, it is important to note that the container does not have its own kernel. It uses the kernel of the host and the Ubuntu image is used only to provide the file system and tools available on an Ubuntu system.
36 |
37 | ```bash
38 | docker container ls
39 | CONTAINER ID   IMAGE    COMMAND   CREATED          STATUS          PORTS   NAMES
40 | 7d1a9d47e601   ubuntu   "top"     58 seconds ago   Up 58 seconds           wizardly_germain
42 | ```
43 |
44 | then
45 | ```bash
46 | $ docker container exec -it 7d1a9d47e601 bash
47 | root@7d1a9d47e601:/#
48 | ```
49 | Using docker container exec with bash is a common way to inspect a Docker container.
50 |
51 | > Namespaces are a feature of the Linux kernel. However, Docker allows you to run containers on Windows and Mac. The secret is that embedded in the Docker product is a Linux subsystem.
52 |
53 | ### 2. Run multiple containers
54 | running an nginx server :
55 | ```bash
56 | docker container run --detach --publish 8080:80 --name nginx nginx
57 | ```
58 |
59 | The `--detach` flag runs this container in the background.
60 | The `--publish` flag publishes port 80 in the container via port 8080 on your host.
61 |
62 | So here, go to localhost:8080 :
63 | 
64 |
65 | The `--name` flag names the container.
66 |
67 | Then, we can run another container :
68 | ```bash
69 | docker container run --detach --publish 8081:27017 --name mongo mongo:3.4
70 | ```
71 |
72 | We can see the containers by running `docker container ls`.
73 |
74 | ### Removing containers
75 | 1. Stop the containers :
76 | ```bash
77 | docker container stop
78 | ```
79 | 2. Remove the stopped containers :
80 | ```bash
81 | docker system prune
82 | ```
83 |
84 | ⚠️ Note :
85 | You should avoid using unverified content from the Docker Store when developing your own images because these images might contain security vulnerabilities or possibly even malicious software.
86 |
87 |
88 | ## Lab 2 Add CI/CD value with Docker images
89 | ### Docker images
90 | - Docker image is a tar file containing a container's filesystem + metadata
91 | - for sharing and redistribution
92 |
93 | **Docker registry**: push and pull images from a registry.
94 | Default registry: Docker hub. Public and free for public images.
95 |
96 | You can also have a private registry, self-hosted, or cloud provider options.
97 |
98 | Creating a Docker image with `docker build`:
99 | - Create a "Dockerfile"
100 | - It contains a list of instructions for how to construct the image
101 | - `docker build -f Dockerfile .`
102 |
103 | Secret Sauce: Docker image layers
104 |
105 | 
106 |
107 | To make builds and pushes faster, the lines in the Dockerfile that change the most should be at the bottom of the file.
108 | Because when you invalidate a layer, you automatically invalidate the layers after it.
109 | So if the frequently-changing line is the last layer, we get the most out of the caching system.
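This is exactly what the lab's Dockerfile (shown later) does by putting `COPY app.py /app.py` last. The idea as a sketch:

```
# Changes rarely: these layers stay cached across builds
FROM python:3.6.1-alpine
RUN pip install flask
CMD ["python","app.py"]
# Changes on every code edit: only this layer is rebuilt
COPY app.py /app.py
```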
110 |
111 | - union file system
112 | - merge image layers into single file system for each container
113 | - copy-on-write
114 | - copies files that are edited up to top writable layer
115 |
116 | Lab objectives :
117 | - create custom image using a Dockerfile,
118 | - build and run your image locally
119 | - push your image to your account on DockerHub
120 | - Update your image with a code change to see Docker image layering/caching in action
121 |
122 | ### Create a python app
123 | Paste this in a file called app.py to create a simple Flask app :
124 | ```python
125 | from flask import Flask
126 |
127 | app = Flask(__name__)
128 |
129 | @app.route("/")
130 | def hello():
131 |     return "hello world!"
132 |
133 | if __name__ == "__main__":
134 |     app.run(host="0.0.0.0")
135 | ```
136 |
137 | then run
138 | ```bash
139 | pip3 install flask
140 | python3 app.py
141 | ```
142 |
143 | ### Creating and building the docker image
144 | Create a file named Dockerfile and add the following content:
145 | ```
146 | FROM python:3.6.1-alpine
147 | RUN pip install flask
148 | CMD ["python","app.py"]
149 | COPY app.py /app.py
150 | ```
151 |
152 | The FROM line specifies the starting image to build your layers on top of.
153 | In this case, you are selecting the `python:3.6.1-alpine` base layer because it already has the version of Python and pip that you need to run your application.
154 | The alpine version means that it uses the Alpine distribution, which is significantly smaller than an alternative flavor of Linux. A smaller image means it will download (deploy) much faster.
155 |
156 | > It is best practice to use a specific tag when inheriting a parent image so that changes to the parent dependency are controlled. If no tag is specified, the latest tag takes effect, which acts as a dynamic pointer that points to the latest version of an image.
157 |
158 | For security reasons, you must understand the layers that you build your docker image on top of.
159 |
160 | The RUN command executes commands needed to set up your image for your application, such as installing packages, editing files, or changing file permissions. In this case, you are installing Flask. The RUN commands are executed at build time and are added to the layers of your image.
161 |
162 | CMD is the command that is executed when you start a container. Here, you are using CMD to run your Python application.
163 | There can be only one CMD per Dockerfile. If you specify more than one CMD, then the last CMD will take effect.
164 |
165 | This line copies the app.py file in the local directory (where you will run docker image build) into a new layer of the image.
166 |
167 | It seems counter-intuitive to put this line after the CMD ["python","app.py"] line. Remember, the CMD line is executed only when the container is started, so you won't get a file not found error here.
168 |
169 | For the full Dockerfile specification, see the [documentation here](https://docs.docker.com/engine/reference/builder/).
170 |
171 |
172 | Then, we can **build the Docker image**.
173 | ```bash
174 | docker image build -t python-hello-world ./
175 | ```
176 | The `-t` parameter allows us to name the image.
177 |
178 | then we can run
179 | ```bash
180 | docker image ls
181 | ```
182 | to see our image !
183 |
184 | ### Run the docker image
185 |
186 | ```bash
187 | docker run -p 5001:5000 -d python-hello-world
188 | ```
189 |
190 | it returns > `0a8fa98d3b930c7b2d986c76fdeaf6b86cf041343be804a33585ccc9aedf5958`
191 |
192 | Navigate to http://localhost:5001 => you see "hello world!".
193 |
194 |
195 | To see the logs :
196 | ```bash
197 | docker container logs 0a8fa98d3b930c7b2d986c76fdeaf6b86cf041343be804a33585ccc9aedf5958
198 | ```
199 | It returns :
200 | ```
201 | * Serving Flask app "app" (lazy loading)
202 | * Environment: production
203 | WARNING: This is a development server. Do not use it in a production deployment.
204 | Use a production WSGI server instead.
205 | * Debug mode: off
206 | * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)
207 | 172.17.0.1 - - [13/Mar/2020 09:10:34] "GET / HTTP/1.1" 200 -
208 | 172.17.0.1 - - [13/Mar/2020 09:10:34] "GET /favicon.ico HTTP/1.1" 404 -
209 | ```
210 |
211 | > The Dockerfile is used to create reproducible builds for your application. A common workflow is to have your CI/CD automation run docker image build as part of its build process. After images are built, they will be sent to a central registry where they can be accessed by all environments (such as a test environment) that need to run instances of that application. In the next section, you will push your custom image to the public Docker registry, which is the Docker Hub, where it can be consumed by other developers and operators.
212 |
213 | ### Push to a central registry
214 |
215 | Login to the docker registry :
216 | ```bash
217 | docker login
218 | ```
219 |
220 | Tag the image with your username.
221 | The Docker Hub naming convention is to tag your image with `[dockerhub username]/[image name]`.
222 |
223 | ```bash
224 | docker tag python-hello-world nansdibm/python-hello-world
225 | ```
226 |
227 | Use docker push :
228 | ```bash
229 | docker push nansdibm/python-hello-world
230 | ```
231 |
232 | You can see your image on docker hub in the browser.
233 |
234 | 
235 |
236 | ### Deploy a change
237 | Update app.py by changing "hello world!" to "Hello Beautiful World!".
238 |
239 | Rebuild the image
240 | ```
241 | docker image build -t nansdibm/python-hello-world .
242 | ```
243 |
244 | Notice the "Using cache" for Steps 1 - 3. These layers of the Docker image have already been built, and the docker image build command will use these layers from the cache instead of rebuilding them.
245 |
246 | ```bash
247 | docker push nansdibm/python-hello-world
248 | ```
249 |
250 | ```
251 | The push refers to repository [docker.io/nansdibm/python-hello-world]
252 | bd3dadabc6d3: Pushed
253 | 455874a92037: Layer already exists
254 | 5f354b8b5dc0: Layer already exists
255 | f61107386c17: Layer already exists
256 | db49993833a0: Layer already exists
257 | 58c71ea40fb0: Layer already exists
258 | 2b0fb280b60d: Layer already exists
259 | ```
260 | There is a caching mechanism in place for pushing layers too. Docker Hub already has all but one of the layers from an earlier push, so it only pushes the one layer that has changed.
261 |
262 | ### Understanding image layers
263 | ```
264 | FROM python:3.6.1-alpine
265 | RUN pip install flask
266 | CMD ["python","app.py"]
267 | COPY app.py /app.py
268 | ```
269 |
270 | Each of these lines is a layer. Each layer contains only the delta, or changes from the layers before it. To put these layers together into a single running container, Docker uses the union file system to overlay layers transparently into a single view.
271 |
272 | The "copy-on-write" function is very fast and in almost all cases, does not have a noticeable effect on performance. You can inspect which files have been pulled up to the container level with the docker diff command. For more information, see the command-line reference on the docker diff command.
273 |
274 | ```bash
275 | docker diff 0a8fa98d3b93
276 | ```
277 |
278 | Because image layers are read-only, **they can be shared by images and by running containers**.
279 |
280 | 
281 |
282 | > Because the containers use the same read-only layers, you can imagine that starting containers is very fast and has a very low footprint on the host.
283 |
284 | ### Remove the containers
285 | ```bash
286 | docker container ls
287 | ```
288 |
289 | ```bash
290 | docker container stop 0a8fa98d3b93
291 | ```
292 |
293 | ```bash
294 | docker system prune
295 | ```
296 |
297 | ## Lab 3 Orchestrate applications with Docker Swarm
298 |
299 | ### Container orchestration
300 | What about production ?
301 | Automated scheduling and scaling, zero downtime deployments, high availability and fault tolerance...
302 |
303 | What is container orchestration ?
304 | - Cluster management
305 | - Scheduling
306 | - Service discovery
307 | - Health management
308 | - Declare desired state
309 | - Active reconciliation
310 |
311 | Docker swarm, Kubernetes, ...
312 | With hosted solutions :
313 | - IBM Cloud Container Services
314 | - Amazon ECS
315 | - Azure Containers
316 | - Google ...
317 |
318 | Setup a 3 node cluster with container orchestration with Docker Swarm.
319 | - Schedule and scale an application
320 | - Expose the application
321 | - Update with a rolling update
322 | - Demonstrate node failure and reconciliation
323 |
324 | ### Create your first swarm
325 | Using https://labs.play-with-docker.com/
326 | Create a new instance
327 |
328 | ```bash
329 | docker swarm init --advertise-addr eth0
330 | ```
331 |
332 | To add a worker in this swarm :
333 | ```bash
334 | docker swarm join --token SWMTKN-1-61p9nb8opn3tg8lchklkbyjz7uhysj8krjexrlaxpr3kaprte8-ex2t37ry9f8ux0xu92u3jfsm8 192.168.0.8:2377
335 | ```
336 |
337 | To add a manager :
338 | ```bash
339 | docker swarm join-token manager
340 | ```
341 |
342 | > You can think of Docker Swarm as a special mode that is activated by the command: docker swarm init. The --advertise-addr option specifies the address in which the other nodes will use to join the swarm.
343 |
344 | With other instances, you can join the swarm by running
345 | ```bash
346 | docker swarm join --token SWMTKN-1-61p9nb8opn3tg8lchklkbyjz7uhysj8krjexrlaxpr3kaprte8-ex2t37ry9f8ux0xu92u3jfsm8 192.168.0.8:2377
347 | ```
348 |
349 | Back on node1, run
350 | ```bash
351 | docker node ls
352 | ```
353 |
354 | ### Deploy your first service
355 | To run containers on a Docker Swarm, you need to create a service. A service is an abstraction that represents multiple containers of the same image deployed across a distributed cluster.
356 |
357 | > The --mount flag is useful to have NGINX print out the hostname of the node it's running on. You will use this later in this lab when you start load balancing between multiple containers of NGINX that are distributed across different nodes in the cluster and you want to see which node in the swarm is serving the request.
358 |
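The `nginx1` service referenced below is created with a command along these lines (reconstructed from the notes; the lab's exact flags may differ):

```bash
docker service create --detach=true --name nginx1 --publish 80:80 \
  --mount source=/etc/hostname,target=/usr/share/nginx/html,type=bind,ro \
  nginx:1.12
```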
359 | ```bash
360 | docker service ls
361 | ```
362 |
363 | ```bash
364 | docker service ps nginx1
365 | ```
366 |
367 | ### Scale your service
368 | Update your service with an updated number of replicas.
369 | ```bash
370 | docker service update --replicas=5 --detach=true nginx1
371 | nginx1
372 | ```
373 | - The state of the service is updated to 5 replicas, which is stored in the swarm's internal storage.
374 | - Docker Swarm recognizes that the number of replicas that is scheduled now does not match the declared state of 5.
375 | - Docker Swarm schedules 4 more tasks (containers) in an attempt to meet the declared state for the service.
376 |
377 | ```bash
378 | docker service ps nginx1
379 | ```
380 |
381 | 
382 |
383 | ```bash
384 | docker service logs nginx1
385 | ```
386 |
387 | ### Apply rolling updates
388 |
389 | ```bash
390 | docker service update --image nginx:1.13 --detach=true nginx1
391 | ```
392 |
393 | 
394 |
395 | ### Reconcile problems with containers
396 | ```bash
397 | docker swarm leave
398 | ```
399 |
400 | 
401 |
402 | ### Determine how many nodes you need
403 | For a production application, you should provision a cluster with multiple manager nodes to allow for manager node failures.
404 | At least 3 manager nodes, but no more than 7.
405 | - 3 manager nodes tolerate 1 node failure
406 | - 5 manager nodes tolerate 2 node failures
407 | - 7 manager nodes tolerate 3 node failures
408 |
409 | Play with docker Templates :
410 | 
411 |
412 | ### Summary
413 | - Docker Swarm schedules services by using a declarative language. You declare the state, and the swarm attempts to maintain and reconcile to make sure the actual state equals the desired state.
414 | - Docker Swarm is composed of manager and worker nodes. Only managers can maintain the state of the swarm and accept commands to modify it. Workers have high scalability and are only used to run containers. By default, managers can also run containers.
415 | - The routing mesh built into Docker Swarm means that any port that is published at the service level will be exposed on every node in the swarm. Requests to a published service port will be automatically routed to a container of the service that is running in the swarm.
416 |
--------------------------------------------------------------------------------
/content/blog/aws-training/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Formation AWS 09-10/03/2020
3 | date: "2020-03-10T15:15:00.284Z"
4 | description: "Notes taken during AWS training"
5 | tags: ['AWS', 'training', 'cloud', 'devOps']
6 | ---
7 |
8 | ## AWS Essentials
9 |
10 | https://learning.oreilly.com/videos/amazon-web-services/9780134702186/9780134702186-awsa_01_01
11 |
12 | ### 1.1 Cloud Computing
13 | On-demand
14 | Resource pooling (shared servers, shared buildings)
15 | Rapid Elasticity => growing when we need to grow
16 | Measured service => Pay as you go
17 |
18 | ### 1.2 Regions
19 | Primary building block.
20 | VM, Storage, ... You pick a region.
21 | Cost in AWS is determined by the region.
22 | You can choose multiple regions (for multiple audiences), or also to spread risk
23 |
24 | ### 1.3 Availability zones
25 |
26 | Availability zones are a collection of datacenters. Connected with private fiber.
27 | us-west-2a us-west-2b us-west-2c ...
28 | They are key to fault tolerance.
29 | Systems should be resilient at the machine level, but also at the datacenter level.
30 | What happens when a datacenter becomes unreachable?
31 |
32 | ### 1.4 Edge locations
33 | Edge location
34 | Caching
35 | Amazon route 53 => DNS query as quickly as possible.
36 | Edge locations serve Amazon CloudFront and Amazon Route 53.
37 |
38 | ### 1.5 Scope of services
39 |
40 | #### Global
41 |
42 | AWS IAM
43 | AWS CloudFront
44 | AWS Route53
45 |
46 | #### Regional
47 | Dynamo DB ...
48 |
49 | #### Availability zone
50 | Elastic block store
51 | Elastic compute cloud
52 |
53 | ### 1.6 Service overview
54 | AWS console.
55 | top right => choose region.
56 |
57 | Compute, Storage, Database, Networking, Analytics, Security, Management Tools, Developer tools, Internet of Things, Game development, Mobile services, ...
58 | Rich environment.
59 |
60 | ## 2. Security in AWS
61 | ### 2.1 AWS Identity and Access Management
62 | AWS IAM
63 | Authentication, Authorization, Users, Groups, Password Policy, Multi-factor authentication => something you know combined with something you have (phone (have), user/password (know))
64 | Authenticating and Authorizing against AWS API.
65 |
66 | ### Creating a user and group
67 |
68 | #### Create a User
69 | autogenerate a password AND require to create a new password at next sign in
70 | give the password to the person over an encrypted connection of some kind.
71 |
72 | #### Create a group
73 |
74 | Create a group, create permissions, and add user to the group
75 |
76 | ### 2.3 access keys
77 |
78 | jane.doe
79 | password.
80 | She wants sdk, CLI, ... Create Access Key for Jane.doe.
81 | Access Key ID, Secret Access Key.
82 |
83 | ### Permissions and Policies
84 | Permissions are granted via policies
85 | Policies are written in JSON
86 |
87 | We can generate policies thanks to a tool created by Amazon.
88 |
89 | All policies start with a default implicit deny.
90 |
91 | We need an explicit allow to have permissions.
92 | Amazon Resource Name (ARN):
93 | Format pattern to find in the docs.
94 | arn:aws:iam:...:user/jdoe
95 | arn:aws:s3:
96 | arn:aws:
97 | some fields can be left blank (:::::)
98 |
99 | implicit deny => explicit deny => explicit allow
100 |
101 | Statements specify (see the sample policy below):
102 | - Principal (resource-based policies)
103 | - Actions
104 |   - e.g. ec2:RunInstances
105 | - Resources
106 |   - e.g. EC2 instances
107 | - Conditions
108 |   - time of day
109 |   - from a specific IP address
110 |   - resource contains a particular tag
112 |
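Put together, a policy document looks something like this (an illustrative sketch; the IP range is a placeholder):

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:RunInstances"],
      "Resource": "*",
      "Condition": {
        "IpAddress": { "aws:SourceIp": "203.0.113.0/24" }
      }
    }
  ]
}
```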
113 | ### 2.5 Creating and attaching policies
114 |
115 | Generator creates the appropriate syntax.
116 | Allow
117 | EC2
118 | RunInstances, ... (actions that are authorized).
119 | ARN : \* (all)
120 |
121 | And then you can attach a policy to a group.
122 | Developers => run EC2 instances.
123 |
124 | ### 2.6 Understanding Roles
125 |
126 | Rotate credentials regularly.
127 | Credentials should never be shared, hard-coded, or put in environment variables
128 | Roles enable us to use temporary credentials that are cached by the cli or the sdk.
129 | high level of security
130 |
131 | ### 2.7 Creating roles
132 |
133 | IAM => Role section
134 | create new role
135 | Role Name => Name of application. myApplication
136 | Role type (S3 stuff)
137 | attach policy
138 |
139 | every application will be able to read from S3
140 |
141 | ### 2.8 Federated Users
142 |
143 | We can create Single Sign on
144 | LDAP, Active directory.
145 |
146 | We can federate applications so that they can all use the same S3 bucket.
147 | This way we can bypass backend APIs. Allow mobile apps to access S3 services directly.
148 |
149 | User authenticate against our app
150 | The identity broker will make a call to the AWS Security Token Service.
151 | The service will return temporary credentials.
152 |
153 | I think this is for managing devs (?)
154 |
155 | ### 2.9 managing an MFA device
156 |
157 | Multi factor authentication
158 | Hardware MFA device (to be ordered from Amazon :o )
159 | Virtual MFA device
160 | QR code or show secret key.
161 | Scan.
162 | Add authentication code
163 | wait
164 | add second authentication code
165 |
166 | have jane come over to my desk.
167 |
168 | in case you can't call Jane to your desk
169 |
170 | ### 2.10 Resource Policies
171 |
172 | Apply permissions to AWS resources.
173 | Policies that are not applied to users or groups, but to resources.
174 | Make an s3 bucket publicly readable for example.
175 |
176 | ### 2.11 Applying resource policy
177 |
178 | S3 service
179 | add bucket policy
180 | click policy generator
181 | allow
182 | getObject, listObjects ....
183 | arn:aws:s3:::nameofthebucket/\*
184 |
185 | add statement
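The generated statement comes out something like this (a public-read sketch using the bucket ARN above):

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": "*",
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::nameofthebucket/*"
    }
  ]
}
```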
186 |
187 | ### 2.12 Using roles for cross account access
188 |
189 | Payer Account (initial account)
190 | (pays the bill)
191 |
192 | Development account - Production Account
193 |
194 | you can **leverage** roles.
195 | You can allow a user in one account to access resources from another account.
196 |
197 | ### 2.13 Best Practices
198 |
199 | Leverage groups : give only ENOUGH rights. Not more.
200 |
201 | - Grant least privilege
202 | - Strong pwd policy
203 | - deny statements for added security
204 | - never share credentials
205 | - multiple accounts for isolation
206 |
207 | master account
208 |
209 | - protect it
210 | - don't use for day-to-day
211 | - delete default access keys
212 | - Enable MFA with physical key, and lock in a safe.
213 |
214 | ## Networking AWS
215 |
216 | Amazon virtual private cloud : VPC
217 | Segment networks
218 | Create a VPC in a particular region.
219 | Choose a particular address range
220 | We can create subnets.
221 |
222 | ### 3.2 Creating a VPC
223 |
224 | from the aws console
225 | VPCs are specific to a region
226 | We have a default VPC from Amazon.
227 | 10.2.0.0 => the widest range of ip addresses
228 |
229 | a vpc has a route table
230 | ACL => Allow incoming or outgoing traffic
231 |
232 | ### 3.3 adding subnets
233 |
234 | Create a subnet below a VPC, and specify an availability zone
235 | We can assign a CIDR block (I really have to learn more about CIDR ranges !!)
236 | /24 => allows 256 IP addresses. The first 4 and the last IP address are not usable (reserved by AWS)
237 |
238 | ### 3.4 Routing
239 |
240 | ELB => load balancer
241 | EC2 application server
242 | Database
243 | 
244 | Route tables that enable to hide EC2 and database from the internet.
245 | On premises network (what's that ?)
246 |
247 | ### 3.5 demo
248 | Routing subnets to the internet.
249 | vpc dashboard => route table
250 |
251 | Route tables associated
252 | create internet gateway
253 | attach internet gateway to vpc
254 |
255 | auto-assign public IP
256 | => do that so EC2 instances get a public IP automatically
257 |
258 | ### 3.6 public private and hybrid subnets
259 |
260 | 
261 |
262 | ### 3.7 Network Access Control Lists
263 |
264 | a tool to use within a VPC; NACLs are stateless
265 | like a firewall around a subnet
266 |
267 | ### 3.8 Security Groups
268 | firewall that applies to an EC2 instance.
269 | it is Stateful.
270 |
271 | ### 3.9 creating an NACL
272 |
273 | web traffic acl
274 | associated to vpc
275 |
276 | inbound =>
277 | rule #100 HTTP 80 TCP 80 0.0.0.0/0 ALLOW
278 | rule #101 HTTPS 443 TCP 443 0.0.0.0/0 ALLOW
279 |
280 | outbound =>
281 | rule #100 TCP 1080-65535 ALLOW (send to the internet)
282 |
283 | ### 3.10 Creating a security group
284 |
285 | web-server
286 | web application security
287 | inbound rules accept http and https from anywhere
288 | NACL => subnet can receive traffic
289 | Security group => Instance can receive traffic
290 | so this is not double work
291 |
292 | ### 3.11 VPC peering
293 |
294 | It is a way to pair 2 VPCs.
295 | IP ranges must not overlap
296 | Peering connection PCX
297 | routing traffic from one range to another range
298 |
299 | ### 3.15 Aws direct connect
300 | dedicated private connection
301 | 1Gbps or 10 Gbps options.
302 |
303 | ## 4 Computing in AWS
304 |
305 | ### 4.1 EC2
306 |
307 | Elastic compute cloud (EC2)
308 | Virtual machine
309 | based on Xen hypervisor
310 | Various combinations of cpu, memory, disk, IO
311 | one VM called an instance
312 |
313 | ### 4.2 Amazon Machine Image
314 |
315 | bit for bit copy of root volume
316 | you launch a machine from a machine image
317 |
318 | ### 4.3 Launching a linux instance
319 |
320 | Amazon images
321 | Amazon Linux
322 | 64 bits
323 | EBS => Elastic Block Store
324 |
325 | t2 micro
326 | auto assign public IP
327 | tag instance to keep your environment organized
328 | Name : Linux-demo
329 | Environment : dev
330 | Application : xxx
331 |
332 | security group !!!
333 |
334 | download a private keypair to access the instance
335 | public IP and private IP are associated to that instance
336 | add security group to give shell access
337 | go back to the instance, change security groups, add ssh security group
338 | port 22 in ssh should be opened
339 |
340 | ssh -i azez.pem ec2-user@public.ip.of.instance
341 |
342 | ### 4.4 Key pairs
343 |
344 | public and private keys.
345 | Way of login into the operating system.
346 | 2048-bit SSH-2 RSA key pairs
347 |
348 | ### 4.5 Instance metadata service
349 |
350 | retrieve information about an EC2 instance
351 | service within an instance to get information about itself
352 | use this instance with scripts to bootstrap stuff.
353 | The script gets information about the instance to configure software.
354 |
355 | 169.254.169.254/latest/meta-data
356 |
357 | ### 4.6 Demo instance Metadata Service
358 |
359 | curl http://169.254.169.254/latest/meta-data
360 | get the instance id :
361 | export instanceId=$(curl http://169.254.169.254/latest/meta-data/instance-id)
362 | shell scripts that could access that variable.
363 |
364 | ### 4.7 bootstraping with userdata
365 |
366 | it's the key to maintaining a self-healing environment
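A user data script runs at first boot and can bootstrap the instance. A typical sketch (my example, Amazon Linux style, not the course's):

```bash
#!/bin/bash
# Bootstrap at first boot: install and start a web server
yum update -y
yum install -y httpd
service httpd start
chkconfig httpd on
```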
367 |
368 | ### 4.8 Launching a windows instance
369 |
370 | Meh windows
371 |
372 | ### 4.9 Stopping and terminating instances
373 |
374 | Stop => can restart later
375 | terminate => throw away
376 |
377 | ### 4.10 Billing options
378 |
379 | on demand vs reserved instances
380 | availability is not guaranteed for on-demand instances (reserved instances reserve capacity)
381 |
382 | reserved instances have a commitment (1 or 3 years)
383 |
384 | ### 4.11 highly available web application
385 |
386 | 
387 |
388 | 3 tier architecture in amazon ec2
389 |
390 | ### 4.12 => 4.14 Introduction to AWS Lambda
391 |
392 | Mobile Application could access Amazon Simple Storage Service.
393 | Amazon cognito => Federated user retrieves temporary credentials
394 |
395 | upload directly to S3.
396 | Fire event to lambda function.
397 | 
398 |
399 | highly available and fault tolerant
400 | Speed time to market
401 |
402 | ## 5. Storage options
403 |
404 | ### 5.1 overview of aws storage options
405 |
406 | EC2 => Instance store
407 | block storage that is built in to the EC2 instance
408 | storage there is EPHEMERAL.
409 | cannot take snapshots
410 | 
411 |
412 | EBS => not resilient to the loss of a data center.
413 | Independent
414 | Snapshots
415 | pay per gb per month
416 |
417 | Amazon S3
418 | write once, read many
419 | storage for the internet
420 | for things that need to be retrieved from users
421 |
422 | AWS Storage gateway
423 | virtual machine that is designed to be run on-premises
424 | it exposes a device
425 |
426 | ### 5.2 Amazon simple storage service
427 |
428 | object storage.
429 | highly available and fault tolerant.
430 | no filesystem
431 | Bucket => Object
432 | single upload limit of 5 GB
433 | multipart objects can be up to 5 TB
434 |
435 | object storage vs block storage
436 |
437 | ### 5.3 Demo: creating buckets and objects
438 |
439 | host a static website on S3 :o :o :o (sketch below)
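
A sketch of the same demo with boto3 (the bucket name is a placeholder; the bucket also needs a public-read policy, see 5.4):

```python
# Sketch: host a static website on S3 (placeholder bucket name).
import boto3

s3 = boto3.client("s3")
BUCKET = "my-demo-site-bucket"  # placeholder; bucket names are globally unique

s3.put_object(Bucket=BUCKET, Key="index.html",
              Body=b"<h1>Hello from S3</h1>", ContentType="text/html")

s3.put_bucket_website(
    Bucket=BUCKET,
    WebsiteConfiguration={
        "IndexDocument": {"Suffix": "index.html"},
        "ErrorDocument": {"Key": "error.html"},
    },
)
# The site is then served at http://<bucket>.s3-website-<region>.amazonaws.com
```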
440 |
441 | ### 5.4 Bucket Security with resource policies
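
A resource policy is a JSON document attached to the bucket itself. A minimal sketch that grants public read on every object (placeholder bucket name):

```python
# Sketch: attach a resource policy allowing public GETs (placeholder bucket).
import json
import boto3

s3 = boto3.client("s3")
BUCKET = "my-demo-site-bucket"  # placeholder

policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Sid": "PublicRead",
        "Effect": "Allow",
        "Principal": "*",
        "Action": "s3:GetObject",
        "Resource": f"arn:aws:s3:::{BUCKET}/*",
    }],
}
s3.put_bucket_policy(Bucket=BUCKET, Policy=json.dumps(policy))
```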
442 |
443 | ### 5.5 Introduction to Amazon Glacier
444 |
445 | write once, read rarely
446 | Glacier is cold storage
447 | 3-5 hours to retrieve files
448 |
449 | ### 5.7 Demo: adding lifecycle rules
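
A sketch of a lifecycle rule with boto3: move objects under a `logs/` prefix to Glacier after 30 days, delete them after a year (bucket and prefix are placeholders):

```python
# Sketch: lifecycle rule, transition to Glacier then expire (placeholders).
import boto3

s3 = boto3.client("s3")
s3.put_bucket_lifecycle_configuration(
    Bucket="my-demo-site-bucket",  # placeholder
    LifecycleConfiguration={
        "Rules": [{
            "ID": "archive-logs",
            "Filter": {"Prefix": "logs/"},
            "Status": "Enabled",
            "Transitions": [{"Days": 30, "StorageClass": "GLACIER"}],
            "Expiration": {"Days": 365},
        }],
    },
)
```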
450 |
451 | ### 5.8 Instance Store Volumes
452 |
453 | they are built-in and ephemeral
454 |
455 | ### 5.9 Elastic Block Store (EBS)
456 |
457 | it is not a NAS
458 | pay for provisioned storage
459 | data is independent of the instance, and is connected over the network
460 | can detach from one instance and attach to another
461 |
462 | ### 5.11 Creating an EBS volume in the AWS dashboard
463 |
464 | volume type (HDD, SSD)
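
A sketch of doing the same with boto3: create a 10 GiB SSD volume and attach it (IDs and AZ are placeholders; the volume must live in the same AZ as the instance):

```python
# Sketch: create an EBS volume and attach it (placeholder IDs / AZ).
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

volume = ec2.create_volume(
    AvailabilityZone="us-east-1a",  # must match the target instance's AZ
    Size=10,                        # GiB
    VolumeType="gp2",               # SSD; st1/sc1 are the HDD types
)

ec2.get_waiter("volume_available").wait(VolumeIds=[volume["VolumeId"]])

ec2.attach_volume(
    VolumeId=volume["VolumeId"],
    InstanceId="i-0123456789abcdef0",  # placeholder instance
    Device="/dev/sdf",
)
```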
465 |
466 | ## 6. Databases
467 |
468 | ### 6.1 Options
469 |
470 | Amazon RDS => for SQL things, e.g. MariaDB
471 | Amazon ElastiCache => Memcached, Redis
472 | DynamoDB => NoSQL, event-driven
473 | Redshift => data warehousing, encryption
474 |
475 | ### 6.2 Amazon RDS
476 | Snapshots, backups and patches
477 | Read replicas for when we have read-heavy traffic
478 |
479 | ### 6.3 Data Durability
480 | Multi AZ deployments
481 | Primary instance with secondary standby in a different availability zone
482 |
483 | ### 6.4 Launch an Amazon RDS instance
484 |
485 | subnet group => several subnets
486 | publicly accessible => NO
487 |
488 | ### 6.5 Amazon DynamoDB
489 |
490 | NoSQL data store
491 | an easy NoSQL service
492 | primary key = partition key, optionally plus a sort key
493 |
494 | ### 6.7 Scan and query operations
495 |
496 | an interface to run queries (see the sketch below)
497 | https://learning.oreilly.com/videos/amazon-web-services/9780134702186/9780134702186-awsa_06_07
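
A sketch contrasting the two with boto3, on a made-up `Orders` table whose partition key is `customerId`: query targets one partition via the key and stays cheap, scan reads the whole table and filters afterwards.

```python
# Sketch: query vs scan on a hypothetical "Orders" table
# (table name and attribute names are made up for illustration).
import boto3
from boto3.dynamodb.conditions import Attr, Key

table = boto3.resource("dynamodb").Table("Orders")

# Query: targets one partition via the key -- fast and cheap.
orders = table.query(
    KeyConditionExpression=Key("customerId").eq("cust-42"),
)["Items"]

# Scan: reads every item, then filters -- expensive on big tables.
big_orders = table.scan(
    FilterExpression=Attr("total").gt(100),
)["Items"]
```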
498 |
499 | ### 6.8 Amazon ElastiCache
500 |
501 | Redis => in-memory database, backed to disk
502 | Memcached => in-memory key-value store, not backed to disk
503 |
504 | ### 6.9 Amazon Redshift
505 |
506 | SQL-compliant.
507 | It is a clustered service:
508 | parallel queries across the nodes.
509 | ideal for OLAP and BI apps
510 | petabyte-scale data warehouse
511 |
512 | ### 6.10 A database for e-commerce
513 |
514 | Elastic Load Balancer => 3 EC2 instances
515 |
516 | 
517 |
518 | Read replica(s)
519 | Multi-AZ => Amazon handles the primary/secondary failover
520 |
521 | ElastiCache => Category pages, related products
522 |
523 | copy data to Redshift in order to run BI workloads
524 | use Redshift to understand conversion rates
525 |
526 | ## 7 Analytics in AWS
527 |
528 | ### 7.1 Real-time stream processing => Amazon Kinesis
529 |
530 | "SOAP" and old school technologies from back in the 90s.
531 | Use case : warn users of the lifecycle of a pizza before it is delivered to them.
532 |
533 | Kinesis
534 |
535 | ### 7.2 Real-time stream processing
536 |
537 | It divides large amounts of data into usable shards.
538 | (terabytes per hour)
539 |
540 | 1 shard => 1,000 PUTs per second
541 |
542 | a Kinesis stream should be handled by apps that have ONE SINGLE PURPOSE
543 | it can be aggregation, sliding-window analysis, ... (see the sketch below)
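
A producer sketch with boto3 (stream name and payload are made up); the partition key determines which shard a record lands on:

```python
# Sketch: put a record on a Kinesis stream (placeholder stream / payload).
import json
import boto3

kinesis = boto3.client("kinesis", region_name="us-east-1")

event = {"orderId": "pizza-123", "status": "IN_OVEN"}
kinesis.put_record(
    StreamName="pizza-lifecycle",    # placeholder stream name
    Data=json.dumps(event).encode(),
    PartitionKey=event["orderId"],   # routes the record to a shard
)
```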
544 |
545 | ### 7.3 Big data with Amazon Elastic MapReduce
546 | yet another "amazing" service.
547 |
548 | managed Hadoop framework (or Spark, Presto, HBase)
549 | provision a single instance or thousands of instances.
550 |
551 | for data-intensive applications:
552 | data mining, log analysis, scientific simulation, genomics.
553 | can read data from anywhere.
554 |
555 | ### 7.4 AWS Data Pipeline
556 |
557 | transforms data from one data type to another.
558 | helps move data between data sources.
559 | it uses EC2 or EMR to transform data.
560 | can execute SQL queries, or custom applications.
561 |
562 | ### 7.5 Video Subscription Service Business Intelligence
563 |
564 | 
565 |
566 | ## 8 Developer and management tools
567 |
568 | ### 8.1 CloudWatch
569 |
570 | collects metrics on Amazon services.
571 | it stores metrics for up to 2 weeks
572 | each service (EC2, ELB, EBS) gets its own collection of metrics.
573 |
574 | \$/metric
575 |
576 | Application-level metrics, or instance-level metrics.
577 |
578 | CloudWatch alarms (e.g. on CPU).
579 | triggered on breach of a threshold.
580 | example: alarm when CPUUtilization is over 80% for 2 periods of 1 minute (see the sketch below)
581 |
582 | an alarm doesn't necessarily signal an emergency.
583 |
584 | can publish notifications.
585 | Up to 5000 alarms per account.
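
A sketch of exactly that alarm with boto3 (instance ID and SNS topic ARN are placeholders):

```python
# Sketch: CPUUtilization > 80% for 2 periods of 1 minute (placeholder IDs).
import boto3

cloudwatch = boto3.client("cloudwatch", region_name="us-east-1")

cloudwatch.put_metric_alarm(
    AlarmName="high-cpu-linux-demo",
    Namespace="AWS/EC2",
    MetricName="CPUUtilization",
    Dimensions=[{"Name": "InstanceId", "Value": "i-0123456789abcdef0"}],
    Statistic="Average",
    Period=60,            # 1-minute periods (needs detailed monitoring on EC2)
    EvaluationPeriods=2,  # ...over 2 consecutive periods
    Threshold=80.0,
    ComparisonOperator="GreaterThanThreshold",
    # Placeholder topic; the alarm publishes a notification on breach.
    AlarmActions=["arn:aws:sns:us-east-1:123456789012:ops-alerts"],
)
```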
586 |
587 | ### 8.2 CloudWatch Logs
588 | Search using specific syntax (see docs)
589 | Can search JSON fields
590 | Subscription filters
591 |
592 | ### 8.3 CloudFormation: Infrastructure as code
593 | Infrastructure as code
594 | CloudFormation templates
595 | architectures are complex
596 | manual process => bad documentation, difficult to reproduce
597 | challenges with scripts => dependencies
598 |
599 | Solution: automate with CloudFormation
600 |
601 | templates are meant to be written once and deployed many times
602 | Parameters make a template reusable (see the sketch below)
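
A sketch of deploying a tiny parameterized template with boto3; the template is inlined as JSON and every value in it is a placeholder:

```python
# Sketch: create a stack from an inline template (all values are placeholders).
import json
import boto3

template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Parameters": {
        "InstanceTypeParam": {"Type": "String", "Default": "t2.micro"},
    },
    "Resources": {
        "DemoInstance": {
            "Type": "AWS::EC2::Instance",
            "Properties": {
                "ImageId": "ami-0123456789abcdef0",  # placeholder AMI
                "InstanceType": {"Ref": "InstanceTypeParam"},
            },
        },
    },
}

cfn = boto3.client("cloudformation", region_name="us-east-1")
cfn.create_stack(
    StackName="linux-demo-stack",
    TemplateBody=json.dumps(template),
    Parameters=[{"ParameterKey": "InstanceTypeParam",
                 "ParameterValue": "t2.micro"}],
)
```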
603 |
604 | ### 8.4 CloudFormation demo
605 |
606 | there is a designer within CloudFormation to create templates.
607 |
608 | ### 8.5 Application deployment and management
609 |
610 | AWS Elastic Beanstalk
611 | application management platform
612 | easy entry
613 | ideal for developers.
614 | super convenient: Beanstalk handles resources automatically
615 |
616 | AWS OpsWorks
617 | configuration management platform
618 | based on Chef recipes (configuration as code)
619 | OpsWorks is ideal for DevOps engineers.
621 |
622 | ### 8.6 Launching an application on AWS Elastic Beanstalk
623 |
624 | example
625 |
626 | ## 9. Mobile applications and services
627 |
628 | ### 9.1 Amazon Simple Queue Service (SQS)
629 |
630 | highly available and fault tolerant.
631 | seems like RabbitMQ
632 | buffering events enables loose coupling and asynchronous processing (see the sketch below)
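
A sketch of both sides of a queue with boto3 (placeholder queue URL); the consumer must delete each message after processing, otherwise it becomes visible again:

```python
# Sketch: loose coupling through a queue (placeholder queue URL).
import boto3

sqs = boto3.client("sqs", region_name="us-east-1")
QUEUE_URL = "https://sqs.us-east-1.amazonaws.com/123456789012/demo-queue"

# Producer side: buffer an event instead of calling the consumer directly.
sqs.send_message(QueueUrl=QUEUE_URL, MessageBody="order pizza-123 placed")

# Consumer side: poll, process, then delete.
resp = sqs.receive_message(QueueUrl=QUEUE_URL,
                           MaxNumberOfMessages=1, WaitTimeSeconds=10)
for msg in resp.get("Messages", []):
    print("processing:", msg["Body"])
    sqs.delete_message(QueueUrl=QUEUE_URL, ReceiptHandle=msg["ReceiptHandle"])
```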
633 |
634 | ### 9.2 Amazon Simple Notification Service (SNS)
635 |
636 | publish/subscribe
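
A pub/sub sketch with boto3 (topic name and email endpoint are made up); every subscriber receives each published message:

```python
# Sketch: publish/subscribe with SNS (placeholder topic / endpoint).
import boto3

sns = boto3.client("sns", region_name="us-east-1")

topic_arn = sns.create_topic(Name="demo-topic")["TopicArn"]
sns.subscribe(TopicArn=topic_arn, Protocol="email",
              Endpoint="someone@example.com")  # placeholder subscriber

sns.publish(TopicArn=topic_arn, Subject="hello",
            Message="Every subscriber gets this.")
```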
637 |
638 | ### 9.3 Amazon Simple Email Service (SES)
639 |
640 | sending bulk email at scale
641 | transactional emails, marketing emails, social networking
642 | get feedback about emails that bounced or were marked as spam ...
643 |
644 | ### 9.4 Amazon Cognito
645 | Identity
646 | integrates with major authentication providers.
647 | enables SSO or login with social media
648 | Cognito can store app data, state, preferences.
649 | can store locally with SQLite.
650 |
651 | ### 9.5 Amazon Mobile Analytics
652 | makes it easy to measure usage of an application.
653 | what are the current trends?
654 | integrate the AWS Mobile SDK
655 | also offers a REST API.
656 |
657 |
658 | ## 10. High availability and fault tolerance
659 | ### 10.1 Elastic Load Balancing
660 | What happens when this component fails?
661 | Spread instances across availability zones
662 |
663 | ### 10.2 Listeners and SSL Certificates
664 | use AWS Certificate Manager to get HTTPS !!
665 | free
666 |
667 | upload the certificate to IAM, then choose it for the HTTPS listener
668 |
669 |
670 | ### 10.3 Load Balancing
671 | Maintaining balance between availability zones
672 |
673 | ### 10.4 Creating an ELB
674 | EC2 => Load Balancers
675 | define VPC and listeners
676 | add security groups. The load balancer should have its own security group.
677 | configure the health check
678 | add EC2 instances (they must be in the VPC we have chosen)
680 |
681 | ### 10.5 Auto scaling
682 | 
683 | 
684 | self-healing services
685 |
686 | ### 10.6 Demand-based scaling
687 | scale to meet the demand.
688 | Auto Scaling groups along with Amazon CloudWatch alarms.
689 | scale down when CPU is low.
690 | 
691 |
692 | ### 10.7 Creating an auto scaling group
693 | 1. create a launch configuration
694 |    yum install -y httpd
695 |    to be added as user data in the launch configuration
696 | 2. create the auto scaling group
697 |    select the right VPC
698 |    configure scaling policies (see the sketch after this list)
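
Both steps as a boto3 sketch (AMI, subnet IDs, and names are placeholders):

```python
# Sketch: launch configuration + Auto Scaling group + a simple scaling policy
# (AMI ID, subnet IDs, and all names are placeholders).
import boto3

autoscaling = boto3.client("autoscaling", region_name="us-east-1")

# Step 1: the launch configuration, with the httpd install as user data.
autoscaling.create_launch_configuration(
    LaunchConfigurationName="web-lc",
    ImageId="ami-0123456789abcdef0",   # placeholder AMI
    InstanceType="t2.micro",
    UserData="#!/bin/bash\nyum install -y httpd\nservice httpd start\n",
)

# Step 2: the group itself, spread across subnets in the chosen VPC.
autoscaling.create_auto_scaling_group(
    AutoScalingGroupName="web-asg",
    LaunchConfigurationName="web-lc",
    MinSize=2, MaxSize=6, DesiredCapacity=2,
    VPCZoneIdentifier="subnet-aaa,subnet-bbb",  # placeholder subnet IDs
)

# A simple policy: add one instance when triggered (e.g. by a CloudWatch alarm).
autoscaling.put_scaling_policy(
    AutoScalingGroupName="web-asg",
    PolicyName="scale-out-by-one",
    AdjustmentType="ChangeInCapacity",
    ScalingAdjustment=1,
)
```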
699 |
700 | ## 11 Course Wrap up
701 | ### 11.1 The ideal
702 | - Highly available
703 | it is up a great majority of the time
704 | AMI, EC2, S3, Auto Scaling
705 |
706 | - Fault-tolerant
707 | The application continues to operate through a fault
708 | AMI (?), EC2, Elastic Load Balancing
709 |
710 | - Secure
711 | AWS CLI/Console
712 |
713 | - Durable
714 | Data survives loss of infrastructure
715 | EBS, Snapshots, S3, Glacier
716 |
717 | ### 11.2 Best Practices
718 | - Design for Failure
719 | - Everything will fail eventually
720 | - any component can fail at any time
721 | - "What happens when "____" ?
722 |
723 | - Scale Horizontally
724 | - Stateless Applications/Components (state should not be stored locally)
725 | - ElastiCache, DynamoDB
726 | - Distributed processing:
727 | - parallelize and batch
728 | - Kinesis, Elastic Map Reduce
729 |
730 | - Disposable resources over fixed servers
731 | - Automate, automate, automate !
732 | - CloudFormation
733 | - Elastic Beanstalk
734 | - AutoScaling
735 | - CloudWatch
736 | - Third Party Tools
737 | - Ansible, Chef, Puppet as ways to automate the environment
738 |
739 | - Security in Layers
740 | - VPC, Routes, NACLs, Security groups
741 | - IAM: Users, groups (least rights possible), roles, keys
742 | - Leverage multiple accounts
743 | - Protect Master credentials
744 |
745 | - Loose Coupling
746 | - Microservices
747 | - Failures should not cascade
748 | - ELB, Amazon SQS, Kinesis ...
749 |
750 | - Optimize for cost
751 |
752 | ### 11.3 The exam
753 | 80 minutes
754 | 55 questions
755 | Architect a solution that is technically appropriate and cost-effective.
756 | 
757 |
--------------------------------------------------------------------------------
/content/portfolio/karbon/res/impacts.svg:
--------------------------------------------------------------------------------
1 |
75 |
--------------------------------------------------------------------------------