);
50 | })}
51 |
52 | );
53 | };
54 |
55 | interface Props {
56 | items: TOCSimple[];
57 | }
58 |
59 | export const TOC: FC = ({ items }) => {
60 | return (
61 |
62 | {renderItems(items)}
63 |
64 | );
65 | }
--------------------------------------------------------------------------------
/components/party.tsx:
--------------------------------------------------------------------------------
1 | import 'types-wm';
2 | import { FC, useEffect, useState } from 'react';
3 | import styled from '@emotion/styled';
4 |
5 | const Clicker = styled.span`
6 | cursor: pointer;
7 | margin: 0;
8 | font-size: 1rem;
9 | overflow: hidden;
10 | `;
11 |
12 | declare var party: any;
13 |
14 | const appendPartyScript = () => {
15 | if (typeof party !== 'undefined') {
16 | return;
17 | }
18 | const script = document.createElement('script');
19 | script.type = 'text/javascript';
20 | script.async = true;
21 | script.src = '/party.min.js';
22 | (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(script);
23 | }
24 |
// Shows a clickable "thank you" element only for visitors whose browser has
// Web Monetization active; clicking fires a confetti effect via party.screen().
// NOTE(review): the returned JSX markup appears to have been lost in
// extraction — only the `{msg}` expression survives. Presumably the Clicker
// element with the onClick handler was rendered here; confirm against the repo.
export const MonetizationParty: FC = () => {
    // True once the monetization provider has started streaming payments.
    const [isUsingMonetization, setIsUsingMonetization] = useState(false);
    const [msg, setMsg] = useState('👋 Click me!');
    const onClick = () => {
        setMsg('🥳💰 Thanks for supporting me!');
        // `party` is loaded lazily by appendPartyScript; guard until it exists.
        if (party && party.screen) {
            party.screen();
        }
    };
    const enableParty = () => {
        appendPartyScript();
        setIsUsingMonetization(true);
    }

    useEffect(() => {
        // document.monetization is the non-standard Web Monetization API
        // (typed via the 'types-wm' import); absent in most browsers.
        if (!document.monetization) {
            return
        }
        if (document.monetization.state === 'started') {
            enableParty()
        } else {
            document.monetization.addEventListener('monetizationstart', enableParty);

            // Cleanup exists only on this branch; the 'started' branch adds no
            // listener, so there is nothing to remove there.
            return () => {
                document.monetization.removeEventListener('monetizationstart', enableParty);
            }
        }
    }, []);

    return isUsingMonetization ? {msg} : null;
};
56 |
--------------------------------------------------------------------------------
/posts/scalable-angular-applications.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Scalable Angular Applications
3 | author: Damian Sosnowski
4 | abstract: Currently, one of the most popular frameworks among the Web community is Angular (or Angular 2+ as some prefer). The main reason why we in GFT have decided to use it in our projects, is its comprehensive character, and a strong push it gives towards consistent project structure and architecture.
5 | created: "2019-08-16"
6 | updated: "2019-08-16"
7 | tags:
8 | - angular
9 | - redux
10 | - architecture
11 | - webdev
12 | ---
13 |
14 | Unfortunately, even a framework as opinionated as Angular can only enforce the basics of application architecture. That's sufficient for small or medium applications, however, in GFT we usually have to deal with big applications, composed of dozens of modules and filled with complex data collections and complicated user flows. What is more, our projects are often developed by a team scattered across different continents and time zones.
15 |
16 | In order to maintain high quality of delivery and prevent technical debt from being created, we had to agree to a series of guidelines and good practices of how to plan, structure and write applications in Angular.
17 |
18 | These architecture principles can be divided into three main categories:
19 |
20 | 1. Project structure – how to organize your project files, define and work with Angular modules and their dependencies
21 | 2. Data flow architecture – a guide on how to define the way the data flows through your application layers
22 | 3. State management – how to manage the state of GUI and propagate it between different application parts
23 |
24 | This article is a combination of community–inspired guidelines and the experience that we've gathered working in our projects.
25 |
26 | [See the full article](https://bulldogjob.com/articles/539-scalable-angular-application-architecture)
27 |
28 | 
29 |
--------------------------------------------------------------------------------
/pages/index.tsx:
--------------------------------------------------------------------------------
1 | import Head from 'next/head'
2 | import { Fragment } from 'react'
3 | import { GetStaticProps } from 'next'
4 | import { DateTime } from 'luxon';
5 | import styled from '@emotion/styled';
6 | import { GreenSectionHeader } from '../components/headers';
7 | import { ArticleTile } from '../components/article-tile';
8 | import { getPostsMetdata, PostMetadata } from '../lib/posts';
9 | import { ContentWrapper } from '../components/content-wrapper';
10 |
11 | interface HomeProps {
12 | articles: PostMetadata[]
13 | }
14 |
15 | const Green = styled.section`
16 | background-color: #0b7261;
17 | margin: 1rem 0 0 0;
18 | padding: 2rem 0;
19 | `;
20 |
// Home page: featured (newest) article plus a green "More articles" section
// listing the rest. Receives the sorted article metadata from getStaticProps.
// NOTE(review): the JSX markup of this component (Head, ContentWrapper,
// ArticleTile, Green/GreenSectionHeader elements) appears to have been lost in
// extraction — only text children and expression containers remain.
// Reconstruct from the repository before editing.
export default ({ articles }: HomeProps) => {
    return (

        Blog - Sosnowski.dev






        More articles
        {
            articles.slice(1).map(post => {
                return ();
            })
        }


    );
}
49 |
50 | export const getStaticProps: GetStaticProps = async () => {
51 | const data = await getPostsMetdata();
52 | data.sort((a, b) => {
53 | return (DateTime.fromISO(a.created).toMillis() > DateTime.fromISO(b.created).toMillis()) ? -1 : 1;
54 | });
55 | return {
56 | props: {
57 | articles: data
58 | }
59 | };
60 | }
--------------------------------------------------------------------------------
/components/headers.tsx:
--------------------------------------------------------------------------------
1 | import React, { FunctionComponent } from 'react';
2 | import styled from '@emotion/styled';
3 | import { StyledFunctionComponent } from './helpers';
4 |
5 | interface HeaderProps {
6 | size?: string;
7 | color?: string;
8 | bar?: boolean;
9 | barColor?: string;
10 | margin?: string;
11 | }
12 |
13 | const PrimaryHeaderContainer = styled('h1')`
14 | font-size: ${props => props.size || '2.6rem'};
15 | color: ${props => props.color || '#000'};
16 | font-weight: normal;
17 | font-family: 'Inconsolata', sans-serif;
18 | letter-spacing: 1px;
19 | display: inline-block;
20 | margin: ${props => props.margin || '3rem 0'};
21 | padding: 0;
22 | position: relative;
23 |
24 | &::before {
25 | content: "#";
26 | color: ${props => props.barColor || '#ffc832'};
27 | margin-right: 0.5rem;
28 | font-size: 110%;
29 | }
30 |
31 | & > span {
32 | z-index: 20;
33 | position: relative;
34 | }
35 |
36 | & a {
37 | color: ${props => props.color || '#000'};
38 | text-decoration: none;
39 | :hover {
40 | text-decoration: underline;
41 | }
42 | }
43 | `;
44 |
45 |
46 |
// Header components wrapping the styled containers above.
// NOTE(review): the JSX wrapper elements of every component below appear to
// have been lost in extraction — only the `{children}` expressions survive.
// Reconstruct the actual elements (and their className/props forwarding)
// from the repository before editing.
export const PrimaryHeader: StyledFunctionComponent = (props) => {
    return (
        {props.children}
    );
};

export const TOCHeader: StyledFunctionComponent = ({ children, className }) => {
    return (
        {children}
    )
}

export const GreenSectionHeader: StyledFunctionComponent = ({ children, className }) => {
    return (
        {children}
    )
};

export const FooterSectionHeader: StyledFunctionComponent = ({ children, className }) => {
    return (
        {children}
    )
};

export const DefaultHeader: FunctionComponent = (props) => {
    return (
        {props.children}
    );
}
77 |
--------------------------------------------------------------------------------
/components/article-tile.tsx:
--------------------------------------------------------------------------------
1 | import styled from '@emotion/styled';
2 | import { StyledFunctionComponent } from "./helpers";
3 | import Link from 'next/link';
4 | import { DateTime } from 'luxon';
5 | import { DefaultHeader } from './headers';
6 | import { PostMetadata } from '../lib/posts';
7 |
8 | interface StyleProps {
9 | main?: boolean;
10 | }
11 |
12 | const Container = styled('article')`
13 | margin: 3rem 0;
14 | `;
15 |
16 | const Abstract = styled('p')`
17 | font-size: ${props => props.main ? '1.2rem' : '1rem'};
18 | line-height: ${props => props.main ? '1.4rem' : '1.1rem'};
19 | color: ${props => props.main ? '#212121' : '#fff'};
20 | & a {
21 | text-decoration: none;
22 | color: ${props => props.main ? '#212121' : '#fff'};
23 | :hover {
24 | text-decoration: underline;
25 | }
26 | }
27 | `;
28 |
29 | const Tag = styled.span`
30 | display: inline-block;
31 | padding: 0.3rem;
32 | text-transform: lowercase;
33 | margin-right: 0.4rem;
34 |
35 | & > strong {
36 | margin-right: 3px;
37 | }
38 | `;
39 |
40 | const Tags = styled('section')`
41 | display: block;
42 | text-align: left;
43 | color: ${props => props.main ? '#0b7261' : '#2e2459'};
44 | font-weight: bold;
45 | font-size: 0.9rem;
46 | `;
47 |
48 | const DateInfo = styled('span')`
49 | display: block;
50 | font-size: 0.9rem;
51 | padding: 0.3rem;
52 | color: ${props => props.main ? '#000' : '#fff'};
53 | `;
54 |
55 | interface Props {
56 | main?: boolean;
57 | post: PostMetadata;
58 | }
59 |
// Card for one article: linked title, abstract, tag list and creation date.
// `main` switches the light/dark color scheme of the styled pieces above.
// NOTE(review): the JSX markup (Container, DefaultHeader, Link, Abstract,
// Tags/Tag, DateInfo elements) appears to have been lost in extraction — only
// expressions and text children remain. Reconstruct from the repository.
export const ArticleTile: StyledFunctionComponent = ({ post, className, main = false, children }) => {
    const created = DateTime.fromISO(post.created);
    return (

        {post.title}

        {post.abstract}

        {
            post.tags.map(tag => {
                return (#{tag})
            })
        }

        {created.toFormat('dd LLL yyyy')}

    );
};
79 |
--------------------------------------------------------------------------------
/bin/upload-statics.ts:
--------------------------------------------------------------------------------
1 | import { join, relative } from 'path';
2 | import { readdirSync, readFileSync, Dirent } from 'fs';
3 | import { lookup } from 'mime-types';
4 | import { S3Client } from '@aws-sdk/client-s3-node/S3Client';
5 | import { PutObjectCommand, PutObjectInput } from '@aws-sdk/client-s3-node/commands/PutObjectCommand';
6 | import { ListObjectsCommand, ListObjectsInput } from '@aws-sdk/client-s3-node/commands/ListObjectsCommand';
7 |
8 | const bucketName = 'sosnowski-blog-files';
9 | const staticFolder = join(__dirname, '..', 'blog', 'out');
10 |
11 | const CACHE_DEFAULT = 60 * 60 * 24;
12 | const CACHE_ASSETS = 60 * 60 * 24 * 7;
13 |
14 | const getAllFiles = (path: string): string[] => {
15 | return readdirSync(path, {
16 | withFileTypes: true,
17 | encoding: 'utf8'
18 | }).reduce((prev: string[], current: Dirent): string[] => {
19 | if (current.isFile()) {
20 | return [...prev, join(path, current.name)];
21 | } else if (current.isDirectory()) {
22 | return [
23 | ...prev,
24 | ...getAllFiles(join(path, current.name))
25 | ]
26 | }
27 | }, []);
28 | }
29 |
30 | const getS3Assets = async (): Promise => {
31 | const listInput: ListObjectsInput = {
32 | Bucket: bucketName,
33 | Prefix: `assets/`
34 | };
35 | const existingAssets = (await s3.send(new ListObjectsCommand(listInput))).Contents.map(object => {
36 | return object.Key;
37 | });
38 | return existingAssets;
39 | }
40 |
// Shared S3 client for the whole upload run.
// NOTE(review): AWS_S3_ACCESS_KEY / AWS_S3_ACCESS_KEY_ID may be undefined at
// runtime; the SDK would then fail on the first request — consider validating
// them up front and failing fast with a clear message.
const s3 = new S3Client({
    region: 'us-east-1',
    credentials: {
        secretAccessKey: process.env.AWS_S3_ACCESS_KEY,
        accessKeyId: process.env.AWS_S3_ACCESS_KEY_ID
    }
});
48 |
49 | (async () => {
50 | console.log('Reading static files in ' + staticFolder);
51 | const allStaticFiles = getAllFiles(staticFolder);
52 | console.log('Loading existing assets from S3...');
53 | const existingAssets = await getS3Assets();
54 |
55 | for(let i = 0; i < allStaticFiles.length; i++) {
56 | const file = allStaticFiles[i];
57 | const imageKey = relative(staticFolder, file);
58 | const isAsset = imageKey.substr(0, 6) === 'assets';
59 | if (!existingAssets.includes(imageKey)) {
60 | console.log(`Uploading file ${file} to ${imageKey}...`);
61 | await s3.send(new PutObjectCommand({
62 | Bucket: bucketName,
63 | Key: imageKey,
64 | Body: readFileSync(file),
65 | ContentType: lookup(file) || 'plain/text',
66 | CacheControl: `max-age=${isAsset ? CACHE_ASSETS : CACHE_DEFAULT}`
67 | }));
68 | console.log('Done');
69 | } else {
70 | console.log(`${imageKey} already uploaded`);
71 | }
72 | }
73 | console.log('All done');
74 | })();
75 |
--------------------------------------------------------------------------------
/pages/post/[id].tsx:
--------------------------------------------------------------------------------
1 | import { GetStaticProps, GetStaticPaths } from "next";
2 | import { PrimaryHeader } from '../../components/headers';
3 | import { getPostsMetdata, PostMetadata, getAllPostData, TOCSimple } from "../../lib/posts";
4 | import { Content, Abstract, Meta, DateInfo, Tag, Article } from "../../components/content";
5 | import { DateTime } from "luxon";
6 | import Head from "next/head";
7 | import { TOC } from "../../components/toc";
8 | import { Fragment } from "react";
9 |
10 | interface Props {
11 | post: {
12 | content: string
13 | toc: TOCSimple[]
14 | } & PostMetadata;
15 | }
16 |
// Post page: title header, tags, date, abstract, rendered content and TOC.
// NOTE(review): the JSX markup (Head, PrimaryHeader, Article, Meta, Tag,
// DateInfo, Abstract, Content, TOC elements) appears to have been lost in
// extraction — only expressions and text children remain. Reconstruct from
// the repository before editing.
export default ({ post }: Props) => {
    const created = DateTime.fromISO(post.created);
    return (

        {post.title} - Sosnowski.dev



        {post.title}

        {
            post.tags.map(tag => {
                return (#{tag})
            })
        }
        {created.toFormat('dd LLL yyyy')}

        {post.abstract}


    );
}
50 |
51 | export const getStaticProps: GetStaticProps = async (context) => {
52 | const id = Array.isArray(context.params.id) ? context.params.id[0] : context.params.id;
53 | const postData = await getAllPostData(id);
54 | return {
55 | props: {
56 | post: {
57 | ...postData.meta,
58 | content: postData.content,
59 | toc: postData.toc
60 | }
61 | }
62 | };
63 | }
64 |
65 | export const getStaticPaths: GetStaticPaths = async () => {
66 | return {
67 | paths: (await getPostsMetdata()).map(meta => {
68 | return {
69 | params: {
70 | id: meta.id,
71 | }
72 | };
73 | }),
74 | fallback: false
75 | };
76 | }
--------------------------------------------------------------------------------
/bin/create-rss.ts:
--------------------------------------------------------------------------------
1 | import { create } from 'xmlbuilder2';
2 | import { parseISO, formatISO } from 'date-fns/fp';
3 | import { readdir, Dirent, readFile, writeFile } from 'fs';
4 | import { basename, extname } from 'path';
5 | import { promisify } from 'util';
6 | import { join } from 'path';
7 | import matter from 'gray-matter';
8 |
9 | const readDirAsync = promisify(readdir);
10 | const readFileAsync = promisify(readFile);
11 | const writeFileAsync = promisify(writeFile);
12 |
13 | const postsPath = join(__dirname, '..', 'posts');
14 | const rssPath = join(__dirname, '..', 'public', 'rss.xml');
15 |
// Subset of a post's front-matter used to build the RSS feed.
export interface PostMetadata {
    title: string;
    // Publication date parsed from the front-matter `created` field.
    created: Date;
    // Short summary; becomes the RSS item description.
    abstract: string;
    // Post slug: markdown file name without its extension.
    id: string;
}
22 |
23 | export const getPostMetadata = async (postFile: string): Promise => {
24 | const fileContent = await readFileAsync(join(postsPath, postFile), {
25 | encoding: 'utf8'
26 | });
27 |
28 | const result = matter(fileContent);
29 | const postId = basename(postFile, extname(postFile));
30 |
31 | return {
32 | title: result.data.title,
33 | abstract: result.data.abstract,
34 | created: parseISO(result.data.created),
35 | id: postId
36 | };
37 | };
38 |
39 | export const getPostsMetadata = async (): Promise => {
40 | const dirContent: Dirent[] = await readDirAsync(postsPath, {
41 | withFileTypes: true,
42 | encoding: 'utf8'
43 | });
44 |
45 | return Promise.all(
46 | dirContent
47 | .filter(entry => entry.isFile())
48 | .map((entry) => {
49 | console.log(`Reading meta for ${entry.name}`);
50 | return getPostMetadata(entry.name);
51 | })
52 | );
53 | };
54 |
55 | (async () => {
56 | console.log('Reading posts metadata...');
57 | const postsData = await getPostsMetadata();
58 | postsData.sort((a, b) => {
59 | return a > b ? 1 : -1;
60 | });
61 | console.log('Generating xml content...');
62 | const data = {
63 | rss: {
64 | '@version': '2.0',
65 | channel: {
66 | title: 'Sosnowski.dev - Personal Blog',
67 | link: 'https://sosnowski.dev',
68 | description: `Hi! I'm Damian. I'm an Engineering Manager in OLX, certified AWS Architect Associate and a technology geek. Welcome to my blog!`,
69 | item: postsData.map(meta => {
70 | return {
71 | title: meta.title,
72 | link: `https://sosnowski.dev/post/${meta.id}`,
73 | description: meta.abstract,
74 | pubDate: formatISO(meta.created)
75 | };
76 | })
77 | }
78 | }
79 | };
80 |
81 | const doc = create(data);
82 | const xml = doc.end({ prettyPrint: true });
83 | console.log('Saving file...');
84 | await writeFileAsync(rssPath, xml, { encoding: 'utf8' });
85 | })();
86 |
--------------------------------------------------------------------------------
/components/footer.tsx:
--------------------------------------------------------------------------------
1 | import styled from '@emotion/styled';
2 | import { StyledFunctionComponent } from './helpers';
3 | import { ContentWrapper } from './content-wrapper';
4 | import { FooterSectionHeader } from './headers';
5 |
6 | const FooterContainer = styled.footer`
7 | background-color: #2a3439;
8 | margin: 0;
9 | padding: 2rem 0;
10 | `;
11 |
12 | const Content = styled(ContentWrapper)`
13 | display: grid;
14 | grid-template-columns: auto 1fr;
15 | grid-template-rows: auto auto auto;
16 | grid-template-areas:
17 | "header header"
18 | "avatar text"
19 | "social social";
20 | grid-gap: 1rem;
21 |
22 | @media (max-width: 750px) {
23 | grid-template-columns: auto;
24 | grid-template-rows: auto auto auto auto;
25 | grid-template-areas:
26 | "header"
27 | "avatar"
28 | "text"
29 | "social";
30 | }
31 | justify-items: start;
32 | align-items: start;
33 | `;
34 |
35 | const Header = styled(FooterSectionHeader)`
36 | grid-area: header;
37 | margin-bottom: 2rem;
38 | `;
39 |
40 | const Avatar = styled.img`
41 | grid-area: avatar;
42 | justify-self: center;
43 | width: 150px;
44 | height: 150px;
45 | border-radius: 75px;
46 | margin: 1rem;
47 | `;
48 |
49 | const Text = styled.p`
50 | grid-area: text;
51 | color: #fff;
52 | align-self: center;
53 | margin: 0;
54 | font-size: 1.1rem;
55 | line-height: 1.7rem;
56 | `;
57 |
58 | const Links = styled.p`
59 | grid-area: social;
60 | justify-self: stretch;
61 | color: #fff;
62 | text-align: right;
63 | margin: 0;
64 |
65 | & img {
66 | width: 2rem;
67 | height: 2rem;
68 | }
69 |
70 | & > a {
71 | margin-left: 2rem;
72 | display: inline-block;
73 | }
74 | `;
75 |
// Site footer: section header, avatar, bio text and social links laid out by
// the grid in `Content` above.
// NOTE(review): the JSX markup (FooterContainer, Content, Header, Avatar,
// Text, Links and the social anchor/img elements) appears to have been lost
// in extraction — only the text children remain. Reconstruct from the repo.
// NOTE(review): "junction of different words" is likely a typo for
// "different worlds" — user-facing text, so fix it in the real source.
export const Footer: StyledFunctionComponent = ({ className, children }) => {
    return (

        Hi! I'm Damian

        Welcome to my blog! I'm an Engineering Manager at OLX, certified AWS Architect Associate and a technology geek.
        While mostly working with JavaScript I try to avoid being locked down in a single-technology box as I believe that most interesting things happen at the junction of different words.

    )
}
96 |
--------------------------------------------------------------------------------
/bin/helpers/parsing.ts:
--------------------------------------------------------------------------------
1 | import { readFileSync, writeFileSync } from 'fs';
2 | import { basename, join } from 'path';
3 | import unified from 'unified';
4 | import markdown from 'remark-parse';
5 | import { Node } from 'unist';
6 | import frontmatter from 'remark-frontmatter';
7 | import toMdString from 'remark-stringify';
8 |
9 | import { copyImagesToAssets } from './images';
10 |
11 | // export enum ChangeType {
12 | // Added = 'A',
13 | // Modified = 'M',
14 | // Deleted = 'D'
15 | // }
16 |
17 | // export interface GitFileInfo {
18 | // file: string;
19 | // change: ChangeType
20 | // }
21 |
22 | // export const getModifiedPosts = (filePath: string): GitFileInfo[] => {
23 | // console.log(`Reading diff... ${filePath}`);
24 | // const fileData = readFileSync(filePath, {
25 | // encoding: 'utf8'
26 | // });
27 |
28 | // return fileData.split('\n')
29 | // .filter(line => line.length > 0)
30 | // .map(line => line.split('\t'))
31 | // .filter(parts => /__posts.*\.md/.test(parts[1].trim()))
32 | // .map(parts => {
33 | // return {
34 | // file: decodeURI(parts[1].trim()),
35 | // change: ((change: string) => {
36 | // if (change === 'A') {
37 | // return ChangeType.Added;
38 | // }
39 | // if (change === 'M') {
40 | // return ChangeType.Modified;
41 | // }
42 | // if (change === 'D') {
43 | // return ChangeType.Deleted;
44 | // }
45 | // throw new Error(`Unknown change type: "${change}"`);
46 | // })(parts[0].trim())
47 | // };
48 | // });
49 | // }
50 |
51 | const findImages = (tokens: Node[]): Node[] => {
52 |
53 | const imageTokens = tokens.reduce((prev: Node[], current: Node): Node[] => {
54 | if (current.type === 'image') {
55 | return [...prev, current];
56 | } else if (current.children && (current.children as Node[]).length > 0) {
57 | return [...prev, ...findImages(current.children as Node[])];
58 | }
59 | return prev;
60 | }, []);
61 | return imageTokens;
62 | }
63 |
// Remark plugin factory: copies every image referenced by the post into the
// public assets folder and rewrites the image tokens' URLs (in place) to the
// new /assets/... locations. `postPath` is the markdown file's path, used to
// resolve relative image references.
const parserPlugin = (postPath: string) => async tree => {
    const images = findImages(tree.children as Node[] || []);
    if (images.length === 0) {
        return;
    }
    // decodeURI first: markdown sources may contain %20 etc. in paths.
    const s3Urls = await copyImagesToAssets(postPath, images.map(token => decodeURI(token.url as string)));

    // copyImagesToAssets maps inputs 1:1 via Promise.all, so index alignment
    // between `images` and `s3Urls` holds.
    images.forEach((token, index) => {
        token.url = `/${s3Urls[index]}`;
    });
};
75 |
// Parses a markdown post through the remark pipeline (front-matter preserved),
// lets parserPlugin copy/rewrite its images, and writes the transformed
// markdown to blog/posts/<file name>.
export const parsePostImages = async (postPath: string) => {
    // Note: plain basename — includes the .md extension, unlike the postId
    // used inside copyImagesToAssets.
    const postId = basename(postPath);
    const fileData = readFileSync(postPath, {
        encoding: 'utf8'
    });
    const contents = await unified()
        .use(markdown)
        .use(toMdString)
        .use(frontmatter, ['yaml'])
        .use(parserPlugin, postPath)
        .process(fileData);

    const newPath = join(__dirname, '..', '..', 'blog', 'posts', postId);
    console.log('Saving to '+newPath);

    writeFileSync(newPath, contents.toString(), {
        encoding: 'utf8'
    });
}
--------------------------------------------------------------------------------
/components/content.tsx:
--------------------------------------------------------------------------------
1 | import styled from '@emotion/styled';
2 |
3 | import * as styles from './styles';
4 | import { StyledFunctionComponent } from './helpers';
5 |
6 | export const Article = styled.article`
7 | display: flex;
8 | flex-direction: column;
9 | flex-wrap: nowrap;
10 | align-items: stretch;
11 | align-content: stretch;
12 | margin: 0 auto;
13 | padding: 0 ${styles.contentPadding};
14 | max-width: ${styles.maxContentWidth};
15 | `;
16 |
17 | export const Abstract = styled.section`
18 | font-size: 1.2rem;
19 | line-height: 1.4rem;
20 | font-weight: bold;
21 | margin: 0;
22 | padding: ${styles.contentPadding};
23 | `;
24 |
25 | export const Meta = styled('section')`
26 | display: block;
27 | text-align: left;
28 | color: #0b7261;
29 | font-weight: normal;
30 | font-size: 1rem;
31 | margin: 0;
32 | `;
33 |
34 | export const DateInfo = styled('span')`
35 | display: block;
36 | font-size: 0.9rem;
37 | padding: 0.3rem;
38 | color: #000;
39 | `;
40 |
41 | export const Tag = styled.span`
42 | display: inline-block;
43 | padding: 0.3rem;
44 | text-transform: lowercase;
45 | margin-right: 0.4rem;
46 |
47 | & > strong {
48 | margin-right: 3px;
49 | }
50 | `;
51 |
52 | interface Props {
53 | content: string;
54 | }
55 |
56 | const ContentContainer = styled.section`
57 | padding: ${styles.contentPadding};
58 | font-size: 1.1rem;
59 | line-height: 1.5rem;
60 | margin: 0;
61 |
62 | & a {
63 | color: ${styles.activeTextColor};
64 | text-decoration: underline;
65 | }
66 |
67 | & h2 {
68 | font-size: 2.1rem;
69 | color: #000;
70 | padding: 0.5rem 0;
71 | width: 90%;
72 | margin: 3rem 0 2rem 0;
73 | line-height: 2rem;
74 |
75 | &::before {
76 | content: "##";
77 | color: ${styles.secondaryBgColor};
78 | font-size: 110%;
79 | margin-right: 0.5rem;
80 | }
81 | }
82 |
83 | & h3 {
84 | font-size: 1.5rem;
85 | color: #000;
86 | padding: 0.5rem 0;
87 | max-width: 50%;
88 | line-height: 2rem;
89 | &::before {
90 | content: "###";
91 | color: ${styles.secondaryBgColor};
92 | font-size: 110%;
93 | margin-right: 0.5rem;
94 | }
95 | }
96 |
97 | & h4 {
98 | font-size: 1.2rem;
99 | color: #000;
100 | }
101 |
102 | & blockquote {
103 | border-left: 4px solid ${styles.mainHeaderBgColor};
104 | padding: 1rem;
105 | margin: 1rem;
106 | }
107 |
108 | & li {
109 | margin: 0.5rem;
110 | }
111 |
112 | & > p > code {
113 | background-color: #FFEECA;
114 | display: inline-block;
115 | padding: 3px;
116 | color: #4E4637;
117 | }
118 |
119 | & img {
120 | max-width: 90%;
121 | max-height: 30rem;
122 | margin: 1rem auto;
123 | display: block;
124 | }
125 | `;
126 |
127 |
// Renders a post's pre-rendered HTML inside the styled ContentContainer.
// NOTE(review): the JSX between the parentheses appears to have been entirely
// lost in extraction — presumably a ContentContainer element consuming
// `content` (likely via dangerouslySetInnerHTML) and `className`; confirm
// against the repository before editing.
export const Content: StyledFunctionComponent = ({ content, className, children }) => {
    return (

    );
}
133 |
--------------------------------------------------------------------------------
/bin/helpers/images.ts:
--------------------------------------------------------------------------------
1 | import { basename, dirname, join } from 'path';
2 | import axios from 'axios';
3 | import { readFileSync, copyFileSync, mkdirSync, createWriteStream } from 'fs';
4 | import { S3Client } from '@aws-sdk/client-s3-node/S3Client';
5 | import { PutObjectCommand, PutObjectInput } from '@aws-sdk/client-s3-node/commands/PutObjectCommand';
6 | import { ListObjectsCommand, ListObjectsInput, ListObjectsOutput } from '@aws-sdk/client-s3-node/commands/ListObjectsCommand';
7 |
8 | const bucketName = 'sosnowski-blog-files';
9 | const publicPath = join(__dirname, '..', '..', 'blog', 'public');
10 |
11 | const copyRemoteImage = async (imageUrl: string, postId: string, postPath: string): Promise => {
12 | const newPath = join('assets', postId);
13 | mkdirSync(join(publicPath, newPath), {
14 | recursive: true
15 | });
16 | const url = new URL(imageUrl);
17 | const imageName = basename(url.pathname);
18 | console.log('Copying remote image ' + imageUrl);
19 | // GET request for remote image in node.js
20 | const response = await axios({
21 | method: 'get',
22 | url: imageUrl,
23 | responseType: 'stream'
24 | });
25 | response.data.pipe(createWriteStream(join(publicPath, newPath, imageName)));
26 | console.log(`Copied to ${join(newPath, imageName)}`);
27 | return join(newPath, imageName);
28 | }
29 |
30 | const copyLocalImage = async (imagePath: string, postId: string, postPath: string): Promise => {
31 | const imageName = basename(imagePath).replace(/\s/g,'-');
32 | const newPath = join('assets', postId);
33 |
34 | console.log(`Copying image ${join(postPath, imagePath)}`);
35 |
36 | mkdirSync(join(publicPath, newPath), {
37 | recursive: true
38 | });
39 | copyFileSync(join(postPath, imagePath), join(publicPath, newPath, imageName));
40 |
41 | console.log(`Copied to ${join(newPath, imageName)}`);
42 |
43 | return join(newPath, imageName);
44 | }
45 |
46 | export const copyImagesToAssets = async (postFilePath: string, imagesPaths: string[]): Promise => {
47 | const postId = basename(postFilePath, '.md').replace(/\s/g,'-');
48 | const postPath = dirname(postFilePath);
49 | const isUrl = /^http|https:\/\/.+/;
50 | console.log(`Copying images for post ${postId}`);
51 |
52 | console.log('Copying images');
53 | const assetsPaths: string[] = await Promise.all(imagesPaths.map(imagePath => {
54 | return (isUrl.test(imagePath) ?
55 | copyRemoteImage(imagePath, postId, postPath) : copyLocalImage(imagePath, postId, postPath));
56 | }));
57 |
58 | return assetsPaths;
59 | }
60 |
61 | // export const uploadImagesToS3 = async (postInfo: GitFileInfo, imagesPaths: string[]): Promise => {
62 | // let existingImages = [];
63 | // const postId = basename(postInfo.file, '.md');
64 | // const postPath = dirname(postInfo.file);
65 | // console.log(`Uploading images for post ${postId}`);
66 | // const s3 = new S3Client({
67 | // region: 'eu-west-1',
68 | // credentials: {
69 | // secretAccessKey: '',
70 | // accessKeyId: ''
71 | // }
72 | // });
73 |
74 | // if (postInfo.change === ChangeType.Modified) {
75 | // console.log('Post is modified, loading existing images');
76 | // const listInput: ListObjectsInput = {
77 | // Bucket: bucketName,
78 | // Prefix: `${postId}/`
79 | // };
80 | // existingImages = (await s3.send(new ListObjectsCommand(listInput))).Contents.map(object => {
81 | // return object.Key;
82 | // });
83 | // console.log(existingImages);
84 | // }
85 |
86 | // console.log('Uploading images');
87 | // const s3Paths: string[] = await Promise.all(imagesPaths.map(async imagePath => {
88 | // const imageName = basename(imagePath);
89 | // const imageKey = `assets/${postId}/${imageName}`.replace(/\s/g,'-');
90 | // console.log(`Uploading image ${join(postPath, imagePath)}`);
91 | // if (existingImages.includes(imageName)) {
92 | // console.log(`Image already exists in S3 as ${imageKey}`);
93 | // } else {
94 | // console.log('Uplading...');
95 | // const result = await s3.send(new PutObjectCommand({
96 | // Bucket: bucketName,
97 | // Key: imageKey,
98 | // Body: readFileSync(join(postPath, imagePath))
99 | // }));
100 | // console.log(`Done, uploaded as ${imageKey}`);
101 | // }
102 | // return `${cloudfrontDomain}/${imageKey}`;
103 | // }));
104 |
105 | // return s3Paths;
106 | // }
--------------------------------------------------------------------------------
/posts/developing-modern-apps-for-financial-markets-2.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Developing modern apps for financial markets – part 4 - Architecture and Development
3 | author: Damian Sosnowski
4 | abstract: The obvious truth is that sooner or later, every project has to leave the design phase and enter the creation process. But we prefer a more refined approach - in our work at GFT, development and architecture planning do not start after the design phase – instead, they both form a crucial part of the entire process, conducted in parallel with all requirement definitions and discussions. To learn how we manage to do this successfully, join me in the final part of the Developing Modern Apps for Financial Markets Series!
5 | created: "2019-08-29"
6 | updated: "2019-08-29"
7 | tags:
8 | - architecture
9 | - webdev
10 | - angular
11 | - projectmanagement
12 | ---
13 |
14 | _I wrote this article some time ago as a way to describe the application design process in GFT; you can find the full version of these articles in the link below._
15 |
16 |
17 | 
18 |
19 | ## The role of an architect
20 |
21 | Since the very beginning, the architect is involved in defining requirements – asking detailed questions that should be considered and that can affect the implementation.
22 |
23 | The topics discussed usually include:
24 |
25 | 1. Any **non-functional technical requirements**, such as: target platform, performance, size of datasets, accessibility etc.,
26 | 2. Everything related to **integration** with 3rd party systems and data exchange,
27 | 3. The **data source** that will be used to feed the application,
28 | 4. **Libraries, frameworks or any other tools** that should be used by the development team.
29 |
30 | Based on the above information, requirements and conversations with stakeholders, the architect prepares an application architecture plan and defines the technology stack that will meet the client’s needs.
31 |
32 | The additional, not-so-obvious role of the architect during the project kick-off phase is to be the person responsible for the **“technical sanity check”.** There has to be someone that makes sure that the requirements agreed with BAs and UX designers are actually implementable, or their estimated implementation time is within the client’s budget and timeframe. This way, the architect’s feedback can be quickly incorporated in the design phase – which in turn prevents problems that may occur later.
33 |
34 | Another task before the architect is to cooperate and stay in touch with architects on the side of the client, in cases when the project requires integration with other services – or simply supervision. This is especially crucial if we plan to have a project handover at the end.
35 |
36 | ## Architecture planning
37 |
38 | First, having completed the initial client interviews and information gathering stage, the preliminary high-level technical requirements are defined.
39 |
40 | _Does the client need a web-based or a native application? Will it be used on multiple devices, including mobile hardware? Does it have to work offline? What kind of data will be displayed and how will the communication between the app and the remaining systems work? What will be the source of data?_
41 |
42 | These and other high-level questions enable the architect to prepare the foundation of the application’s architecture.
43 |
44 | Further down the road, once a more detailed project specification is agreed, and elements such as screens, application modules, data structure, 3rd party services etc. are defined, the architect is able to translate them into a much more detailed **architecture plan**. This usually includes a close-to-final application structure, data structure and communication schema.
45 |
46 | 
47 |
48 | At this point, the technology stack is usually agreed on. Our usual practice is that the development team in Poland, aided by the architect, prepares a set of quick POCs to test and evaluate possible tools and select those that match the client’s requirements best.
49 |
50 | The actual development process can start here as well. With all basics defined, the nearshore development team can start setting up the development environment and the actual implementation.
51 |
52 | The final part of architecture planning is about defining the detailed application structure. Screens (or mock-ups, if final designs are not yet available) are divided into components, data flow between them is agreed on, and services for communication and different business domains are being defined.
53 |
54 | This part of architecture planning is usually done in an agile manner. When new screens or modules are being added, the process is repeated, and a new functionality is incorporated into the existing application structure.
55 |
56 | 
57 |
58 | Read the full version of article on [GFT Blog](https://blog.gft.com/blog/2018/12/12/developing-modern-apps-for-financial-markets-part-4-architecture-and-development/)
59 |
--------------------------------------------------------------------------------
/posts/developing-modern-apps-for-financial-markets-1.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Developing modern apps for financial markets – part 1
3 | author: Damian Sosnowski
4 | abstract: For many people at GFT, presenting and selling the company’s capabilities is part of their usual work, something they do frequently – if not daily. And that’s not only the case for professional sales teams. Job Family Leads, Delivery Managers (DM) and many other roles are heavily involved in contact with current and potential clients, building relationship with them, and promoting our skill sets.
5 | created: "2019-08-28"
6 | updated: "2019-08-28"
7 | tags:
8 | - architecture
9 | - webdev
10 | - angular
11 | - projectmanagement
12 | ---
13 |
14 | _I wrote this article some time ago as a way to describe the application design process at GFT; you can find the full version of these articles in the link below._
15 |
16 | 
17 |
18 | ## The challenge
19 |
20 | However, it is sometimes difficult to talk to clients about our work when nearly everything that we create is confidential and guarded by a set of restrictive IP laws, which is a common case when working for investment banks.
21 |
22 | This creates a big challenge when we approach a potential new client and want to present our skills and capabilities. We are not allowed to demonstrate our actual achievements, we cannot show and describe details of our projects and we do not have an actual portfolio of our work. Everything is stuck in the client’s silo.
23 |
24 | Therefore, we are forced to present only a high-level overview of our work. Some generic terms, PowerPoint presentations, catchy advertisement slogans. Those are useful, but we already know that clients are not always fully convinced by those means. What they want to see are specific, concrete examples of our work and processes that we use, and the know-how that we provide.
25 |
26 | What is more, there are specialisations that are particularly hard to sell without concrete deliverables to present, such as UX Design, Frontend Development, or Business Analysis.
27 |
28 | As people often participating in sales processes, we have experienced those difficulties multiple times, and finally, we’ve decided that it is time to do something about it.
29 |
30 | ## The idea
31 |
32 | For this project, we have created an alliance of two Job Families at GFT Poland: **Business Consulting and Digital (UX and UI).**
33 |
34 | Our goal was: **Create a working, meaningful application that will serve as an efficient and understandable presentation of the skills and processes that we use in our projects.**
35 |
36 | On top of that, we wanted to include some additional, ambitious requirements:
37 |
38 | 1. **The application should cover an actual, useful business case** – we didn’t want to create a shiny, but ultimately useless application. We work for the financial industry and we wanted to have an example of a real financial application that would be understandable for our clients. In essence, something that will represent what we do every day.
39 | 2. **Easily accessible and mobile-friendly** – the application should be as easy to use as possible. The idea is that Sales, a client, DM, or any of us can simply open the application on any compatible device and present it wherever they want. Therefore, we’ve decided that we will create a responsive web application, which is accessible on any device.
40 | 3. **Data heavy** – our commercial applications are usually data centric. Dealing with huge volumes of data is one of our everyday challenges, so it was crucial for us to prepare something that reflects this. But we wanted more. Instead of just displaying tons of data in grids and lists, we pushed towards a modern approach of data aggregation and visualisation. We wanted to create an intelligent tool for data analysis, not just a “display all the data” kind of application. Additionally, we wanted to show how the combination of proper Business Analysis and UX processes makes it possible to achieve.
41 |
42 | But our idea is not only about the application. Visualisation of our work is one thing, but we want to be able to show our potential clients something more than just a plain set of skills. One of the biggest values GFT brings to the projects is the knowledge and processes that we use in our daily work. Those are bullet-proofed and battle-tested processes, carved in years of working for the investment banking sector – and we can win contracts with them. What is more, selling processes and services might be a good way to convince clients to engage entire teams of consultants, with their combined knowledge and experience, instead of cherry-picking resources for their projects, positioning GFT as a skills-based services provider.
43 |
44 | To achieve this, we’ve decided that we will also create a set of deliverables that will describe and represent the processes that we’ve used to create this application. Something with which we will visualise, describe and explain the details of how a project is created, how the particular members of the team cooperate (for example BAs with UX, UX with Developers etc.). Something that will be a representation of our experience and know-how – that a client can benefit from.
45 |
46 | This series of blog posts, combined with the video clip, is the set of tools that we’ve created and we would now like to present them to you.
47 |
48 | [Check the video](https://youtu.be/WYijDEj-aaU)
49 |
50 | Read the rest of the article on [GFT Blog](https://blog.gft.com/blog/2018/11/22/developing-modern-apps-for-financial-markets-a-case-study-part-1/)
51 |
--------------------------------------------------------------------------------
/lib/posts.ts:
--------------------------------------------------------------------------------
1 | import { readdir, Dirent, readFile } from 'fs';
2 | import { basename, extname } from 'path';
3 | import { promisify } from 'util';
4 | import { join } from 'path';
5 | import markdown from 'remark-parse';
6 | import remark2rehype from 'remark-rehype';
7 | import html from 'rehype-stringify';
8 | import { Node } from 'unist';
9 | import matter from 'gray-matter';
10 | import unified from 'unified';
11 |
12 | import hljs from 'highlight.js/lib/core';
13 | import js from 'highlight.js/lib/languages/javascript';
14 | import go from 'highlight.js/lib/languages/go';
15 | import rust from 'highlight.js/lib/languages/rust';
16 | import typescript from 'highlight.js/lib/languages/typescript';
17 | import hljsMarkdown from 'highlight.js/lib/languages/markdown';
18 | import bash from 'highlight.js/lib/languages/bash';
19 |
// Register only the languages used on the blog so the highlight.js bundle
// stays small (hljs/lib/core ships with no languages by default).
hljs.registerLanguage('javascript', js);
hljs.registerLanguage('go', go);
hljs.registerLanguage('rust', rust);
hljs.registerLanguage('typescript', typescript);
// `tsx` reuses the TypeScript grammar — highlight.js has no dedicated TSX language.
hljs.registerLanguage('tsx', typescript);
hljs.registerLanguage('markdown', hljsMarkdown);
hljs.registerLanguage('bash', bash);

// Promise-based wrappers around the callback-style fs API.
const readDirAsync = promisify(readdir);
const readFileAsync = promisify(readFile);
// Posts live in `<project root>/posts` as markdown files with YAML front matter.
const postsPath = join(process.cwd(), 'posts');
31 |
32 | export interface PostMetadata {
33 | title: string;
34 | created: string;
35 | updated: string;
36 | tags: string[];
37 | abstract: string;
38 | id: string;
39 | }
40 |
41 | export interface PostData {
42 | meta: PostMetadata,
43 | content: string,
44 | toc: TOCSimple[]
45 | }
46 |
47 | export interface TOCRecord {
48 | href: string;
49 | label: string;
50 | level: number;
51 | parent?: TOCRecord;
52 | children: TOCRecord[];
53 | }
54 |
55 | export type TOCSimple = Pick & {
56 | children: TOCSimple[];
57 | };
58 |
59 | // const parseMeta = (postId: string, meta: { [key: string]: any }): PostMetadata => {
60 | // return {
61 | // title: meta.title,
62 | // tags: meta.tags,
63 | // abstract: meta.abstract,
64 | // created: meta.created,
65 | // updated: meta.updated,
66 | // id: postId
67 | // };
68 | // }
69 |
70 | export const getPostMetadata = async (postFile: string): Promise => {
71 | const fileContent = await readFileAsync(join(postsPath, postFile), {
72 | encoding: 'utf8'
73 | });
74 |
75 | const result = matter(fileContent);
76 | const postId = basename(postFile, extname(postFile));
77 |
78 | return {
79 | title: result.data.title,
80 | tags: result.data.tags,
81 | abstract: result.data.abstract,
82 | created: result.data.created,
83 | updated: result.data.updated,
84 | id: postId
85 | };
86 | };
87 |
88 | export const getPostsMetdata = async (): Promise => {
89 | const dirContent: Dirent[] = await readDirAsync(postsPath, {
90 | withFileTypes: true,
91 | encoding: 'utf8'
92 | });
93 |
94 | return Promise.all(
95 | dirContent
96 | .filter(entry => entry.isFile() && extname(entry.name) === '.md')
97 | .map((entry) => {
98 | console.log(`Found file in posts: ${entry.name}`);
99 | return getPostMetadata(entry.name);
100 | })
101 | );
102 | };
103 |
// A unist node that also carries hast `properties` — the attributes that
// end up on the rendered HTML element (e.g. `id`, `className`, `loading`).
interface NodeElement extends Node {
  properties: {[key: string]: unknown};
}
107 |
108 | const findNodes = (nodes: Node[], condition: (node: Node) => boolean ): Node[] => {
109 | const matchingNodes = nodes.reduce((prev: Node[], current: Node): Node[] => {
110 | if (condition(current)) {
111 | return [...prev, current];
112 | } else if (current.children && (current.children as Node[]).length > 0) {
113 | return [...prev, ...findNodes(current.children as Node[], condition)];
114 | }
115 | return prev;
116 | }, []);
117 | return matchingNodes;
118 | }
119 |
120 | const htmlParser = () => (tree) => {
121 | const nodes: Node[] = tree.children || [];
122 | const images = findNodes(nodes, node => node.tagName === 'img');
123 | images.forEach((img: NodeElement) => {
124 | img.properties.loading = 'lazy';
125 | });
126 |
127 | const preCodeBlocks = findNodes(nodes, node => {
128 | return node.tagName === 'pre' && (node.children as Node[]).some(child => child.tagName === 'code');
129 | });
130 |
131 | preCodeBlocks.forEach(pre => {
132 | const codeEl: NodeElement = (pre.children as any[]).find(child => child.tagName === 'code');
133 | const codeContent = codeEl.children[0].value || "";
134 | codeEl.children = [{
135 | type: 'raw',
136 | value: hljs.highlightAuto(codeContent).value
137 | }];
138 |
139 | if (!codeEl.properties.className) {
140 | codeEl.properties.className = [];
141 | }
142 |
143 | (codeEl.properties.className as string[]).push('hljs');
144 | });
145 |
146 | const headersElements = findNodes(nodes, node => {
147 | return ['h1', 'h2', 'h3', 'h4', 'h5'].includes(node.tagName as string);
148 | });
149 | headersElements.forEach((header: NodeElement) => {
150 | const textNode = findNodes((header.children as Node[] || []), (node) => node.type === 'text')[0];
151 | const text: string = textNode.value as string || '-empty-';
152 | const id = text.toLowerCase().replace(/\W/g, '-');
153 | header.properties.id = id;
154 | });
155 | };
156 |
157 | const getTOC = (nodes: Node[], currentTOC: TOCRecord) => {
158 | nodes.forEach((node) => {
159 | const tagName: string = node.tagName as string;
160 | let tagLevel: number;
161 | switch (tagName) {
162 | case 'h1':
163 | tagLevel = 1;
164 | break;
165 | case 'h2':
166 | tagLevel = 2;
167 | break;
168 | case 'h3':
169 | tagLevel = 3;
170 | break;
171 | case 'h4':
172 | tagLevel = 4;
173 | break;
174 | }
175 | if (tagLevel) {
176 | const textNode = findNodes(node.children as Node[], (node) => node.type === 'text')[0];
177 | const newTOC: TOCRecord = {
178 | href: (node as NodeElement).properties.id as string,
179 | label: textNode ? textNode.value as string : '-no-label-',
180 | level: tagLevel,
181 | children: []
182 | };
183 |
184 | while (tagLevel <= currentTOC.level && currentTOC.parent) {
185 | currentTOC = currentTOC.parent;
186 | }
187 | newTOC.parent = currentTOC;
188 | currentTOC.children.push(newTOC);
189 | currentTOC = newTOC;
190 | } else if (node.children) {
191 | getTOC((node.children as NodeElement[]), currentTOC);
192 | }
193 | });
194 | }
195 |
196 | const getTOCNodes = (results: TOCRecord[]) => () => (tree) => {
197 | const nodes: Node[] = tree.children || [];
198 | const topTOC = {
199 | href: '/',
200 | label: 'Root',
201 | level: 0,
202 | children: []
203 | };
204 | getTOC(nodes, topTOC);
205 |
206 | topTOC.children.forEach(toc => results.push(toc));
207 | }
208 |
209 | const simplifyTOC = (records: TOCRecord[]): TOCSimple[] => {
210 | return records.map((record): TOCSimple => {
211 | return {
212 | href: record.href,
213 | label: record.label,
214 | children: simplifyTOC(record.children)
215 | };
216 | });
217 | }
218 |
219 | export const getAllPostData = async (postId: string): Promise => {
220 | const fileContent = await readFileAsync(join(postsPath, `${postId}.md`), {
221 | encoding: 'utf8'
222 | });
223 | const postMeta = matter(fileContent);
224 | const TOCRecords = [];
225 | const postHtml = await unified()
226 | .use(markdown)
227 | .use(remark2rehype)
228 | .use(htmlParser)
229 | .use(getTOCNodes(TOCRecords))
230 | .use(html, { allowDangerousHtml: true })
231 | .process(postMeta.content);
232 |
233 | return {
234 | meta: {
235 | title: postMeta.data.title,
236 | tags: postMeta.data.tags,
237 | abstract: postMeta.data.abstract,
238 | created: postMeta.data.created,
239 | updated: postMeta.data.updated,
240 | id: postId
241 | },
242 | toc: simplifyTOC(TOCRecords),
243 | content: postHtml.toString()
244 | };
245 | }
246 |
247 |
--------------------------------------------------------------------------------
/public/party.min.js:
--------------------------------------------------------------------------------
1 | (function(t,e){"function"==typeof define&&define.amd?define("partyjs",[],e):"object"==typeof exports?module.exports=e():t.party=e()})(this,function(){function t(t,e){if(Array.isArray(e))A[t]=new k(e);else if("string"==typeof e){let r=new DOMParser,i=r.parseFromString(e,"application/xml"),o=i.getElementsByTagName("parsererror")[0];if(o)throw new Error("Invalid SVG shape.");var n;let s=i.getElementsByTagName("svg")[0];s&&s.hasAttribute("viewBox")&&(n=M.fromBounds(s.getAttribute("viewBox").split(" ").map(t=>parseFloat(t))));let a,h=i.getElementsByTagName("polygon")[0],l=i.getElementsByTagName("path")[0];if(h){let t=h.getAttribute("points"),e=/(-?\d*\.\d+|-?\d+)/g,n=t.match(e),r=[];for(let t=0;t0&&(t=t.replace(new RegExp("\\{"+(e-1)+"\\}","g"),arguments[e]));return t}function a(t){if(!(t instanceof E))throw new TypeError("Invalid transform supplied to lighting calculation.");return Math.abs(Math.cos(t.rotation.x)*Math.cos(t.rotation.y))}function h(t,e,n){return t&&null!=t[e]?t[e]:n}function l(t,e,n){t&&null==t[e]&&(t[e]=n)}function c(t,e){for(var n in e)e.hasOwnProperty(n)&&l(t,n,e[n])}function u(t){if(["number","string","bigint","boolean","undefined"].includes(typeof t))return t;if("function"==typeof t)return t();if(Array.isArray(t))return t[Math.floor(e()*t.length)];throw new Error("Invalid randomized value")}function d(t,n,r){let s=u(h(n,"count",1)),l=u(h(n,"spread",0)),c=u(h(n,"angle",0));for(let d=0;du(p))),transform:new E(new b((t.left||0)+i((t.width||0)/2,2*h(n,"randomizePosition",!0))+(r?window.scrollX:0),(t.top||0)+i((t.height||0)/2,2*h(n,"randomizePosition",!0))+(r?window.scrollY:0)),b.generate(()=>Math.PI*h(n,"randomizeRotation",!0)*e()),b.one.scale(m)),color:u(h(n,"color",()=>C.fromHsl(360*e(),100,70).toString())),lighting:h(n,"lighting",!0),lifetime:0,draw:function(t){t.fillStyle=this.lighting?new C(0,0,0).mix(C.fromHex(this.color),.25+.75*a(this.transform)).toString():this.color;const e=.2;let 
n=this.lifetime>e?1:this.lifetime/e,r=A[this.shape];if(!r)throw Error(`Unknown shape '${this.shape}'.`);let i=new E(this.transform.position,this.transform.rotation,this.transform.scale.scale(n));r.withTransform(i).draw(t)},update:function(t){this.velocity=this.velocity.applyDelta(this.acceleration,t),this.transform=this.transform.applyDelta(this.velocity,t),this.lifetime+=t}})}}function f(t){for(;B.length>=g.maxParticles;)B.shift();B.push(t)}function p(t){B.forEach(e=>e.update(t));let e=Math.max(document.documentElement.offsetHeight,window.innerHeight);B=B.filter(t=>t.transform.position.y<=e)}function m(){P.canvas.width=window.innerWidth,P.canvas.height=window.innerHeight,P.clearRect(0,0,P.canvas.width,P.canvas.height),B.forEach(t=>t.draw(P))}function w(t){if(!P.canvas.parentElement)return;let e=(t-I)/1e3;B.length>0&&(p(e),m()),I=t,window.requestAnimationFrame(w)}function y(){if(document.getElementById("party-js-canvas"))return;const t=document.createElement("canvas");t.id="party-js-canvas",t.style="position: fixed; left: 0; top: 0; pointer-events: none; z-index: 99999;",P=t.getContext("2d"),document.body?document.body.appendChild(t):window.addEventListener("load",()=>document.body.appendChild(t)),window.requestAnimationFrame(w)}const g={maxParticles:1e3,gravityPixels:800},x={typeCheckFailed:"The supplied parameter must be of type '{0}'.",abstractMethodNotImplemented:"The type is required to implement the '{0}' method.",invalidPathNode:"Invalid node '{0}' detected in SVG path.",malformedPathNode:"Malformed node '{0}' detected in SVG path."};class v{constructor(t){this.index=0,this.items=t}first(){return this.reset(),this.next()}next(){return this.items[this.index++]}hasNext(){return this.index(n<0&&(n+=1),n>1&&(n-=1),n<1/6?t+6*(e-t)*n:n<.5?e:n<2/3?t+(e-t)*(2/3-n)*6:t),a=n<.5?n*(1+e):n+e-n*e,h=2*n-a;r=s(h,a,t+1/3),i=s(h,a,t),o=s(h,a,t-1/3)}return new C(r,i,o)}}class b{constructor(t,e,n){this.x=t||0,this.y=e||0,this.z=n||0}add(t){if(!(t instanceof b))throw new 
TypeError(s(x.typeCheckFailed,"Vector"));return new b(this.x+t.x,this.y+t.y,this.z+t.z)}scale(t){if("number"==typeof t)return new b(this.x*t,this.y*t,this.z*t);if(t instanceof b)return new b(this.x*t.x,this.y*t.y,this.z*t.z);throw new TypeError(s(x.typeCheckFailed,"Number/Vector"))}static get zero(){return new b}static get one(){return new b(1,1,1)}static generate(t){if("function"!=typeof t)throw new TypeError(s(x.typeCheckFailed,"Function"));return new b(t(),t(),t())}}class E{constructor(t,e,n){this.position=t||b.zero,this.rotation=e||b.zero,this.scale=n||b.zero}applyDelta(t,e){if(!(t instanceof E))throw new TypeError(s(x.typeCheckFailed,"Transform"));if("number"!=typeof e)throw new TypeError(s(x.typeCheckFailed,"Number"));return new E(this.position.add(t.position.scale(e)),this.rotation.add(t.rotation.scale(e)),this.scale.add(t.scale.scale(e)))}apply(t){if(!(t instanceof b))throw new TypeError(s(x.typeCheckFailed,"Vector"));let e=t.x*this.scale.x,n=t.y*this.scale.y;return new b(this.position.x+(e*Math.cos(this.rotation.z)-n*Math.sin(this.rotation.z))*Math.cos(this.rotation.y),this.position.y+(e*Math.sin(this.rotation.z)+n*Math.cos(this.rotation.z))*Math.cos(this.rotation.x))}}class z{constructor(t,e,n,r){this.xmin=t,this.ymin=e,this.xmax=n,this.ymax=r}static fromVertices(t){let e=new z(1/0,1/0,0,0);for(let n=0;nt.transformPoint(e))}draw(t){if(!(t instanceof CanvasRenderingContext2D))throw new TypeError(s(x.typeCheckFailed,"CanvasRenderingContext2D"));t.beginPath();for(let e=0;e"string"==typeof t),"m"==r.toLowerCase()&&(r=r.toLowerCase()==r?"l":"L"),n.index--);let i,o,a=r.toLowerCase()==r,h=a?new b(e.x,e.y):new b;switch(r.toLowerCase()){case"m":i="move",o=[h.x+n.next(),h.y+n.next()],e.x=o[0],e.y=o[1];break;case"l":i="line",o=[h.x+n.next(),h.y+n.next()],e.x=o[0],e.y=o[1];break;case"h":i="line",o=[h.x+n.next(),e.y],e.x=o[0];break;case"v":i="line",o=[e.x,h.y+n.next()],e.y=o[1];break;case"z":i="line";let 
t=this.nodes.find(t=>"move"==t.type).getResultingCursor();o=[t.x,t.y],e.x=t.x,e.y=t.y;break;case"c":i="bezier",o=[];for(let t=0;t<3;t++)o.push(h.x+n.next()),o.push(h.y+n.next());e.x=o[o.length-2],e.y=o[o.length-1];break;case"s":i="bezier";let a=this.nodes[this.nodes.length-1];if("bezier"!=a.type)throw new Error(s(x.malformedPathNode,r));let l=e.x+(a.args[4]-a.args[2]),c=e.y+(a.args[5]-a.args[3]);o=[l,c];for(let t=0;t<2;t++)o.push(h.x+n.next()),o.push(h.y+n.next());e.x=o[o.length-2],e.y=o[o.length-1]}this.nodes.push(new N(i,o))}this.nodes.length>50&&console.warn("Complex shape registered, high usage may impact framerate.")}getBounds(){return z.fromVertices(this.nodes.map(t=>t.getResultingCursor()))}normalize(t){if(t=t||M.fromBounds(this.getBounds()),!(t instanceof M))throw new TypeError(s(x.typeCheckFailed,"ViewBox"));let e=new v(this.nodes);for(;e.hasNext();){let n=e.next();for(let e=0;ee.run(t,this.transform)),t.fill()}}class N{constructor(t,e){this.type=t,this.args=e}getResultingCursor(){return new b(this.args[this.args.length-2],this.args[this.args.length-1])}run(t,e){let n;switch(this.type){case"move":n=t.moveTo;break;case"line":n=t.lineTo;break;case"bezier":n=t.bezierCurveTo}let r=[];for(let t=0;t'),t("ellipse",''),t("rounded-square",''),t("rounded-rectangle",''),t("star",'');const V=Math.PI/180;var I=0;return y(),{init:y,area:function(t,e,n){d(t,e,null==n||n)},element:function(t,e){e=e||{},c(e,{shape:this.array(["square","rectangle"]),count:this.variation(40,.5),spread:this.constant(80),size:this.variation(10,.8),velocity:this.variation(-300,1),angularVelocity:this.minmax(1,6)}),this.area(t.getBoundingClientRect(),e)},position:function(t,e,n){n=n||{},c(n,{shape:this.array(["square","rectangle"]),count:this.variation(40,.5),spread:this.constant(80),size:this.variation(10,.8),velocity:this.variation(-300,1),angularVelocity:this.minmax(1,6)}),this.area({left:t,top:e},n)},cursor:function(t){let e=window.event;if(null==e.clientX||null==e.clientY)return 
console.error("Calling 'party.cursor()' with no current mouse event is not allowed.");this.position(e.clientX,e.clientY,t)},screen:function(t){t=t||{},c(t,{shape:this.array(["square","rectangle"]),count:this.variation(window.innerWidth/1980*500,.5),size:this.variation(10,.8),velocity:this.variation(-100,2),angularVelocity:this.minmax(1,6)}),this.area({width:window.innerWidth,height:-window.innerHeight},t)},registerShape:t,constant:function(t){return t},variation:function(t,e,n){if("number"!=typeof t||"number"!=typeof e)throw new TypeError(s(x.typeCheckFailed,"Number"));return()=>(n?o:i)(t,e)},minmax:function(t,e){if("number"!=typeof t||"number"!=typeof e)throw new TypeError(s(x.typeCheckFailed,"Number"));return()=>n(t,e)},array:function(t){if(!Array.isArray(t))throw new TypeError(s(x.typeCheckFailed,"Array"));return t},linearGradient:function(){if(!arguments||0==arguments.length)throw new Error;if(1==arguments.length)return arguments[0];var t=[...arguments].map(t=>C.fromHex(t));return()=>{let e=n(0,t.length-1),r=Math.floor(e),i=e%1;return t[r].mix(t[r+1],i).toString()}}}});
--------------------------------------------------------------------------------
/posts/monetizing-your-blog-with-cryptocurrencies.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Monetizing your blog with cryptocurrencies
3 | author: Damian Sosnowski
4 | abstract: Current internet has been dominated by huge corporations, spying on users and selling their data to the advertisement companies. Amount of ads in internet is unbearable, pushing the quality content out in favour of mass produced click baits. Adoption of crypto-based, open web monetization standards brings back hope that we can get back to creating high quality content for high quality readers. The post describes how you can monetize your content with available crypto-based payments systems.
5 | created: "2021-02-21"
6 | updated: "2021-02-21"
7 | tags:
8 | - crypto
9 | - bitcoin
10 | - blockchain
11 | - monetization
12 | ---
13 |
14 | Nowadays, monetization of the blog usually boils down to displaying ads via some 3rd party ad provider like Google. This however degrades the user experience, both in terms of page performance and "stealing" the users attention. It's a well known fact that most of the ads providers are actually tracking users, invading their privacy and selling their private data.
15 |
16 | For those reasons, people are actively blocking these ads, either via browsers' plugins or by using browsers that have this kind of protection built in by default (like Brave). Additionally, some of you, like me, don't want to host this kind of content, adding your share to the progressing degeneration of internet society.
17 |
18 | Let's review other available options, focusing on the monetization based on cryptocurrencies. But first, **why crypto**?
19 |
20 | ## Crypto vs traditional payment systems
21 |
22 | First of all, let's be honest, **crypto is cool nowadays**! With their value going crazy high, everyone wants to join the hype wagon, hoping that this 0.5 of Dogecoin will be worth thousands of dollars in a few years.
23 |
24 | But hype aside, **what makes crypto different (better?) compared to traditional payment systems?**
25 |
26 | ### Real shared economy
27 |
28 | It's not a secret anymore that the internet has been taken over by big corporations. Almost all the content and almost all the ads (and with ads, money) are being managed by a few leviathans like Google or Facebook. Their monetization system is based on constant surveillance of their users, gathering enormous amounts of data about them and targeted advertising sold to the ones that will pay more.
29 |
30 | There has to be a better way to monetize quality content, and one of the proposed alternatives is the direct money transfer, usually in a form of micropayments, between the reader and the creator. This solution eliminates the need of loud advertisement fighting for your attention, as it is you who decide what content is worth paying for.
31 |
32 | Still, in traditional systems, payments require some established institution, such as PayPal or a bank, to participate in the payment process. This means that all the transactions are being tracked and controlled, and the transaction fees charged by the intermediaries often make the entire process not profitable for the peers.
33 |
34 | **And here is where cryptocurrencies come to play.**
35 |
36 | Cryptocurrencies run on decentralised, self-managing blockchain networks that by design do not include any ruling authorities. They form so-called DeFi, Decentralised Finance Systems, which allow direct peer-to-peer value transfers without any intermediaries controlling the process.
37 |
38 | 
39 | Traditional money transfer vs DEFI (source [https://www.gemini.com/cryptopedia](https://www.gemini.com/cryptopedia))
40 |
41 | DEFI topic is wide and fascinating, if you are interested in the details, I can recommend you this book [https://shermin.net/token-economy-book/](https://shermin.net/token-economy-book/) or this [youtube channel](https://www.youtube.com/watch?v=H-O3r2YMWJ4&feature=youtu.be).
42 |
43 | ## Available crypto-based monetization options
44 |
45 | The crypto monetization field is fairly fresh, it's still more a running experiment than a fully grown, stable ecosystem. There are however several, quite well established options:
46 |
47 | - **BAT Project** - monetization system, integrated with the Brave browser via Brave Rewards
48 | - **Coil** - monetization system **based on Web Monetization Standard**, that allows streaming of payments to the creators when users are viewing their content
49 | - **Steemit** - and other similar platforms, that pay you for posting content on their site
50 |
51 | Above platforms are build around cryptocurrencies but follow different patterns and assumptions.
52 |
53 | ## BAT Project
54 |
55 | **BAT** comes from **[Basic Attention Token](https://basicattentiontoken.org/)**. It's a cryptocurrency that wants to revolutionise how we think about the internet marketing and remove intermediaries between content creators and content consumers.
56 |
57 | Right now BAT only works with the **[Brave browser](https://brave.com/),** users can send and receive BAT tokens via its [Brave Rewards program](https://brave.com/brave-rewards/). There are plans to extend BAT support to other browsers (for example via browser add ons) but as for now this has not yet been achieved.
58 |
59 | ### How does it work
60 |
61 | The idea is simple. If users like the content they are viewing, they can tip the author, sending him some amount of BAT tokens.
62 |
63 | 
64 |
65 | How do the users get their tokens? They can of course buy those on crypto market, or they can earn those for watching ads in their browser.
66 |
67 | BAT Project is built around the idea that our attention is valuable. And if we are using our attention to view some ad, we should be rewarded for it. And the other way around, we should be able to reward content creators that produce the quality content, by sending them some money directly (tipping).
68 |
69 | 
70 | BAT tokens flow (source [https://basicattentiontoken.org/](https://basicattentiontoken.org/))
71 |
72 | #### Contribution modes
73 |
74 | There are three types of contribution that you can use with Brave and BAT tokens
75 |
76 | 1. **One time tip** - a simple one, you like some content, you click on the BAT icon and choose the value you want to send to the creator. It's a one time transfer
77 | 2. **Monthly contributions** - you can setup a recurring payment for the creators you really like
78 | 3. **Auto-Contribute** - this one is interesting. You setup an amount of BATs you want to spend monthly on contributions. Then, Brave calculates how much time you spend reading and viewing content from the creators. At the end of the month, creators will be rewarded proportionally to the time you've spent consuming their content.
79 | 4. As a content creator, you can also allow Brave to **display ads on your site**. You will be rewarded with some amount of BATs depending on the number of ads displayed to your readers
80 |
81 | ### Integrations
82 |
83 | Brave rewards can be integrated not only with your personal blog. This monetization system supports multiple social networks like Youtube, Twitter, Reddit or even Github.
84 |
85 | 
86 |
87 | It means that users will be able to tip you for your content posted there, even if they don't access your blog directly.
88 |
89 | 
90 |
91 | ### Adding Brave Rewards to your blog
92 |
93 | Let's have a quick walkthrough of how you can add BAT monetization to your blog.
94 |
95 | **First, you will need a Brave Creators account**: [https://creators.brave.com/](https://creators.brave.com/).
96 |
97 | After you create it you have to setup a **crypto wallet** that will collect received payments. Currently there are two options: **Gemini** and **Uphold**. For the sake of this tutorial I would encourage you to go with Uphold, as it's also compatible with another monetization system that we will discuss.
98 |
99 | Uphold will ask you for some personal data in order to verify your identity. This is a legal requirement, as far as I know.
100 |
101 | **Now, you have to add your website as a new channel.** Simply choose "Ad Channel" option in the menu. In order to verify that you are indeed the owner of the website, you can either upload a unique file on your server or add a DNS record to your domain.
102 |
103 | It might take several hours for Brave to refresh your information, but when it happens, users viewing your page will see the tipping icon in their address bar.
104 |
105 | 
106 |
107 | Once you verify your identity, it's done! **You are now verified Brave Content Creator!**
108 |
109 | ### Comparison
110 |
111 | Not everything is perfect though. As much as I like the BAT Project and Brave browser, this solution has some flaws.
112 |
113 | First, only users using Brave browser will be able to tip you. Everyone else, even if they would like to, cannot do that. Currently around 25 million users are using Brave, but it does not mean that all of them are actively using BAT tokens.
114 |
115 | Additionally, there is no monetization API available, so you cannot execute any logic on your site based on the monetization or tipping events. You cannot for example implement any type of paywall or exclusive content to incentivise people into paying for your work.
116 |
117 | ## Coil
118 |
119 | [Coil](https://coil.com/) is an implementation of [Web Monetization standard](https://webmonetization.org/), that allows users to stream micropayments to the creators directly, when viewing their content. This service is not based on ads and it requires users to buy a paid membership.
120 |
121 | The key feature here is the [Monetization API](https://webmonetization.org/docs/api) that can be used to deliver some premium content for the users that have decided to buy paid Coil membership.
122 |
123 | ### How does it work
124 |
125 | In a way Coil is similar to BAT Auto-Contribute mode, you create a wallet and when user is viewing your content, the small amount of crypto is streamed to you in real time.
126 |
127 | 
128 | Payment streaming via Coil (source [https://coil.com/](https://coil.com/))
129 |
130 | As stated before, Coil is an implementation of the Web Monetization standard, it's out of the box supported by the [Puma browser](https://www.pumabrowser.com/) but you can enable it on other browsers via [dedicated](https://addons.mozilla.org/en-US/firefox/addon/coil/) [add-ons](https://chrome.google.com/webstore/detail/coil/locbifcbeldmnphbgkdigjmkbfkhbnca). **Important note here:** add-ons will only work on desktop, mobile versions of popular browsers **do not support Coil**. On mobile devices, Puma browser is the only option.
131 |
132 | The key difference here is a monetization API that can be used by JavaScript code in the browser. With it, you can implement custom logic, based on the monetization-related events. You can for example enable access to the premium content for the users that are paying you via Coil, hide the ads for them, give their account unique status etc. **This way you can reward readers that are rewarding you.**
133 |
134 | Under the hood, Coil is using an [Interledger protocol](https://interledger.org/), that allows you to stream payments between different ledgers (different cryptocurrencies ecosystems) in real time. If you are interested in the details I would advise you to check their docs, it's a fascinating piece of technology.
135 |
136 | #### Contribution modes
137 |
138 | As for now, the only available payment mode is real time streaming, users are not able to tip you a specific amount of money (as with BAT) and you cannot require any specific payment to happen before enabling premium content.
139 |
140 | ### Integrations
141 |
142 | Coil offers a wide variety of integrations. You can of course use it on your own website, it allows you to collect payments on Youtube, Twitch and a whole collection of [cooperating platforms](https://coil.com/creator/how-to-monetize#On-platform). It does not however support Twitter or Reddit.
143 |
144 | If you are a software engineer, it might be useful to know that Coil is also integrated with two popular software blogging platforms: [dev.to](http://dev.to) and [hashnode.com](http://hashnode.com).
145 |
146 | ### Adding Coil to your blog
147 |
148 | If you want to add Coil to your blog, you will have to start with creating the Creators Account here: [https://coil.com/creator](https://coil.com/creator).
149 |
150 | You will need a wallet that supports **Interledger Payment Pointer**, just few available wallets support it, but fortunately [Uphold Wallet](https://uphold.com/) that we've used with our BAT example is one of them.
151 |
152 | 
153 |
154 | You can decide which currency you would like to receive via it. The generated string is a pointer that you will use to receive payments to your wallet.
155 |
156 | 
157 |
158 | Paste this string to the Monetize content section of your Coil account settings. It will generate an HTML `<meta>` tag that you should place on your site.
159 |
160 | 
161 |
162 | Once you deploy the version with the new `<meta>` tag, your website is ready! Now you can receive payments from Coil users.
163 |
164 | 
165 |
166 | **Additionally we can play a bit with the Web Monetization API.**
167 |
168 | To check if payment is actually being done, you can use the following snippet
169 |
170 | ```jsx
171 | document.monetization && document.monetization.state === 'started'
172 | ```
173 |
174 | As it may take some time for the monetization to start, you might want to setup an event listener that will execute once the payment starts flowing.
175 |
176 | ```jsx
177 | function startEventHandler (event) {
178 | console.log('Payment has started!')
179 | }
180 |
181 | document
182 | .monetization
183 | .addEventListener('monetizationstart', startEventHandler)
184 | ```
185 |
186 | With those you can enable some premium features on your blog for users paying via Coil.
187 |
188 | **I've prepared a small easter egg for Coil users, so if you are now using Coil you should see a small surprise on top of the page ;)**
189 |
190 | ### Comparison
191 |
192 | **The biggest advantage of Coil is its monetization API**. It allows creators to actually give something for the users that decided to pay for the content. This might be a deal breaker in terms of its wider adoption as it helps to create a situation when both sides are actually rewarded via the payments system.
193 |
194 | **Coil is based on the open Web Monetization standard**. It means, that once you set up a payment pointer, you are not tied to Coil. Any new service, using Web Monetization standard can send money to you.
195 |
196 | Another plus is its compatibility with all major browsers. Compared to BAT it has a much bigger potential user group. Still, on mobile devices, which are the most important market nowadays, Coil is not supported by any of the major browsers. That's the "beauty" of the walled gardens...
197 |
198 | The caveat is that users have to actually buy **a paid membership** to enable monetization payments, which of course makes sense, but with BAT and Brave, users can (at least theoretically) earn BAT tokens for "free", by viewing ads.
199 |
200 | Also, just one, streaming based, contribution mode is narrowing down the possible monetization models. You can't charge any specific amount of money for the premium content or service, you will always receive payment just based on the time users spend on your page. This model promotes lengthy content (like long videos or streams) and not necessarily the one that has the highest quality.
201 |
202 | ## Steemit and other similar platforms
203 |
204 | [Steemit](https://steemit.com/) and other similar platforms are a different kind of monetization systems. It's a separated platform, a distributed social network, that rewards users with cryptocurrency for posting content, and performing some community-oriented actions, like voting, commenting etc.
205 |
206 | The goal of those communities is slightly different than just monetization. It's a form of DAO, **Decentralised Autonomous Organisation**, that aims to create an independent, democratised internet community, alternative to the current, centralised, social media giants.
207 |
208 | Monetization of content and crypto rewards are more a way to incentivise some behaviours not just a way to pay for premium content.
209 |
210 | While still in early stages, those initiatives have very ambitious plans and can potentially disrupt the internet communities as we know it.
211 |
212 | ## Conclusion
213 |
214 | Fortunately you don't have to choose which payment system to implement on your site as BAT and Coil don't interfere with each other, so use both and enjoy your soon-to-be-crypto-millionaire status ;)
215 |
216 | 
217 |
218 | The crypto-based payment systems are at the early stage of development and probably it will take some time until they mature and get a better traction. Also, despite the promise of distributed, peer to peer payments, in both cases there is an intermediary between the content creator and the content consumer. I guess we still have to wait for the solution that will be easy to use and still truly distributed and independent from 3rd parties.
219 |
220 | Looking at the broader picture, it's good that such solutions are being created and their adoption grows. The current internet has been dominated by huge corporations, spying on users and selling their data to the advertisement companies. The amount of ads on the internet is unbearable, pushing the quality content out in favour of mass produced click baits. Adoption of open web monetization standards brings back hope that we can get back to creating **high quality content for high quality readers**, with mutual respect and appreciation.
--------------------------------------------------------------------------------
/posts/anatomy-of-aws-lambda.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Anatomy of AWS Lambda
3 | author: Damian Sosnowski
4 | abstract: AWS Lambda is famous service that has popularized the idea of serverless in cloud computing. It was not the first service of a kind, nor the last, but definitely was, and remains, the most popular and widely used. In this article we will take a closer look on the anatomy of the AWS Lambda functions and the processes that are happening below the surface.
5 | created: "2019-10-02"
6 | updated: "2019-10-02"
7 | tags:
8 | - aws
9 | - severless
10 | - lambda
11 | - cloud
12 | ---
13 |
14 | ## What is Serverless?
15 |
16 | One cannot discuss Lambda without discussing the serverless architecture, one is driven by another. There are many definitions but in short: Serverless architecture allows you to focus entirely on the business logic of your applications. You don't have to think about servers, provisioned infrastructure, networking, virtual machines etc. All this stuff is handled for you by a cloud provider (AWS in case of Lambda). Usually it means, that your application heavily relies on managed services (like Lambda, DynamoDB, API Gateway) that are maintained by a cloud provider and allow you to abstract the server away.
17 |
18 | **Serverless services are usually characterized by following capabilities:**
19 |
20 | - **No server management** – You don’t have to provision or maintain any servers. There is no software or runtime to install, maintain, or administer
21 | - **Flexible scaling** – You can scale your application automatically or by adjusting its capacity through toggling the units of consumption (for example, throughput, memory) rather than units of individual servers
22 | - **High availability** – Serverless applications have built-in availability and fault tolerance. You don't need to architect for these capabilities because the services running the application provide them by default
23 | - **No idle capacity** – You don't have to pay for idle capacity. There is no need to pre-provision or over-provision capacity for things like compute and storage. There is no charge when your code isn’t running.
24 |
25 | AWS provides us with many serverless services: DynamoDB, SNS, S3, API Gateway or the fairly new and extremely interesting AWS Fargate. But this time we will only focus on AWS Lambda, a core service of a serverless revolution.
26 |
27 | ## Lambda - no servers, just code
28 |
29 | Lambda can be described as **Function as a Service (FaaS)**, where the function is the main building block and execution unit. No servers to manage, no virtual machines, clusters, containers, just a function created in one of the supported languages. As with any other managed service, provisioning, scaling and reliability is handled automatically by AWS. This allows us to work on a very high abstraction level, focus entirely on the business logic and (almost) forget about underlying resources.
30 |
31 | 
32 |
33 | ### Basic Lambda Example
34 |
35 | Let's take a look at a simple lambda example.
36 |
37 | ```javascript
38 | exports.handler = async function(event, context) {
39 | console.log('Hello Lambda!');
40 | }
41 |
42 | ```
43 |
44 | You can see that we've defined so called `handler`, the function that will be executed by Lambda Service, every time the event occurs. Handler takes two arguments: an `Event object`, and a `Context`. We will take a closer look at those later.
45 |
46 | You might have noticed, that we've defined our handler as an `async` function, this allows us to perform asynchronous operations inside Lambda `handler`. By returning a promise as a result of handler call, we not only are able to return a result of asynchronous operation, but also we make sure that the Lambda will "wait" till all the started operations are finished.
47 |
48 | You don't have to use `async/await` syntax or promises at all, if you prefer "old school" callback approach, you can use a third handler argument:
49 |
50 | ```javascript
51 | exports.handler = function(event, context, callback) {
52 | /// ... async operations here
53 | callback(asyncOperationResult);
54 | }
55 |
56 | ```
57 |
58 | **Lambda is an event driven service, every lambda execution is triggered by some event, usually created by another AWS service.**
59 |
60 | An example of a compatible event source is **API Gateway**, which can invoke a Lambda function every time API Gateway receives a request. Another example is **Amazon SNS**, which has the ability to invoke a Lambda function anytime a new message is posted to an SNS topic. There are many event sources that can trigger your Lambda, make sure that you check the AWS documentation for the full list of those.
61 |
62 | ### Event and Context
63 |
64 | Every Lambda function receives two arguments: `Event` and `Context`. Event provides the function with detailed information about the event that has triggered the execution (for example, an Event from API Gateway can be used to retrieve request details such as query parameters, headers or even the request body).
65 |
66 | ```javascript
67 | exports.handler = async function(event, context) {
68 | console.log('Requested path ', event.path);
69 | console.log('HashMap with request headers ', event.headers);
70 | }
71 |
72 | ```
73 |
74 | Context on the other hand, contains methods and properties that provide information about the invocation, function, and execution environment (such as assigned memory limit or upcoming execution timeout).
75 |
76 | ```javascript
77 | exports.handler = async function(event, context) {
78 | console.log('Remaining time: ', context.getRemainingTimeInMillis());
79 | console.log('Function name: ', context.functionName);
80 | }
81 |
82 | ```
83 |
84 | ## Lambda execution in details
85 |
86 | You might have noticed, that our lambda code can be divided in two parts: the code inside the handler function, and the code outside of the handler.
87 |
88 | ```javascript
89 | // "Outside" of handler
90 | const randomValue = Math.random();
91 |
92 | exports.handler = function (event, context) {
93 | // inside handler
94 | console.log(`Random value is ${randomValue}`);
95 | }
96 |
97 | // this is still outside
98 |
99 | ```
100 |
101 | While this is still the typical JavaScript file here, those two parts of code are called differently, depending on the Lambda usage. But before we dig into this topic, we have to understand one crucial thing regarding Lambda functions: **Cold Start** and **Warm Start**.
102 |
103 | In order to understand what's behind **Cold Start** and **Warm Start** terms, we have to understand how our Function as a Service works.
104 |
105 | ### Cold start
106 |
107 | In the idle state, when no events are being fired and no code is being executed, your lambda function code is stored as a zip file, a lambda code package, in an S3 bucket. In case of JavaScript, this zip file usually contains a js file with your function code and any other required files.
108 |
109 | 
110 |
111 | Once the event occurs, Lambda has to download your code and set up a runtime environment, together with the resources specified in the Lambda configuration. Only after this step, your lambda code can be executed.
112 |
113 | This process is so called Cold Start of Lambda function, it happens when your Lambda is executed for the first time, or has been idle for a longer period of time.
114 |
115 | 
116 |
117 | Only after the initial setup is finished, your handler can be executed, with a triggering event passed as an argument.
118 |
119 | 
120 |
121 | The exact length of the Cold Start varies, depending on your code package size and settings of your Lambda function (functions created inside your private VPC usually have longer cold starts). You should be aware of this if the long cold start can affect users of your service. For that reason it's wise to keep your code package size as small as possible (be mindful about node_modules size!) and to select a runtime that provides faster cold starts.
122 |
123 | Ok, so what about Warm Start?
124 |
125 | ### Warm start
126 |
127 | The runtime described above is not terminated immediately after handler execution. For some time, the runtime remains active and can receive new events. If such event occurs, the warm runtime does not have to be initialized again, it can execute the handler immediately.
128 |
129 | 
130 |
131 | Warm execution is of course much much faster than the cold one. However, the problem is that we cannot assume that the function will, or will not be called with a cold or warm start. The time for which the function stays warm is not precisely defined in the documentation and is actually based on your configuration and actual Lambda usage. What's more, if you allow concurrency of your Lambdas, newly created Lambda instances will also start with a cold start. This means that you have to be mindful of the above processes, and try to optimize both your cold and warm starts.
132 |
133 | ### Initialization vs handler
134 |
135 | And how does this affect our code? Remember our example of a handler function?
136 |
137 | ```javascript
138 | // "Outside" of handler
139 | const randomValue = Math.random();
140 |
141 | exports.handler = function (event, context) {
142 | // inside handler
143 | console.log(`Random value is ${randomValue}`);
144 | }
145 |
146 | // this is still outside
147 |
148 | ```
149 |
150 | The code outside of the handler function is executed ONLY during cold start. Handler function on the other hand, is executed for every event.
151 |
152 | ```javascript
153 | // This will be executed only during cold start
154 | const randomValue = Math.random();
155 |
156 | exports.handler = function (event, context) {
157 | // Handler will be executed for every request
158 | // So, what will be displayed here?
159 | console.log(`Random value is ${randomValue}`);
160 | }
161 | ```
162 |
163 | Handler can still use all the variables created during the initialization, since those are stored in the memory (till the runtime is terminated). This means, that in the above code snippet, `randomValue` will be the same for every handler call. While this is probably not what we've wanted to achieve, using cold / warm start phases we can apply some optimization in our code.
164 |
165 | In general, it's recommended to store all the initialization code outside of the handler function. All the initialization (like creating a database connection) should be done outside of the handler and just used inside it.
166 |
167 | ```javascript
168 | const config = SomeConfigService.loadAndParseConfig();
169 | const db = SomeDBService.connectToDB(config.dbName);
170 |
171 | exports.handler = async (event, context) => {
172 | const results = await db.loadDataFromTable(config.tableName);
173 | return results;
174 | }
175 |
176 | ```
177 |
178 | This way we not only vastly improve the execution time of our handler, but also we make sure that we are not hammering our DB with new connections being created per every lambda invocation.
179 |
180 | There are many advanced optimization techniques that we can apply to our lambda functions. Still, being aware of cold and warms starts and a code optimization based on those processes is a simple and very efficient approach that you should apply by default to all your lambdas.
181 |
182 | ## Concurrency
183 |
184 | Even the most optimized services have to scale, in order to handle heavy workloads. In "classic" applications, this is handled by auto scaling group, that is responsible for tracking the servers utilization and properly provisioning additional servers when needed (or terminating unused ones). But when using Lambda, we don't work with servers, so how can we scale our function?
185 |
186 | ### Scaling logic in Lambda
187 |
188 | As we might expect, auto scaling of Lambda functions is handled automatically by Lambda service.
189 |
190 | By default, Lambda is trying to handle incoming invocation requests by reusing the existing warm runtimes. This works if the function execution time is shorter than the time between upcoming requests.
191 |
192 | 
193 |
194 | This is a very reasonable approach both from our point of view (warm runtime means faster execution time and ability to reuse resources and connections) and for AWS (service does not have to provide additional runtimes).
195 |
196 | However, if the time between the events is shorter than the function execution time, the single function instance is not able to handle those invocation requests. In such cases, to handle the workload, Lambda has to scale.
197 |
198 | 
199 |
200 | If Lambda receives a new invocation request while all the current runtimes are busy, it will create another runtime. This new runtime will handle the upcoming invocation request and execute the function code. Then runtime remains in the warm state for some time and can receive new requests. If the runtime stays idle for a longer period of time, Lambda terminates it to free the resources.
201 |
202 | 
203 |
204 | ### Concurrency limit
205 |
206 | There is a concurrency limit applied to every Lambda function, it specifies the maximum number of runtimes created at the same time. If your function starts exceeding this limit, the upcoming invocation request will be throttled. In most cases AWS Services that trigger Lambdas, are able to detect this situation and retry the request after some time.
207 |
208 | So, since we can modify the concurrency limit of our Lambdas, is there any reason we should set up a low concurrency limit?
209 |
210 | Yes, and this is actually quite a tricky use case. Remember that every Lambda runtime is isolated, which means that resources are not shared across those. If your Lambda connects to some Database, every runtime has to create a separated DB connection.
211 |
212 | In case of a high concurrency limit, this is a dangerous situation, since your DB can simply be DDOSed by a 1000 incoming connections in a very short period of time. In such situation it's better to set up a low concurrency limit (or just change the database to the one that can handle such workloads).
213 |
214 | Now, let's take a bit more detailed look about different Lambda invocation methods.
215 |
216 | ## Invocation methods
217 |
218 | ### Push vs Pull
219 |
220 | Lambda can be invoked in two different ways:
221 |
222 | - **Push invocation model** - Lambda function is executed when a specified event occurs in one of the AWS services. This might be a new SNS notification, new object added to S3 bucket or API Gateway request
223 | - **Pull invocation model** - Lambda polls the data source (for example an SQS queue, via a so-called **Event Source Mapping**) periodically and invokes your lambda function passing the batch of pulled records in an event object
224 |
225 | The above invocation model does not change a lot in terms of your function code, but you should be aware of it, when calculating the cost of your Lambda or architecting the data flow. Especially the second model, pull invocation, might create some confusion. You might be expecting the Lambda to be called immediately when a new message is posted to SQS, while in fact, SQS will be polled periodically and your function will receive a whole batch of recently added messages.
226 |
227 | ### Synchronous vs Asynchronous
228 |
229 | Additionally, the function can be called using two different invocation types:
230 |
231 | - **RequestResponse** - function is called synchronously, the caller waits for the function to finish and return the result. For example API Gateway uses this invocation, which allows it to retrieve a request response object from Lambda.
232 |
233 | 
234 |
235 | - **Event** - function is called asynchronously, and caller does not wait for the function to return the value. The event is being pushed to the execution queue where it will wait for the function execution. This invocation type can automatically retry the execution if the function returns an error.
236 |
237 | 
238 |
239 | In real world, the invocation type is usually defined by the service that creates the event and calls the lambda functions.
240 |
241 | ## Roles and permissions
242 |
243 | One of the biggest advantages of Lambda is the fact that it's integrated with AWS IAM - a service that is responsible for managing permissions of your AWS resources. As with everything related to IAM, detailed permissions management for AWS Lambda is quite a complex process, for the most part however, it resolves around an **execution role**.
244 |
245 | ### Execution role
246 |
247 | Execution role is a role that your lambda function assumes when it's being executed. But what does it actually mean?
248 |
249 | The IAM role is a collection of permissions, you can have a role that for example is permitted to read and save the data to some specific DynamoDB instance. When you assign this role to your Lambda function, it will assume this role when invoked. As a result, during the execution, Lambda will run with a set of permissions defined in the role.
250 |
251 | 
252 |
253 | One role can be assigned to multiple lambdas, which is convenient if they require the same set of permissions. Remember however, that you should always only grant Lambda the minimum set of permissions required for it to work properly, therefore having one role, with all the permissions, shared by all the lambdas, is not a good idea.
254 |
255 | At a minimum, your function needs access to Amazon CloudWatch Logs for log streaming, but, if your function is using a pull invocation model, it requires additional permission to read the data source (for example to read messages from SQS queue).
256 |
257 | ### _Example of a simple role_
258 |
259 | Let's assume that we want to create a Lambda function that reads the SQS Queue, processes the data and saves the results to a DynamoDB table. What should be the definition of the execution role for this function?
260 |
261 | First of all, the service needs a permission to send logs to CloudWatch, this allows us to monitor and debug our application if needed.
262 |
263 | 
264 |
265 | Since, it's going to write data to DynamoDB, we have to add a proper write permission as well
266 |
267 | 
268 |
269 | Should we add a read permission as well? Assuming that our lambda only writes the data to DB, no. That's the "minimum required permission" rule, if the permission is not absolutely needed, don't use it.
270 |
271 | As a last step, we have to add a permission that will allow our Lambda to read SQS queue. As stated above, Lambda needs to be able to read the awaiting messages in order to provide those to our Lambda function during execution.
272 |
273 | 
274 |
275 | This set of permissions will allow our Lambda to successfully perform the task it was designed for... and nothing else. And that's exactly what we wanted to achieve.
276 |
277 | ## What's under the hood?
278 |
279 | Lambda provides us with the amazing set of functionalities, and the biggest advantage of this service is that we don't have to think about the servers and all the technology under the hood that powers it. But we are very curious creatures, right? So, when our functions are executed, what is actually happening behind the curtain?
280 |
281 | The details of Lambda technology would require a whole separate article (or a book...), but in general, we can at least try to scratch the surface here.
282 |
283 | ### Lambda invocation flow
284 |
285 | Lambda service is in fact a whole set of services cooperating together to provide the full range of Lambda functionalities.
286 |
287 | - **Load Balancer** responsible for distributing invocation requests to multiple Frontend Invokers in different Availability Zones. It is also able to detect the problems in a given AZ and route the request to remaining ones
288 | - **Frontend Invoker** is a service that receives invocation request, validates it and passes it to the Worker Manager
289 | - **Worker Manager** is a service that manages Workers, tracks the usage of resources and sandboxes in Workers and assigns the request to a proper one
290 | - **Worker** provides a secure environment for customer code execution, this is the service responsible for downloading your code package and running it in a created sandbox.
291 | - Additionally there is a **Counter service** responsible for tracking and managing concurrency limits and a **Placement Service**, that manages sandboxes on workers to maximize packing density.
292 |
293 | Summing up, the invocation request is passed by **Load Balancer** to a selected **Frontend Invoker**, Frontend Invoker checks the request, and asks the Worker Manager for a sandboxed function that will handle the invocation. **Worker Manager** either finds a proper **Worker** and a sandbox, or creates one. Once it's ready, the code is executed by a **Worker**.
294 |
295 | 
296 |
297 | ### Isolation
298 |
299 | Worker is usually some EC2 instance running in the cloud. There can be multiple different functions, from different users, running on the same Worker instance. To keep things secure and isolated, every function runs in a secure sandbox. A single sandbox can be reused for another invocation of the same function (warm runtime!) but it will **never** be shared between different Lambda functions.
300 |
301 | 
302 |
303 | The technology that powers this flow is **Firecracker**. It's an [open source project](https://firecracker-microvm.github.io/) that allows AWS to spawn hundreds of thousands of lightweight sandboxes on a single Worker.
304 |
305 | Since sandboxes can be easily and quickly created and terminated, while still providing a secure isolation of functions, Workers can be reused across multiple Lambdas and even Accounts, which allows the **Placement Service** to organize the work to create and apply the most performant usage patterns as possible.
306 |
307 | The above is a very brief and simplified overview of Lambda internals, if you would like to get some more fascinating details, check [this talk from re:Invent](https://www.youtube.com/watch?v=QdzV04T_kec)
308 |
309 | ## Conclusion
310 |
311 | Thank you for reaching the end of this article. It has gotten quite long :) Still, hopefully it was an interesting overview of what's Lambda and how to work with it. If you are interested in more details, just contact me!
312 |
--------------------------------------------------------------------------------
/posts/optimizing-aws-lambda.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Optimizing AWS Lambda
3 | author: Damian Sosnowski
4 | abstract: Serverless services, such as AWS Lambda, are tempting us with the promise of automated and effortless scalability. Managed serverless services are designed to easily scale to unbelievable levels, quickly adjusting to the upcoming workloads, and to run in an environment highly abstracted from the hardware underneath. All those traits make it very easy to think about the optimization of your services as a second-class citizen. After all, Lambda will just scale up when needed, so why spending time and money on optimization?
5 | created: "2019-12-16"
6 | updated: "2019-12-16"
7 | tags:
8 | - aws
9 | - severless
10 | - lambda
11 | - cloud
12 | ---
13 |
14 | As many others have said: “serverless” does not mean no servers. Instead, as noted in the AWS docs, serverless is more a question of not having to think about servers. While a serverless approach can simplify some computing tasks, the need to understand and maintain application performance remains as important as ever.
15 |
16 | > Don’t think servers, still think performance
17 |
18 | While Lambda scaling capabilities are indeed very powerful (you can read more about Lambda features, including scaling, [in my previous article](https://sosnowski.dev/anatomy-of-aws-lambda-ck1975g09000ijfs1fle758js)), optimization details and techniques might not be so obvious for people used to working with more traditional applications.
19 |
20 | ## Time is money, literally
21 |
22 | There is one major difference in how we think about optimization in Lambda-based applications. In standard server-based applications, the cost of a running server is more or less constant. You pay for hours of a running server instance, the price does not change if you are able to process the upcoming requests in 100 or 500 milliseconds. Of course, it will affect your users' experience, and can limit your maximum throughput, but it will not affect your costs.
23 |
24 | **With Lambda it's different.** You pay for what you use, you pay only for the time your code executes, this is an opportunity for great savings (you don't pay for idle time, when your code is not executing), but it also means, that longer execution directly increases your AWS bill.
25 |
26 | In case of Lambda, you are also billed for each invocation. While the price for the single invocation is very low, a sudden peak in number of requests might put a significant pressure on your credit card.
27 |
28 | There are different ways you can optimize your lambdas to reduce the execution time and decrease your costs, but before we discuss them, let's take a look how AWS bills you for Lambda execution.
29 |
30 | ## How to calculate Lambda cost
31 |
32 | Lambda service charges you for every Lambda request (function invocation) and for the time your code is executing. The prices may vary depending on the region and AWS might change the price table from time to time.
33 |
34 | Currently in `eu-west-1` region, the cost of a single Lambda invocation is `$0.0000002`. As for the execution time, it is calculated as GB-Seconds. GB-Second simply represents a one second of execution time, of Lambda that is using 1 GB of memory and will cost you `$0.0000166667`
35 |
36 | Execution time is rounded up to the nearest 100 ms, so even if your function is executing for 230 ms, you will be billed for 300 ms.
37 |
38 | ### Free Tier Package
39 |
40 | AWS gives you an initial Free Tier for Lambda functions, that includes 1M Requests and 400,000 GB-Seconds per month for free. What's more, Lambda Free Tier does not expire after 12 months of AWS Free Tier term, but is available to both existing and new AWS customers indefinitely.
41 |
42 | That means that services with relatively small workload can be completely free and not generate any costs on your bill (at least not for Lambda service).
43 |
44 | ### Calculating cost by memory
45 |
46 | Let's now see how GB-Seconds are calculated.
47 |
48 | As stated above, 1 GB-Second represents 1 sec of execution of a function that has 1 GB of memory assigned to it. If the function has a smaller memory footprint, it will use a fraction of this resource. Let's say we have a Lambda function that has 512MB assigned to it, it means that 2 seconds of its execution time is worth 1 GB-Second. Analogously, a function using 256MB of memory has to run for 4 seconds to use 1 GB-Second, and one using 2048MB (2GB) can only run for 0.5 sec.
49 |
50 | 
51 |
52 | Those rules apply to the usage of the available free tier as well. You can run 1GB functions for 400,000 seconds for free, but if you only run functions with a 128 MB footprint, you will have as many as 3,200,000 seconds to use before you start seeing charges in your bill.
53 |
54 | 
55 |
56 | In reality of course your functions will have different footprints, depending on the tasks they perform, just remember not to over-provision your function's memory, since even if not fully used, it will increase your costs.
57 |
58 | ## Measuring Lambda performance
59 |
60 | The first step in all optimization-oriented discussion is knowing how to measure it. Every optimization, without proper metrics, is just an academic discussion, that does not have to apply to the real world use cases. Remember to always measure and track the performance of your Lambdas, and verify if the applied changes are actually providing you with real benefits.
61 |
62 | Fortunately, AWS provides us with a rich set of monitoring tools, well integrated with Lambda service.
63 |
64 | ### Cloudwatch
65 |
66 | Cloudwatch is undoubtedly the number one tool that we can use to track the performance and costs of our Lambda functions. It provides us with the ability to review the execution details of our functions, on various levels of detail.
67 |
68 | #### Logs
69 |
70 | Every Lambda function sends execution details (and any custom log messages that you add) to Cloudwatch, where logs are stored and available for analysis.
71 |
72 | At the end of every execution log, you can find the performance information. Time execution, memory usage etc. are simply displayed in the log entry.
73 |
74 | This is a very basic information, but often it's enough to evaluate your function performance.
75 |
76 | 
77 |
78 | - **Duration** - how much time your function was executing
79 | - **Billed Duration** - the time that you were billed for, Lambda rounds up time execution to the nearest 100ms
80 | - **Memory Size** - The amount of memory assigned to this Lambda function
81 | - **Max Memory Used** - The amount of memory used by your function during this execution
82 |
83 | For some executions you might also see `Init Duration`, this indicates that the function was cold started, and gives you an overview of how long the initialization code was executing.
84 |
85 | 
86 |
87 | Logs unfortunately only provide us with information about a single Lambda invocation at a time. In order to track our service performance at scale, we need some more advanced solution.
88 |
89 | #### Monitoring
90 |
91 | Cloudwatch also provides a set of metrics that allows us to track what's happening with our functions in real time and review the historical data.
92 |
93 | **Number of invocations**
94 |
95 | Number of invocations is a built-in metric that is able to track the number of invocations per Lambda function. You can adjust the time range you want to visualize, display multiple functions at once, or even adjust the granularity of the graph.
96 |
97 | 
98 |
99 | **Execution time**
100 |
101 | The same way you visualize invocations, you can visualize execution time of the function. Depending on the situation and your workload, you might want to view the Maximum execution time, not average, this might help you find the bottlenecks or unexpected code issues.
102 |
103 | 
104 |
105 | The above methods are only a small piece of Cloud Watch functionality, still it should be sufficient for the needs of this article. However, I encourage you to dive deeper into this service and explore its features on your own.
106 |
107 | ### X-Ray
108 |
109 | AWS X-Ray is a service designed to analyse and debug distributed applications, that are spread across multiple lambdas and services, to identify and troubleshoot issues and errors. While its main purpose is much wider than Lambda performance testing, we can still use some of its features to visualize interesting parts of Lambda execution.
110 |
111 | #### Activation
112 |
113 | To use X-Ray tracing for Lambda function, you have to enable it in AWS Console.
114 |
115 | 
116 |
117 | After that, you will be able to find your Lambda tracers in X-Ray panel.
118 |
119 | #### Lambda Trace
120 |
121 | Trace details view allows you to investigate what happens during your function execution and investigate how long particular invocation steps last.
122 |
123 | It's especially useful to spot performance problems with Lambda cold start and initialization phase.
124 |
125 | 
126 |
127 | You might also notice, that subsequent Lambda invocations, warm starts, do not have Initialization segment, since the initialization code is not run for those cases.
128 |
129 | 
130 |
131 | Now, knowing how to measure the performance of Lambda functions, let's review the techniques that can be applied to get performance boost.
132 |
133 | ## Optimization techniques
134 |
135 | As you might expect, there are multiple optimization aspects that apply to Lambda functions, let's review them one by one.
136 |
137 | ### Optimization through configuration
138 |
139 | Your Lambda code does not have direct access to the processor, instead, it gets assigned part of the virtual CPU. How big is this part, depends on the amount of assigned memory, starting with a small chunk at **128MB** to **1,792 MB** where Lambda gets the **full vCPU core**. Above that level, additional vCPU cores will get assigned to your function. Amount of assigned memory also affects your billing, the more memory you assign, the more you will pay for every 100 ms of execution.
140 |
141 | Assuming your function execution time is limited by CPU power, you can actually consider increasing the amount of assigned memory, so that your function execution time is shorter. Overall you might be billed the same amount of money (or even less) and your function will return results faster.
142 |
143 | ### Code optimization
144 |
145 | Usually, there is no magic here. Lambda execution, on the code level, is similar to any other code execution, so as a rule of thumb, you can assume that whatever makes your code run faster on your machine, will make it run faster in Lambda as well. Countless optimization techniques apply here, more efficient algorithms or nifty code tricks. There are however, two unique cases, that might require some more customized approach: **asynchronous operations** and **multithreading**.
146 |
147 | #### Asynchronous operations
148 |
149 | Asynchronous operations are (vastly simplifying the term) a way of handling long lasting operations, usually Input/Output, without forcing the main application process to stop and wait for the operation to finish. This is especially popular in Node.js ecosystem (or JavaScript in general), where the whole server runs on single process, so it would be extremely inefficient to force it to stop and wait for every network request or disc read.
150 |
151 | By making the operation asynchronous, Node.js can process other upcoming tasks, while the long lasting operation is in progress. This is a very useful and widely used technique that allows a single process to effectively serve multiple requests.
152 |
153 | 
154 |
155 | With Lambda however, it's different. A single instance of a Lambda function **does not** handle multiple requests at once. Every request, or event, triggers a separate function execution. If the available Lambda function is busy, the service will scale and create another instance. This means that a particular Lambda execution context will not accept new requests until all operations of the previous request (asynchronous or not) are finished.
156 |
157 | **Does it mean that asynchronous operations bring no advantages for applications running on Lambda? Yes and no.**
158 |
159 | It does not change anything if you only make one async operation per invocation call, or you have to run many of those one after another. In such case, you will see no benefits using asynchronous code and the execution time of your function will be defined by the sum of requests duration.
160 |
161 | .png)
162 |
163 | But, the asynchronous approach will be beneficial if those operations can be run in parallel. Then, you can start all those requests at once, perform other operations, and finish when all asynchronous requests are done. In such case, your execution time will be affected only by the slowest of those requests.
164 |
165 | .png)
166 |
167 | **Avoid waiting**
168 |
169 | Remember, that even when your function does nothing and waits for the asynchronous operation to finish, it still cannot handle new requests and the waiting time still counts as execution time. Even if your function is not performing any operations, just waits, Lambda will still bill you for that time. Therefore, it's best to avoid, or at least reduce, the number of I/O operations as much as possible.
170 |
171 | It can be done by taking a proper approach when defining application architecture and by using batch operations when possible. Let's take DynamoDB for example. Every DynamoDB operation, done from inside Lambda function, is in fact a http request being sent. So, simply putting 5 items in a table, one by one, means 5 separated requests that you have to wait for.
172 |
173 | ```javascript
174 | dynamoDB.putItem(item1, (err, data) => { /* callback */ }); // request 1
175 | dynamoDB.putItem(item2, (err, data) => { /* callback */ }); // request 2
176 | dynamoDB.putItem(item3, (err, data) => { /* callback */ }); // request 3
177 | dynamoDB.putItem(item4, (err, data) => { /* callback */ }); // request 4
178 | dynamoDB.putItem(item5, (err, data) => { /* callback */ }); // request 5
179 | ```
180 |
181 | Instead you can use _batchWriteItem_ method, that will save all those records in one request.
182 |
183 | ```javascript
184 | dynamoDB.batchWriteItem(allItems, (err, data) => { /*callback*/ }); // single request
185 | ```
186 |
187 | The latter is not only faster, but also much easier to manage.
188 |
189 | **Avoid asynchronous operations**
190 |
191 | What if you could avoid asynchronous, long-lasting, network-based operations at all? Why should you pay for the time your code is just waiting for the network request to finish? Especially if you are communicating with other AWS services. Let's imagine, you have a fairly simple case, when user uploads a picture to your application, its data is stored in the DB, and an email is being sent when the operation is done.
192 |
193 | If you would like to write such an application "the old way", it would probably look more or less like this:
194 |
195 | .png)
196 |
197 | After reading through previous article parts, you probably already know that such architecture involves a lot of waiting for network requests. This is not optimal, since you actually pay for waiting until other AWS services do their job.
198 |
199 | But how can we tackle this problem? Most of those requests cannot be just parallelized, since we need the results of the previous operation in order to start the next one.
200 |
201 | The key here is to approach this problem with a different mindset. **Writing serverless application, we should focus more on writing "a glue" , business logic combining existing managed services to achieve desired results, instead of manually handling requests.**
202 |
203 | Our application can be redesigned to leverage existing AWS connectors and events emitted by services.
204 |
205 | .png)
206 |
207 | Let's summarize shortly how application building blocks cooperate:
208 |
209 | - We are using **S3 Presigned URL**, that allows user to upload file directly to S3 bucket, it means that the whole uploading process does not even go through Lambda function
210 | - The S3 bucket emits an event to an SNS topic after the file is uploaded
211 | - Lambda subscribes to SNS topic and when triggered by an event, saves file meta data to DynamoDB
212 | - Once a DynamoDB record is created, a lambda connected to DynamoDB streams is triggered with the newly added record, and used to send an email via SES
213 |
214 | As you can see, we've eliminated most of the waiting time, by leveraging events triggered by AWS services. What's more, this architecture is not only efficient, but also more maintainable and should scale easier. Put an SQS queue in between SNS and Lambda, and you additionally increase the resiliency of your app and the ability to handle unexpected peaks of heavy workloads. If you would like to generate a thumbnail of an added file, you simply connect a new Lambda to the SNS topic and store the results in S3. Your Lambdas in such an architecture receive events from one AWS service, execute small pieces of business logic and hand the data over to another part of the data flow chain.
215 |
216 | .png)
217 |
218 | While this approach may seem over complicated at first, it starts to feel natural after some time. Remember, that the whole idea of serverless applications is based on the assumption, that the code layer is as small as possible, and serves mostly as a connecting layer, between managed cloud services.
219 |
220 | #### Multithreading in Lambda functions
221 |
222 | Languages that support multi threading can speed up code execution by parallelizing some task and utilizing multiple cores of the processor. While Lambda runtime supports operations on multiple threads, some limitations apply here.
223 |
224 | As stated previously, amount of CPU power assigned to the function depends on the amount of assigned memory, starting with a small chunk at **128 MB** to **1,792 MB** where Lambda gets the **full vCPU core**. Above that level, additional vCPU cores will get assigned to your function.
225 |
226 | With this in mind, we can predict that multithreading will only improve Lambda performance if you assign more than one virtual core to the function (more than 1,792 MB of memory). Below that level, you can still use threads in your code, but execution will not be parallelized. Actually, the overhead caused by thread management might even result in longer execution time when compared to single-threaded code.
227 |
228 | **Single thread limitations**
229 |
230 | Let's take a very naive function, written in Go, as an example. The code below just runs some sha256 hashing, on the content of a file, in two synchronous loops. Performance of this Lambda handler is purely limited by CPU power, so it should serve as a good visualization of Lambda CPU management.
231 |
232 | ```go
233 | import "crypto/sha256"
234 | // ...
235 | func doHashing(file []byte) {
236 | for index := 0; index < 100; index++ {
237 | sha256.Sum256(file)
238 | }
239 | }
240 |
241 | var fileContent []byte
242 | func init() {
243 | // file is read during Lambda initialization, to minimize its effect on handler performance
244 | res, err := ioutil.ReadFile("text_file.txt")
245 | fileContent = res
246 | if err != nil {
247 | log.Fatal(err)
248 | }
249 | }
250 |
251 | func LambdaHandler() (int, error) {
252 | // create 2x100 hashes synchronously, one after another
253 | doHashing(fileContent)
254 | doHashing(fileContent)
255 | return 0, nil
256 | }
257 |
258 | func main() {
259 | lambda.Start(LambdaHandler)
260 | }
261 | ```
262 |
263 | Now we can run this function with different memory settings
264 |
265 | 
266 |
267 | On the graph, it's clearly visible, that available CPU power only grows to the limit of 1,792 MB of memory configured (which translates to full vCPU core being assigned to the function). Everything above that results in more cores being added, but it does not further increase the computation power of the first core.
268 |
269 | **This means, that if your function code is entirely single threaded, you will not see any performance improvements above 1,792 MB level. You will however see increased execution costs.**
270 |
271 | **Multi threading performance example**
272 |
273 | Now, let's take the same function code, but make it multi threaded with go routines.
274 |
275 | ```go
276 | func doHashing(file []byte) {
277 | for index := 0; index < 100; index++ {
278 | sha256.Sum256(file)
279 | }
280 | }
281 |
282 | var fileContent []byte
283 | func init() {
284 | // file is read during Lambda initialization, to minimize its effect on handler performance
285 | res, err := ioutil.ReadFile("text_file.txt")
286 | fileContent = res
287 | if err != nil {
288 | log.Fatal(err)
289 | }
290 | }
291 |
292 | func LambdaHandler() (int, error) {
293 | // now those two functions will be run on two separated threads
294 | go doHashing(fileContent) //this function will be executed on a separated thread
295 | doHashing(fileContent)
296 | return 0, nil
297 | }
298 |
299 | func main() {
300 | lambda.Start(LambdaHandler)
301 | }
302 | ```
303 |
304 | And the execution time for different configurations looks like this:
305 |
306 | 
307 |
308 | Now, the execution time drops when the amount of assigned memory increases. Up to 1,792 MB, results for single and multi threaded code are very similar, since on a single core, multiple threads cannot run in parallel. Only above that level can you see the difference: an additional core allows the multi threaded code to run in parallel, which gives a visible boost to the function's performance.
309 |
310 | #### Code and package structure
311 |
312 | There are few more aspects that you should pay attention to when working on your serverless application.
313 |
314 | It's the structure of your code and the size of your lambda package. Both will affect the time your function needs to initialize.
315 |
316 | **Keep your handler as small as possible**
317 |
318 | Let's take a look at an example of a basic function that reads the data from DynamoDB and returns the list of records.
319 |
320 | ```javascript
321 | const AWS = require('aws-sdk');
322 |
323 | exports.handler = async (event, context) => {
324 | const dynamoDb = new AWS.DynamoDB({apiVersion: '2012-08-10'});
325 | // ... define query params
326 | const records = await dynamoDb.query(params).promise();
327 | // ... return records as a response
328 | }
329 | ```
330 |
331 | To understand what's wrong with this code, you have to be aware of what's the difference between cold start and warm starts. You can find the details [here](https://sosnowski.dev/anatomy-of-aws-lambda-ck1975g09000ijfs1fle758js#lambda-execution-in-details), however, what matters now is to distinguish code that is executed during the **initialization phase** vs code executed with **every request**.
332 |
333 | The `handler` function here is executed with every upcoming request, every new event passed to Lambda is handled by a separated handler execution. Everything outside of handler function, is only executed when the new runtime is being created, during so called **cold start or initialization phase**. Since the runtime is not destroyed after every execution, but reused between subsequent requests, consecutive `handler` executions have an access to the variables created during the initialization phase (like variable `AWS` in the example above).
334 |
335 | You might have noticed, at that point, what's wrong with the code above. DynamoDB connection is created with every handler invocation. While technically correct, this is a suboptimal implementation. DB connection does not have to be created for every invocation, we can create a single connection instance during the function initialization phase, and then just reuse it in the handler.
336 |
337 | ```javascript
338 | const AWS = require('aws-sdk');
339 | const dynamoDb = new AWS.DynamoDB({apiVersion: '2012-08-10'});
340 | 
341 | exports.handler = async (event, context) => {
342 | // ... define query params
343 | const records = await dynamoDb.query(params).promise();
344 | // ... return records as a response
345 | }
346 | ```
347 |
348 | Above version will initialize the DB connection during the cold start of a function, and a handler will reuse it when processing the event. Since you can expect that handler will be called multiple times per one cold start (exact numbers are very hard to predict and are based on multiple factors), this implementation is much more efficient.
349 |
350 | The general rule is to remove from the handler all the code that is not directly related to processing of a particular event. Any connection initialization, configuration loading / parsing, library initialization etc. should be called outside of the handler, during the initialization phase.
351 |
352 | Be careful though, since if you extract too much logic out of your handler function, you might introduce not-so-easy-to-spot bugs in your code. Let's look at an example with a random number generator based on some seed value.
353 |
354 | ```javascript
355 | const gen = require('random-seed');
356 | const seed = createSomeFancySeedValue();
357 |
358 | exports.handler = async (event, context) => {
359 | const randomValue = gen.create(seed);
360 | console.log(`Random number: ${randomValue}`);
361 | }
362 | ```
363 |
364 | Moving seed generation outside of handler might seem like a good idea from the performance point of view, but in fact, it's breaking the whole functionality of the function, since seed value stays the same across multiple handler calls, which will result in "random" value being the same every time.
365 |
366 | **Reduce the size of your code package to reduce cold start time**
367 |
368 | A Lambda function is deployed as a zip file that contains all the code and resources needed for the function to execute. This package is stored in an S3 bucket. When Lambda cold starts a new runtime environment for the function, it downloads the file, unzips it and executes the code. The bigger your package is, the longer it takes Lambda to download it and the longer you will wait for your function to initialize.
369 |
370 | 
371 |
372 | There are no magic tricks here. Make sure that the package is as small as possible. Reduce the number of your dependencies, make sure that you don't use overpowered libraries and frameworks, keep things simple and delegate responsibility to managed AWS services where possible.
373 |
374 | If you are using Node.js, be mindful about your `node_modules` directory. Make sure that for the deployment, you only package the dependencies needed for production build. Try to remove unnecessary files using tool like `node-prune`.
375 |
376 | #### Provisioned concurrency
377 |
378 | Recently, on re:Invent 2019, Amazon has presented new functionality that might help us optimize our Lambdas performance. It's [Provisioned Concurrency for Lambda Functions.](https://aws.amazon.com/blogs/aws/new-provisioned-concurrency-for-lambda-functions/)
379 |
380 | In short, it allows you to pre-initialize a requested number of execution environments, that will always stay "warm", regardless of your current workload. With such a setup, requests routed to those instances will not suffer from cold start latency, since Lambda will not have to create new runtimes from scratch. If the workload exceeds the number of provisioned environments, Lambda will just scale as with any other workload.
381 |
382 | This setting is especially useful, when you know that you can expect some defined number of requests for your Lambdas, and you would like to prioritize the response time (with a cost of additional numbers on your bill).
383 |
384 | ## Conclusion
385 |
386 | As you can see, there are multiple techniques that you can use when optimizing your Lambda functions. As with every optimization, not every technique has visible effects all the time, but it's important to keep in mind the specificity of the Lambda environment and the restrictions that apply there. Also make sure that you evaluate the potential gains of the optimization process, since sometimes the amount of work might not be worth the resulting execution time improvements. At the same time, hours of advanced code optimization might not give you the same performance benefits as rethinking your architecture approach and shifting it towards an event driven "code as glue" architecture.
387 |
--------------------------------------------------------------------------------
/posts/static-serverless-site-with-nextjs.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: How to create statically generated, serverless site with NextJS, AWS CloudFront and Terraform
3 | author: Damian Sosnowski
4 | abstract: A case study, where we will build a statically pre-rendered site, using NextJS, and host it in AWS Cloud, fully serverless, using CloudFront, Lambda@Edge and S3. Also, we will apply Infrastructure as Code approach by leveraging Terraform and automate deployments with Github Actions.
5 | created: "2020-07-13"
6 | updated: "2020-07-13"
7 | tags:
8 | - aws
9 | - severless
10 | - NextJS
11 | - React
12 | - JAM
13 | ---
14 |
15 | I think I've managed to squeeze every possible buzzword in the article title ;) but every single one is true! Using my blog as an example, we will review the implementation of a statically generated site using NextJS and TypeScript. Next, we will discuss how such a site can be hosted on AWS cloud, fully serverless, using CloudFront for serving content, Lambda at Edge for requests routing and an S3 bucket as a file storage. We will define the whole infrastructure using Terraform. Additionally, we will create some simple Github Actions to automate the deployment of our site.
16 |
17 | The source code of the blog is available [on my github](https://github.com/sosnowski/blog). So, let's start.
18 |
19 | ## What is a statically rendered page?
20 |
21 | Web development is an interesting profession, where every few years we change the main architecture paradigm.
22 |
23 | At the beginning, web pages were just **static html files**, served via the server to the browser. Later, html files were generated **dynamically on the server**, using languages like PHP.
24 |
25 | Next, everyone fell in love with the idea of fat-client applications, so called **Single Page Applications**, where the only served html file is an empty index.html and all the content is dynamically generated by JavaScript. This approach however had several flaws, mostly around performance and SEO. So, we've started to move back.
26 |
27 | First, we've got back to **Server Side Rendering (SSR)**, but this time using frontend frameworks and JavaScript on the backend. But, to fully close the cycle, we had to go back to the beginning.
28 |
29 | Now, the JAM stack (JavaScript, APIs and Markup) is on the rise. Using **static content generators**, we can combine the advantages of static files (performance, SEO and reliability) with the ability to serve dynamic content to our users. Instead of generating files per request, we can pre-generate all the pages at build time (or when data is updated) and serve static html files to the users.
30 |
31 | 
32 |
33 | Many of those tools go far beyond just using markdown, and allow us to use any source of data to pre-render our pages.
34 |
35 | 
36 |
37 | In this case study, we will use NextJS, a React - based framework, to create a statically generated blog.
38 |
39 | ## The Blog - high level plan
40 |
41 | I write my articles using [Notion](https://www.notion.so/) (by the way, you should check it, it's an amazing productivity tool that I use to organize my notes, tasks and any information I want to preserve). Then, after the article is ready, I can export it to a markdown file. It's convenient, since I can use a great editor to create my content (I'm not the VIM type of developer) and then just export it to an easy-to-use format.
42 |
43 | From those markdown files (and related images) I would like to generate a blog, with all my articles.
44 |
45 | 
46 |
47 | Normally we could use [Vercel](https://vercel.com/) to host our blog. It's easy to use and has a decent free plan. But, we want to have some fun! That's why we will try to host our blog in AWS Cloud. Still, to reduce the costs as much as possible, we will leverage the serverless approach and try to host it without any actual application server running in the background.
48 |
49 | ## Solution overview
50 |
51 | Let's summarize what we will create:
52 |
53 | 1. We will use NextJS to create a blog site
54 | 2. Posts will be stored in the Github repository, as markdown files
55 | 3. The build task will generate the static page using markdown as a data source
56 | 4. Generated files will be uploaded to the S3 bucket
57 | 5. We will serve the content to the users with Amazon CloudFront CDN
58 | 6. We will use Terraform to define AWS infrastructure
59 |
60 | 
61 |
62 | ## Implementation
63 |
64 | Let's start with the blog implementation. We will not go through every single line of code here, since most of it is just a standard React application. You can always refer to the source code [here](https://github.com/sosnowski/blog/tree/master/blog).
65 |
66 | ### Project structure
67 |
68 | We will use the standard structure of a NextJS project.
69 |
70 | 
71 |
72 | We only have two pages: `index.tsx` which will be our main page, with a list of blog posts and `/post/[id].tsx` which will display the selected post.
73 |
74 | Static assets (including posts' images) will be stored in the `public` directory. Markdown files with the posts' content will be placed in `posts`.
75 |
76 | ### Pre-rendering main page
77 |
78 | NextJS has a very decent [documentation](https://nextjs.org/docs/basic-features/pages), explaining how it statically renders its pages. In short, at build time, NextJS checks all the files in pages directory and looks for those that are exporting `getStaticProps` function.
79 |
80 | Below, you can find a simplified version of the `index.tsx` page of my blog.
81 |
82 | ```tsx
83 | interface PageProps {
84 | articles: PostMetadata[]
85 | }
86 | // Page component, it will render a list of articles
87 | export default ({ articles }: PageProps) => {
88 | return (
89 |
90 | More articles
91 | {
92 | articles.map(post => {
93 | return ();
94 | })
95 | }
96 |
97 | );
98 | }
99 |
100 | /*
101 | this function will be called at build time.
102 | It returns props that will be passed to the page component
103 | */
104 |
105 | export const getStaticProps: GetStaticProps = async () => {
106 | const data = await getPostsMetdata();
107 | return {
108 | props: {
109 | articles: data
110 | }
111 | };
112 | }
113 | ```
114 |
115 | `getStaticProps` function will be executed at build time and the returned result will be used as props input for the page component. The component will render a HTML content that will be saved to a generated static file.
116 |
117 | It's a simple yet powerful concept. `getStaticProps` can read data from files (as in this case), but it can also query the database, make a request to the server or fetch it from any other source. Then you can use all React power to generate content. Everything at build time. The result is compiled down to a plain HTML file (and some JS and JSON if needed).
118 |
119 | ### Parsing Markdown Metadata
120 |
121 | Let's take a quick look at `getPostsMetadata` function. It uses [gray-matter](https://www.npmjs.com/package/gray-matter) library to extract metadata from markdown files.
122 |
123 | ```tsx
124 | // ...
125 | import matter from 'gray-matter';
126 |
127 | export interface PostMetadata {
128 | title: string;
129 | created: string;
130 | updated: string;
131 | tags: string[];
132 | abstract: string;
133 | id: string;
134 | }
135 |
136 | // extract metadata from a markdown file
137 | export const getPostMetadata = async (postFile: string): Promise => {
138 | const fileContent = await readFileAsync(join(postsPath, postFile), {
139 | encoding: 'utf8'
140 | });
141 |
142 | // read metadata using gray-matter
143 | const result = matter(fileContent);
144 | // file name is used as post id
145 | const postId = basename(postFile, extname(postFile));
146 | return {
147 | title: result.data.title,
148 | tags: result.data.tags,
149 | abstract: result.data.abstract,
150 | created: result.data.created,
151 | updated: result.data.updated,
152 | id: postId
153 | };
154 | };
155 |
156 | // extract metadata from all post files
157 | export const getPostsMetdata = async (): Promise => {
158 | // read list of files from /posts directory
159 | const dirContent: Dirent[] = await readDirAsync(postsPath, {
160 | withFileTypes: true,
161 | encoding: 'utf8'
162 | });
163 |
164 | // iterate through the list and call getPostMetadata on every element
165 | return Promise.all(
166 | dirContent
167 | .filter(entry => entry.isFile())
168 | .map((entry) => {
169 | return getPostMetadata(entry.name);
170 | })
171 | );
172 | }
173 | ```
174 |
175 | Metadata is defined on top of a markdown file. Here is an example from one of the articles:
176 |
177 | ```markdown
178 | ---
179 | title: Scalable Angular Applications
180 | author: Damian Sosnowski
181 | abstract: Currently, one of the most popular frameworks among the Web community...
182 | created: "2019-08-16"
183 | updated: "2019-08-16"
184 | tags:
185 | - angular
186 | - redux
187 | - architecture
188 | - webdev
189 | ---
190 |
191 | Article content here
192 | ```
193 |
194 | Array of `PostMetadata` records is returned to `index.tsx` file and used to render the list of articles on the main blog page.
195 |
196 | ### Pre-rendering post content
197 |
198 | Pre-rendering of post content works in a very similar way, post page also has `getStaticProps`
199 |
200 | function that will load and parse post content. The results are then passed to the page component that will render it. Let's look at `/post/[id].tsx`
201 |
202 | ```tsx
203 | interface Props {
204 | post: {
205 | content: string
206 | } & PostMetadata;
207 | }
208 |
209 | export default ({ post }: Props) => {
210 | return (
211 |
212 |
213 | {post.title}
214 | ...
215 |
216 |
217 |
218 | );
219 | }
220 |
221 | // read both metadata and article content from markdown file
222 | export const getStaticProps: GetStaticProps = async (context) => {
223 | const id = Array.isArray(context.params.id) ? context.params.id[0] : context.params.id;
224 | const postData = await getAllPostData(id);
225 | return {
226 | props: {
227 | post: {
228 | ...postData.meta,
229 | content: postData.content
230 | }
231 | }
232 | };
233 | }
234 | ```
235 |
236 | #### Pre-rendering multiple posts
237 |
238 | If we want to have a fully pre-rendered site, NextJS has to generate a separate html file for every blog post. So how does it know how many files should be rendered? This is where the `getStaticPaths` function is used. It works the same way as `getStaticProps` , being called at build time, but instead of returning component input properties, it returns an array of dynamic URL parameters. In our case, an array of posts' ids.
239 |
240 | ```tsx
241 | export const getStaticPaths: GetStaticPaths = async () => {
242 | return {
243 | paths: (await getPostsMetdata()).map(meta => {
244 | return {
245 | params: {
246 | id: meta.id,
247 | }
248 | };
249 | }),
250 | fallback: false
251 | };
252 | }
253 | ```
254 |
255 | The resulting array is then used to render multiple static files. You can see that the `getStaticProps` function above is using the post id that is passed in the context parameter.
256 |
257 | You can read more about dynamic routes and static content rendering in [NextJS documentation](https://nextjs.org/docs/basic-features/data-fetching).
258 |
259 | ### Parsing markdown content
260 |
261 | In order to display a blog post content, we have to convert it from markdown to html. There is an entire set of libraries and plugins around [unified ecosystem](https://unifiedjs.com/), designed to parse and modify markdown files. We will use those in our blog.
262 |
263 | ```tsx
264 | import markdown from 'remark-parse';
265 | import remark2rehype from 'remark-rehype';
266 | import html from 'rehype-stringify';
267 | import matter from 'gray-matter';
268 | import unified from 'unified';
269 |
270 | // ...
271 | /*
272 | Get both metadata and content of the article
273 | */
274 | export const getAllPostData = async (postId: string): Promise => {
275 | const fileContent = await readFileAsync(join(postsPath, `${postId}.md`), {
276 | encoding: 'utf8'
277 | });
278 | // extract metadata with gray-matter
279 | const postMeta = matter(fileContent);
280 |
281 | /*
282 | define a unified pipeline, that will parse markdown to syntax tree,
283 | modify it and convert to html
284 | */
285 | const postHtml = await unified()
286 | .use(markdown) // parse markdown
287 | .use(remark2rehype) // convert markdown tree to html tree
288 | .use(htmlParser) // my custom html modifications
289 | .use(html, { allowDangerousHtml: true }) // convert to html
290 | .process(postMeta.content); // provide article content as input
291 |
292 | return {
293 | meta: {
294 | title: postMeta.data.title,
295 | tags: postMeta.data.tags,
296 | abstract: postMeta.data.abstract,
297 | created: postMeta.data.created,
298 | updated: postMeta.data.updated,
299 | id: postId
300 | },
301 | content: postHtml.toString()
302 | };
303 | }
304 | ```
305 |
306 | You can check the [remark documentation](https://remark.js.org/) for a detailed API explanation.
307 |
308 | For my blog, I've created a simple remark plugin, that slightly modifies the html syntax tree. For example it adds a lazy loading attribute to the images and it generates syntax highlighting for code blocks, using [highlight.js](https://highlightjs.org/). This way I can improve the performance of my blog, by running those tasks at build time. You can check detailed implementation [in the repository](https://github.com/sosnowski/blog/blob/master/blog/lib/posts.ts).
309 |
310 | **Resulting html is then rendered in the React component**
311 |
312 | ```tsx
313 | export const Content = ({ content, className, children }) => {
314 | return (
315 |
316 | );
317 | }
318 | ```
319 |
320 | ### Building and exporting the project
321 |
322 | In order to build our site we just have to run this command:
323 |
324 | ```bash
325 | next build && next export
326 | ```
327 |
328 | NextJS will create an optimized build of our application, running `getStaticPaths` and `getStaticProps` for our pages. Then, the result will be placed in the `out` directory.
329 |
330 | 
331 |
332 | Build output
333 |
334 | Those are the files that we will upload to the S3 bucket and serve to our users.
335 |
336 | **We finally have a working blog implementation, now let's define the infrastructure that will host it.**
337 |
338 | ## Infrastructure
339 |
340 | We will use Terraform to define our infrastructure. You can refer to its [documentation](https://www.terraform.io/docs/providers/aws/) and check the full terraform project [in the repository](https://github.com/sosnowski/blog/tree/master/infra).
341 |
342 | All infrastructure is set up in `us-east-1` region, as this is the region where CloudFront distribution is created. This will not affect our site performance, since the content will be delivered from Edge Locations closest to the users.
343 |
344 | ```bash
345 | provider "aws" {
346 | profile = "default"
347 | region = "us-east-1"
348 | }
349 | ```
350 |
351 | ### S3 Bucket
352 |
353 | 
354 |
355 | Let's start with an S3 bucket that we will use to store generated html files and static assets. The content of the bucket will be a direct copy of the `out` folder, described in the previous section.
356 |
357 | ```bash
358 | resource "aws_s3_bucket" "blog_bucket" {
359 | bucket = "your-bucket-name"
360 | acl = "private"
361 | force_destroy = true
362 | }
363 | ```
364 |
365 | The bucket is private and not accessible from the internet, we don't want users to load files directly from the bucket, we want all the requests to go through the CloudFront distribution.
366 |
367 | #### Bucket policy
368 |
369 | To achieve it, we have to create a proper policy, that will allow CloudFront to list files in the bucket and load the files to serve it to the users.
370 |
371 | ```bash
372 | resource "aws_cloudfront_origin_access_identity" "origin_access_identity" {
373 | comment = "cloudfront origin access identity"
374 | }
375 |
376 | data "aws_iam_policy_document" "s3_policy" {
377 | statement {
378 | actions = ["s3:GetObject"]
379 | resources = ["${aws_s3_bucket.blog_bucket.arn}/*"]
380 |
381 | principals {
382 | type = "AWS"
383 | identifiers = ["${aws_cloudfront_origin_access_identity.origin_access_identity.iam_arn}"]
384 | }
385 | }
386 |
387 | statement {
388 | actions = ["s3:ListBucket"]
389 | resources = ["${aws_s3_bucket.blog_bucket.arn}"]
390 |
391 | principals {
392 | type = "AWS"
393 | identifiers = ["${aws_cloudfront_origin_access_identity.origin_access_identity.iam_arn}"]
394 | }
395 | }
396 | }
397 | ```
398 |
399 | And assign the policy to our bucket.
400 |
401 | ```bash
402 | resource "aws_s3_bucket_policy" "policy_for_cloudfront" {
403 | bucket = aws_s3_bucket.blog_bucket.id
404 | policy = data.aws_iam_policy_document.s3_policy.json
405 | }
406 | ```
407 |
408 | With those settings in place, our bucket is now ready to serve as an origin for the CloudFront distribution.
409 |
410 | ### CloudFront
411 |
412 | Time to set up the main element of our infrastructure, a CloudFront distribution. CloudFront is a Content Delivery Network that uses Amazon Edge Locations to cache and deliver the content from the servers that are in close proximity to our users.
413 |
414 | 
415 |
416 | When user requests an asset from our site (an image, html file etc.) the request goes to CloudFront edge location, if the asset is in its cache, it's served to the user immediately. If not, CloudFront will load the asset from so called origin (S3 bucket in our case), cache it on Edge Location and deliver it to the user.
417 |
418 | #### Origin
419 |
420 | First, we define the origin for our CloudFront distribution. This will simply be the S3 bucket where we store the generated files.
421 |
422 | ```bash
423 | resource "aws_cloudfront_distribution" "blog_assets_distribution"
424 | origin {
425 | domain_name = aws_s3_bucket.blog_bucket.bucket_domain_name
426 | # just some unique ID of the origin
427 | origin_id = "s3_blog_assets_origin"
428 |
429 | s3_origin_config {
430 | origin_access_identity = aws_cloudfront_origin_access_identity.origin_access_identity.cloudfront_access_identity_path
431 | }
432 | }
433 |
434 | # ...
435 | ```
436 |
437 | #### Cache behaviours
438 |
439 | Next, we will define cache behaviour. Cache behaviour defines what files should be cached by CloudFront and for how long. We will define two behaviours, the default one, that will apply to all the requested files:
440 |
441 | ```bash
442 | #...
443 | # Default cache behavior. Will cover all requests to html, js files etc.
444 | default_cache_behavior {
445 | allowed_methods = ["GET", "HEAD", "OPTIONS"]
446 | cached_methods = ["GET", "HEAD"]
447 | target_origin_id = "s3_blog_assets_origin"
448 | compress = true
449 | forwarded_values {
450 | query_string = false
451 | cookies {
452 | forward = "none"
453 | }
454 | }
455 | viewer_protocol_policy = "redirect-to-https"
456 | # Time for which the files will be stored in cache (12h)
457 | default_ttl = 43200
458 | }
459 | ```
460 |
461 | And an additional behaviour, that will override the default one for files in the `/assets` directory.
462 |
463 | ```bash
464 | # Cache behavior for assets requests
465 | ordered_cache_behavior {
466 | path_pattern = "/assets/*"
467 | allowed_methods = ["GET", "HEAD"]
468 | cached_methods = ["GET", "HEAD"]
469 | target_origin_id = "s3_blog_assets_origin"
470 | compress = false
471 | forwarded_values {
472 | query_string = false
473 | cookies {
474 | forward = "none"
475 | }
476 | }
477 | viewer_protocol_policy = "redirect-to-https"
478 |
479 | # 24h * 3
480 | default_ttl = 259200
481 | }
482 | ```
483 |
484 | By having two separated behaviours, we can adjust the caching time. Assets like images and fonts are unlikely to change, so we can cache those for longer periods of time.
485 |
486 | ### Lambda @ Edge for routing
487 |
488 | With the current setup we could already run our application. You can upload the output of the NextJS export task to the S3 bucket and CloudFront will serve those files to the end users. There is however one more problem that we have to solve.
489 |
490 | NextJS will generate links based on our routing definitions. Link to the post will follow the pattern `/post/[id]` , for example `/post/anatomy-of-aws-lambda` . When the browser requests this URL, CloudFront will try to serve this file... which does not exist. Our bucket contains the HTML file `/post/anatomy-of-aws-lambda.html` but obviously CloudFront is not able to guess that.
491 |
492 | 
493 |
494 | Normally a NextJS server knows what files we are looking for and is able to serve a proper HTML file when requested. But we don't have any server! So, what can we do?
495 |
496 | We can leverage Lambda@Edge, which is simply a Lambda function running directly on the Edge Location of our CloudFront distribution. Lambda at Edge can be triggered by four different events:
497 |
498 | - When CloudFront receives a request from a viewer (**viewer request**)
499 | - Before CloudFront forwards a request to the origin (**origin request**)
500 | - When CloudFront receives a response from the origin (**origin response**)
501 | - Before CloudFront returns the response to the viewer (**viewer response**)
502 |
503 | For us, the perfect fit is **origin request**. We will create a simple function that will change the path of the requested post by adding `.html` to the URL. Here is the code:
504 |
505 | ```tsx
506 | import { CloudFrontRequestEvent } from "aws-lambda";
507 |
508 | const isPost = /^\/post(.+)/;
509 | const hasExtension = /(.+)\.[a-zA-Z0-9]{2,5}$/;
510 |
511 | export const handler = async (event: CloudFrontRequestEvent, context: unknown) => {
512 | const request = event.Records[0].cf.request;
513 | const url: string = request.uri;
514 |
515 | // if it's a post request and has no extension, add .html
516 | if (url && url.match(isPost) && !url.match(hasExtension)) {
517 | request.uri = `${url}.html`;
518 | }
519 |
520 | return request;
521 | }
522 | ```
523 |
524 | This function will allow us to route requests to proper files.
525 |
526 | 
527 |
528 | We will use a simple [webpack configuration](https://github.com/sosnowski/blog/blob/master/infra/webpack.config.js) to compile function to JavaScript and bundle all dependencies in to a single, easy to deploy JS file.
529 |
530 | Now we just have to add Lambda to our Terraform definition.
531 |
532 | ```bash
533 | # Terraform will handle zipping the JS bundle
534 | data "archive_file" "cdn-origin-request-zip" {
535 | type = "zip"
536 | source_file = "dist/cdn-origin-request/handler.js"
537 | output_path = "dist/cdn-origin-request.zip"
538 | }
539 |
540 | # Lambda at Edge requires specific execution role
541 | # in order to be able to execute on CF Edge Location
542 |
543 | resource "aws_iam_role_policy" "cdn-lambda-execution" {
544 | name_prefix = "lambda-execution-policy-"
545 | role = aws_iam_role.cdn-lambda-execution.id
546 |
547 | policy = < {
708 | console.log('Reading static files in ' + staticFolder);
709 | const allStaticFiles = getAllFiles(staticFolder);
710 | console.log('Loading existing assets from S3...');
711 | // it will load a list of files in the /assets/ directory
712 | const existingAssets = await getS3Assets();
713 |
714 | for(let i = 0; i < allStaticFiles.length; i++) {
715 | const file = allStaticFiles[i];
716 | const imageKey = relative(staticFolder, file);
717 | // don't upload assets that are already uploaded
718 | if (!existingAssets.includes(imageKey)) {
719 | console.log(`Uploading file ${file} to ${imageKey}...`);
720 | await s3.send(new PutObjectCommand({
721 | Bucket: bucketName,
722 | Key: imageKey,
723 | Body: readFileSync(file),
724 | // set proper mime-type using mime-types library
725 | ContentType: lookup(file) || 'plain/text'
726 | }));
727 | console.log('Done');
728 | } else {
729 | console.log(`${imageKey} already uploaded`);
730 | }
731 | }
732 | console.log('All done');
733 | })();
734 | ```
735 |
736 | And that's it! Now every time you push a new version of your code, or a new article, Github will automatically build the code and deploy it to S3. **Our project is finished!**
737 |
738 | Feel free to clone or fork [the repository with the full implementation](https://github.com/sosnowski/blog) and play with it! You can also ask me questions on twitter [@sosnowsd](https://twitter.com/sosnowsd)
739 |
--------------------------------------------------------------------------------