109 | );
110 | }
111 | }
112 |
113 | export default connect(mapStateToProps, mapDispatchToProps)(Register);
114 |
--------------------------------------------------------------------------------
/frontend/src/constants/actionTypes.js:
--------------------------------------------------------------------------------
1 | export const APP_LOAD = 'APP_LOAD';
2 | export const REDIRECT = 'REDIRECT';
3 | export const ARTICLE_SUBMITTED = 'ARTICLE_SUBMITTED';
4 | export const SETTINGS_SAVED = 'SETTINGS_SAVED';
5 | export const DELETE_ARTICLE = 'DELETE_ARTICLE';
6 | export const SETTINGS_PAGE_UNLOADED = 'SETTINGS_PAGE_UNLOADED';
7 | export const HOME_PAGE_LOADED = 'HOME_PAGE_LOADED';
8 | export const HOME_PAGE_UNLOADED = 'HOME_PAGE_UNLOADED';
9 | export const ARTICLE_PAGE_LOADED = 'ARTICLE_PAGE_LOADED';
10 | export const ARTICLE_PAGE_UNLOADED = 'ARTICLE_PAGE_UNLOADED';
11 | export const ADD_COMMENT = 'ADD_COMMENT';
12 | export const DELETE_COMMENT = 'DELETE_COMMENT';
13 | export const ARTICLE_FAVORITED = 'ARTICLE_FAVORITED';
14 | export const ARTICLE_UNFAVORITED = 'ARTICLE_UNFAVORITED';
15 | export const SET_PAGE = 'SET_PAGE';
16 | export const APPLY_TAG_FILTER = 'APPLY_TAG_FILTER';
17 | export const CHANGE_TAB = 'CHANGE_TAB';
18 | export const PROFILE_PAGE_LOADED = 'PROFILE_PAGE_LOADED';
19 | export const PROFILE_PAGE_UNLOADED = 'PROFILE_PAGE_UNLOADED';
20 | export const LOGIN = 'LOGIN';
21 | export const LOGOUT = 'LOGOUT';
22 | export const REGISTER = 'REGISTER';
23 | export const LOGIN_PAGE_UNLOADED = 'LOGIN_PAGE_UNLOADED';
24 | export const REGISTER_PAGE_UNLOADED = 'REGISTER_PAGE_UNLOADED';
25 | export const ASYNC_START = 'ASYNC_START';
26 | export const ASYNC_END = 'ASYNC_END';
27 | export const EDITOR_PAGE_LOADED = 'EDITOR_PAGE_LOADED';
28 | export const EDITOR_PAGE_UNLOADED = 'EDITOR_PAGE_UNLOADED';
29 | export const ADD_TAG = 'ADD_TAG';
30 | export const REMOVE_TAG = 'REMOVE_TAG';
31 | export const UPDATE_FIELD_AUTH = 'UPDATE_FIELD_AUTH';
32 | export const UPDATE_FIELD_EDITOR = 'UPDATE_FIELD_EDITOR';
33 | export const FOLLOW_USER = 'FOLLOW_USER';
34 | export const UNFOLLOW_USER = 'UNFOLLOW_USER';
35 | export const PROFILE_FAVORITES_PAGE_LOADED = 'PROFILE_FAVORITES_PAGE_LOADED';
36 | export const PROFILE_FAVORITES_PAGE_UNLOADED = 'PROFILE_FAVORITES_PAGE_UNLOADED';
37 |
--------------------------------------------------------------------------------
/frontend/src/index.js:
--------------------------------------------------------------------------------
1 | import ReactDOM from 'react-dom';
2 | import { Provider } from 'react-redux';
3 | import React from 'react';
4 | import { store, history} from './store';
5 |
6 | import { Route, Switch } from 'react-router-dom';
7 | import { ConnectedRouter } from 'react-router-redux';
8 |
9 | import App from './components/App';
10 |
11 | ReactDOM.render((
12 |   <Provider store={store}>
13 |     <ConnectedRouter history={history}>
14 |       <Switch>
15 |         <Route path="/" component={App} />
16 |       </Switch>
17 |     </ConnectedRouter>
18 |   </Provider>
19 |
20 | ), document.getElementById('root'));
21 |
--------------------------------------------------------------------------------
/frontend/src/middleware.js:
--------------------------------------------------------------------------------
1 | import agent from './agent';
2 | import {
3 | ASYNC_START,
4 | ASYNC_END,
5 | LOGIN,
6 | LOGOUT,
7 | REGISTER
8 | } from './constants/actionTypes';
9 |
10 | const promiseMiddleware = store => next => action => {
11 | if (isPromise(action.payload)) {
12 | store.dispatch({ type: ASYNC_START, subtype: action.type });
13 |
14 | const currentView = store.getState().viewChangeCounter;
15 | const skipTracking = action.skipTracking;
16 |
17 | action.payload.then(
18 | res => {
19 | const currentState = store.getState()
20 | if (!skipTracking && currentState.viewChangeCounter !== currentView) {
21 | return
22 | }
23 | console.log('RESULT', res);
24 | action.payload = res;
25 | store.dispatch({ type: ASYNC_END, promise: action.payload });
26 | store.dispatch(action);
27 | },
28 | error => {
29 | const currentState = store.getState()
30 | if (!skipTracking && currentState.viewChangeCounter !== currentView) {
31 | return
32 | }
33 | console.log('ERROR', error);
34 | action.error = true;
35 | action.payload = error.response.body;
36 | if (!action.skipTracking) {
37 | store.dispatch({ type: ASYNC_END, promise: action.payload });
38 | }
39 | store.dispatch(action);
40 | }
41 | );
42 |
43 | return;
44 | }
45 |
46 | next(action);
47 | };
48 |
49 | const localStorageMiddleware = store => next => action => {
50 | if (action.type === REGISTER || action.type === LOGIN) {
51 | if (!action.error) {
52 | window.localStorage.setItem('jwt', action.payload.user.token);
53 | agent.setToken(action.payload.user.token);
54 | }
55 | } else if (action.type === LOGOUT) {
56 | window.localStorage.setItem('jwt', '');
57 | agent.setToken(null);
58 | }
59 |
60 | next(action);
61 | };
62 |
63 | function isPromise(v) {
64 | return v && typeof v.then === 'function';
65 | }
66 |
67 |
68 | export { promiseMiddleware, localStorageMiddleware }
69 |
--------------------------------------------------------------------------------
/frontend/src/reducer.js:
--------------------------------------------------------------------------------
1 | import article from './reducers/article';
2 | import articleList from './reducers/articleList';
3 | import auth from './reducers/auth';
4 | import { combineReducers } from 'redux';
5 | import common from './reducers/common';
6 | import editor from './reducers/editor';
7 | import home from './reducers/home';
8 | import profile from './reducers/profile';
9 | import settings from './reducers/settings';
10 | import { routerReducer } from 'react-router-redux';
11 |
12 | export default combineReducers({
13 | article,
14 | articleList,
15 | auth,
16 | common,
17 | editor,
18 | home,
19 | profile,
20 | settings,
21 | router: routerReducer
22 | });
23 |
--------------------------------------------------------------------------------
/frontend/src/reducers/article.js:
--------------------------------------------------------------------------------
1 | import {
2 | ARTICLE_PAGE_LOADED,
3 | ARTICLE_PAGE_UNLOADED,
4 | ADD_COMMENT,
5 | DELETE_COMMENT
6 | } from '../constants/actionTypes';
7 |
8 | export default (state = {}, action) => {
9 | switch (action.type) {
10 | case ARTICLE_PAGE_LOADED:
11 | return {
12 | ...state,
13 | article: action.payload[0].article,
14 | comments: action.payload[1].comments
15 | };
16 | case ARTICLE_PAGE_UNLOADED:
17 | return {};
18 | case ADD_COMMENT:
19 | return {
20 | ...state,
21 | commentErrors: action.error ? action.payload.errors : null,
22 | comments: action.error ?
23 | null :
24 | (state.comments || []).concat([action.payload.comment])
25 | };
26 | case DELETE_COMMENT:
27 | const commentId = action.commentId
28 | return {
29 | ...state,
30 | comments: state.comments.filter(comment => comment.id !== commentId)
31 | };
32 | default:
33 | return state;
34 | }
35 | };
36 |
--------------------------------------------------------------------------------
/frontend/src/reducers/articleList.js:
--------------------------------------------------------------------------------
1 | import {
2 | ARTICLE_FAVORITED,
3 | ARTICLE_UNFAVORITED,
4 | SET_PAGE,
5 | APPLY_TAG_FILTER,
6 | HOME_PAGE_LOADED,
7 | HOME_PAGE_UNLOADED,
8 | CHANGE_TAB,
9 | PROFILE_PAGE_LOADED,
10 | PROFILE_PAGE_UNLOADED,
11 | PROFILE_FAVORITES_PAGE_LOADED,
12 | PROFILE_FAVORITES_PAGE_UNLOADED
13 | } from '../constants/actionTypes';
14 |
15 | export default (state = {}, action) => {
16 | switch (action.type) {
17 | case ARTICLE_FAVORITED:
18 | case ARTICLE_UNFAVORITED:
19 | return {
20 | ...state,
21 | articles: state.articles.map(article => {
22 | if (article.slug === action.payload.article.slug) {
23 | return {
24 | ...article,
25 | favorited: action.payload.article.favorited,
26 | favoritesCount: action.payload.article.favoritesCount
27 | };
28 | }
29 | return article;
30 | })
31 | };
32 | case SET_PAGE:
33 | return {
34 | ...state,
35 | articles: action.payload.articles,
36 | articlesCount: action.payload.articlesCount,
37 | currentPage: action.page
38 | };
39 | case APPLY_TAG_FILTER:
40 | return {
41 | ...state,
42 | pager: action.pager,
43 | articles: action.payload.articles,
44 | articlesCount: action.payload.articlesCount,
45 | tab: null,
46 | tag: action.tag,
47 | currentPage: 0
48 | };
49 | case HOME_PAGE_LOADED:
50 | return {
51 | ...state,
52 | pager: action.pager,
53 | tags: action.payload[0].tags,
54 | articles: action.payload[1].articles,
55 | articlesCount: action.payload[1].articlesCount,
56 | currentPage: 0,
57 | tab: action.tab
58 | };
59 | case HOME_PAGE_UNLOADED:
60 | return {};
61 | case CHANGE_TAB:
62 | return {
63 | ...state,
64 | pager: action.pager,
65 | articles: action.payload.articles,
66 | articlesCount: action.payload.articlesCount,
67 | tab: action.tab,
68 | currentPage: 0,
69 | tag: null
70 | };
71 | case PROFILE_PAGE_LOADED:
72 | case PROFILE_FAVORITES_PAGE_LOADED:
73 | return {
74 | ...state,
75 | pager: action.pager,
76 | articles: action.payload[1].articles,
77 | articlesCount: action.payload[1].articlesCount,
78 | currentPage: 0
79 | };
80 | case PROFILE_PAGE_UNLOADED:
81 | case PROFILE_FAVORITES_PAGE_UNLOADED:
82 | return {};
83 | default:
84 | return state;
85 | }
86 | };
87 |
--------------------------------------------------------------------------------
/frontend/src/reducers/auth.js:
--------------------------------------------------------------------------------
1 | import {
2 | LOGIN,
3 | REGISTER,
4 | LOGIN_PAGE_UNLOADED,
5 | REGISTER_PAGE_UNLOADED,
6 | ASYNC_START,
7 | UPDATE_FIELD_AUTH
8 | } from '../constants/actionTypes';
9 |
10 | export default (state = {}, action) => {
11 | switch (action.type) {
12 | case LOGIN:
13 | case REGISTER:
14 | return {
15 | ...state,
16 | inProgress: false,
17 | errors: action.error ? action.payload.errors : null
18 | };
19 | case LOGIN_PAGE_UNLOADED:
20 | case REGISTER_PAGE_UNLOADED:
21 | return {};
22 | case ASYNC_START:
23 | if (action.subtype === LOGIN || action.subtype === REGISTER) {
24 | return { ...state, inProgress: true };
25 | }
26 | break;
27 | case UPDATE_FIELD_AUTH:
28 | return { ...state, [action.key]: action.value };
29 | default:
30 | return state;
31 | }
32 |
33 | return state;
34 | };
35 |
--------------------------------------------------------------------------------
/frontend/src/reducers/common.js:
--------------------------------------------------------------------------------
1 | import {
2 | APP_LOAD,
3 | REDIRECT,
4 | LOGOUT,
5 | ARTICLE_SUBMITTED,
6 | SETTINGS_SAVED,
7 | LOGIN,
8 | REGISTER,
9 | DELETE_ARTICLE,
10 | ARTICLE_PAGE_UNLOADED,
11 | EDITOR_PAGE_UNLOADED,
12 | HOME_PAGE_UNLOADED,
13 | PROFILE_PAGE_UNLOADED,
14 | PROFILE_FAVORITES_PAGE_UNLOADED,
15 | SETTINGS_PAGE_UNLOADED,
16 | LOGIN_PAGE_UNLOADED,
17 | REGISTER_PAGE_UNLOADED
18 | } from '../constants/actionTypes';
19 |
20 | const defaultState = {
21 | appName: 'Conduit',
22 | token: null,
23 | viewChangeCounter: 0
24 | };
25 |
26 | export default (state = defaultState, action) => {
27 | switch (action.type) {
28 | case APP_LOAD:
29 | return {
30 | ...state,
31 | token: action.token || null,
32 | appLoaded: true,
33 | currentUser: action.payload ? action.payload.user : null
34 | };
35 | case REDIRECT:
36 | return { ...state, redirectTo: null };
37 | case LOGOUT:
38 | return { ...state, redirectTo: '/', token: null, currentUser: null };
39 | case ARTICLE_SUBMITTED:
40 | const redirectUrl = `/article/${action.payload.article.slug}`;
41 | return { ...state, redirectTo: redirectUrl };
42 | case SETTINGS_SAVED:
43 | return {
44 | ...state,
45 | redirectTo: action.error ? null : '/',
46 | currentUser: action.error ? null : action.payload.user
47 | };
48 | case LOGIN:
49 | case REGISTER:
50 | return {
51 | ...state,
52 | redirectTo: action.error ? null : '/',
53 | token: action.error ? null : action.payload.user.token,
54 | currentUser: action.error ? null : action.payload.user
55 | };
56 | case DELETE_ARTICLE:
57 | return { ...state, redirectTo: '/' };
58 | case ARTICLE_PAGE_UNLOADED:
59 | case EDITOR_PAGE_UNLOADED:
60 | case HOME_PAGE_UNLOADED:
61 | case PROFILE_PAGE_UNLOADED:
62 | case PROFILE_FAVORITES_PAGE_UNLOADED:
63 | case SETTINGS_PAGE_UNLOADED:
64 | case LOGIN_PAGE_UNLOADED:
65 | case REGISTER_PAGE_UNLOADED:
66 | return { ...state, viewChangeCounter: state.viewChangeCounter + 1 };
67 | default:
68 | return state;
69 | }
70 | };
71 |
--------------------------------------------------------------------------------
/frontend/src/reducers/editor.js:
--------------------------------------------------------------------------------
1 | import {
2 | EDITOR_PAGE_LOADED,
3 | EDITOR_PAGE_UNLOADED,
4 | ARTICLE_SUBMITTED,
5 | ASYNC_START,
6 | ADD_TAG,
7 | REMOVE_TAG,
8 | UPDATE_FIELD_EDITOR
9 | } from '../constants/actionTypes';
10 |
11 | export default (state = {}, action) => {
12 | switch (action.type) {
13 | case EDITOR_PAGE_LOADED:
14 | return {
15 | ...state,
16 | articleSlug: action.payload ? action.payload.article.slug : '',
17 | title: action.payload ? action.payload.article.title : '',
18 | description: action.payload ? action.payload.article.description : '',
19 | body: action.payload ? action.payload.article.body : '',
20 | tagInput: '',
21 | tagList: action.payload ? action.payload.article.tagList : []
22 | };
23 | case EDITOR_PAGE_UNLOADED:
24 | return {};
25 | case ARTICLE_SUBMITTED:
26 | return {
27 | ...state,
28 | inProgress: null,
29 | errors: action.error ? action.payload.errors : null
30 | };
31 | case ASYNC_START:
32 | if (action.subtype === ARTICLE_SUBMITTED) {
33 | return { ...state, inProgress: true };
34 | }
35 | break;
36 | case ADD_TAG:
37 | return {
38 | ...state,
39 | tagList: state.tagList.concat([state.tagInput]),
40 | tagInput: ''
41 | };
42 | case REMOVE_TAG:
43 | return {
44 | ...state,
45 | tagList: state.tagList.filter(tag => tag !== action.tag)
46 | };
47 | case UPDATE_FIELD_EDITOR:
48 | return { ...state, [action.key]: action.value };
49 | default:
50 | return state;
51 | }
52 |
53 | return state;
54 | };
55 |
--------------------------------------------------------------------------------
/frontend/src/reducers/home.js:
--------------------------------------------------------------------------------
1 | import { HOME_PAGE_LOADED, HOME_PAGE_UNLOADED } from '../constants/actionTypes';
2 |
3 | export default (state = {}, action) => {
4 | switch (action.type) {
5 | case HOME_PAGE_LOADED:
6 | return {
7 | ...state,
8 | tags: action.payload[0].tags
9 | };
10 | case HOME_PAGE_UNLOADED:
11 | return {};
12 | default:
13 | return state;
14 | }
15 | };
16 |
--------------------------------------------------------------------------------
/frontend/src/reducers/profile.js:
--------------------------------------------------------------------------------
1 | import {
2 | PROFILE_PAGE_LOADED,
3 | PROFILE_PAGE_UNLOADED,
4 | FOLLOW_USER,
5 | UNFOLLOW_USER
6 | } from '../constants/actionTypes';
7 |
8 | export default (state = {}, action) => {
9 | switch (action.type) {
10 | case PROFILE_PAGE_LOADED:
11 | return {
12 | ...action.payload[0].profile
13 | };
14 | case PROFILE_PAGE_UNLOADED:
15 | return {};
16 | case FOLLOW_USER:
17 | case UNFOLLOW_USER:
18 | return {
19 | ...action.payload.profile
20 | };
21 | default:
22 | return state;
23 | }
24 | };
25 |
--------------------------------------------------------------------------------
/frontend/src/reducers/settings.js:
--------------------------------------------------------------------------------
1 | import {
2 | SETTINGS_SAVED,
3 | SETTINGS_PAGE_UNLOADED,
4 | ASYNC_START
5 | } from '../constants/actionTypes';
6 |
7 | export default (state = {}, action) => {
8 | switch (action.type) {
9 | case SETTINGS_SAVED:
10 | return {
11 | ...state,
12 | inProgress: false,
13 | errors: action.error ? action.payload.errors : null
14 | };
15 | case SETTINGS_PAGE_UNLOADED:
16 | return {};
17 | case ASYNC_START:
18 | return {
19 | ...state,
20 | inProgress: true
21 | };
22 | default:
23 | return state;
24 | }
25 | };
26 |
--------------------------------------------------------------------------------
/frontend/src/store.js:
--------------------------------------------------------------------------------
1 | import { applyMiddleware, createStore } from 'redux';
2 | import { createLogger } from 'redux-logger'
3 | import { composeWithDevTools } from 'redux-devtools-extension/developmentOnly';
4 | import { promiseMiddleware, localStorageMiddleware } from './middleware';
5 | import reducer from './reducer';
6 |
7 | import { routerMiddleware } from 'react-router-redux'
8 | import createHistory from 'history/createBrowserHistory';
9 |
10 | export const history = createHistory();
11 |
12 | // Build the middleware for intercepting and dispatching navigation actions
13 | const myRouterMiddleware = routerMiddleware(history);
14 |
15 | const getMiddleware = () => {
16 | if (process.env.NODE_ENV === 'production') {
17 | return applyMiddleware(myRouterMiddleware, promiseMiddleware, localStorageMiddleware);
18 | } else {
19 | // Enable additional logging in non-production environments.
20 | return applyMiddleware(myRouterMiddleware, promiseMiddleware, localStorageMiddleware, createLogger())
21 | }
22 | };
23 |
24 | export const store = createStore(
25 | reducer, composeWithDevTools(getMiddleware()));
26 |
--------------------------------------------------------------------------------
/infrastructure/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 |
3 | /docker/api/data
4 | /docker/db/data
5 | /docker/nginx/data
6 | /docker/web/data
7 |
--------------------------------------------------------------------------------
/infrastructure/aws/codedeploy/after_install:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | mkdir -p /var/log/gunicorn
3 | mkdir -p /var/log/django
4 | mkdir -p /var/www/conduit/static/
5 |
6 | cd /deploy/backend || exit 2
7 | pipenv install
8 |
9 | AWS_DEFAULT_REGION="$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .region)"
10 | export AWS_DEFAULT_REGION
11 |
12 | DJANGO_SETTINGS_MODULE='conduit.settings.ec2'
13 | export DJANGO_SETTINGS_MODULE
14 |
15 | pipenv run python manage.py migrate
16 | pipenv run python manage.py collectstatic --no-input
17 |
--------------------------------------------------------------------------------
/infrastructure/aws/codedeploy/gunicorn.ec2.conf:
--------------------------------------------------------------------------------
1 | proc_name = "gunicorn"
2 |
3 | bind = "0.0.0.0:9000"
4 |
5 | daemon = True
6 | pidfile = "/var/run/gunicorn.pid"
7 |
8 | loglevel = "DEBUG"
9 | accesslog = "/var/log/gunicorn/access.log"
10 | errorlog = "/var/log/gunicorn/error.log"
11 |
12 | keepalive = 1
13 | timeout = 300
14 | workers = 5
15 | worker_class = "gevent"
16 |
--------------------------------------------------------------------------------
/infrastructure/aws/codedeploy/install_dependencies:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sudo apt update
3 | sudo apt install -y python3-pip jq
4 |
5 | pip3 install pipenv
6 |
--------------------------------------------------------------------------------
/infrastructure/aws/codedeploy/start_server:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd /deploy/backend || exit 2
3 |
4 | AWS_DEFAULT_REGION="$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .region)"
5 | export AWS_DEFAULT_REGION
6 |
7 | DJANGO_SETTINGS_MODULE='conduit.settings.ec2'
8 | export DJANGO_SETTINGS_MODULE
9 |
10 | pipenv run gunicorn --config ../infrastructure/aws/codedeploy/gunicorn.ec2.conf conduit.wsgi
11 |
--------------------------------------------------------------------------------
/infrastructure/aws/codedeploy/stop_server:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [[ -f /var/run/gunicorn.pid ]]; then
3 | kill -s TERM "$(cat /var/run/gunicorn.pid)"
4 | fi
5 |
--------------------------------------------------------------------------------
/infrastructure/aws/codedeploy/validate_service:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | PID_FILE="/var/run/gunicorn.pid"
3 | if [[ -f "$PID_FILE" ]]; then
4 | PID="$(cat "$PID_FILE")"
5 | PROC_FILE="/proc/$PID/status"
6 | if [[ ! -f "$PROC_FILE" ]]; then
7 | echo "gunicorn is not running"
8 | rm -f "$PID_FILE"
9 | exit 2
10 | fi
11 | else
12 | echo "gunicorn is not running"
13 | exit 2
14 | fi
15 |
--------------------------------------------------------------------------------
/infrastructure/aws/lambda/lambda_function.py:
--------------------------------------------------------------------------------
1 | import psycopg2
2 | import json
3 | import boto3
4 |
5 | client = boto3.client('ssm')
6 |
7 | db_host = client.get_parameter(
8 | Name='/prod/api/DATABASE_HOST')['Parameter']['Value']
9 | db_name = client.get_parameter(
10 | Name='/prod/api/DATABASE_NAME')['Parameter']['Value']
11 | db_user = client.get_parameter(
12 | Name='/prod/api/DATABASE_USER')['Parameter']['Value']
13 | db_pass = client.get_parameter(
14 | Name='/prod/api/DATABASE_PASSWORD', WithDecryption=True)['Parameter']['Value']
15 |
16 | db_port = 5432
17 |
18 |
19 | def create_conn():
20 | conn = None
21 | try:
22 | conn = psycopg2.connect("dbname={} user={} host={} password={}".format(
23 | db_name, db_user, db_host, db_pass))
24 |     except psycopg2.OperationalError as error:
25 |         print("Cannot connect: {}".format(error))
26 | return conn
27 |
28 |
29 | def fetch(conn, query, params=None):
30 | result = []
31 | print("Now executing: {}".format(query))
32 | cursor = conn.cursor()
33 |     cursor.execute(query, params)
34 | raw = cursor.fetchall()
35 | for line in raw:
36 | result.append(line)
37 | return result
38 |
39 |
40 | def lambda_handler(event, context):
41 |
42 | print(event)
43 |
44 | if 'query' in event.keys():
45 | query = event['query']
46 | else:
47 | query = ''
48 |
49 | query_cmd = "select * from articles_article where title like %s"  # parameterized to avoid SQL injection
50 |
51 | print(query_cmd)
52 |
53 | conn = create_conn()
54 |
55 | result = fetch(conn, query_cmd, ('%' + query + '%',))
56 | conn.close()
57 |
58 | return {
59 | 'statusCode': 200,
60 | 'body': str(result)
61 | }
62 |
--------------------------------------------------------------------------------
/infrastructure/aws/lambda/psycopg2/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tryolabs/aws-workshop/a8ccd86d4ab0891b547025304514b5ee9f19af19/infrastructure/aws/lambda/psycopg2/.DS_Store
--------------------------------------------------------------------------------
/infrastructure/aws/lambda/psycopg2/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tryolabs/aws-workshop/a8ccd86d4ab0891b547025304514b5ee9f19af19/infrastructure/aws/lambda/psycopg2/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/infrastructure/aws/lambda/psycopg2/_ipaddress.py:
--------------------------------------------------------------------------------
1 | """Implementation of the ipaddres-based network types adaptation
2 | """
3 |
4 | # psycopg/_ipaddress.py - Ipaddres-based network types adaptation
5 | #
6 | # Copyright (C) 2016 Daniele Varrazzo
7 | #
8 | # psycopg2 is free software: you can redistribute it and/or modify it
9 | # under the terms of the GNU Lesser General Public License as published
10 | # by the Free Software Foundation, either version 3 of the License, or
11 | # (at your option) any later version.
12 | #
13 | # In addition, as a special exception, the copyright holders give
14 | # permission to link this program with the OpenSSL library (or with
15 | # modified versions of OpenSSL that use the same license as OpenSSL),
16 | # and distribute linked combinations including the two.
17 | #
18 | # You must obey the GNU Lesser General Public License in all respects for
19 | # all of the code used other than OpenSSL.
20 | #
21 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
22 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
24 | # License for more details.
25 |
26 | from psycopg2.extensions import (
27 | new_type, new_array_type, register_type, register_adapter, QuotedString)
28 |
29 | # The module is imported on register_ipaddress
30 | ipaddress = None
31 |
32 | # The typecasters are created only once
33 | _casters = None
34 |
35 |
36 | def register_ipaddress(conn_or_curs=None):
37 | """
38 | Register conversion support between `ipaddress` objects and `network types`__.
39 |
40 | :param conn_or_curs: the scope where to register the type casters.
41 | If `!None` register them globally.
42 |
43 | After the function is called, PostgreSQL :sql:`inet` values will be
44 | converted into `~ipaddress.IPv4Interface` or `~ipaddress.IPv6Interface`
45 | objects, :sql:`cidr` values into into `~ipaddress.IPv4Network` or
46 | `~ipaddress.IPv6Network`.
47 |
48 | .. __: https://www.postgresql.org/docs/current/static/datatype-net-types.html
49 | """
50 | global ipaddress
51 | import ipaddress
52 |
53 | global _casters
54 | if _casters is None:
55 | _casters = _make_casters()
56 |
57 | for c in _casters:
58 | register_type(c, conn_or_curs)
59 |
60 | for t in [ipaddress.IPv4Interface, ipaddress.IPv6Interface,
61 | ipaddress.IPv4Network, ipaddress.IPv6Network]:
62 | register_adapter(t, adapt_ipaddress)
63 |
64 |
65 | def _make_casters():
66 | inet = new_type((869,), 'INET', cast_interface)
67 | ainet = new_array_type((1041,), 'INET[]', inet)
68 |
69 | cidr = new_type((650,), 'CIDR', cast_network)
70 | acidr = new_array_type((651,), 'CIDR[]', cidr)
71 |
72 | return [inet, ainet, cidr, acidr]
73 |
74 |
75 | def cast_interface(s, cur=None):
76 | if s is None:
77 | return None
78 | # Py2 version force the use of unicode. meh.
79 | return ipaddress.ip_interface(str(s))
80 |
81 |
82 | def cast_network(s, cur=None):
83 | if s is None:
84 | return None
85 | return ipaddress.ip_network(str(s))
86 |
87 |
88 | def adapt_ipaddress(obj):
89 | return QuotedString(str(obj))
90 |
--------------------------------------------------------------------------------
/infrastructure/aws/lambda/psycopg2/_psycopg.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tryolabs/aws-workshop/a8ccd86d4ab0891b547025304514b5ee9f19af19/infrastructure/aws/lambda/psycopg2/_psycopg.so
--------------------------------------------------------------------------------
/infrastructure/aws/lambda/psycopg2/psycopg1.py:
--------------------------------------------------------------------------------
1 | """psycopg 1.1.x compatibility module
2 |
3 | This module uses the new style connection and cursor types to build a psycopg
4 | 1.1.1.x compatibility layer. It should be considered a temporary hack to run
5 | old code while porting to psycopg 2. Import it as follows::
6 |
7 | from psycopg2 import psycopg1 as psycopg
8 | """
9 | # psycopg/psycopg1.py - psycopg 1.1.x compatibility module
10 | #
11 | # Copyright (C) 2003-2010 Federico Di Gregorio
12 | #
13 | # psycopg2 is free software: you can redistribute it and/or modify it
14 | # under the terms of the GNU Lesser General Public License as published
15 | # by the Free Software Foundation, either version 3 of the License, or
16 | # (at your option) any later version.
17 | #
18 | # In addition, as a special exception, the copyright holders give
19 | # permission to link this program with the OpenSSL library (or with
20 | # modified versions of OpenSSL that use the same license as OpenSSL),
21 | # and distribute linked combinations including the two.
22 | #
23 | # You must obey the GNU Lesser General Public License in all respects for
24 | # all of the code used other than OpenSSL.
25 | #
26 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
27 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
29 | # License for more details.
30 |
31 | import psycopg2._psycopg as _2psycopg # noqa
32 | from psycopg2.extensions import cursor as _2cursor
33 | from psycopg2.extensions import connection as _2connection
34 |
35 | from psycopg2 import * # noqa
36 | import psycopg2.extensions as _ext
37 | _2connect = connect
38 |
39 |
40 | def connect(*args, **kwargs):
41 | """connect(dsn, ...) -> new psycopg 1.1.x compatible connection object"""
42 | kwargs['connection_factory'] = connection
43 | conn = _2connect(*args, **kwargs)
44 | conn.set_isolation_level(_ext.ISOLATION_LEVEL_READ_COMMITTED)
45 | return conn
46 |
47 |
48 | class connection(_2connection):
49 | """psycopg 1.1.x connection."""
50 |
51 | def cursor(self):
52 | """cursor() -> new psycopg 1.1.x compatible cursor object"""
53 | return _2connection.cursor(self, cursor_factory=cursor)
54 |
55 | def autocommit(self, on_off=1):
56 | """autocommit(on_off=1) -> switch autocommit on (1) or off (0)"""
57 | if on_off > 0:
58 | self.set_isolation_level(_ext.ISOLATION_LEVEL_AUTOCOMMIT)
59 | else:
60 | self.set_isolation_level(_ext.ISOLATION_LEVEL_READ_COMMITTED)
61 |
62 |
63 | class cursor(_2cursor):
64 | """psycopg 1.1.x cursor.
65 |
66 | Note that this cursor implements the exact procedure used by psycopg 1 to
67 | build dictionaries out of result rows. The DictCursor in the
68 | psycopg.extras modules implements a much better and faster algorithm.
69 | """
70 |
71 | def __build_dict(self, row):
72 | res = {}
73 | for i in range(len(self.description)):
74 | res[self.description[i][0]] = row[i]
75 | return res
76 |
77 | def dictfetchone(self):
78 | row = _2cursor.fetchone(self)
79 | if row:
80 | return self.__build_dict(row)
81 | else:
82 | return row
83 |
84 | def dictfetchmany(self, size):
85 | res = []
86 | rows = _2cursor.fetchmany(self, size)
87 | for row in rows:
88 | res.append(self.__build_dict(row))
89 | return res
90 |
91 | def dictfetchall(self):
92 | res = []
93 | rows = _2cursor.fetchall(self)
94 | for row in rows:
95 | res.append(self.__build_dict(row))
96 | return res
97 |
--------------------------------------------------------------------------------
/infrastructure/aws/lambda/psycopg2/tz.py:
--------------------------------------------------------------------------------
1 | """tzinfo implementations for psycopg2
2 |
3 | This module holds two different tzinfo implementations that can be used as
4 | the 'tzinfo' argument to datetime constructors, directly passed to psycopg
5 | functions or used to set the .tzinfo_factory attribute in cursors.
6 | """
7 | # psycopg/tz.py - tzinfo implementation
8 | #
9 | # Copyright (C) 2003-2010 Federico Di Gregorio
10 | #
11 | # psycopg2 is free software: you can redistribute it and/or modify it
12 | # under the terms of the GNU Lesser General Public License as published
13 | # by the Free Software Foundation, either version 3 of the License, or
14 | # (at your option) any later version.
15 | #
16 | # In addition, as a special exception, the copyright holders give
17 | # permission to link this program with the OpenSSL library (or with
18 | # modified versions of OpenSSL that use the same license as OpenSSL),
19 | # and distribute linked combinations including the two.
20 | #
21 | # You must obey the GNU Lesser General Public License in all respects for
22 | # all of the code used other than OpenSSL.
23 | #
24 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
25 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
27 | # License for more details.
28 |
29 | import datetime
30 | import time
31 |
32 | ZERO = datetime.timedelta(0)
33 |
34 |
35 | class FixedOffsetTimezone(datetime.tzinfo):
36 | """Fixed offset in minutes east from UTC.
37 |
38 | This is exactly the implementation__ found in Python 2.3.x documentation,
39 | with a small change to the `!__init__()` method to allow for pickling
40 | and a default name in the form ``sHH:MM`` (``s`` is the sign.).
41 |
42 | The implementation also caches instances. During creation, if a
43 | FixedOffsetTimezone instance has previously been created with the same
44 | offset and name that instance will be returned. This saves memory and
45 | improves comparability.
46 |
47 | .. __: http://docs.python.org/library/datetime.html#datetime-tzinfo
48 | """
49 | _name = None
50 | _offset = ZERO
51 |
52 | _cache = {}
53 |
54 | def __init__(self, offset=None, name=None):
55 | if offset is not None:
56 | self._offset = datetime.timedelta(minutes=offset)
57 | if name is not None:
58 | self._name = name
59 |
60 | def __new__(cls, offset=None, name=None):
61 | """Return a suitable instance created earlier if it exists
62 | """
63 | key = (offset, name)
64 | try:
65 | return cls._cache[key]
66 | except KeyError:
67 | tz = super(FixedOffsetTimezone, cls).__new__(cls, offset, name)
68 | cls._cache[key] = tz
69 | return tz
70 |
71 | def __repr__(self):
72 | offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
73 | return "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=%r)" \
74 | % (offset_mins, self._name)
75 |
76 | def __getinitargs__(self):
77 | offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
78 | return (offset_mins, self._name)
79 |
80 | def utcoffset(self, dt):
81 | return self._offset
82 |
83 | def tzname(self, dt):
84 | if self._name is not None:
85 | return self._name
86 | else:
87 | seconds = self._offset.seconds + self._offset.days * 86400
88 | hours, seconds = divmod(seconds, 3600)
89 | minutes = seconds / 60
90 | if minutes:
91 | return "%+03d:%d" % (hours, minutes)
92 | else:
93 | return "%+03d" % hours
94 |
95 | def dst(self, dt):
96 | return ZERO
97 |
98 |
99 | STDOFFSET = datetime.timedelta(seconds=-time.timezone)
100 | if time.daylight:
101 | DSTOFFSET = datetime.timedelta(seconds=-time.altzone)
102 | else:
103 | DSTOFFSET = STDOFFSET
104 | DSTDIFF = DSTOFFSET - STDOFFSET
105 |
106 |
107 | class LocalTimezone(datetime.tzinfo):
108 | """Platform idea of local timezone.
109 |
110 | This is the exact implementation from the Python 2.3 documentation.
111 | """
112 | def utcoffset(self, dt):
113 | if self._isdst(dt):
114 | return DSTOFFSET
115 | else:
116 | return STDOFFSET
117 |
118 | def dst(self, dt):
119 | if self._isdst(dt):
120 | return DSTDIFF
121 | else:
122 | return ZERO
123 |
124 | def tzname(self, dt):
125 | return time.tzname[self._isdst(dt)]
126 |
127 | def _isdst(self, dt):
128 | tt = (dt.year, dt.month, dt.day,
129 | dt.hour, dt.minute, dt.second,
130 | dt.weekday(), 0, -1)
131 | stamp = time.mktime(tt)
132 | tt = time.localtime(stamp)
133 | return tt.tm_isdst > 0
134 |
135 | LOCAL = LocalTimezone()
136 |
137 | # TODO: pre-generate some interesting time zones?
138 |
--------------------------------------------------------------------------------
/infrastructure/aws/lambda/serverless.yml:
--------------------------------------------------------------------------------
1 | service: serverless-aws-workshop
2 | provider:
3 | name: aws
4 | runtime: python3.7
5 | iamRoleStatements:
6 | - Effect: 'Allow'
7 | Action:
8 | - 'ssm:GetParameter'
9 | Resource: '*'
10 | - Effect: 'Allow'
11 | Action:
12 | - 'kms:Decrypt'
13 | Resource: '*'
14 | package:
15 | include:
16 | - lambda_function.py
17 | - psycopg2/**
18 | exclude:
19 | - '**'
20 | functions:
21 | lambda_handler:
22 | handler: lambda_function.lambda_handler
23 | events:
24 | - http:
25 | path: search
26 | method: get
27 |
--------------------------------------------------------------------------------
/infrastructure/docker/api/.env.template:
--------------------------------------------------------------------------------
1 | DATABASE_NAME=database_name
2 | DATABASE_USER=database_user
3 | DATABASE_PASSWORD=database_password
4 | DATABASE_HOST=database_host
5 |
--------------------------------------------------------------------------------
/infrastructure/docker/api/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3
2 |
3 | ENV PYTHONUNBUFFERED 1
4 |
5 | RUN pip install pipenv
6 |
7 | RUN mkdir /conf
8 | RUN mkdir /app
9 | RUN mkdir /data
10 |
11 | WORKDIR /app
12 |
13 | COPY backend/Pipfile /app
14 | COPY backend/Pipfile.lock /app
15 |
16 | RUN pipenv install
17 |
18 | COPY infrastructure/docker/api/gunicorn.docker.conf /conf/gunicorn.conf
19 |
20 | COPY backend /app
21 |
22 | ENV DJANGO_SETTINGS_MODULE 'conduit.settings.docker'
23 |
24 | EXPOSE 9000
25 | CMD [ \
26 | "pipenv", \
27 | "run", \
28 | "gunicorn", \
29 | "--config", "/conf/gunicorn.conf", \
30 | "conduit.wsgi" \
31 | ]
32 |
--------------------------------------------------------------------------------
/infrastructure/docker/api/gunicorn.docker.conf:
--------------------------------------------------------------------------------
1 | proc_name = "gunicorn"
2 |
3 | bind = "0.0.0.0:9000"
4 |
5 | loglevel = "DEBUG"
6 | accesslog = "/data/access.log"
7 | errorlog = "/data/error.log"
8 |
9 | keepalive = 1
10 | timeout = 300
11 | workers = 5
12 | worker_class = "gevent"
13 |
--------------------------------------------------------------------------------
/infrastructure/docker/db/.env.template:
--------------------------------------------------------------------------------
1 | POSTGRES_USER=postgres_user
2 | POSTGRES_PASSWORD=postgres_password
3 | POSTGRES_DB=postgres_db
4 | PGDATA=/pgdata
5 |
--------------------------------------------------------------------------------
/infrastructure/docker/django-admin:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # get_absolute path function
3 | get_abs_path() {
4 | local PARENT_DIR
5 | local ABS_PATH
6 | PARENT_DIR=$(dirname "$1")
7 | cd "$PARENT_DIR" || exit
8 | ABS_PATH="$(pwd)"/"$(basename "$1")"
9 | cd - > /dev/null || exit
10 | echo "$ABS_PATH"
11 | }
12 |
13 | INFRA_DIR="$(dirname "$(get_abs_path "$0")")"
14 |
15 | pushd "$INFRA_DIR" > /dev/null 2>&1
16 |
17 | if [[ $(docker-compose ps | wc -l) -lt 3 ]]; then
18 | ALREADY_RUNNING=0
19 | docker-compose up --build -d > /dev/null 2>&1
20 | else
21 | ALREADY_RUNNING=1
22 | fi
23 |
24 | # wait a few seconds for the db to be up and ready
25 | until docker-compose exec db pg_isready > /dev/null 2>&1
26 | do
27 | sleep 1
28 | done
29 |
30 | docker-compose run api pipenv run python manage.py "$@"
31 |
32 | if [[ $ALREADY_RUNNING == 0 ]]; then
33 | docker-compose down > /dev/null 2>&1
34 | fi
35 |
36 | popd > /dev/null
37 |
--------------------------------------------------------------------------------
/infrastructure/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | db:
4 | image: postgres
5 | volumes:
6 | - ./db/data:/pgdata
7 | env_file: ./db/.env
8 | api:
9 | build:
10 |       # Set parent dir as context so as to allow "COPY backend /app" in Dockerfile
11 | context: ../../
12 | dockerfile: infrastructure/docker/api/Dockerfile
13 | volumes:
14 | - ./api/data:/data
15 | links:
16 | - db:db
17 | env_file: ./api/.env
18 | depends_on:
19 | - db
20 | web:
21 | build:
22 | context: ../../
23 | dockerfile: infrastructure/docker/web/Dockerfile
24 | volumes:
25 | - ./web/data/:/data
26 | env_file: ./web/.env
27 | nginx:
28 | image: nginx
29 | volumes:
30 | - ./nginx/nginx.conf.template:/conf/nginx.conf.template:ro
31 | - ./nginx/sites-available.workshop.template:/conf/sites-available/workshop.template:ro
32 | - ./nginx/sites-enabled.workshop.template:/conf/sites-enabled/workshop.template:ro
33 | - ./nginx/entrypoint:/conf/entrypoint
34 | - ./nginx/data/:/data
35 | ports:
36 | - 80:80
37 | entrypoint: /conf/entrypoint
38 | links:
39 | - api:api
40 | - web:web
41 | env_file: ./nginx/.env
42 | depends_on:
43 | - api
44 | - web
45 |
--------------------------------------------------------------------------------
/infrastructure/docker/nginx/.env.template:
--------------------------------------------------------------------------------
1 | API_HOST=api_host
2 | API_PORT=9000
3 | WEB_HOST=web_host
4 | WEB_PORT=5000
5 |
--------------------------------------------------------------------------------
/infrastructure/docker/nginx/entrypoint:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export DOLLAR='$'
4 | mkdir -p /etc/nginx/sites-enabled
5 | mkdir -p /etc/nginx/sites-available
6 | envsubst < /conf/sites-available/workshop.template > /etc/nginx/sites-available/workshop
7 | envsubst < /conf/sites-enabled/workshop.template > /etc/nginx/sites-enabled/workshop
8 | envsubst < /conf/nginx.conf.template > /etc/nginx/nginx.conf
9 | nginx
10 |
--------------------------------------------------------------------------------
/infrastructure/docker/nginx/sites-available.workshop.template:
--------------------------------------------------------------------------------
1 | # www to non-www redirect -- duplicate content is BAD:
2 | # https://github.com/h5bp/html5-boilerplate/blob/5370479476dceae7cc3ea105946536d6bc0ee468/.htaccess#L362
3 | # Choose between www and non-www, listen on the *wrong* one and redirect to
4 | # the right one -- http://wiki.nginx.org/Pitfalls#Server_Name
5 |
6 | server {
7 | # listen [::]:80 accept_filter=httpready; # for FreeBSD
8 | # listen 80 accept_filter=httpready; # for FreeBSD
9 | # listen [::]:80 deferred; # for Linux
10 | # listen 80 deferred; # for Linux
11 |
12 | listen [::]:80;
13 | listen 80;
14 |
15 | # logging
16 | access_log /data/access.log;
17 | error_log /data/error.log debug;
18 |
19 | # The host name to respond to
20 | server_name localhost;
21 |
22 | # The root directory for static files
23 | root /www;
24 |
25 | location /api {
26 | proxy_pass http://${API_HOST}:${API_PORT};
27 | proxy_set_header Host ${DOLLAR}http_host;
28 | proxy_set_header X-Real-IP ${DOLLAR}remote_addr;
29 | proxy_set_header X-Forwarded-For ${DOLLAR}proxy_add_x_forwarded_for;
30 | proxy_connect_timeout 300s;
31 | proxy_read_timeout 300s;
32 | sendfile on;
33 | }
34 |
35 | location / {
36 | proxy_pass http://${WEB_HOST}:${WEB_PORT};
37 | proxy_set_header Host ${DOLLAR}http_host;
38 | proxy_set_header X-Real-IP ${DOLLAR}remote_addr;
39 | proxy_set_header X-Forwarded-For ${DOLLAR}proxy_add_x_forwarded_for;
40 | proxy_connect_timeout 300s;
41 | proxy_read_timeout 300s;
42 | sendfile on;
43 | }
44 |
45 | #Specify a charset
46 | charset utf-8;
47 | }
48 |
--------------------------------------------------------------------------------
/infrastructure/docker/nginx/sites-enabled.workshop.template:
--------------------------------------------------------------------------------
1 | include sites-available/workshop;
2 |
--------------------------------------------------------------------------------
/infrastructure/docker/web/.env.template:
--------------------------------------------------------------------------------
1 | REACT_APP_USE_OWN_API=true
2 |
--------------------------------------------------------------------------------
/infrastructure/docker/web/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:8
2 |
3 | RUN mkdir /app
4 |
5 | WORKDIR /app
6 |
7 | RUN npm install -g serve
8 |
9 | COPY frontend /app
10 |
11 | RUN npm install
12 |
13 | ENV REACT_APP_USE_OWN_API=true
14 | RUN npm run build
15 |
16 | EXPOSE 5000
17 | CMD [ \
18 | "serve", \
19 | "-s", \
20 | "build" \
21 | ]
22 |
--------------------------------------------------------------------------------
/workshop/beanstalk/01-clean-up.md:
--------------------------------------------------------------------------------
1 | # Clean up
2 |
3 | We have a pretty interesting infrastructure running now, so in order to integrate Beanstalk we need to remove some services to make room. Our current setup has four major components: Elastic Load Balancer (ELB), Auto Scaling Group (ASG), Virtual Private Cloud (VPC) and Relational Database Service (RDS). As powerful as it is, Beanstalk [can't set up VPCs](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/vpc.html) for you, and [is not recommended](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/AWSHowTo.RDS.html) for managing RDS outside of testing environments, so we are going to keep those two and remove the ELB and ASG.
4 |
5 | To remove the ELB:
6 |
7 | 1. On your AWS Console, go to **EC2** under **Compute**
8 | 2. Click on **Load Balancers** under **LOAD BALANCING**
9 | 3. Select the load balancer we created earlier. We used the name `aws-workshop-load-balancer`
10 | 4. Click **Actions**
11 | 5. Select **Delete**
12 | 6. Click **Yes, Delete**
13 |
14 | Now do the same for the **Target Groups**, **Launch Configurations** and **Auto Scaling Groups**.
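If you prefer the command line, the same cleanup can be sketched with the AWS CLI (assuming it is configured with credentials and the right region; the ARNs and names below are placeholders you would look up first):

```bash
# Look up the ARN of the load balancer we created earlier.
aws elbv2 describe-load-balancers --names aws-workshop-load-balancer \
    --query 'LoadBalancers[0].LoadBalancerArn' --output text

# Delete the load balancer, target group, ASG and launch configuration.
aws elbv2 delete-load-balancer --load-balancer-arn <load-balancer-arn>
aws elbv2 delete-target-group --target-group-arn <target-group-arn>
aws autoscaling delete-auto-scaling-group \
    --auto-scaling-group-name <asg-name> --force-delete
aws autoscaling delete-launch-configuration \
    --launch-configuration-name <launch-config-name>
```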
15 |
16 | Once the ELB and ASG are completely removed, we need to terminate the EC2 instances we have running.
17 |
18 | 1. Go to **EC2** under **Compute**
19 | 2. Click on **Instances**
20 | 3. Using the checkboxes on the left, select all the instances except the Bastion (if you have it running) and the NAT instance.
21 | 4. Click **Actions**
22 | 5. Click **Instance State**
23 | 6. Click **Terminate**
24 | 7. Click **Yes, Terminate**
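The equivalent CLI call, as a sketch (the instance IDs are placeholders; double-check you are not terminating the Bastion or NAT instance):

```bash
aws ec2 terminate-instances --instance-ids i-0123456789abcdef0 i-0fedcba9876543210
```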
25 |
26 | Next, we are going to set up our application with a production environment.
27 |
28 | ---
29 | **Extra mile:** when all the instances are terminated, remove all the security groups that aren't needed anymore. Could you leave your setup broken by doing this?
30 |
31 | ---
32 | **Next:** [create a new app](/workshop/beanstalk/02-new-app-environment.md)
--------------------------------------------------------------------------------
/workshop/beanstalk/03-finish-integration.md:
--------------------------------------------------------------------------------
1 | # Finish integration
2 |
3 | Now that we have our instance running, we need to adjust some details to make it work with the other components of our infrastructure: our frontend in S3 and the database in RDS. We have to tell the frontend that the API is now reachable at a different URL.
4 |
5 | First we need the new URL for the API.
6 |
7 | 1. Go to **Elastic Beanstalk** under **Compute**.
8 | 2. Click the **Conduit-prod** card under the **Conduit** application.
9 | 3. At the top, at the end of the **All Applications**... breadcrumb, you have the **Environment ID** and **URL**. Copy that URL.
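As an alternative to the console, a sketch of fetching the URL with the AWS CLI (assuming the environment is named `Conduit-prod` and the CLI is configured):

```bash
aws elasticbeanstalk describe-environments --environment-names Conduit-prod \
    --query 'Environments[0].CNAME' --output text
```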
10 |
11 | Now we need to paste the API URL into the Parameter Store entry that the frontend reads.
12 |
13 | 1. Go to **EC2** under **Compute**.
14 | 2. Click on **Parameter Store** under **SYSTEMS MANAGER SHARED RESOURCES**.
15 | 3. Select the parameter **/prod/frontend/API_URL**.
16 | 4. Click **Actions**, **Edit Parameter**.
17 | 5. In the value field, paste the URL for the API. You may need to remove the trailing `/` so the URL ends in `elasticbeanstalk.com`; if you leave the trailing path separator, all the API calls will fail. A command line alternative is sketched below.
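A minimal CLI sketch of the same update (the URL value is a placeholder; note the missing trailing `/`):

```bash
aws ssm put-parameter --name /prod/frontend/API_URL \
    --value http://conduit-prod.xxxxxxxxxx.us-east-1.elasticbeanstalk.com \
    --type String --overwrite
```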
18 |
19 | For this change to take effect we need to run CodeBuild because this value is read when the [frontend is deployed](buildspec.frontend.yml).
20 |
21 | 1. Go to **Code Build** under **Developer Tools**.
22 | 2. Click your project name.
23 | 3. In the **Build History** section, select the checkbox at the left for the most recent build.
24 | 4. Click **Retry**.
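From the CLI, the retry could look like this sketch (the project name is a placeholder):

```bash
# Find the most recent build for the project...
aws codebuild list-builds-for-project --project-name <your-project> \
    --sort-order DESCENDING
# ...and retry it by id.
aws codebuild retry-build --id <build-id>
```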
25 |
26 | You can click the build name to follow the progress; it shouldn't take too long. When the build completes, the frontend will be hitting our new production environment. However, it won't work yet, because we still need to give our API web instances permission to access the Parameter Store. This is the same thing we did earlier with our **ApiRole**, but since our instances are now provisioned with the role created by Beanstalk, we need to grant read access to the Parameter Store to that role.
27 |
28 | 1. Go to **IAM** under **Security, Identity & Compliance**.
29 | 2. Click **Roles** in the left pane.
30 | 3. Click **aws-elasticbeanstalk-ec2-role**, that's the role created for Beanstalk to use in the EC2 instances.
31 | 4. Click **Attach policy** in the **Permissions** tab.
32 | 5. Click the search bar and look for `AmazonSSMReadOnlyAccess` and select the checkbox in the left.
33 | 6. Click **Attach policy**.
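The same grant via the CLI, as a sketch:

```bash
aws iam attach-role-policy --role-name aws-elasticbeanstalk-ec2-role \
    --policy-arn arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess
```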
34 |
35 | Great, now our instances can access the Parameter Store, but they still can't read the encrypted values. To fix this we need to grant anyone with the **aws-elasticbeanstalk-ec2-role** role access to our encryption keys.
36 |
37 | 1. In **IAM** under **Security, Identity & Compliance**, go to **Encryption keys**.
38 | 2. Scroll down to the **Key Users** section and click **Add** under the **This Account** subsection.
39 | 3. Select the **aws-elasticbeanstalk-ec2-role** role (the role our instances run with).
40 | 4. Click **Attach**.
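An alternative to clicking through the console is a KMS grant, sketched below (the key id and account id are placeholders; this grants only `Decrypt`, which is what reading encrypted parameters needs):

```bash
aws kms create-grant --key-id <key-id> \
    --grantee-principal arn:aws:iam::<account-id>:role/aws-elasticbeanstalk-ec2-role \
    --operations Decrypt
```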
41 |
42 | Ok, so our instances have full access to all the parameters in the Parameter Store now. We need to restart the server in our prod environment because the values from the Parameter Store are read when the app starts.
43 |
44 | 1. Go to **Elastic Beanstalk** under **Compute**.
45 | 2. Click the **Conduit-prod** card under the **Conduit** application.
46 | 3. Click **Action** on the right.
47 | 4. Click **Restart App Server(s)**.
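Or, from the CLI (a sketch, assuming the environment name):

```bash
aws elasticbeanstalk restart-app-server --environment-name Conduit-prod
```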
48 |
49 | When the restart finishes, the API should be working. You can navigate to the prod environment URL and append `/api` to get the default Django REST Framework page describing the API. Also try navigating to the frontend and inspecting some of the requests to confirm that you are using the right environment.
50 |
51 | ---
52 | **Extra mile:**
53 |
54 | - What about the RDS? Why is it working without touching anything?
55 | - Check the **Scaling** card in the **Configuration** options for the environment. Click the gear and add a **Time-based Scaling** action. Check how that change impacts the configuration.
56 | - Not sure about the database? Log in to one of your instances, install `postgresql` and try it yourself. Tip: Amazon Linux uses the [yum](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/install-software.html) package manager.
57 |
58 | ---
59 | **Next:** [conclusion](/workshop/beanstalk/04-conclusion.md)
--------------------------------------------------------------------------------
/workshop/beanstalk/04-conclusion.md:
--------------------------------------------------------------------------------
1 | # Conclusion
2 |
3 | If everything went as expected, you now have a running production-ready environment accessible from the same frontend as before. Take some time to explore what Beanstalk offers from the environment dashboard. There are plenty of interesting metrics and health status reports.
4 |
5 | The most valuable feature of Beanstalk is the ability to set up and terminate environments for the same application independently. This makes a big difference when you are working on a real project, where changes have to be tested before moving to production, new people join the project while others leave, and you need to demo things without the risk of another team member breaking the app because it's under active development. Those are the situations where Elastic Beanstalk shines.
6 |
7 | ---
8 | **Extra mile:**
9 |
10 | - Thinking about the latter, our current app architecture (frontend in S3 and API in EC2) is not the ideal combination for getting the most out of multi-environment scenarios. There are many options on how to do this, all with their own trade-offs depending on the case. Try to think about this and come up with your own idea of how to make environment management simpler, then implement it.
11 |
12 | What is the big pain point?
13 |
14 | - We mentioned [earlier](/workshop/beanstalk/introduction.md) that letting Beanstalk manage your RDS instances is **not** recommended for production environments. This is because, in order to handle your environment configuration, Beanstalk needs to manage the lifetime of your instances as if they were stateless entities. A database is the opposite of that.
15 |
16 | Try creating a new environment with an internal RDS called **Conduit-staging**.
--------------------------------------------------------------------------------
/workshop/beanstalk/introduction.md:
--------------------------------------------------------------------------------
1 | # Beanstalk
2 |
3 | [Elastic Beanstalk](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/Welcome.html) is an AWS service that allows us to deploy a web app without having to worry about what combination of AWS services might be needed. All we have to do is describe what we need and let Beanstalk do the rest (create security groups, set up a load balancer, etc).
4 |
5 | Beyond the automatic setup, Beanstalk also has some nice tools that aren't available in other AWS services and that really add value:
6 |
7 | - The ability to manage different environments for the same app (dev, prod, testing, etc).
8 | - Centralized panel to handle the setup for each environment.
9 | - Great monitoring metrics.
10 | - Detailed health status.
11 | - Makes it really easy to set up new environments quickly.
12 |
13 | At the time of writing, Beanstalk supports apps developed in Java, PHP, .NET, Node.js, Python and Ruby out of the box, and you can build your own custom containers for other platforms, all running on Amazon Linux.
14 |
15 | In this section we are going to use Beanstalk to set up a _production environment_ (this means with an [external RDS](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/AWSHowTo.RDS.html)), replacing our current Elastic Load Balancer (ELB) and Auto Scaling Group (ASG) setup with a Beanstalk-handled one.
16 |
17 | It's important to remember not to make manual changes to the components generated by Beanstalk, because that could prevent the correct cleanup if we decide to remove an environment in the future. So, to start fresh, we need to remove some things before creating our new environment.
18 |
19 | ---
20 | **Next:** [clean up current setup](/workshop/beanstalk/01-clean-up.md)
--------------------------------------------------------------------------------
/workshop/beanstalk/troubleshooting.md:
--------------------------------------------------------------------------------
1 | # Troubleshooting
2 |
3 | These are some places where you can look for info if something doesn't work as expected.
4 |
5 | - The deploy log is stored in each instance's `/var/log/eb-activity.log` file.
6 | - Beanstalk uses Apache to run the Django app, so the Apache error logs are under the `/var/log/httpd` folder.
7 | - Your code is deployed to `/opt/python/current/`, which is a symlink to the last bundle deployed. All the past versions are in `/opt/python/bundle`.
8 | - The virtual env for your app is in `/opt/python/run/venv`.
9 | - Amazon Linux uses `yum` to install packages. If you need some tool not installed by default, which is very likely to happen, [install it yourself](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/install-software.html).
10 |
11 | If you can't log in to your instances, the logs can be retrieved from Beanstalk.
12 |
13 | 1. Go to **Elastic Beanstalk** under **Compute**.
14 | 2. Select your environment.
15 | 3. Click **Logs** in the left pane.
16 | 4. Click **Request Logs** and then **Full Logs**. This will zip all the `/var/log` and `/var/opt/log` content and give you a link in the **Logs** table to download it.
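The same can be sketched with the CLI (the environment name is an assumption):

```bash
# Ask Beanstalk to bundle the logs...
aws elasticbeanstalk request-environment-info \
    --environment-name Conduit-prod --info-type bundle
# ...then fetch the download URL once it is ready.
aws elasticbeanstalk retrieve-environment-info \
    --environment-name Conduit-prod --info-type bundle
```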
17 |
18 | If none of this helps, [open an issue](https://github.com/tryolabs/aws-workshop/issues/new) and we will try to help you.
--------------------------------------------------------------------------------
/workshop/elb-auto-scaling-group/01-load-balancer.md:
--------------------------------------------------------------------------------
1 | # Create a Load Balancer
2 |
3 | Elastic Load Balancing automatically distributes incoming application traffic across multiple targets, such as Amazon EC2 instances, containers, and IP addresses. When you are running applications in production, you will typically use multiple instances so that if one fails, your application can still work. The Load Balancer receives the traffic and forwards it to the instances that serve your app. You can read more about this [here](https://aws.amazon.com/elasticloadbalancing/).
4 |
5 | 1. Go to **EC2** under **Compute** section.
6 | 2. On left menu select **Load Balancers** under **LOAD BALANCING**.
7 | 3. Click **Create Load Balancer**.
8 | 4. Select **Application Load Balancer**.
9 | 5. As name put: `aws-workshop-load-balancer`.
10 | 6. Select at least 2 Availability zones.
11 | 7. Click **Next: Configure Security Settings**.
12 | 8. Click **Next: Configure Security Groups**.
13 | 9. Select **Create a new security group** and as name put `load-balancer-security-group` and add a description.
14 | 10. Click **Next: Configure Routing**.
15 | 11. As name put: `aws-workshop-target-group`.
16 | 12. As Port: `9000`.
17 | 13. As path: `/api/tags`.
18 | 14. Click **Next: Register Targets**.
19 | 15. Click **Next: Review**.
20 | 16. Click **Create**.
21 | 17. Click **Close**.
22 |
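A rough CLI equivalent of the console steps above; the subnet, security group and VPC IDs are hypothetical placeholders, not part of the workshop steps:

```
# Create the Application Load Balancer in two Availability Zones.
aws elbv2 create-load-balancer \
  --name aws-workshop-load-balancer \
  --subnets <subnet-1a-id> <subnet-1b-id> \
  --security-groups <load-balancer-sg-id>

# Create the target group that health-checks the API on /api/tags.
aws elbv2 create-target-group \
  --name aws-workshop-target-group \
  --protocol HTTP --port 9000 \
  --vpc-id <vpc-id> \
  --health-check-path /api/tags
```
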
23 | ---
24 | **Next:** [create an Auto Scaling Group](/workshop/elb-auto-scaling-group/02-auto-scaling-group.md).
25 |
--------------------------------------------------------------------------------
/workshop/elb-auto-scaling-group/02-auto-scaling-group.md:
--------------------------------------------------------------------------------
1 | # Create Auto Scaling Group
2 |
3 | Production applications need to be ready to tolerate a growing number of simultaneous users. For example, if you get featured on a popular blog, you may receive many more users than you had expected in a short period of time, and your application may crash because it's not able to sustain all the incoming traffic.
4 |
5 | Amazon provides [Auto Scaling Groups](https://docs.aws.amazon.com/autoscaling/latest/userguide/AutoScalingGroup.html) as a way to build a more robust application which can handle increasing loads. Using these, you can set up rules (scaling policies) so that more instances serving your application are launched automatically when the load grows.
6 |
7 | To create an Auto Scaling Group, first we need to create a [Launch Configuration](http://docs.aws.amazon.com/autoscaling/latest/userguide/LaunchConfiguration.html), which is basically a template that specifies properties of the instances that will be launched.
8 |
9 | ## Create Launch Configuration
10 | 1. Go to **EC2** under **Compute** section.
11 | 2. On left menu select **Launch Configuration** under **AUTO SCALING**.
12 | 3. Click **Create launch configuration**.
13 | 4. Look for Ubuntu Server (make sure it says Free tier eligible) and click Select.
14 | 5. Select `t2.micro` and then click on **Next: Configure Details**.
15 | 6. As name put: `aws-workshop-auto-scaling-group`.
16 | 7. As **IAM Role** select: `ApiRole`.
17 | 8. On **Advanced Settings**, select **As text** in **User data** and paste this bash script:
18 | ```
19 | #!/bin/bash
20 | export LC_ALL=C.UTF-8
21 | apt update
22 | apt -y install ruby  # the CodeDeploy agent is written in Ruby
23 | cd /home/ubuntu
24 | wget https://aws-codedeploy-us-east-1.s3.amazonaws.com/latest/install  # fetch the CodeDeploy agent installer
25 | chmod +x ./install
26 | ./install auto  # install and start the CodeDeploy agent
27 | ```
28 | Be careful that there are NO SPACES at the beginning of any line in the script.
29 | 9. Click **Next: Add Storage**.
30 | 10. Click **Next: Configure Security Group**.
31 | 11. Click **Create new security group**.
32 | 12. Security Group Name: `api-security-group`.
33 | 13. Click: **Add Rule**.
34 | 14. Type: **All TCP**.
35 | 15. Source: `load-balancer-security-group` and select the one suggested.
36 | 16. Click **Review**.
37 | 17. Click **Create launch configuration** and select the key pair you will use to `ssh` into future instances.
38 |
39 | ## Add Security Group inbound rule
40 | 1. Go to **Security Groups** under **Network & Security** (still on EC2 service).
41 | 2. Open the `api-security-group` (created on the previous step).
42 | 3. Click **Edit inbound rules**.
43 | 4. Add a new rule with type `PostgreSQL` (port `5432` should be set automatically). As source select the `api-security-group` itself (start typing the name and select the one suggested). Note that this rule could not be added on the previous step because the security group didn't exist at that point.
44 | 5. Click **Save rules**.
45 |
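This inbound rule can also be added from the CLI; a sketch, assuming a hypothetical ID for `api-security-group`:

```
# Allow PostgreSQL traffic between instances in api-security-group itself.
aws ec2 authorize-security-group-ingress \
  --group-id <api-sg-id> \
  --protocol tcp --port 5432 \
  --source-group <api-sg-id>
```
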
46 | Now that we have our **Launch configuration** we can create our **Auto Scaling Group**.
47 |
48 | ## Create Auto Scaling Group
49 | 1. Go to **EC2** under **Compute** section.
50 | 2. On left menu select **Auto Scaling Groups** under **AUTO SCALING**.
51 | 3. Click: **Create Auto Scaling group**.
52 | 4. Select: `aws-workshop-auto-scaling-group` and then click **Next Step**.
53 | 5. On **Group name** put the same as in Launch configuration.
54 | 6. **Group size:** 2. At least we will have some redundancy from the start!
55 | 7. On **Subnet** add all the available options.
56 | 8. On **Advanced Details** click on: **Receive traffic from one or more load balancers**.
57 | 9. On **Target Groups** click and select: `aws-workshop-target-group`.
58 | 10. Click **Next: Configure scaling policies**.
59 | 11. Select: **Use scaling policies to adjust the capacity of this group**. We will configure a toy scaling policy only for learning. In a real system, you would have to do some benchmarking and determine your application's bottlenecks to setup an optimal scaling policy.
60 | 12. Configure it to scale between 2 and 4 instances.
61 | 13. Pick `Average CPU Utilization` as metric (imagine your app was compute intensive). In Target value, set something like 80.
62 | 14. **Instances need:** 180 seconds for warm up. See more [here](https://docs.aws.amazon.com/autoscaling/latest/userguide/as-scaling-simple-step.html#as-step-scaling-warmup).
63 | 15. Click **Next: Configure Notifications**.
64 | 16. Click **Next: Configure Tags**.
65 | 17. Click **Review**.
66 | 18. Click **Create Auto Scaling group**.
67 | 19. Click **Close**.
68 |
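For reference, roughly the same group and scaling policy can be created with the CLI; a sketch with hypothetical subnet IDs and target group ARN (the console wizard remains the path this workshop follows):

```
# Create the Auto Scaling Group from the launch configuration above.
aws autoscaling create-auto-scaling-group \
  --auto-scaling-group-name aws-workshop-auto-scaling-group \
  --launch-configuration-name aws-workshop-auto-scaling-group \
  --min-size 2 --max-size 4 --desired-capacity 2 \
  --vpc-zone-identifier "<subnet-1a-id>,<subnet-1b-id>" \
  --target-group-arns <target-group-arn>

# Target-tracking policy: keep average CPU around 80%.
aws autoscaling put-scaling-policy \
  --auto-scaling-group-name aws-workshop-auto-scaling-group \
  --policy-name cpu-80 \
  --policy-type TargetTrackingScaling \
  --target-tracking-configuration '{"PredefinedMetricSpecification":{"PredefinedMetricType":"ASGAverageCPUUtilization"},"TargetValue":80.0}' \
  --estimated-instance-warmup 180
```
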
69 | ---
70 | **Next:** finishing up, we need to [modify parameters and re-run CodeBuild](/workshop/elb-auto-scaling-group/03-finishing-up.md).
71 |
72 |
--------------------------------------------------------------------------------
/workshop/elb-auto-scaling-group/03-finishing-up.md:
--------------------------------------------------------------------------------
1 | # Finishing up
2 |
3 | Now we have two instances on EC2, an ELB to distribute the traffic across them, and an Auto Scaling Group to provide redundancy and scale automatically if throughput needs to increase.
4 |
5 | In [the first section](/workshop/s3-web-ec2-api-rds/05-finishing-up.md), the `API_URL` parameter was set to the DNS name of our only instance. Now, we need to tell the web app that requests must go through the load balancer, so we need to modify `API_URL`.
6 | We also need to modify the CodeDeploy project so the tool knows that we now have an Auto Scaling Group and that it needs to run a deploy each time a new instance is launched.
7 | Finally, we need to re-run CodeBuild so the new bundle on S3 points to the DNS of the load balancer instead of the instance's DNS.
8 |
9 | ## Modify `API_URL`
10 | 1. Go to **EC2** under **Compute** section.
11 | 2. On left menu select **Load Balancers** under **LOAD BALANCING**.
12 | 3. Copy the DNS name of your load balancer that appears under **Description**.
13 | 4. On left menu, select **Parameter Store**.
14 | 5. Click on `/prod/frontend/API_URL` and on **Actions** select **Edit Parameter**.
15 | 6. As Value put `http://` followed by the DNS name you copied in step 3.
16 | 7. Click **Save Parameter**.
17 |
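Equivalently, you could update the parameter from the CLI; a sketch (the placeholder is the DNS name returned by the first command):

```
# Look up the load balancer's DNS name.
aws elbv2 describe-load-balancers \
  --names aws-workshop-load-balancer \
  --query 'LoadBalancers[0].DNSName' --output text

# Point the frontend at the load balancer.
aws ssm put-parameter \
  --name /prod/frontend/API_URL \
  --type String \
  --value "http://<load-balancer-dns>" \
  --overwrite
```
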
18 | ## Modify the CodeDeploy project
19 | 1. Go to **CodeDeploy** under **Developer Tools**.
20 | 2. Click your application's name.
21 | 3. Select your deployment group and on **Actions** select **Edit**.
22 | 4. On **Environment configuration** select your Auto Scaling Group on **Auto Scaling groups** tab.
23 | 5. Go to **Amazon EC2 instances** tab, and delete all the existing tag groups that we set up earlier.
24 | 6. Check **Enable load balancing**.
25 | 7. On **Load balancer** check **Application Load Balancer**.
26 | 8. Select your target group in the dropdown.
27 | 9. Click **Save**.
28 | 10. Select your deployment group and on **Actions** click **Deploy new version**.
29 | 11. On **Repository** type select: `My application is stored in GitHub`.
30 | 12. Repository Name: `tryolabs/aws-workshop`.
31 | 13. Get the last commit id and paste it in the **Commit ID** field.
32 | 14. Then click **Deploy**.
33 |
34 | ## Re-run CodeBuild
35 | 1. Go to **CodeBuild** under **Developer Tools**.
36 | 2. Click **Start build**.
37 | 3. Click **Start build**.
38 |
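Both the CodeDeploy deployment and the CodeBuild run can also be triggered from the CLI; a sketch, with the application, deployment group and project names as placeholders:

```
# Deploy a specific commit from GitHub through CodeDeploy.
aws deploy create-deployment \
  --application-name <your-application> \
  --deployment-group-name <your-deployment-group> \
  --github-location repository=tryolabs/aws-workshop,commitId=<commit-sha>

# Re-run the frontend build.
aws codebuild start-build --project-name <your-codebuild-project>
```
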
39 | ## Update RDS security group
40 | To give the instances created by the Auto Scaling Group access to the database, we need to update our Postgres instance's security group.
41 |
42 | 1. Go to **RDS** under **Database**.
43 | 2. Click **Instances** on the left.
44 | 3. Select your instance with the radio button on the left, click **Instance actions** and select **Modify**.
45 | 4. Scroll to **Security group** under the **Network & Security** section.
46 | 5. Click on the security groups drop down and select `api-security-group`. This is the group we created with the Launch Configuration for our Auto Scaling Group in the [previous section](/workshop/elb-auto-scaling-group/02-auto-scaling-group.md#create-launch-configuration-group).
47 |
48 | Now, terminate all your running instances and wait for the Auto Scaling Group to start the new ones; this might take a few minutes. You can follow the current state of the ASG by going to **EC2**, **Auto Scaling Groups**, selecting your group and checking the **Activity History** and **Instances** tabs. Once the new instances are in place and `running`, you should be able to get the full site working on the URL of the load balancer.
49 |
50 | ---
51 | **Extra mile:** once you have the site running:
52 |
53 | - Can you tell which instance is getting the requests?
54 | - Try changing the _Desired_ and _Min_ parameters of the ASG and see what happens.
55 | - Force the launch of new instances by triggering a condition that would make the scale up policy activate (that is, without changing the _Desired_ value).
56 | > Tip: running `yes > /dev/null &` will max out one of the CPU cores.
57 |
58 | - Try running [ab](http://httpd.apache.org/docs/2.2/programs/ab.html) (installed by default on macOS) to stress test the API. Do you see any reaction in the AWS console?
59 |
60 | ---
61 | **Next:** [VPC configuration and Bastion instance](/workshop/vpc-subnets-bastion/introduction.md).
--------------------------------------------------------------------------------
/workshop/elb-auto-scaling-group/introduction.md:
--------------------------------------------------------------------------------
1 | # Add an extra EC2 instance with ELB and auto-scaling
2 |
3 | In this section we want to add an extra EC2 instance to be able to handle a larger amount of traffic and improve our performance.
4 |
5 | To do that, we are also going to add an [ELB](https://aws.amazon.com/elasticloadbalancing/), which will be in charge of distributing the traffic across our instances.
6 |
7 | We will also add an [auto-scaling group](https://aws.amazon.com/documentation/autoscaling/) spanning 2 availability zones.
8 | This way we ensure that if we have 2 instances, one in each [Availability Zone](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-regions-availability-zones), and one zone goes down taking our instance with it, AWS will automatically start a new instance in the other availability zone so our performance doesn't degrade.
9 | We will also create rules to add more instances if our 2 instances are overloaded (for example: using 80% of CPU for the last 5 minutes); you can add whatever rules you want.
10 |
11 | ---
12 | **Next:** [Create a Load Balancer](/workshop/elb-auto-scaling-group/01-load-balancer.md)
--------------------------------------------------------------------------------
/workshop/s3-web-ec2-api-rds/03-RDS.md:
--------------------------------------------------------------------------------
1 | # RDS
2 |
3 | ## Create a PostgreSQL instance in RDS
4 | 1. Go to **RDS** under **Database** section.
5 | 2. Click on **Create Database**.
6 | 3. Click on the PostgreSQL logo, and under the **Templates** section tick the _"Free Tier"_ checkbox.
7 | 4. Enter a name on _DB Instance identifier_ (we will need it later, so don’t forget it).
8 | 5. Enter a username and password and click Next (again, we will need these later).
9 | 6. Under **Connectivity** section verify that **Publicly Accessible** is set to No.
10 | 7. On **VPC security groups** choose _Select existing VPC security groups_ and select the security group you created when [launching the EC2 instance](/workshop/s3-web-ec2-api-rds/02-EC2-instances.md#launch-your-first-ec2-instance).
11 | 8. Pick a database name under **Additional Configuration** and click **Create database** (again, we will need the database name later).
12 |
13 | Now our instance is created. We configured its access, allowing every instance under the security group that was created in the previous section to connect.
14 |
15 | ## Add DB parameters on Parameters Store
16 |
17 | As before, we will need some variables stored in the parameter store, including the database name, username, password and endpoint. These variables are referenced in [this file](/backend/conduit/settings/ec2.py), so Django can access the database.
18 |
19 | 1. Go to **RDS** under **Database** section.
20 | 2. Click on Instances.
21 | 3. Wait for instance to create. Then see details of your db and copy the **Endpoint**. This will be the value for `DATABASE_HOST`.
22 | 4. Go to AWS console **Systems Manager** under **Management & Governance**.
23 | 5. On the left menu select **Parameter Store**.
24 | 6. Click Create Parameter.
25 | 7. Enter `/prod/api/DATABASE_NAME` as the name and a meaningful description like "Name of the PostgreSQL database".
26 | 8. Enter the DB name you selected before on the value attribute.
27 | 9. Click create parameter and close.
28 | 10. Now we will need to do the same thing for the username and host:
29 |     1. For the username, enter `/prod/api/DATABASE_USER` as the name and your database username as the value.
30 |     2. For the host, enter `/prod/api/DATABASE_HOST` as the name and the hostname you copied earlier as the value.
31 | 11. For `/prod/api/DATABASE_PASSWORD` follow the same steps, but select **Type: SecureString** and choose the key `workshopkey` as the KMS Key ID.
32 |
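These parameters can also be created from the CLI; a rough sketch, with placeholder values (the KMS key is the `workshopkey` mentioned above):

```
# Plain parameters for name, user and host.
aws ssm put-parameter --name /prod/api/DATABASE_NAME --type String --value <db-name>
aws ssm put-parameter --name /prod/api/DATABASE_USER --type String --value <db-user>
aws ssm put-parameter --name /prod/api/DATABASE_HOST --type String --value <db-endpoint>

# The password is stored encrypted with the workshop KMS key.
aws ssm put-parameter --name /prod/api/DATABASE_PASSWORD \
  --type SecureString --key-id alias/workshopkey --value <db-password>
```
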
33 | Now we have our database parameters set, and the password encrypted. Only our EC2 instances will be able to decrypt it.
34 |
35 | ---
36 | **Extra mile:**
37 |
38 | - Can you `ping` the Postgres instance?
39 | - Try to connect to the DB through your running EC2 instance.
40 |
41 | ---
42 |
43 | **Next:** create a [CodeDeploy project to deploy your API](/workshop/s3-web-ec2-api-rds/04-code-deploy.md).
44 |
--------------------------------------------------------------------------------
/workshop/s3-web-ec2-api-rds/04-code-deploy.md:
--------------------------------------------------------------------------------
1 | # CodeDeploy
2 |
3 | [CodeDeploy](http://docs.aws.amazon.com/codedeploy/latest/userguide/welcome.html) is a service to automate the deployment of any kind of application to EC2 instances. The configuration is really simple and easy to adapt. The deployment process is described in an `appspec.yml` file like [this one](/appspec.yml). If you want to know what happens during the deploy, you can also check the implementation of the hooks [here](/infrastructure/aws/codedeploy).
4 |
5 | First, we need to create a default role for CodeDeploy so it can have access to other AWS services (like S3).
6 |
7 | ## Create CodeDeploy Role
8 | 1. Go to **IAM** under **Security, Identity & Compliance**.
9 | 2. Go to the **Roles** section and click **Create Role**.
10 | 3. Select **CodeDeploy** for both service and use case and click **Next: Permissions**.
11 | 4. Select **Next: Tags**.
12 | 5. Select **Next: Review**.
13 | 6. Type a name and description and click **Create Role**.
14 |
15 | Now we are ready to start using it.
16 |
17 | ## Configure Code Deploy
18 | 1. Go to **CodeDeploy** under **Developer Tools**.
19 | 2. Go to **Applications** and click **Create application**.
20 | 3. Enter an **Application name** and **EC2/On-premises** on **Compute platform** then click **Create Application**.
21 | 4. Click on **Create Deployment group** and enter a Deployment Group name.
22 | 5. On **Service role** select the role created to grant CodeDeploy access to the instances.
23 | 6. Select **In-place** on **Deployment Type** section.
24 | 7. Check **Amazon EC2 instances** in **Environment Configuration**, then on the first tag group select `environment` as Key and as Value `prod`, on the second line select `service` as Key and as Value `api`. This means that CodeDeploy will deploy our application to all the EC2 instances with those tags.
25 | 8. On **Deployment settings** select **CodeDeployDefault.OneAtATime** in Deployment Configurations.
26 | 9. Under **Load Balancer** uncheck **Enable load balancing**.
27 | 10. Click **Create deployment group**.
28 |
29 | Now our CodeDeploy application is ready. Let’s try our first deployment.
30 |
31 | 1. On the deployment group details of the group we just made, click **Create Deployment**.
32 | 2. On **Repository type** select **"My application is stored in GitHub"**.
33 | 3. In **Connect to GitHub** section type your GitHub account and select **Connect to GitHub**.
34 | 4. Allow AWS to access your GitHub account, if needed.
35 | 5. Enter your repository name in the form _account/repository_.
36 | 6. In **Commit ID** type the commit hash that you want to deploy.
37 | 7. Select **Overwrite the content** below.
38 | 8. Click **Create Deployment**.
39 |
40 | During the deploy try **View instances** and then **View events** to follow the progress and see what's happening.
41 |
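A deployment can also be followed from the CLI; a sketch, using the deployment ID that `create-deployment` (or the console) reports:

```
# Check the status and lifecycle events of a deployment.
aws deploy get-deployment --deployment-id <deployment-id>

# Or block until it finishes (fails if the deployment fails).
aws deploy wait deployment-successful --deployment-id <deployment-id>
```
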
42 | ---
43 | **Extra mile:** once the deploy finishes:
44 |
45 | - Try hitting the API with something like [Postman](https://www.getpostman.com/) or [httpie](https://httpie.org/).
46 | - What effect did the deploy have? Where did all the Python code end up? Is the API connected with the RDS already? `ssh` in to get all those answers, and more.
47 |
48 | ---
49 | **Next:** we are going to [finish our first deploy](/workshop/s3-web-ec2-api-rds/05-finishing-up.md), only some extra parameters are missing!
50 |
--------------------------------------------------------------------------------
/workshop/s3-web-ec2-api-rds/05-finishing-up.md:
--------------------------------------------------------------------------------
1 | # Finishing up
2 |
3 | We are almost done. We have to add a few more parameters, and then we are ready to deploy the whole project.
4 |
5 | ## Create API_URL on Parameter Store
6 | 1. Go to **EC2** under **Compute** section.
7 | 2. Select your instance.
8 | 3. Copy the **Public DNS** under **Description**.
9 | 4. On the left menu select **Parameter Store**.
10 | 5. Click **Create Parameter**.
11 | 6. Enter `/prod/frontend/API_URL` as name and `http://<Public DNS>:9000` as value (using the DNS you copied in step 3).
12 | 7. Click **Create Parameter** and close.
13 |
14 | This will be used by CodeBuild, so the frontend knows where the API is. You can check how [here](/buildspec.frontend.yml).
15 |
16 | ## Run CodeBuild project
17 | 1. Go to **CodeBuild** under the **Developer Tools** section.
18 | 2. Select the project created before and click **Start Build**.
19 | 3. Click **Start Build**.
20 | 4. Wait.
21 | 5. Check if all the phases run successfully.
22 | 6. Done.
23 |
24 | Now, under **S3**, your bucket, **Properties**, **Static website hosting**, you will find the public URL (endpoint) provided by S3. If everything went as planned, you should see the complete website there.
25 |
26 | ---
27 | **Next:** add an extra [EC2 instance with ELB and auto-scaling](/workshop/elb-auto-scaling-group/introduction.md).
28 |
--------------------------------------------------------------------------------
/workshop/s3-web-ec2-api-rds/introduction.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 | We are ready to start the deployment of our website.
4 |
5 | The first step will be the frontend. Because it’s a static website, we can create an [S3 bucket](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html), put all the code in it and serve it as a static website. Think of an S3 bucket as a folder in the cloud, which can be setup for access from the outside world via a URL (and even help a bit with your application's routes).
6 |
7 | To automate the build, we will use [CodeBuild](https://aws.amazon.com/codebuild/), an AWS service to build projects on the go.
8 | CodeBuild will pull our repository, build the webpage and copy the build directory to S3. The configuration is specified on `buildspec.frontend.yml` on [the root folder of our repo](/buildspec.frontend.yml).
9 |
10 | In order to automate the deployment of our API to the EC2 instances, we will use [CodeDeploy](http://docs.aws.amazon.com/codedeploy/latest/userguide/welcome.html). It will pull our repo to the EC2 instances and start our server (gunicorn). The full deploy process is described in the `appspec.yml` file, [here](/appspec.yml).
11 |
12 | Last but not least, our database will be hosted on [AWS RDS](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html) as a PostgreSQL instance.
13 |
14 | To sum up, in this section we will create:
15 |
16 | - an S3 bucket to host our static frontend.
17 | - a CodeBuild setup to build the frontend and copy the output to the S3 bucket.
18 | - a CodeDeploy setup to deploy our API to the EC2 instances.
19 | - an RDS PostgreSQL instance.
20 |
21 | > **Important:** after you are done with this workshop, you will ideally clean up your account, so you are not billed anymore. This means that you need to delete everything you have created.
22 | >
23 | > Many resources in AWS [can be tagged](https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). If something can be tagged, then you should tag it with a **unique name**. Later, you can use the [Tag Editor](https://aws.amazon.com/blogs/aws/resource-groups-and-tagging/) to find your tagged resources to delete, and make sure you don't leave anything behind.
24 |
25 | ---
26 |
27 | **Next:** learn how to [serve a static website from S3](/workshop/s3-web-ec2-api-rds/01-serve-website-from-s3.md).
28 |
--------------------------------------------------------------------------------
/workshop/serverless/02-api-integration.md:
--------------------------------------------------------------------------------
1 | ## API Integration
2 |
3 | Now we want to keep using the API endpoint we have been using so far, so that there is a single endpoint for all of our API calls. To do that, we will configure our load balancer to forward all requests matching the path `/*/search/*` to our lambda.
4 |
5 | First, we need to create a new `Target Group` with our lambda registered as the target.
6 |
7 | 1. Go to **EC2** under **Compute** section.
8 | 2. Click on **Target Groups**.
9 | 3. Click on **Create Target Group**.
10 | 4. Put a name and choose `Lambda function` as Target Type.
11 | 5. Select your lambda and click **Create**.
12 |
13 | Now we will modify our load balancer to add a forwarding rule for the new target group.
14 |
15 | 1. Go to **EC2** under **Compute** section.
16 | 2. Click on **Load Balancers** on the left.
17 | 3. Select your load balancer, go to the `Listeners` tab and click on `View/edit rules`.
18 | 4. Add a new rule with condition `IF` path is `/*/search/*` `THEN` Forward to the Target Group you created before.
19 | 5. Click on **Update**.
20 |
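The same integration, sketched with the CLI (ARNs are placeholders; note that the lambda also needs a resource-based permission so the load balancer can invoke it):

```
# Target group whose target is a lambda function.
aws elbv2 create-target-group --name search-lambda-tg --target-type lambda

# Allow Elastic Load Balancing to invoke the function.
aws lambda add-permission \
  --function-name <your-lambda> \
  --statement-id elb-invoke \
  --principal elasticloadbalancing.amazonaws.com \
  --action lambda:InvokeFunction \
  --source-arn <target-group-arn>

# Register the lambda and add the path-based forwarding rule.
aws elbv2 register-targets --target-group-arn <target-group-arn> --targets Id=<lambda-arn>
aws elbv2 create-rule \
  --listener-arn <listener-arn> \
  --priority 10 \
  --conditions Field=path-pattern,Values='/*/search/*' \
  --actions Type=forward,TargetGroupArn=<target-group-arn>
```
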
21 | ---
22 |
23 | This is the end of this part of the workshop. You could continue reading more about serverless architecture in the AWS ecosystem [here](https://aws.amazon.com/serverless/).
24 |
25 |
--------------------------------------------------------------------------------
/workshop/serverless/introduction.md:
--------------------------------------------------------------------------------
1 | # Add a lambda function to query search posts by title
2 |
3 | In this section we want to add a lambda function to be able to query our database for posts that have a given word in their title.
4 | In the next steps we will install [Serverless](https://serverless.com), a framework to easily deploy serverless architectures on AWS and other cloud platforms.
5 |
6 |
7 | ---
8 | **Next:** [Install and configure Serverless](/workshop/serverless/01-serverless.md)
--------------------------------------------------------------------------------
/workshop/set-up-users.md:
--------------------------------------------------------------------------------
1 | # Set up users on AWS
2 |
3 | > **TryoTip:** if you are using the **Tryolabs Playground AWS account**, this section does not apply. Please, read it anyway, so you have some context on what you would do with a bare new AWS account.
4 |
5 | As you might already know, there is a special account in AWS called _root_. This is the account used to do the initial setup for users, roles and billing information. It is recommended to create a user with administrator privileges for everyday use and [not use the root account](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) to log in to AWS. Additionally, you should make sure you enable [Multi Factor Authentication (MFA)](http://docs.aws.amazon.com/console/iam/security-status-activate-mfa) on your root account, and use an app like [Authy](https://authy.com/) as a second factor on your phone (Android/iOS).
6 |
7 | Next, we are going to use our root account to set up 2 AWS users.
8 |
9 | One will be used to access AWS via the console (web interface, so this will be your own user). The other will be used for accessing our account *programmatically*: we will create an **access key ID** and **secret access key** for the AWS API, CLI, SDK, and other development tools.
10 |
11 | Every account has some associated permissions. It is a good practice to have those strictly limited to the bare minimum necessary, especially for programmatic access. Permissions are handled by attaching [policies](http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) to the user accounts. There, you can customize the access levels to various AWS services.
12 |
13 | First we are going to create the user for the AWS console:
14 |
15 | 1. Login to your AWS account with the root user.
16 | 2. Go to **IAM** under Security, Identity & Compliance section.
17 | 3. Click on Users.
18 | 4. Click Add user button.
19 | 5. Enter a username and check the option **AWS Management Console access** under the **Select AWS access type** section, then click next. You should also mark the option so that the user is forced to change their password on next login (pick a secure password!).
20 | 6. Select **Attach existing policies directly**.
21 | 7. Search for: `AdministratorAccess`, check it and click next.
22 | 8. Click on Create user. Copy the url and password that appear in the Success message.
23 |
24 | Now, let's log in with our new user:
25 |
26 | 1. Log out from AWS and go to the link you copied earlier.
27 | 2. Enter the username and password that was auto-generated.
28 | 3. Enter your new password.
29 |
30 | After this, we can create the user to access AWS programmatically:
31 |
32 | 1. Repeat steps 2 to 4 to set up a user.
33 | 2. Enter a username and check the option **Programmatic access** under the **Select AWS access type** section. Click next.
34 | 3. Select **Attach existing policies directly**.
35 | 4. Search for: `AdministratorAccess`, check it and click next. Of course, in a real use case, you would design or use a policy with more restricted access.
36 | 5. Click on Download CSV.
37 |
38 | In the downloaded file, you can find the access key id and the secret access key. You’ll need them to [configure your AWS CLI](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) in your computer. If you don’t have AWS CLI installed yet, you can do it following [these steps](http://docs.aws.amazon.com/cli/latest/userguide/installing.html).
39 |
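A quick sketch of configuring the AWS CLI with the downloaded credentials and checking which identity it uses (the profile name is just an example):

```
# Store the access key id and secret access key locally.
aws configure --profile workshop

# Verify that the CLI is talking to AWS as the new user.
aws sts get-caller-identity --profile workshop
```
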
40 | ---
41 | **Extra mile**: attach the `ViewOnlyAccess` policy to the user with programmatic access. Double points if you do it with the CLI.
42 |
43 | ---
44 |
45 | **Next:** [S3, RDS and EC2](/workshop/s3-web-ec2-api-rds/introduction.md).
46 |
--------------------------------------------------------------------------------
/workshop/vpc-subnets-bastion/01-create-vpc.md:
--------------------------------------------------------------------------------
1 | # VPC
2 |
3 | We are going to create our VPC with 4 subnets (2 private and 2 public).
4 |
5 | ## Create a VPC
6 | 1. Go to VPC under Networking & Content Delivery.
7 | 2. Go to Your VPCs on the left section.
8 | 3. Click on Create VPC.
9 | 4. As **Name** **tag** put: `awsworkshopvpc`.
10 | 5. As **IPv4 CIDR** **block** put: `10.0.0.0/16`.
11 | 6. Then click: Yes, Create.
12 |
13 | ## Create 4 subnets
14 | 1. Go to Subnets on the left section.
15 | 2. Click Create Subnet.
16 | 3. As **Name tag** put: `10.0.1.0-us-east-1a`.
17 | 4. **Availability Zone**: `us-east-1a`.
18 | 5. As **IPv4 CIDR** **block** put: `10.0.1.0/24`. CIDR block for any subnet will be a subset of the VPC CIDR block.
19 | 6. Then click in Yes, Create.
20 | 7. Repeat steps 2-6 using as **Name tag**: `10.0.2.0-us-east-1a`, **Availability Zone**: `us-east-1a` and **IPv4 CIDR block**: `10.0.2.0/24`.
21 | 8. Repeat steps 2-6 using as **Name tag**: `10.0.3.0-us-east-1b`, **Availability Zone**: `us-east-1b` and **IPv4 CIDR block**: `10.0.3.0/24`.
22 | 9. Repeat steps 2-6 using as **Name tag**: `10.0.4.0-us-east-1b`, **Availability Zone**: `us-east-1b` and **IPv4 CIDR block**: `10.0.4.0/24`.
23 |
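If you prefer scripting it, a rough CLI sketch of the same VPC and subnets (the VPC ID returned by the first command feeds the second):

```
# Create the VPC.
aws ec2 create-vpc --cidr-block 10.0.0.0/16

# Create one subnet per CIDR/AZ pair (repeat for 10.0.2.0/24 in us-east-1a,
# and 10.0.3.0/24 and 10.0.4.0/24 in us-east-1b).
aws ec2 create-subnet \
  --vpc-id <vpc-id> \
  --cidr-block 10.0.1.0/24 \
  --availability-zone us-east-1a
```
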
24 | ---
25 | **Next:** [create an Internet Gateway and a public Routes table](/workshop/vpc-subnets-bastion/02-internet-gateway.md).
26 |
27 |
--------------------------------------------------------------------------------
/workshop/vpc-subnets-bastion/02-internet-gateway.md:
--------------------------------------------------------------------------------
1 | # Internet Gateway
2 |
3 | We already have our VPC with 4 subnets, but none of them can access the Internet (they are effectively private). To make 2 of them public, we need to set up an [Internet Gateway](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Internet_Gateway.html) for our VPC and create a Route Table to route all external traffic through the gateway.
4 | Finally, we need to associate 2 of our subnets with this route table and have them auto-assign public IPs, so they become public subnets.
5 |
6 | ## Create an Internet Gateway
7 | 1. Go to Internet Gateways on the left section.
8 | 2. Click Create Internet Gateway.
9 | 3. As Name tag put: `awsworkshopIGW`.
10 | 4. Click: Yes, Create.
11 | 5. Click Attach to VPC.
12 | 6. Click: Yes, Attach.
13 |
14 | ## Create Route tables
15 | 1. Go to Route Tables on the left section.
16 | 2. Click Create Route Table.
17 | 3. As Name tag: `awsWorkshopPublicRT`.
18 | 4. Click Yes, Create.
19 | 5. On the bottom section select the Routes tab.
20 | 6. Click on Edit button.
21 | 7. Click on Add another Route.
22 | 8. As **Destination** put `0.0.0.0/0`.
23 | 9. As **Target** select your Internet Gateway.
24 | 10. Click Save.
25 | 11. Now select Subnet Associations tab.
26 | 12. Click on Edit.
27 | 13. Select `10.0.1.0-us-east-1a` and `10.0.3.0-us-east-1b`.
28 | 14. Click Save.
29 |
30 | ## Assign public IP to our public subnet
31 | 1. Go to Subnets on the left section.
32 | 2. Select the `10.0.1.0-us-east-1a`.
33 | 3. Click on Subnet Actions.
34 | 4. Select Modify auto-assign IP settings.
35 | 5. Check: Enable auto-assign public IPv4 address.
36 | 6. Click Save.
37 | 7. Click Close.
38 | 8. Repeat steps 2-7 with `10.0.3.0-us-east-1b`.
39 |
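The same gateway, routing and auto-assign setup as a CLI sketch (all IDs are placeholders):

```
# Create and attach the Internet Gateway.
aws ec2 create-internet-gateway
aws ec2 attach-internet-gateway --internet-gateway-id <igw-id> --vpc-id <vpc-id>

# Public route table: send all external traffic through the gateway.
aws ec2 create-route-table --vpc-id <vpc-id>
aws ec2 create-route --route-table-id <rtb-id> \
  --destination-cidr-block 0.0.0.0/0 --gateway-id <igw-id>
aws ec2 associate-route-table --route-table-id <rtb-id> --subnet-id <public-subnet-id>

# Auto-assign public IPv4 addresses on each public subnet.
aws ec2 modify-subnet-attribute --subnet-id <public-subnet-id> --map-public-ip-on-launch
```
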
40 | ---
41 | **Next:** [create a NAT Instance](/workshop/vpc-subnets-bastion/03-nat-instance.md).
42 |
--------------------------------------------------------------------------------
/workshop/vpc-subnets-bastion/03-nat-instance.md:
--------------------------------------------------------------------------------
1 | # NAT Instance
2 |
3 | So far we have 2 public subnets and 2 private subnets. In the private ones, we will deploy the webserver instances that will be accessible via a Load Balancer.
4 |
5 | Even if these instances don't need to be reachable from outside of the VPC, they need to have Internet access to download and update packages. For this reason, we need a NAT through which we can route all external outbound traffic.
6 |
7 | AWS offers two options for NAT: [NAT Instance](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) and [NAT Gateway](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html).
8 | The Gateway offering is newer and easier to set up than the NAT Instance, and it also scales automatically. However, for this tutorial, we will go for the NAT Instance purely because it's cheaper (we don't want to be billed too much!).
9 |
10 | ## Create Nat Instance
11 | 1. Go to EC2 under Compute section.
12 | 2. Click on Instances on the left menu.
13 | 3. Click Launch Instance.
14 | 4. Select Community AMIs.
15 | 5. Type NAT and then hit Enter.
16 | 6. Select the first option that appears:
17 | 1. Root device type: `ebs`
18 | 2. Virtualization type: `hvm`
19 | 3. ENA Enabled: `No`.
20 | 7. Select `t2.micro` and click Next: Configure Instance Details.
21 | 8. On Network, select your VPC.
22 | 9. As subnet, select `10.0.1.0-us-east-1a`
23 | 10. Click Next: Add Storage.
24 | 11. Click Next: Add Tag.
25 | 12. As Key put `Name` and as Value `MyNat`.
26 | 13. Click Next: Configure Security Group.
27 | 14. Select: Create a new security group.
28 | 15. As **Security group name** put `natsecuritygroup`.
29 | 16. Add 3 rules: SSH, HTTP, HTTPS.
30 | 17. Click: Review and Launch.
31 | 18. Click: Next.
32 | 19. Click: Launch.
33 | 20. Select your key pair and click Launch Instances.
34 | 21. Click View Instances.
35 | 22. Select your NAT instance.
36 | 23. Go to Actions - Networking and click on **Change Source/Dest. Check**.
37 | 24. Click Yes, Disable.
38 | 25. Go to Actions again - Networking and click **Change Security Groups** and add the default one for the VPC.
39 |
40 | ## Create a route for private subnets through the NAT instance
41 | 1. Go to VPC under Networking & Content Delivery.
42 | 2. Go to Route Tables on the left section.
43 | 3. Select the Main route table of `awsworkshopvpc`.
44 | 4. On the bottom section go to the Routes tab.
45 | 5. Click Edit.
46 | 6. Click Add another Route.
47 | 7. As Destination put: `0.0.0.0/0`.
48 | 8. As Target select your NAT Instance.
49 | 9. Click Save.
50 |
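The two NAT-specific tweaks can also be done from the CLI; a sketch with placeholder IDs:

```
# NAT instances must not enforce source/destination checking.
aws ec2 modify-instance-attribute --instance-id <nat-instance-id> --no-source-dest-check

# Route the private subnets' outbound traffic through the NAT instance.
aws ec2 create-route --route-table-id <main-rtb-id> \
  --destination-cidr-block 0.0.0.0/0 --instance-id <nat-instance-id>
```
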
51 | ---
52 | **Next:** [create new Load Balancer](/workshop/vpc-subnets-bastion/04-load-balancer.md).
53 |
--------------------------------------------------------------------------------
/workshop/vpc-subnets-bastion/04-load-balancer.md:
--------------------------------------------------------------------------------
1 | # Load Balancer
2 |
3 | At this point, we need to create a Load Balancer to be able to route requests from the web to our instances.
4 |
5 | ## Create a new Load Balancer
6 | 1. Go to EC2 under Compute section.
7 | 2. Click on Load Balancers.
8 | 3. Click Create Load Balancer.
9 | 4. Click Create on Application Load Balancer.
10 | 5. As Name put: `aws-workshop-load-balancer-vpc`.
11 | 6. On Availability Zones, on VPC select `awsworkshopvpc`.
12 | 7. Click on `us-east-1a`.
13 | 8. Click on `10.0.1.0-us-east-1a`.
14 | 9. Repeat steps 7 and 8 for `us-east-1b` and `10.0.3.0-us-east-1b`.
15 | 10. Click Next: Configure Security Settings.
16 | 11. Click Next: Configure Security Groups.
17 | 12. Select Create a **new** security group and then click Next: Configure Routing.
18 | 13. As name put: `aws-workshop-target-group-vpc`.
19 | 14. As Port: `9000`.
20 | 15. As path: `/api/tags`.
21 | 16. Click Next: Register Targets.
22 | 17. Click Next: Review.
23 | 18. Click: Create.
24 | 19. Click: Close.
25 | 20. Select the new load balancer.
26 | 21. Go to Description on bottom and find Security.
27 | 22. Click Edit Security Groups.
28 | 23. Select default (so that both security groups are selected).
29 | 24. Click Save.
30 | 25. Delete old Load Balancer.
31 |
32 | ## Modify API_URL
33 | Repeat the steps outlined in [this section](/workshop/elb-auto-scaling-group/03-finishing-up.md).
34 |
35 | ---
36 | **Next:** [move RDS into your VPC](/workshop/vpc-subnets-bastion/05-RDS.md).
--------------------------------------------------------------------------------
/workshop/vpc-subnets-bastion/05-RDS.md:
--------------------------------------------------------------------------------
1 | # RDS
2 |
3 | Now, we should move our RDS to the private subnets of our VPC. This way, we ensure that RDS is only accessible from these private subnets, and never from the outside world.
4 |
5 | ## Move RDS to your VPC
6 | 1. Open the [Amazon RDS console](https://console.aws.amazon.com/rds) and choose Subnet Groups on the left navigation pane.
7 | 2. Choose **Create DB Subnet Group**.
8 | 3. Enter the subnet group name: `vpcsubnetgroup`.
9 | 4. As VPC ID: your VPC.
10 | 5. Then choose Availability Zone `us-east-1a` and Subnet Id `10.0.2.0-us-east-1a` and click Add.
11 | 6. Then choose Availability Zone `us-east-1b` and Subnet Id `10.0.4.0-us-east-1b` and click Add.
12 | 7. Click **Create**.
13 | 8. Go to Instances, select your RDS instance and on Instance Actions select Modify.
14 | 9. As Subnet Group select your `vpcsubnetgroup`.
15 | 10. Security Group: `default`.
16 | 11. Click Modify DB Instance.
17 | 12. Check Apply Immediately.
18 | 13. Click Continue.
19 |
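A CLI sketch of the same change (subnet IDs and the DB identifier are placeholders):

```
# Subnet group spanning the two private subnets.
aws rds create-db-subnet-group \
  --db-subnet-group-name vpcsubnetgroup \
  --db-subnet-group-description "Private subnets for the workshop DB" \
  --subnet-ids <private-subnet-1a-id> <private-subnet-1b-id>

# Move the instance into the VPC, applying the change immediately.
aws rds modify-db-instance \
  --db-instance-identifier <your-db-identifier> \
  --db-subnet-group-name vpcsubnetgroup \
  --vpc-security-group-ids <default-sg-id> \
  --apply-immediately
```
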
20 | ---
21 | **Next:** [Auto Scaling Group](/workshop/vpc-subnets-bastion/06-auto-scaling-group.md).
22 |
--------------------------------------------------------------------------------
/workshop/vpc-subnets-bastion/06-auto-scaling-group.md:
--------------------------------------------------------------------------------
1 | # Auto Scaling Group
2 |
3 | We are going to create a new Launch Configuration and Auto Scaling Group so that all our instances start only in our private subnets.
4 |
5 | ## Create a new Launch Configuration
6 | 1. Go to EC2 under Compute.
7 | 2. Go to Auto Scaling Groups on the left menu.
8 | 3. Delete the existing Auto Scaling group.
9 | 4. Go to Launch Configuration on the left menu.
10 | 5. Delete existing Launch Configuration.
11 |
12 | Now, you need to create a new Launch Configuration that is almost identical to the one that you just deleted except for one thing: instead of creating a Security Group you need to choose the default one for your VPC.
13 |
14 | There is no simple way to find it, because your AWS account already has a default VPC with its own default security group, and at this stage of the Launch Configuration wizard there is no way to distinguish between your VPC's default group and the default group of the default VPC (🤔). To find the security group:
15 |
16 | 1. Go to **VPC** under **Networking & Content Delivery**.
17 | 2. Select **Security Groups** on the **Security** section on the left.
18 | 3. Search for a group with name `default` and VPC `vpc-ugly-id | awsworkshopvpc`.
19 | 4. Copy the **Group ID** value.
20 |
21 | Once you have this Security Group Id, start the Launch Configuration creation wizard. When you reach the _Configure Security Group_ step, instead of creating a new security group choose **Select an existing security group** and look for the group named _default_ with the Id you copied. You can check the [previous instructions](/workshop/elb-auto-scaling-group/02-auto-scaling-group.md) if you need to.
22 |
23 | ## Create Auto Scaling Group
24 | 1. Go to EC2 under Compute section.
25 | 2. On left menu select Auto Scaling Groups under AUTO SCALING.
26 | 3. Click: Create Auto Scaling group.
27 | 4. Select: `aws-workshop-auto-scaling-group` and then click Next Step.
28 | 5. On Group name put the same as in Launch configuration.
29 | 6. Group size: 2.
30 | 7. Network: `awsworkshopvpc`.
31 | 8. Subnet: `10.0.2.0-us-east-1a` and `10.0.4.0-us-east-1b`.
32 | 9. Advanced Details click on: Receive traffic from one or more load balancers.
33 | 10. On Target Groups double click and select: `aws-workshop-target-group-vpc`.
34 | 11. Click Next: Configure scaling policies.
35 | 12. Select: **Use scaling policies to adjust the capacity of this group**.
36 | 13. Between 2 and 4.
37 | 14. Target value: 80.
38 | 15. Instances need: 180.
39 | 16. Click: Next: Configure Notifications.
40 | 17. Click: Next: Configure Tags.
41 | 18. Click: Review.
42 | 19. Click: Create Auto Scaling group.
43 | 20. Click: close.
44 |
45 | ---
46 | **Extra mile:**
47 |
48 | - Why is the ASG only available on two subnets and not all of them?
49 | - Why do we need this configuration of subnets anyway? (2 public and 2 private).
50 |
51 | ---
52 | **Next:** [create a Bastion](/workshop/vpc-subnets-bastion/07-bastion.md) to be able to SSH into the private instances.
53 |
--------------------------------------------------------------------------------
/workshop/vpc-subnets-bastion/07-bastion.md:
--------------------------------------------------------------------------------
1 | # Bastion instance
2 |
3 | A bastion is a regular EC2 instance located in one of the public subnets, which allows incoming traffic through SSH. Through this instance, we will be able to SSH into any instance located in the private subnet (assuming they accept incoming traffic from the bastion).
4 |
5 | ## Create a Bastion Instance
6 | 1. Go to **EC2** under **Compute section**.
7 | 2. Click on Launch Instance.
8 | 3. Look for Ubuntu Server (make sure it says Free tier eligible) and click Select.
9 | 4. Select `t2.micro` and then click on Next: Configure Instance Details.
10 | 5. On Network, select your VPC.
11 | 6. As subnet, you can pick either of the two public ones. For example, `10.0.1.0-us-east-1a`.
12 | 7. Click Next: Add Storage.
13 | 8. Leave the default settings and click Next: Add Tags.
14 | 9. Click Add Tag.
15 | 10. Fill Key with `Name` and Value with `bastion`.
16 | 11. Click on Next: Configure Security Group.
17 | 12. Write a meaningful name in **Security group name**.
18 | 13. Click Review and Launch.
19 | 14. Click Launch.
20 | 15. Select your key pair and click Launch Instances.
21 | 16. Select the Bastion on the instances list and on Actions/Networking select Change Security Groups.
22 | 17. Check the default security group of your VPC. Make sure that 2 security groups are checked: the default one and the one you created during the creation of the bastion.
23 |
24 | ## Accessing private instances through the bastion
25 |
26 | Now you have a public instance that can be accessed via SSH, but what you really want is to be able to access your private instances.
27 |
28 | To access the instances, you need to SSH with the PEM (key pair) that you generated when launching the first one.
29 |
30 | ### Option 1: setup SSH agent forwarding
31 | You can read a guide [here](https://developer.github.com/v3/guides/using-ssh-agent-forwarding/). Even though the examples check access to GitHub, it's analogous to accessing our private instances.
32 |
33 | You can also set up SSH so it's easier to access protected instances, going transparently through the bastion. [Here](https://www.cyberciti.biz/faq/linux-unix-ssh-proxycommand-passing-through-one-host-gateway-server/) you have a nice guide; a short sketch follows.
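
A minimal sketch using agent forwarding (`-A`) or a jump host (`-J`, available in OpenSSH 7.3+); host names and key file names are placeholders:

```
# Option A: forward your local SSH agent through the bastion,
# then hop to a private instance from there.
ssh -A -i ~/.ssh/<bastion-key>.pem ubuntu@<bastion public DNS>
ssh ubuntu@<private instance IP>   # run from the bastion

# Option B: jump through the bastion in a single command.
ssh -i ~/.ssh/<instance-key>.pem -J ubuntu@<bastion public DNS> ubuntu@<private instance IP>
```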
34 |
35 | ### Option 2: copy the PEM file from your machine to the bastion instance
36 | Ideally, you would be using a different PEM file for the bastion and the instances (increased security).
37 |
38 | 1. Copy the file with `scp -i ~/.ssh/<bastion-key>.pem ~/.ssh/<instance-key>.pem ubuntu@<bastion public DNS>:/home/ubuntu/.ssh`.
39 | 2. SSH into the bastion.
40 | 3. Make sure the file permissions are correct: `chmod 400 ~/.ssh/<instance-key>.pem`.
41 | 4. SSH into the instances (from the bastion) with `ssh -i ~/.ssh/<instance-key>.pem ubuntu@<private instance IP>`.
42 |
43 | ---
44 | **Extra mile:** `ssh` to one of the instances in the private subnets and `tracepath` to an external host. Do the same for an instance in the public subnets. What's the difference?
45 |
46 | ---
47 |
48 | **Next:** [finish the deploy](/workshop/vpc-subnets-bastion/08-finishing-up.md).
--------------------------------------------------------------------------------
/workshop/vpc-subnets-bastion/08-finishing-up.md:
--------------------------------------------------------------------------------
1 | # Finishing up
2 |
3 | As usual, the last steps are modifying our CodeDeploy project so it uses our new Auto Scaling Group, re-running the deploy, and rebuilding the web app so it uses the new parameters.
4 |
5 | For this, you can repeat the steps outlined [here](/workshop/elb-auto-scaling-group/03-finishing-up.md#modify-the-codedeploy-project).
6 |
7 | ---
--------------------------------------------------------------------------------
/workshop/vpc-subnets-bastion/introduction.md:
--------------------------------------------------------------------------------
1 | # VPC and *bastion* instance
2 |
3 | The aim of this section is to improve our security and redundancy a bit. For this, we are going to create a [custom VPC](https://aws.amazon.com/documentation/vpc/).
4 |
5 | Once we have our VPC, we will create 2 private subnets (where our Auto Scaling Group will launch the web server instances) in different Availability Zones (for redundancy reasons). We will also setup 2 public subnets in the same availability zones, which are needed by the load balancer. You can read more about VPC and subnets [here](https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html).
6 |
7 | For our public subnets, we will need to setup an [Internet Gateway](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Internet_Gateway.html) and create a new [Route Table](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html), so any instance in the subnet can access the Internet.
8 |
9 | Since our application's instances will live in the **private** subnets, we also will need a [NAT Instance](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) that will route their Internet traffic through the public subnets. We need our instances to access the Internet so that we can download packages, update our system, etc.
10 |
11 | We need to create a new Launch Configuration and modify our Auto Scaling Group so that from now on it deploys to the right subnets in our VPC. Also, our RDS (PostgreSQL database) needs to be moved to our VPC so our instances can reach it.
12 |
13 | To access our instances through SSH from the outside world, we will add a [bastion instance](https://aws.amazon.com/blogs/security/how-to-record-ssh-sessions-established-through-a-bastion-host/). Since the bastion has direct access to the instances, we can access them by accessing the bastion first.
14 |
15 | Finally, some changes need to be made to our CodeDeploy project so it deploys to our VPC, as expected.
16 |
17 | So, let's get started.
18 |
19 | ---
20 | **Next:** [create a VPC](/workshop/vpc-subnets-bastion/01-create-vpc.md).
21 |
22 |
--------------------------------------------------------------------------------