├── .gitignore ├── Express-Node-SQLite3 ├── Adding-roles.md ├── LICENSE ├── Routes-nested.md ├── SQL-2.md ├── SQL.md ├── Sessions.md ├── Step-by-step │ ├── Prep-1-Environment.md │ ├── Prep-2-.env.md │ ├── Prep-3-config.md │ ├── Step-1-Server.md │ ├── Step-2-index.md │ ├── Step-3-knexfile.md │ ├── Step-4-db-config.md │ ├── Step-5-create-tables.md │ ├── Step-6-create-seeds.md │ ├── Step-7-create-models.md │ └── Step-8-create-routers.md ├── Testing.md ├── bycrypt.md ├── data-modeling.md ├── deployment.md ├── foreign-keys.md ├── generateToke.md ├── middleware.md └── notes-on-security.md ├── Postgres ├── 001-types-of-databases.md ├── 002-database-organization-aka-schemas.md ├── 003-roles.md ├── 004-users-and-login-config.md └── 01-create-a-database.md ├── README.md └── SQL ├── 000-common-SELECT- mistakes.md ├── 1-What-is-a-database.md ├── 10-comparison-operators.md ├── 11-operator-precedence.md ├── 2-tables-columns-rows.md ├── 3-primary-and-foreing-keys.md ├── 4-importing-data.md ├── 5-renaming-columns.md ├── 6-column-concatenation.md ├── 7-aggregate-functions.md ├── 8-comments.md └── 9-AND-OR-NOT-keywords.md /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | 9 | # Diagnostic reports (https://nodejs.org/api/report.html) 10 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 11 | 12 | # Runtime data 13 | pids 14 | *.pid 15 | *.seed 16 | *.pid.lock 17 | 18 | # Directory for instrumented libs generated by jscoverage/JSCover 19 | lib-cov 20 | 21 | # Coverage directory used by tools like istanbul 22 | coverage 23 | *.lcov 24 | 25 | # nyc test coverage 26 | .nyc_output 27 | 28 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 29 | .grunt 30 | 31 | # Bower dependency directory (https://bower.io/) 32 | bower_components 33 | 34 | # node-waf configuration 35 | .lock-wscript 36 | 37 | # Compiled 
binary addons (https://nodejs.org/api/addons.html) 38 | build/Release 39 | 40 | # Dependency directories 41 | node_modules/ 42 | jspm_packages/ 43 | 44 | # Snowpack dependency directory (https://snowpack.dev/) 45 | web_modules/ 46 | 47 | # TypeScript cache 48 | *.tsbuildinfo 49 | 50 | # Optional npm cache directory 51 | .npm 52 | 53 | # Optional eslint cache 54 | .eslintcache 55 | 56 | # Microbundle cache 57 | .rpt2_cache/ 58 | .rts2_cache_cjs/ 59 | .rts2_cache_es/ 60 | .rts2_cache_umd/ 61 | 62 | # Optional REPL history 63 | .node_repl_history 64 | 65 | # Output of 'npm pack' 66 | *.tgz 67 | 68 | # Yarn Integrity file 69 | .yarn-integrity 70 | 71 | # dotenv environment variables file 72 | .env 73 | .env.test 74 | 75 | # parcel-bundler cache (https://parceljs.org/) 76 | .cache 77 | .parcel-cache 78 | 79 | # Next.js build output 80 | .next 81 | out 82 | 83 | # Nuxt.js build / generate output 84 | .nuxt 85 | dist 86 | 87 | # Gatsby files 88 | .cache/ 89 | # Comment in the public line in if your project uses Gatsby and not Next.js 90 | # https://nextjs.org/blog/next-9-1#public-directory-support 91 | # public 92 | 93 | # vuepress build output 94 | .vuepress/dist 95 | 96 | # Serverless directories 97 | .serverless/ 98 | 99 | # FuseBox cache 100 | .fusebox/ 101 | 102 | # DynamoDB Local files 103 | .dynamodb/ 104 | 105 | # TernJS port file 106 | .tern-port 107 | 108 | # Stores VSCode versions used for testing VSCode extensions 109 | .vscode-test 110 | 111 | # yarn v2 112 | .yarn/cache 113 | .yarn/unplugged 114 | .yarn/build-state.yml 115 | .yarn/install-state.gz 116 | .pnp.* 117 | 118 | .DS_STORE 119 | 120 | #db 121 | /databases 122 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Adding-roles.md: -------------------------------------------------------------------------------- 1 | ## 3 parts to making `roles` work (a simple implementation) 2 | 3 | 1. 
Additional Middleware 4 | 5 | ```js 6 | module.exports = role => { 7 | return function(req, res, next) { 8 | //req.decodedJwt.roles is just looking for a role that we added 9 | //to the token payload in genToken 10 | if (req.decodedJwt.roles && req.decodedJwt.roles.includes(role)) { 11 | next(); 12 | } else if ( 13 | req.decodedJwt.roles && 14 | req.decodedJwt.roles.includes("Admin") 15 | ) { 16 | next(); 17 | } else { 18 | res.status(403).json({ you: "don't have permission" }); 19 | } 20 | }; 21 | }; 22 | ``` 23 | 24 | 2. Use middleware in routes restricted by role 25 | 26 | ```js 27 | router.get("/", restricted, checkRole("Admin"), (req, res) => { 28 | Users.find() 29 | .then(users => { 30 | res.json(users); 31 | }) 32 | .catch(err => res.send(err)); 33 | }); 34 | ``` 35 | 36 | 3. Add privileges to token when we hand it back to the browser 37 | role restricted to Admin 38 | 39 | ```js 40 | function genToken(user) { 41 | //can perform other logic here 42 | const payload = { 43 | userid: user.id, 44 | username: user.username, 45 | // add roles: ["role"] to genToken file 46 | // if above mentioned logic exists: 47 | //roles: user.role 48 | roles: ["Student"] 49 | }; 50 | 51 | const options = { expiresIn: "1h" }; 52 | const token = jwt.sign(payload, secrets.jwtSecret, options); 53 | 54 | return token; 55 | } 56 | ``` 57 | 58 | # Async/Await Example 59 | 60 | 1. Additional Middleware 61 | 62 | ```js 63 | const secrets = require("../config/secrets"); 64 | const jwt = require("jsonwebtoken"); 65 | 66 | module.exports = department => { 67 | return (req, res, next) => { 68 | const token = req.headers.authorization; 69 | 70 | if (token) { 71 | jwt.verify(token, secrets.jwt, (err, payload) => { 72 | if (err) { 73 | res.status(403).json({ message: "You are not authorized" }); 74 | } else { 75 | if (department !== payload.userDepartment) { 76 | res.status(403).json({ 77 | message: 78 | "You do not have permission for this endpoint." 
79 | }); 80 | } else { 81 | req.userDepartment = payload.userDepartment; 82 | next(); 83 | } 84 | } 85 | }); 86 | } else { 87 | res.status(400).json({ message: "No credentials provided" }); 88 | } 89 | }; 90 | }; 91 | ``` 92 | 93 | 2. Use middleware in routes restricted by role 94 | 95 | ```js 96 | router.get("/", permissions("finance"), async (req, res, next) => { 97 | try { 98 | const users = await db.find().where("department", req.userDepartment); 99 | res.json({ users }); 100 | } catch (err) { 101 | next(err); 102 | } 103 | }); 104 | ``` 105 | 106 | 3. Add privileges to token when we hand it back to the browser 107 | role restricted by department 108 | 109 | ```js 110 | function genToken(user) { 111 | return jwt.sign( 112 | { 113 | userId: user.id, 114 | userDepartment: user.department 115 | }, 116 | secrets.jwt, 117 | { 118 | expiresIn: "5h" 119 | } 120 | ); 121 | } 122 | ``` 123 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Natalie Davis 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Routes-nested.md: -------------------------------------------------------------------------------- 1 | # Nested Routes 2 | 3 | ## Parent Router 4 | 5 | In the parent router we only need to require and use the child route 6 | 7 | ```javascript 8 | const tasksRouter = require("../tasks/tasks.router"); 9 | 10 | const router = express.Router(); 11 | 12 | router.use("/:id/tasks", tasksRouter); 13 | ``` 14 | 15 | ```javascript 16 | const express = require("express"); 17 | const projectModel = require("./projects-model"); 18 | const db = require("./projects-model"); 19 | const tasksRouter = require("../tasks/tasks.router"); 20 | 21 | const router = express.Router(); 22 | 23 | router.use("/:id/tasks", tasksRouter); 24 | 25 | router.get("/", async (req, res, next) => { 26 | try { 27 | const projects = await db.find(); 28 | res.json(projects); 29 | } catch (err) { 30 | next(err); 31 | } 32 | }); 33 | 34 | router.get("/:id", async (req, res, next) => { 35 | const { id } = req.params; 36 | const project = await db.findById(id); 37 | if (project) { 38 | res.json(project); 39 | } else { 40 | res.status(404).json({ message: "Could not find project with given id" }); 41 | } 42 | try { 43 | } catch (err) { 44 | next(err); 45 | } 46 | }); 47 | 48 | router.post("/", async (req, res, next) => { 49 | try { 50 | const newproject = await projectModel.add(req.body); 51 | res.status(201).json(newproject); 52 | } catch (err) { 53 | next(err); 54 | } 55 | }); 56 | 57 | router.put("/:id", async (req, res, next) => { 58 | try { 59 | const { id } = req.params; 60 | const project = await projectModel.update(id, 
req.body); 61 | if (project) { 62 | res.json(project); 63 | } else { 64 | res.status(404).json({ message: "Could not find project with given id" }); 65 | } 66 | } catch (err) { 67 | next(err); 68 | } 69 | }); 70 | 71 | router.delete("/:id", async (req, res, next) => { 72 | try { 73 | const { id } = req.params; 74 | const deletedCount = await projectModel.remove(id); 75 | console.log(deletedCount); 76 | if (deletedCount) { 77 | res.json({ removed: deletedCount }); 78 | } else { 79 | res.status(404).json({ message: "Could not find project with given id" }); 80 | } 81 | } catch (err) { 82 | next(err); 83 | } 84 | }); 85 | 86 | module.exports = router; 87 | ``` 88 | 89 | ## Child Router 90 | 91 | In the child route, we only need to merge params to have access to the parent routes params, if the child has an `id` that needs to be pulled from params, be careful not to call it `id` as that is likely being used in the parent param. Instead do something like: 92 | 93 | ```javascript 94 | router.post("/:taskID", async (req, res, next) => { 95 | try { 96 | const { id } = req.params; 97 | const newTask = await db.add(id, req.body); 98 | 99 | res.json(newTask); 100 | } catch (err) { 101 | next(err); 102 | } 103 | }); 104 | ``` 105 | 106 | ```javascript 107 | const express = require("express"); 108 | const db = require("./tasks-models"); 109 | 110 | const router = express.Router({ 111 | mergeParams: true 112 | }); 113 | 114 | router.get("/", async (req, res, next) => { 115 | try { 116 | const { id } = req.params; 117 | const tasks = await db.find(id); 118 | 119 | res.json(tasks); 120 | } catch (err) { 121 | next(err); 122 | } 123 | }); 124 | 125 | router.post("/", async (req, res, next) => { 126 | try { 127 | const { id } = req.params; 128 | const newTask = await db.add(id, req.body); 129 | 130 | res.json(newTask); 131 | } catch (err) { 132 | next(err); 133 | } 134 | }); 135 | 136 | module.exports = router; 137 | ``` 138 | 
-------------------------------------------------------------------------------- /Express-Node-SQLite3/SQL-2.md: -------------------------------------------------------------------------------- 1 | # SQLite 2 | 3 | - SQLite is a Data Base Management System (DMBS). 4 | - It is a Relational (R)DBMS 5 | 6 | - SQL is the query language, it's applicable to many RDBMS 7 | 8 | ?'s to ask ourselves 9 | -How many columns? 10 | -What goes in the columns? 11 | -Are there any restrictions? 12 | 13 | There is one required column for every table - the Primary Key (ID) 14 | 15 | - Does not have to be named ID. 16 | - Does not have to be a number, only has to be unique. 17 | 18 | What's the difference between a Database Schema and a Database Table? 19 | 20 | - A Schema is the overall structure of the Database. 21 | - A Schema can have multiple Tables which are _like_ spreadsheets. 22 | 23 | # SQLite Datatypes 24 | 25 | - https://www.tutorialspoint.com/sqlite/sqlite_data_types.htm 26 | 27 | 1 28 | NUMERIC 29 | 30 | NUMERIC 31 | DECIMAL(10,5) 32 | BOOLEAN 33 | DATE 34 | DATETIME 35 | 36 | 2 37 | INTEGER 38 | 39 | The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value. 40 | 41 | INT 42 | INTEGER 43 | TINYINT 44 | SMALLINT 45 | MEDIUMINT 46 | BIGINT 47 | UNSIGNED BIG INT 48 | INT2 49 | INT8 50 | 51 | 3 52 | REAL 53 | 54 | The value is a floating point value, stored as an 8-byte IEEE floating point number. 55 | 56 | REAL 57 | DOUBLE 58 | DOUBLE PRECISION 59 | FLOAT 60 | 61 | 4 62 | TEXT 63 | 64 | The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE) 65 | 66 | CHARACTER(20) 67 | VARCHAR(255) 68 | VARYING CHARACTER(255) 69 | NCHAR(55) 70 | NATIVE CHARACTER(70) 71 | NVARCHAR(100) 72 | TEXT 73 | CLOB 74 | 75 | 5 76 | BLOB 77 | 78 | The value is a blob of data, stored exactly as it was input. 
79 | 80 | ALSO null 81 | 82 | This list is not exhaustive, SQLite is a light version of SQL and it plays well with some of the type names from other SQL dialects. 83 | 84 | # Constraints 85 | 86 | - NOT NULL Constraint − Ensures that a column cannot have NULL value. 87 | 88 | - DEFAULT Constraint − Provides a default value for a column when none is specified. 89 | 90 | - UNIQUE Constraint − Ensures that all values in a column are different. 91 | 92 | - PRIMARY Key − Uniquely identifies each row/record in a database table. 93 | 94 | - CHECK Constraint − Ensures that all values in a column satisfy certain conditions. 95 | 96 | - Dropping Constraint - SQLite supports a limited subset of ALTER TABLE. The ALTER TABLE command in SQLite allows the user to rename a table or add a new column to an existing table. It is not possible to add or remove constraints from a table. (Older SQLite also could not rename or remove a column; versions 3.25+/3.35+ support RENAME COLUMN and DROP COLUMN — verify your version.) 97 | 98 | # DDL commands 99 | 100 | CREATE TABLE < table name> ( 101 | < column name > < data type > < constraints >, 102 | < column name > < data type > < constraints >, 103 | < column name > < data type > < constraints > 104 | ); 105 | 106 | DROP TABLE < table name >; //will delete table 107 | 108 | ALTER TABLE < table name > < arguments > 109 | 110 | # Steps 111 | 112 | 1. Open DB Browser 113 | 2. New Database - find project folder, create 114 | 3. Go to Execute SQL tab 115 | 116 | 1. CREATE TABLE "table-name" 117 | 118 | `CREATE TABLE "fruits" ( "id" INTEGER NOT NULL UNIQUE PRIMARY KEY AUTOINCREMENT );` 119 | 120 | 2. To add more columns to an existing table (drop and recreate it): 121 | 122 | ```javascript 123 | DROP TABLE IF EXISTS "fruits"; CREATE TABLE "fruits" ( "id" INTEGER NOT NULL UNIQUE PRIMARY KEY AUTOINCREMENT, "name" TEXT NOT NULL UNIQUE, "avgWeightOz" FLOAT, "delicious" BOOLEAN DEFAULT 0 ); 124 | ``` 125 | 126 | 3. ALTER TABLE can be used to change the table name 127 | 128 | `ALTER TABLE "fruitss" RENAME TO "fruits"` 129 | 130 | The produce.db3-journal file keeps the database read-only. 
In the SQL browser, click `Write Changes` to "release" the database. (May need to restart server) 131 | 132 | When we `npm i knex` it not only installs the knex dependency but it also installs a knex CLI we can access via `npx knex` 133 | 134 | `npx knex init` - creates knexfile.js which comes with a lot of boilerplate that we don't need. For now, delete everything except development. 135 | 136 | For this project: 137 | 138 | ```javascript 139 | module.exports = { 140 | development: { 141 | client: "sqlite3", // our DBMS driver 142 | connection: { 143 | filename: "./data/produce.db3" 144 | }, // the location of our DB 145 | useNullAsDefault: true, // necessary when using SQLite 146 | }, 147 | } 148 | 149 | { 150 | 151 | 152 | useNullAsDefault: true 153 | 154 | } 155 | ``` 156 | 157 | ```javascript 158 | const knex = require("knex"); 159 | const config = require("../knexfile"); 160 | 161 | const db = knex(config.development); 162 | 163 | module.exports = db; 164 | ``` 165 | 166 | In fruits-router: 167 | 168 | ```javascript 169 | const db = require("../utils/db"); 170 | ``` 171 | 172 | # Creating Schema outside of SQL Browser 173 | 174 | Shortcomings of creating schema in SQL Browser 175 | 176 | - Inconvenient 177 | - No version control 178 | - Changes are irreversible 179 | 180 | Database Migrations - https://en.wikipedia.org/wiki/Schema_migration 181 | 182 | Code written to a file that describes the changes that need to happen over time 183 | 184 | The knex CLI has a schema builder and a migrations feature. 185 | 186 | SQL Schema Builder - http://knexjs.org/#Schema 187 | 188 | 1. `npx knex migrate:make fruits` Where `fruits` is the migration name. 189 | 2. Creates a Migrations folder with a file (filename is a timestamp and the name we gave the migration separated by an underscore). The timestamp is because we make changes to our migrations over time and they run in order they were defined in. Imagine trying to add a row to a table that hadn't been created yet? 
190 | 191 | 1. This file has two exported functions. The `up` function is where we are going to put our schema builder code that creates or changes our schema in some way. 192 | 193 | ```javascript 194 | exports.up = async function(knex) { 195 | await knex.schema.createTable("fruits", table => { 196 | // translates to:`"id" INTEGER NOT NULL UNIQUE PRIMARY KEY AUTOINCREMENT` 197 | table.integer("id").notNull().unique().primary(); 198 | // Knex gives us a shortcut table.increments("id"); 199 | // translates to:`"name" NOT NULL UNIQUE` 200 | table.text("name").notNull().unique(); 201 | // translates to:`"avgWeightOz" FLOAT` 202 | table.float("avgWeightOz"); 203 | // translates to:`"delicious" BOOLEAN DEFAULT 0` 204 | table.boolean("delicious").defaultTo(false); }); // or defaultTo(0) 205 | }; 206 | ``` 207 | 208 | To bring those changes into the db `npx knex migrate:latest` 209 | 210 | 1. The `down` function is like moving back one step in time. It should be the reverse of the up function. 211 | 212 | ```javascript 213 | exports.down = async function(knex) { 214 | await knex.schema.dropTableIfExists("fruits"); 215 | }; 216 | ``` 217 | 218 | We DROP the table here; execute with `npx knex migrate:rollback` 219 | 220 | To Add a Column: 221 | 222 | ```javascript 223 | exports.up = async function(knex) { 224 | await knex.schema.alterTable("fruits", table => { 225 | table.text("color"); 226 | }); 227 | }; 228 | 229 | exports.down = async function(knex) { 230 | await knex.schema.alterTable("fruits", table => table.dropColumn("color")); 231 | }; 232 | ``` 233 | 234 | - Always better to roll a new migration out to fix errors than to rollback, you don't want to lose data that your customers might have added. 235 | - Migrations run in batches. If we create our migrations at once, then it rolls them back in the same batch. When we roll back, it rolls back the last batch, not the last file. 236 | 237 | # Seeds 238 | 239 | 1. 
To create a Seed file with the CLI `npx knex seed:make 001_fruits` where < 001_fruits > is the file name. Unlike with migrations, knex does not prepend a date stamp which means if we want them to run in order, we need to number them in a way that is sequential. 240 | 241 | 2. In the new Seed file: 242 | 243 | ```javascript 244 | exports.seed = async function(knex) { await knex("fruits").truncate(); await knex("fruits").insert([ { name: "dragonfruit", avgWeightOz: 16.7, delicious: true, color: "red" }, { name: "strawberry", avgWeightOz: 0.42, delicious: true, color: "red" }, { name: "banana", avgWeightOz: 4.0, delicious: true, color: "yellow" }, { name: "noni", avgWeightOz: 15.0, delicious: false, color: "white" } ]); }; 245 | ``` 246 | 247 | To run Seed file: `npx knex seed:run` 248 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/SQL.md: -------------------------------------------------------------------------------- 1 | # DQL example 2 | 3 | ## Read 4 | 5 | SELECT < column names > FROM < table name > WHERE < column name > = < some value >; 6 | 7 | SELECT email FROM users WHERE name = 'Jason Maurer'; 8 | 9 | SQL keywords are not case sensitive, however convention is uppercase 10 | SQL commands always end with a semi-colon, which allows you to split commands up into multiple lines: 11 | 12 | SELECT email 13 | FROM users 14 | WHERE name = 'Jason Maurer'; 15 | 16 | # DML example - Don't forget the WHERE!! 
17 | 18 | ## Create 19 | 20 | INSERT INTO < table name > (< column names >) 21 | VALUES (< some values >) 22 | 23 | INSERT INTO Category(CategoryName, Description) 24 | Values('Frozen', 'Ready-to-eat meals'); 25 | 26 | ## Update 27 | 28 | UPDATE < table name> 29 | SET < column name > = < some value > 30 | WHERE < column name > = < some value > // This is the line that keeps us from updating EVERY row 31 | 32 | UPDATE Category 33 | SET Description = 'Desserts and ready-to-eat meals' 34 | WHERE Id = 9; 35 | 36 | ## Destroy 37 | 38 | DELETE FROM < table name > 39 | WHERE < column name > = < some value > 40 | 41 | DELETE FROM Category 42 | WHERE Id = 9; 43 | 44 | # ORM - Object Relational Mapping Library 45 | 46 | instead of "INSERT INTO users": 47 | 48 | const user = new User() 49 | user.save() 50 | 51 | ORM's don't provide a lot of flexibility 52 | 53 | # Query Builder 54 | 55 | Allows us to write JS code that will translate into SQL 56 | 57 | Using a Query Builder instead of "INSERT INTO users" 58 | 59 | table("users").insert() 60 | 61 | We will use a Query Builder called knexjs - http://knexjs.org 62 | 63 | # Dependencies 64 | 65 | npm i knex sqlite3 66 | 67 | knex cheatsheet https://devhints.io/knex 68 | 69 | ## Misc 70 | 71 | - .first() returns the first item in the array that will be sent back 72 | Notes based on lecture by Jason Maurer. 
72 | 73 | - to return the newly updated object in a POST instead of just the id (using try/catch with async/await): instead of returning the id, return a new query to posts for that id 74 | 75 | try { 76 | const payload = { 77 | title: req.body.title, 78 | contents: req.body.contents 79 | }; 80 | const [id] = await db("posts").insert(payload); 81 | res.json( 82 | await db("posts") 83 | .where("id", id) 84 | .first() 85 | ); 86 | } catch (err) { 87 | next(err); 88 | } 89 | 90 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Sessions.md: -------------------------------------------------------------------------------- 1 | # Sessions [(express-session)](https://github.com/expressjs/session#readme) 2 | 3 | ## Session/Cookie Flow: 4 | 5 | - Client sends credentials to server 6 | - Server verifies credentials 7 | - Server creates a session for the client 8 | - Server sends back cookie as header (Set-Cookie header) 9 | - Client stores the cookie in its cookie jar 10 | - Client sends all cookies in the cookie jar on every request 11 | - Server verifies the cookie is valid 12 | - Server provides access to resource 13 | 14 | Allows us to create sessions in memory on our machine: 15 | 16 | `npm i express-session` 17 | 18 | To be used: 19 | 20 | ```javascript 21 | const session = require("express-session"); 22 | server.use(session()); 23 | ``` 24 | 25 | In the login router, assign the session to the user: 26 | req.session.user = user; 27 | 28 | To validate in protected routes: 29 | 30 | ```javascript 31 | if (!req.session || !req.session.user) { 32 | return res.status(403).json({ 33 | message: "You are not authorized" 34 | }); 35 | } 36 | ``` 37 | 38 | The session function can take an options object: 39 | 40 | ```javascript 41 | server.use( 42 | session({ 43 | name: "banana", // default is "connect.sid" - changing it makes it less obvious we are using express-session 44 | resave: false, //keep it false to avoid recreating sessions that have not changed 45
| saveUninitialized: false, // GDPR laws against setting cookies automatically 46 | secret: "keep it secret, keep it safe!", // to cryptographically sign the cookie, should abstract into an environment variable (.env) 47 | cookie: { 48 | httpOnly: true, //javascript can't access the contents of the cookie 49 | maxAge: 1000 * 60 * 60 * 24 * 7, //logs user out after 7 days 50 | secure: false // in prod this should be true so the cookie is only sent over HTTPS 51 | } 52 | }) 53 | ); 54 | ``` 55 | 56 | Updated restricted middleware: 57 | 58 | ```javascript 59 | module.exports = () => { 60 | const authError = { 61 | message: "Invalid Credentials" 62 | }; 63 | 64 | return (req, res, next) => { 65 | if (!req.session || !req.session.user) { 66 | return res.status(401).json(authError); 67 | } 68 | 69 | next(); 70 | }; 71 | }; 72 | ``` 73 | 74 | Updated protected route: 75 | 76 | ```javascript 77 | router.get("/protected", restricted(), async (req, res, next) => { 78 | try { 79 | res.json({ 80 | message: "You are authorized" 81 | }); 82 | } catch (err) { 83 | next(err); 84 | } 85 | }); 86 | ``` 87 | 88 | Logout: 89 | 90 | ```javascript 91 | router.get("/logout", restricted(), (req, res, next) => { 92 | req.session.destroy(err => { 93 | if (err) { 94 | next(err); 95 | } else { 96 | res.json({ 97 | message: "You are logged out" 98 | }); 99 | } 100 | }); 101 | }); 102 | ``` 103 | 104 | Adding persistence: 105 | As it stands, the session is stored in memory so every time the server restarts, we lose our authorization. 
[Here is a list of express-session compatible stores.](https://github.com/expressjs/session#compatible-session-stores) 106 | 107 | `npm i connect-session-knex` 108 | 109 | In server.js 110 | 111 | ```javascript 112 | const knexSessionStore = require("connect-session-knex")(session); 113 | ``` 114 | 115 | Add to sessionConfig: 116 | 117 | ```javascript 118 | const sessionConfig = { 119 | name: "banana", 120 | resave: false, 121 | saveUninitialized: false, 122 | secret: "Keep it secret, keep it safe!", 123 | cookie: { 124 | httpOnly: true, 125 | maxAge: 1000 * 60 * 60 * 24 * 7, 126 | secure: false 127 | }, 128 | store: new knexSessionStore({ 129 | knex: require("../data/db.config"), 130 | tablename: "sessions", 131 | sidfieldname: "sid", 132 | createtable: true, 133 | clearInterval: 1000 * 60 * 60 134 | }) 135 | }; 136 | ``` 137 | 138 | This new store config creates a table in the database to store sessions 139 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Prep-1-Environment.md: -------------------------------------------------------------------------------- 1 | ## Dependencies 2 | 3 | ```javascript 4 | npm init 5 | npm i --save-dev nodemon jest supertest cross-env 6 | npm i express cors morgan helmet sqlite3 dotenv knex bcryptjs 7 | npx gitignore node 8 | ``` 9 | 10 | Add to .gitignore: 11 | 12 | ```js 13 | # Mac Files 14 | .DS_Store 15 | 16 | #vscode 17 | .vscode 18 | ``` 19 | 20 | # Package.json 21 | 22 | Scripts 23 | 24 | ```js 25 | "server": "cross-env NODE_ENV=dev nodemon index.js", 26 | "start": "node index.js", 27 | "test": "cross-env DB_ENV=testing jest --verbose --watch" 28 | //to be added 29 | , 30 | "jest": { 31 | "testEnvironment": "node" 32 | } 33 | ``` 34 | 35 | Add an additional configuration 36 | 37 | ```js 38 | , 39 | "jest": { 40 | "testEnvironment": "node" 41 | } 42 | 43 | ``` 44 | 45 | # JWT 46 | 47 | - npm i jsonwebtoken 48 | 49 | # Sessions and Cookies 50 | 51 | -npm i 
express-session 52 | 53 | # .env file 54 | 55 | //could do a secrets file that exports and object then call secrets.PORT, etc... 56 | PORT = 5000 57 | JWT_SECRET = "super secret key string" 58 | 59 | # Add to .gitignore 60 | 61 | ```js 62 | 63 | # database 64 | 65 | \*db3 66 | 67 | ``` 68 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Prep-2-.env.md: -------------------------------------------------------------------------------- 1 | ```javascript 2 | PORT = 5000; 3 | 4 | JWT_SECRET = "If I told you, it wouldn't be a super secret key string"; 5 | ``` 6 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Prep-3-config.md: -------------------------------------------------------------------------------- 1 | Create a directory named config 2 | 3 | # Secrets 4 | 5 | In config directory: 6 | 7 | - Create file named `secrets.js` 8 | 9 | ```javascript 10 | module.exports = { 11 | jwtSecret: process.env.JWT_SECRET 12 | }; 13 | ``` 14 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Step-1-Server.md: -------------------------------------------------------------------------------- 1 | ## Server 2 | 3 | - create directory named `api` 4 | - create file named `server.js` 5 | 6 | # Example of a server file: 7 | 8 | ```javascript 9 | const express = require("express"); 10 | const morgan = require("morgan"); 11 | const helmet = require("helmet"); 12 | const cors = require("cors"); 13 | 14 | const userRouter = require("../users/users-router"); 15 | const projectsRouter = require("../projects/projects-router"); 16 | const resourcesRouter = require("../resources/resource-router"); 17 | const authRouter = require("../auth/auth-router"); 18 | const valuesRouter = require("../values/values-router"); 19 | const contextsRouter = require("../contexts/context-router"); 20 | const 
userDataRouter = require("../user-data/user-data-router"); 21 | 22 | const server = express(); 23 | 24 | server.use(express.json()); 25 | server.use(cors()); 26 | server.use(morgan("dev")); 27 | server.use(helmet()); 28 | 29 | server.use("/api/auth", authRouter); 30 | server.use("/api/user", userRouter); 31 | server.use("/api/projects", projectsRouter); 32 | server.use("/api/resources", resourcesRouter); 33 | server.use("/api/values", valuesRouter); 34 | server.use("/api/contexts", contextsRouter); 35 | server.use("/api/user/data", userDataRouter); 36 | 37 | server.get("/", (req, res) => { 38 | res.send("
Here to serve."); 39 | }); 40 | 41 | server.use((err, req, res, next) => { 42 | console.log(err.message); 43 | res.status(500).json({ 44 | message: "Something went wrong!", 45 | error: err.message 46 | }); 47 | }); 48 | 49 | module.exports = server; 50 | ``` 51 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Step-2-index.md: -------------------------------------------------------------------------------- 1 | # Index.js (root level) 2 | 3 | - Require server 4 | 5 | # Example of a index.js file: 6 | 7 | ```javascript 8 | require("dotenv").config(); 9 | const server = require("./api/server"); 10 | 11 | //define port 12 | const port = process.env.PORT; 13 | server.listen(port, () => 14 | console.log(`\n*** Server Running on http://localhost:${port} ***\n`) 15 | ); 16 | ``` 17 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Step-3-knexfile.md: -------------------------------------------------------------------------------- 1 | # Create knexfile.js 2 | 3 | - create knexfile 4 | - npx knex init 5 | - create configs in knexfile.js 6 | 7 | # Example of a knex file: 8 | 9 | ```javascript 10 | module.exports = { 11 | development: { 12 | client: "sqlite3", 13 | useNullAsDefault: true, 14 | connection: { 15 | /* this is where we define our database name */ 16 | filename: "./data/projects.db3" 17 | }, 18 | migrations: { 19 | directory: "./data/migrations" 20 | }, 21 | seeds: { 22 | directory: "./data/seeds" 23 | }, 24 | pool: { 25 | afterCreate: (conn, done) => { 26 | conn.run("PRAGMA foreign_keys = ON", done); 27 | } 28 | } 29 | }, 30 | testing: { 31 | client: "sqlite3", 32 | connection: { 33 | filename: "./data/test.db3" 34 | }, 35 | useNullAsDefault: true, 36 | migrations: { 37 | directory: "./data/migrations" 38 | }, 39 | seeds: { 40 | directory: "./data/seeds" 41 | } 42 | } 43 | }; 44 | ``` 45 | 
-------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Step-4-db-config.md: -------------------------------------------------------------------------------- 1 | # Create db-config.js (to be required in model files) 2 | 3 | - In data directory 4 | 5 | # Example of a db-config.js file: 6 | 7 | ```javascript 8 | const knex = require("knex"); 9 | const config = require("../knexfile"); 10 | 11 | const env = process.env.DB_ENV || "development"; 12 | 13 | module.exports = knex(config[env]); 14 | ``` 15 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Step-5-create-tables.md: -------------------------------------------------------------------------------- 1 | # Create Tables 2 | 3 | - table migrations are time stamped to ensure that they run in order 4 | - this table include examples of foreign keys and intermediary tables 5 | - npx migrate:make initial 6 | 7 | # Example of a Tables Migration: 8 | 9 | ```javascript 10 | exports.up = async function(knex) { 11 | await knex.schema.createTable("projects", tbl => { 12 | tbl.increments(); 13 | tbl.string("project_name").notNullable(); 14 | tbl.text("project_description"); 15 | tbl.boolean("project_complete").defaultTo("false"); 16 | }); 17 | await knex.schema.createTable("tasks", tbl => { 18 | tbl.increments(); 19 | tbl.text("task_description").notNullable(); 20 | tbl.text("task_notes"); 21 | tbl.boolean("complete").defaultTo("false"); 22 | tbl 23 | .integer("project_id") 24 | .notNullable() 25 | .unsigned() 26 | .references("id") 27 | .inTable("projects") 28 | .onUpdate("CASCADE") 29 | .onDelete("CASCADE"); 30 | }); 31 | await knex.schema.createTable("resources", tbl => { 32 | tbl.increments(); 33 | tbl.string("resource_name").notNullable(); 34 | tbl.text("resource_description"); 35 | }); 36 | await knex.schema.createTable("projects_resources", tbl => { 37 | tbl 38 | .integer("project_id") 39 | 
.notNullable() 40 | .unsigned() 41 | .references("id") 42 | .inTable("projects") 43 | .onUpdate("CASCADE") 44 | .onDelete("CASCADE"); 45 | tbl 46 | .integer("resource_id") 47 | .notNullable() 48 | .unsigned() 49 | .references("id") 50 | .inTable("resources") 51 | .onUpdate("CASCADE") 52 | .onDelete("CASCADE"); 53 | tbl.primary(["project_id", "resource_id"]); 54 | }); 55 | await knex.schema.createTable("contexts", tbl => { 56 | tbl.increments(); 57 | tbl.text("context").notNullable(); 58 | }); 59 | await knex.schema.createTable("tasks_contexts", tbl => { 60 | tbl 61 | .integer("task_id") 62 | .notNullable() 63 | .unsigned() 64 | .references("id") 65 | .inTable("tasks") 66 | .onUpdate("CASCADE") 67 | .onDelete("CASCADE"); 68 | tbl 69 | .integer("context_id") 70 | .notNullable() 71 | .unsigned() 72 | .references("id") 73 | .inTable("contexts") 74 | .onUpdate("CASCADE") 75 | .onDelete("CASCADE"); 76 | tbl.primary(["task_id", "context_id"]); 77 | }); 78 | }; 79 | 80 | exports.down = async function(knex) { 81 | await knex.schema.dropTableIfExists("tasks_contexts"); 82 | await knex.schema.dropTableIfExists("contexts"); 83 | await knex.schema.dropTableIfExists("projects_resources"); 84 | await knex.schema.dropTableIfExists("resources"); 85 | await knex.schema.dropTableIfExists("tasks"); 86 | await knex.schema.dropTableIfExists("projects"); 87 | }; 88 | ``` 89 | 90 | `npx knex migrate:latest` 91 | 92 | - if not yet in production and need to make changes 93 | `npx knex migrate:rollback` 94 | - allows you to rollback the table, make changes, then migrate:latest to apply changes 95 | - if in production, it is better to create new migrations that `alter` the table to protect any data a user may have saved to database already 96 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Step-6-create-seeds.md: -------------------------------------------------------------------------------- 1 | # Create Seeds 2 | 3 | - 
seeds are not timestamped; create them with a numeric prefix (00 for cleanup; the rest should start at 001 and increment so that they run sequentially in the order they were created — some seed data may be required by later tables, so seeds need to run in the correct order. Adding ids to seeds is not strictly necessary, but it can avoid future issues with foreign keys that expect a particular id) 4 | 5 | - npx knex seed:make 6 | 7 | - examples of multiple seeds created sequentially exist [here](https://github.com/FreedomWriter/node-db-challenge/tree/master/data/seeds) 8 | 9 | # Example seed data: 10 | 11 | ```js 12 | const bcrypt = require("bcryptjs"); 13 | 14 | // user seed data with hashed password 15 | exports.seed = async function(knex) { 16 | await knex("users").truncate(); 17 | await knex("users").insert([ 18 | { id: 1, username: "myUser", password: bcrypt.hashSync("password", 10) } 19 | ]); 20 | }; 21 | 22 | // other examples of seed data 23 | exports.seed = async function(knex) { 24 | await knex("tasks").truncate(); 25 | await knex("tasks").insert([ 26 | { project_id: 1, task_description: "Enroll in Lambda" }, 27 | { project_id: 1, task_description: "Cry", notes: "Don't cry too long" }, 28 | { project_id: 1, task_description: "start understanding" } 29 | ]); 30 | }; 31 | ``` 32 | 33 | - When all seeds are created, run them: 34 | `npx knex seed:run` 35 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Step-7-create-models.md: 1 | # Create models 2 | 3 | - For each resource, create a directory (in the case of projects as a resource, create a directory named `projects`) 4 | - In this folder, create a file named `projects-model.js` 5 | 6 | - require the db config 7 | - create needed functions and export them 8 | 9 | - These are very simple models, you will likely need to perform a number of joins to return the data the front end needs 10 | 
11 | # Example of a model: 12 | 13 | ```javascript 14 | const db = require("../data/db-config"); 15 | 16 | // return all projects 17 | function find() { 18 | return db("projects").select(); 19 | } 20 | 21 | // flexible lookup used by the auth router, e.g. findBy({ username }) 22 | function findBy(filter) { 23 | return db("users").where(filter); 24 | } 25 | 26 | // models should not reference req/res; keep query options in the router 27 | function findById(id) { 28 | return db("projects") 29 | .where({ id }) 30 | .first(); 31 | } 32 | 33 | 34 | async function add(project) { 35 | const [id] = await db("projects").insert(project); 36 | return db("projects") 37 | .where({ id }) 38 | .first(); 39 | } 40 | 41 | async function update(id, body) { 42 | await db("projects") 43 | .where({ id }) 44 | .update(body); 45 | 46 | return findById(id); 47 | } 48 | 49 | function remove(id) { 50 | return db("projects") 51 | .where({ id }) 52 | .del(); 53 | } 54 | 55 | module.exports = { 56 | find, 57 | findBy, 58 | findById, 59 | add, 60 | update, 61 | remove 62 | }; 63 | ``` 64 | 65 | # An example of a more complex model using joins 66 | 67 | ```javascript 68 | async function find() { 69 | const project = await db("projects as p") 70 | .leftJoin("values as v", "p.value_id", "v.id") 71 | .leftJoin("users as u", "u.id", "p.user_id") 72 | .leftJoin("user_data as ud", "ud.project_id", "p.id") 73 | .leftJoin("tasks as t", "t.project_id", "p.id") 74 | .leftJoin("project_resources as pr", "pr.project_id", "p.id") 75 | .leftJoin("resources as r", "r.id", "pr.resource_id") 76 | .leftJoin("task_contexts as tc", "tc.task_id", "t.id") 77 | .leftJoin("contexts as c", "c.id", "tc.context_id") 78 | .select( 79 | "p.user_id", 80 | "u.username", 81 | "p.id", 82 | "p.project_name", 83 | "p.project_description", 84 | "p.project_complete", 85 | "t.task_description", 86 | "t.task_notes", 87 | "t.task_complete", 88 | "r.resource_name", 89 | "r.resource_description", 90 | "c.context" 91 | ); 92 | const valuesArr = await db("projects as p") 93 | .leftJoin("values as v", "p.value_id", "v.id") 94 | 
.select("v.value"); 95 | const values = valuesArr.filter(value => value.value !== null); 96 | return { values, project }; 97 | } 98 | ``` 99 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Step-by-step/Step-8-create-routers.md: -------------------------------------------------------------------------------- 1 | # Example of "/auth/login" and "/auth/register" 2 | 3 | ## Note 4 | 5 | These files are using `bcrypt`, `jwt`, `secrets` 6 | We are generating our `token` in this file 7 | We `hash` the password on `register` before sending it on to our model to be added to the db 8 | 9 | # Example of an Auth Router: 10 | 11 | ```javascript 12 | const router = require("express").Router(); 13 | const bcrypt = require("bcryptjs"); 14 | const jwt = require("jsonwebtoken"); 15 | 16 | const secrets = require("../config/secrets"); 17 | const Users = require("../users/users-model.js"); 18 | 19 | function generateToken(user) { 20 | return jwt.sign( 21 | { 22 | userId: user.id 23 | }, 24 | secrets.jwt, 25 | { 26 | expiresIn: "1h" 27 | } //1h = 1 hour 28 | ); 29 | } 30 | 31 | // async/await 32 | 33 | router.post("/register", async (req, res, next) => { 34 | try { 35 | let user = req.body; 36 | const hash = bcrypt.hashSync(user.password, 10); 37 | user.password = hash; 38 | const newUser = await db.add(user); 39 | const token = await genToken(newUser); 40 | res.status(201).json({ 41 | message: `Welcome ${user.username}`, 42 | token: token, 43 | department: user.department 44 | }); 45 | } catch (err) { 46 | next(err); 47 | } 48 | }); 49 | 50 | // for endpoints beginning with /api/auth 51 | router.post("/register", (req, res) => { 52 | let user = req.body; 53 | const hash = bcrypt.hashSync(user.password, 10); // 2 ^ n 54 | user.password = hash; 55 | 56 | Users.add(user) 57 | .then(saved => { 58 | const token = generateToken(saved); 59 | res.status(201).json({ 60 | message: `Welcome ${user.username}`, 61 | authToken: token 62 | }); 
63 | }) 64 | .catch(error => { 65 | res.status(500).json(error); 66 | }); 67 | }); 68 | 69 | router.post("/login", async (req, res, next) => { 70 | try { 71 | const { username, password } = req.body; 72 | const user = await usersModel.findBy({ username }).first(); 73 | console.log(user); 74 | if (user !== undefined) { 75 | const passwordValid = await bcrypt.compareSync( 76 | password, 77 | user.password, 78 | 10 79 | ); 80 | if (passwordValid) { 81 | const token = generateToken(user); 82 | res.status(200).json({ 83 | message: `Welcome ${user.username}!`, 84 | token: token, 85 | user_id: user.id, 86 | user: user 87 | }); 88 | } 89 | } else { 90 | res.status(401).json({ message: "Invalid Credentials" }); 91 | } 92 | } catch (err) { 93 | next(err); 94 | } 95 | }); 96 | 97 | module.exports = router; 98 | ``` 99 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/Testing.md: -------------------------------------------------------------------------------- 1 | # Automated testing node.js (Still needs to be audited for accuracy) 2 | 3 | Types of Automated Testing 4 | 5 | - unit testing - we use this to test our pure functions 6 | - integrations testing - we use this to test routers and middleware, _should take priority_ 7 | - snapshot testing 8 | - end-to-end testing 9 | 10 | `npm --save-dev jest` - would have already done this is following guide 11 | 12 | # Add a test script 13 | 14 | ```js 15 | //start 16 | "test": "cross-env DB_ENV=testing jest --verbose --watch" 17 | // add 18 | "jest": { 19 | "testEnvironment": "node" 20 | } 21 | ``` 22 | 23 | # Create first test file 24 | 25 | In root create `index.spec.js` or alternatively `index.test.js` 26 | 27 | Create a placeholder test to ensure our test runner is finding files. 
28 | 29 | ```js 30 | test("a placeholder test", async () => { 31 | expect(2 + 2).toBe(4); 32 | }); 33 | ``` 34 | 35 | # Supertest 36 | 37 | ## We have split out the code so that server.js defines the server and index.js actually starts it, so this isn't a problem for us. 38 | 39 | When using supertest, we don't want the server running, supertest will start it behind the scenes, automatically. Because of this, we need to adjust our index.js file. There, we have server.listen() which is running the server. We don't want that! We want to export our server. Then a really simple way to handle this problem by saying if this index file is required into another file, don't actually start the server, just export it. But if this index file is actually the file that node is running, start the server. 40 | 41 | ```js 42 | // in index.js 43 | if (!module.parent) { 44 | const port = process.env.PORT || 5000; 45 | server.listen(port, () => console.log(`\n** server up on port ${port} **\n`)); 46 | } 47 | const port = process.env.PORT || 5000; 48 | server.listen(port, () => console.log(`\n** server up on port ${port} **\n`)); 49 | 50 | module.exports = server; 51 | ``` 52 | 53 | Another method of solving this could be: 54 | 55 | - We could use an environment variable 56 | 57 | We have split out the code so that server.js defines the server and index.js actually starts it, so this isn't a problem for us. 58 | 59 | Consider 3 questions: 60 | 61 | 1. Does it return the expected status code? 62 | 2. Does it return the expected data format? 63 | 3. Does it return the expected data? 
64 | 65 | # Simple Integration test 66 | 67 | ```js 68 | // test("a placeholder test", async () => { 69 | // expect(2 + 2).toBe(4); 70 | // }); 71 | 72 | const supertest = require("supertest"); 73 | //remember our server wount actually start 74 | // due to the if statement in index.js 75 | const server = require("./index"); 76 | const db = require("./data/dbConfig"); 77 | 78 | beforeEach(async () => { 79 | await db.seed.run; 80 | }); 81 | 82 | test("welcome route", async () => { 83 | const res = await supertest(server).get("/"); 84 | // Does it return the expected status code? 85 | expect(res.status).toBe(200); 86 | // // Does it return the expected data format? 87 | expect(res.type).toBe("application/json"); 88 | // Does it return the expected data? 89 | // console.log(res.body); 90 | expect(res.body.api).toBe("up"); 91 | }); 92 | 93 | test("create hobbit route", async () => { 94 | const res = await supertest(server) 95 | .post("/hobbits") 96 | .send({ name: "gaffer" }); 97 | expect(res.status).toBe(201); 98 | expect(res.type).toBe("application/json"); 99 | expect(res.body.name).toBe("gaffer"); 100 | //deep assertion 101 | expect(res.body).toEqual({ id: 5, name: "gaffer" }); 102 | }); 103 | 104 | test("get hobbit list", async () => { 105 | const res = await supertest(server).get("/hobbits"); 106 | expect(res.status).toBe(200); 107 | expect(res.type).toBe("application/json"); 108 | expect(res.body.length).toBeGreaterThan(0); 109 | expect(res.body[0].id).toBe(1); 110 | expect(res.body[0].name).toBe("sam"); 111 | }); 112 | ``` 113 | 114 | # Testing routes that access the database 115 | 116 | In `knexfile.js` 117 | 118 | ```js 119 | test: { 120 | client: 'sqlite3', 121 | connection: { 122 | filename: './data/test.db3', 123 | }, 124 | useNullAsDefault: true, 125 | migrations: { 126 | directory: './data/migrations', 127 | }, 128 | seeds: { 129 | directory: './data/seeds', 130 | }, 131 | } 132 | ``` 133 | 134 | In terminal: 135 | 136 | ```node 137 | npx knex 
migrate:latest --env=dev 138 | ``` 139 | 140 | ```node 141 | npx knex migrate:latest --env=test 142 | ``` 143 | 144 | # Unit test for model 145 | 146 | Hobbits Model Unit Tests 147 | create a file called `hobbitsModel.spec.js` in the `hobbits` directory 148 | 149 | ```js 150 | const db = require("../data/dbConfig"); 151 | const hobbitsModel = require("./hobbitsModel"); 152 | 153 | beforeEach(async () => { 154 | //re seed the database each time tests are run 155 | await db.seed.run(); 156 | }); 157 | describe("hobbits model", () => { 158 | test("getAll", async () => { 159 | const all = await hobbitsModel.getAll(); 160 | expect(all).toHaveLength(4); 161 | }); 162 | 163 | test("findById", async () => { 164 | const res = await hobbitsModel.findById(1); 165 | expect(res.name).toBe("sam"); 166 | }); 167 | 168 | test("insert", async () => { 169 | await hobbitsModel.insert({ name: "bilbo" }); 170 | const hobbits = await db("hobbits").select(); 171 | expect(hobbits).toHaveLength(5); 172 | }); 173 | 174 | test("update", async () => { 175 | await hobbitsModel.update(1, { name: "tom" }); 176 | const res = await hobbitsModel.findById(1); 177 | expect(res.name).toBe("tom"); 178 | }); 179 | 180 | test("delete", async () => { 181 | const res = await hobbitsModel.remove(1); 182 | expect(res).toBe(1); 183 | const hobbits = await hobbitsModel.getAll(); 184 | expect(hobbits).toHaveLength(3); 185 | expect(hobbits.length).toBeGreaterThan(0); 186 | }); 187 | }); 188 | 189 | // Need an afterEach to stop leak (destroy db connection) 190 | ``` 191 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/bycrypt.md: -------------------------------------------------------------------------------- 1 | [Bcrypt](https://www.npmjs.com/package/bcryptjs) is a key derivation library (Bcrypt is not an encryption library, it is a hashing library). 
2 | 3 | ## Salting 4 | 5 | Adding a random string to the password before hashing (the salt is stored along with the resulting hash so the same comparison can be reproduced later) 6 | 7 | # To hash password using bcryptjs in the userModel on post 8 | 9 | npm i bcryptjs 10 | const bcrypt = require("bcryptjs") 11 | 12 | async function add(user) { 13 | user.password = await bcrypt.hash(user.password, 16) 14 | const [id] = await db("users").insert(user); 15 | return findById(id); 16 | } 17 | 18 | //rounds is 2^16 === 65,536 times, a bit high. optimal is where it takes about 1 second 19 | 20 | # To validate a hashed password on login 21 | 22 | router.post("/login", async (req, res, next) => { 23 | try { 24 | const { username, password } = req.body; 25 | 26 | const user = await usersModel.findBy({ username }).first(); 27 | 28 | const passwordValid = user ? await bcrypt.compare(password, user.password) : false; 29 | 30 | if (user && passwordValid) { 31 | res.status(200).json({ message: `Welcome ${user.username}!` }); 32 | } else { 33 | res.status(401).json({ message: "Invalid Credentials" }); 34 | } 35 | 36 | } catch (err) { 37 | next(err); 38 | } 39 | }); 40 | 41 | # Middleware Function that validates a user 42 | 43 | function restricted() { 44 | const authError = { 45 | message: "Invalid Credentials" 46 | }; 47 | 48 | return async (req, res, next) => { 49 | try { 50 | const { username, password } = req.headers; 51 | if (!username || !password) { 52 | return res.status(401).json(authError); 53 | } 54 | const user = await usersModel.findBy({ username }).first(); 55 | if (!user) { 56 | return res.status(401).json(authError); 57 | } 58 | const passwordValid = await bcrypt.compare(password, user.password); 59 | if (!passwordValid) { 60 | return res.status(401).json(authError); 61 | } 62 | // if we reach this point in the code we know that the user is authenticated 63 | next(); 64 | } catch (err) { 65 | next(err); 66 | } 67 | }; 68 | } 69 | --------------------------------------------------------------------------------
/Express-Node-SQLite3/data-modeling.md: -------------------------------------------------------------------------------- 1 | # Data Modeling Notes 2 | 3 | # Normalization 4 | 5 | - No field values are repeated 6 | - No redundant records 7 | - Each record has a unique primary key 8 | - All fields in a table should directly relate to the primary key (you shouldn't have a column in a table that is not related to that data) 9 | 10 | # One to One 1:1 11 | 12 | - each citizen has one social security number, each social security number belongs to one citizen 13 | - each house has one electricity account, each account corresponds to one house 14 | 15 | Either table could have the foreign key, but that foreign key must be unique. Consider if it would be better to represent the data in a single table. (It often will be better to have two tables.) 16 | 17 | # One to Many 1:\* 18 | 19 | - one user can have many posts, but each post belongs to one user 20 | - one foot can have many toes, each toe has one foot 21 | 22 | When you are working with one to many relationships, there is going to be a foreign key in one of the tables. To know which of the tables gets the foreign key, figure out which of the tables is the one and which is the many. The foreign key goes in the many table. 23 | 24 | # Many to Many _:_ 25 | 26 | - on band will play at many different venues, each venue will have many bands play 27 | - one magazine will have many authors, each author can write for many magazines 28 | 29 | With many to many tables we have to create an intermediary table that links the two tables together. In the intermediary table, each of the keys won't be unique but we expect to see unique combinations. 
30 | 31 | Band table 32 | Venue table 33 | Table with Band Id and Venue Id //purpose of this table is to hold a relationship between farms and animals - primary key is (band_id, venue_id), sometimes called a join table or an intermediary table 34 | 35 | # Foreign Keys in Knex 36 | 37 | Code necessary to set up foreign key enforcement in knexfile.js: 38 | 39 | pool: { 40 | afterCreate: (conn, done) => { 41 | conn.run('PRAGMA foreign_keys = ON', done) 42 | }, 43 | }, 44 | 45 | We need to understand our one to many tables before we create the tables. The one has to be created before the many since the many will reference the one. 46 | 47 | You can chain .createTable to the end of another table. You especially want to do this when creating foreign keys. 48 | 49 | exports.up = function(knex) { 50 | return knex.schema.createTable('farms', tbl => { 51 | tbl.increments(); 52 | tbl.string('farm_name', 128).notNullable(); 53 | }) 54 | .createTable('ranchers', tbl => { 55 | tbl.unsigned() 56 | .notNullable() 57 | .references('id') 58 | .inTable('farms') 59 | }) 60 | } 61 | 62 | exports.down = function(knex) { 63 | return knex.schema.dropTableIfExists('ranchers') 64 | .dropTableIfExists('ranchers') 65 | }; 66 | 67 | Order matters in the down function, think about shoes and socks. First you put your socks on, then you put your shoes on. When taking them off, you have to take your shoes off first, then you can take your socks off. Drop tables in the opposite order that you created them. 68 | 69 | # 00-cleanup.js 70 | 71 | exports.seed = async (knex) => { 72 | await knex('zoo_animals').truncate() 73 | await knex('animals').truncate() 74 | await knex('species').truncate() 75 | await knex('zoos').truncate() 76 | } 77 | 78 | The above code can replace the cleaner library. the library sometimes does not respect the order and can cause problems. 79 | 80 | ## Requirements 81 | 82 | A client has hired you to build an API for managing `zoos` and the `animals` kept at each `zoo`. 
The API will be used for `zoos` in the _United States of America_, no need to worry about addresses in other countries. 83 | 84 | For the `zoos` the client wants to record: 85 | 86 | - name. 87 | - address. 88 | 89 | For the `animals` the client wants to record: 90 | 91 | - name. 92 | - species. 93 | - list of all the zoos where they have resided. 94 | 95 | Determine the database tables necessary to track this information. 96 | Label any relationships between table. 97 | 98 | We need 3 tables ( tables = entities, properties = columns or fields, relationships = foreign keys ) 99 | 100 | ## zoos 101 | 102 | id 103 | name 104 | address 105 | 106 | ## animals 107 | 108 | id 109 | name 110 | species_id 111 | 112 | ## species 113 | 114 | id 115 | name 116 | 117 | ## animals zoos 118 | 119 | zoo_id 120 | animal_id 121 | from_date 122 | to_date 123 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/deployment.md: -------------------------------------------------------------------------------- 1 | # Deployment Notes 2 | 3 | ## Qoute 4 | 5 | > Developers get paid to deliver value, not lines of code. 6 | > 7 | > — A pragmatic developer 8 | 9 | ## Jargon 10 | 11 | _"Single Responsiblity Principle"_ 12 | 13 | - a single unit of work should do one thing, one thing only, and do it really well. (It should only have one reason to change) 14 | 15 | ## Deployment 16 | 17 | - extract configuration into enviroment variables 18 | - set up Continuos Deployment from GitHub to Heroku 19 | 20 | # Steps For Continous Deployment 21 | 22 | 1. Add start script 23 | "start": "node index.js" 24 | start is one of the few keywords that you don't have to use "run" with. 25 | 26 | 2. Add dotenv as a dependency `npm i dotenv` 27 | 28 | - You can add an environment variable by typing 29 | 30 | `export PORT=4000` 31 | 32 | in the terminal (to remove `unset PORT`). 
This is an alternative to dotenv 33 | 34 | - `const dotenv = require('dotenv').config()` at the highest level in index.js (assuming that's where your server is listening) 35 | - At the root, create a file named `.env` 36 | - Hide your config data: in your gitignore file include `.env` 37 | 38 | 3. In Heroku 39 | - create a new app 40 | - connect to github repo 41 | - select continous deployment 42 | 43 | ## Misc 44 | 45 | - [The Twelve-Factor App](https://12factor.net/) 46 | 47 | - env variables 48 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/foreign-keys.md: -------------------------------------------------------------------------------- 1 | # Foreign Keys 2 | 3 | A foreign key is a column in a table that points to a Primary Key in another table. 4 | 5 | - foreign key has to be the same data type as the primary key it is pointing to 6 | 7 | ## Querying muliple tables (joins) 8 | 9 | # SQL (northwinds database) 10 | 11 | SELECT * FROM OrderDetail 12 | JOIN Product ON OrderDetail.ProductId = Product.Id 13 | 14 | This is saying "Select everything from the OrderDetail Left Table and for each resulting row, find the row in the Product table matches this condition: Where the ProductId in the OrderDetail table matches the Id in the Product table. 15 | 16 | To select multiple columns we would specify the table followed by a . and then the column name we are interested in, like this: 17 | 18 | SELECT OrderDetail.Id, Product.Id FROM OrderDetail 19 | JOIN Product ON OrderDetail.ProductId = Product.Id 20 | 21 | This is getting messy, isn't it? To clean it up we can use an alias 22 | 23 | SELECT o.Id, Product.Id 24 | FROM OrderDetail AS o 25 | JOIN Product AS p 26 | ON o.ProductId = p.Id 27 | 28 | Another Example where we find the employees first and last name associated with order number 16608 using a join. 
29 | 30 | SELECT e.FirstName, e.LastName FROM "Order" AS o 31 | JOIN "Employee" AS e ON o.EmployeeId = e.id 32 | WHERE o.Id = 16608 33 | 34 | We used "" around Order because order is a reserved keyword (non case sensitive) for SQL, used "" around Employee for consistency. 35 | 36 | We could get the customer ContactName as well: 37 | 38 | SELECT e.FirstName, e.LastName, c.ContactName FROM "Order" AS o 39 | JOIN "Employee" AS e ON o.EmployeeId = e.id 40 | JOIN "Customer" AS c ON o.CustomerId = c.Id 41 | WHERE o.Id = 16608 42 | 43 | What if Ann Doddsworth (the employee from the above query) gets fired and we delete her row from the employee table? Now our SQL query returns null. 44 | 45 | There are multiple JOIN types in SQL. By default we are using what's known as an INNER JOIN. With INNER JOIN's the rule is if there isn't a match on both sides of the JOIN, nothing gets returned. 46 | 47 | Other Join Types: http://www.sql-join.com/sql-join-types 48 | 49 | INNER JOIN 50 | 51 | - Select all records from Left Table and Right Table, where the join condition is met. 52 | 53 | LEFT JOIN 54 | 55 | - Select all records from Left Table, along with records from Right Table for which the join condition is met (if at all). 56 | 57 | RIGHT JOIN 58 | 59 | - Select all records from Right Table, along with records from Left Table for which the join condition is met (if at all). 60 | 61 | FULL JOIN 62 | 63 | - Select all records from Left Table and Right Table, regardless of whether the join condition is met or not. 
64 | 65 | So if Ann gets fired we would use a LEFT JOIN so that our query will return the data that is available 66 | 67 | SELECT o.Id, o.OrderDate, e.FirstName, e.LastName FROM "Order" AS o 68 | LEFT JOIN "Employee" AS e ON o.EmployeeId = e.id 69 | WHERE o.Id = 16608 70 | 71 | The table that we define with JOIN (Employee) is the right side, the table that we specify with FROM is the left side 72 | 73 | RIGHT JOIN and FULL JOIN are not supported in SQLite 74 | The ON is a condition as to how rows match 75 | 76 | ## [Aggregate functions](https://www.sqlite.org/lang_aggfunc.html) 77 | 78 | [Core functions](https://www.sqlite.org/lang_corefunc.html) 79 | 80 | Examples of SUM in SQL 81 | 82 | SELECT SUM(Quantity) AS Total FROM OrderDetail 83 | 84 | SELECT ProductId, SUM(Quantity) AS Total FROM OrderDetail 85 | GROUP BY ProductId 86 | 87 | Figure out how many products are in each category, count each one according to the CategoryId, return the Count and CategoryName: 88 | 89 | SELECT COUNT(Product.Id), Category.CategoryName FROM Product 90 | JOIN Category ON Category.Id = Product.CategoryId 91 | GROUP BY CategoryId 92 | 93 | # JOIN in knex 94 | 95 | INNER JOIN users AS u ON u.id = p.user_id 96 | WHERE user_id = id 97 | SELECT p.id, p.contents, u.username 98 | 99 | .join("table name we are joining", "1st part of conditional", "2nd part of conditional") 100 | .select("column to be returned", "column to be returned", "column to be returned") 101 | 102 | const { id } = req.params 103 | const posts = await db("posts as p") 104 | .join("users as u", "u.id", "p.user_id") 105 | .where({ user_id: id}) 106 | .select("p.id", "p.contents", "u.username") 107 | 108 | res.json(posts) 109 | 110 | # Separating queries from routes 111 | 112 | const postModel = require("./post-model"); 113 | 114 | router.get("/", async (req, res, next) => { 115 | try { 116 | const { id } = req.params; 117 | const posts = await db("posts as p") 118 | .join("users as u", "u.id", "p.user_id") 119 | .where({ 
user_id: id }) 120 | .select("p.id", "p.contents", "u.username"); 121 | 122 | res.json(posts); 123 | } catch (err) { 124 | next(err); 125 | } 126 | }); 127 | 128 | Becomes: 129 | 130 | router.get("/", async (req, res, next) => { 131 | try { 132 | const { id } = req.params; 133 | const posts = await postModel.find(id); 134 | 135 | res.json(posts); 136 | } catch (err) { 137 | next(err); 138 | } 139 | }); 140 | 141 | The model looks like: 142 | 143 | const db = require("../data/db-config.js"); 144 | 145 | function find(user_id) { 146 | return db("posts as p") 147 | .join("users as u", "u.id", "p.user_id") 148 | .where({ user_id }) 149 | .select("p.id", "p.contents", "u.username"); 150 | } 151 | 152 | module.exports = { 153 | find 154 | }; 155 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/generateToke.md: -------------------------------------------------------------------------------- 1 | # Function to generate a token 2 | 3 | ```javascript 4 | const jwt = require("jsonwebtoken"); 5 | const secrets = require("../config/secrets"); 6 | 7 | function generateToken(user) { 8 | return jwt.sign( 9 | { 10 | userId: user.id 11 | }, 12 | secrets.jwt, 13 | { 14 | expiresIn: "1h" 15 | } //1h = 1 hour 16 | ); 17 | } 18 | ``` 19 | 20 | # Example adding permissions to a user 21 | 22 | ```javascript 23 | function generateToken(user) { 24 | return jwt.sign( 25 | { 26 | userId: user.id, 27 | userRole: "student" 28 | }, 29 | "super secret string", 30 | { 31 | expiresIn: "1h" 32 | } //1h = 1 hour 33 | ); 34 | } 35 | ``` 36 | 37 | # Example of use in a login router 38 | 39 | ```javascript 40 | router.post("/login", (req, res) => { 41 | let { username, password } = req.body; 42 | 43 | Users.findBy({ username }) 44 | .first() 45 | .then(user => { 46 | if (user && bcrypt.compareSync(password, user.password)) { 47 | generateToken(user); 48 | res.status(200).json({ 49 | message: `Welcome ${user.username}`, 50 | authToken: generateToken 
51 | }); 52 | } else { 53 | res.status(401).json({ message: "Invalid Credentials" }); 54 | } 55 | }) 56 | .catch(error => { 57 | res.status(500).json(error); 58 | }); 59 | }); 60 | ``` 61 | 62 | # Returning token on "/register" (no login after register) 63 | 64 | ```javascript 65 | router.post("/register", (req, res) => { 66 | let user = req.body; 67 | const hash = bcrypt.hashSync(user.password, 10); // 2 ^ n 68 | user.password = hash; 69 | 70 | Users.add(user) 71 | .then(saved => { 72 | const token = generateToken(saved); 73 | res.status(201).json({ 74 | message: `Welcome ${user.username}`, 75 | authToken: token 76 | }); 77 | }) 78 | .catch(error => { 79 | res.status(500).json(error); 80 | }); 81 | }); 82 | ``` 83 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/middleware.md: -------------------------------------------------------------------------------- 1 | # Create Validation Middleware 2 | 3 | Create a directory called middlewares. For each of the following middlewares, create a file for them and export. 
4 | 5 | ## To restrict to and agent (for example: Postman) 6 | 7 | ```javascript 8 | module.exports = requiredAgent => (req, res, next) => { 9 | const userAgent = req.get("User-Agent"); 10 | // const formatted = requiredAgent.toLowercase(); 11 | // console.log(formatted); 12 | console.log(userAgent, requiredAgent); 13 | if (!userAgent.includes(requiredAgent)) { 14 | return res.status(500).json({ message: `Must be using ${requiredAgent}` }); 15 | } 16 | 17 | next(); 18 | }; 19 | ``` 20 | 21 | ## To validate id 22 | 23 | ```javascript 24 | const db = require("../accounts/accounts-router"); 25 | 26 | async function validateId(req, res, next) { 27 | try { 28 | const account = await db("accounts") 29 | .where("id", req.params.id) 30 | .first(); 31 | res.json(account); 32 | } catch (err) { 33 | next(`No account found with the id of ${req.params.id}`); 34 | } 35 | } 36 | 37 | module.exports = validateId; 38 | ``` 39 | 40 | ## To validate that a required value was passed 41 | 42 | ```javascript 43 | module.exports = prop => (req, res, next) => { 44 | req.body[prop] 45 | ? next() 46 | : res.status(400).json({ errorMessage: `${prop} is required.` }); 47 | }; 48 | ``` 49 | 50 | Would be required like: 51 | 52 | ```javascript 53 | const validator = require("../middlewares/validator"); 54 | and used like: 55 | router.post("/", validator("name"), validator("budget"), async (req, res, next) =>... 
56 | ``` 57 | 58 | ## Restricted 59 | 60 | ```javascript 61 | const secrets = require("../config/secrets"); 62 | const jwt = require("jsonwebtoken"); 63 | 64 | module.exports = (req, res, next) => { 65 | const token = req.headers.authorization; 66 | 67 | if (token) { 68 | jwt.verify(token, secrets.jwt, (err, payload) => { 69 | if (err) { 70 | res.status(403).json({ message: "You are not authorized" }); 71 | } else { 72 | req.userId = payload.userId; 73 | next(); 74 | } 75 | }); 76 | } else { 77 | res.status(400).json({ message: "No credentials provided" }); 78 | } 79 | }; 80 | ``` 81 | 82 | ## Restricted with Persmissions 83 | 84 | ```javascript 85 | const secrets = require("../config/secrets"); 86 | const jwt = require("jsonwebtoken"); 87 | 88 | module.exports = role => { 89 | return (req, res, next) => { 90 | const token = req.headers.authorization; 91 | 92 | if (token) { 93 | jwt.verify(token, secrets.jwt, (err, payload) => { 94 | if (err) { 95 | res.status(403).json({ message: "You are not authorized" }); 96 | } else { 97 | if (role !== payload.userRole) { 98 | res.status(403).json({ 99 | message: "You are not do not have permission for this endpoint." 100 | }); 101 | } else { 102 | req.userId = payload.userId; 103 | next(); 104 | } 105 | } 106 | }); 107 | } else { 108 | res.status(400).json({ message: "No credentials provided" }); 109 | } 110 | }; 111 | }; 112 | 113 | //another example 114 | const secrets = require("../config/secrets"); 115 | const jwt = require("jsonwebtoken"); 116 | 117 | module.exports = department => { 118 | return (req, res, next) => { 119 | const token = req.headers.authorization; 120 | 121 | if (token) { 122 | jwt.verify(token, secrets.jwt, (err, payload) => { 123 | if (err) { 124 | res.status(403).json({ message: "You are not authorized" }); 125 | } else { 126 | if (department !== payload.userDepartment) { 127 | res.status(403).json({ 128 | message: "You are not do not have permission for this endpoint." 
129 | }); 130 | } else { 131 | req.userDepartment = payload.userDepartment; 132 | next(); 133 | } 134 | } 135 | }); 136 | } else { 137 | res.status(400).json({ message: "No credentials provided" }); 138 | } 139 | }; 140 | }; 141 | ``` 142 | -------------------------------------------------------------------------------- /Express-Node-SQLite3/notes-on-security.md: -------------------------------------------------------------------------------- 1 | # Authentication Notes 2 | 3 | ## [12 Best Practices For User Account, Authorization and Password Management](https://cloud.google.com/blog/products/gcp/12-best-practices-for-user-account) By Ian Maddox 4 | 5 | Authentication/AuthN is when our website verifies the identity of the user 6 | Authorization/AuthZ is when our servers knows who you are, and is determining whether you have permission to access the requested resources. 7 | 8 | # Core Principles for Authentication 9 | 10 | - requiring strong passwords 11 | - properly storing passwords 12 | - preventing brute force attacks 13 | 14 | ## hashing 15 | 16 | hash("hello world") == "5fauidhay4t89qy4t8qn-4q-84tq4394ugis" 17 | hashing is not encryption, it is a one way, irreversible process. Think of it like a finger print for a small string of data. It's unique. 18 | 19 | ## examples of hashers (probably not spelled correctly) 20 | 21 | MD5 22 | shaw1 23 | shaw256 24 | bcrypt 25 | argon 26 | 27 | Red Flag - when an app tries to limit the length of your password. They are likely not hashing. No matter the length of the string, the hash is the same length. They are probably storing it encrypted (bad because it is reversable, if hackers get the secret key they can reverse the encryption) or as plain text. 28 | 29 | ## Brute Forcing 30 | 31 | Attackers attempt to guess a password over and over and over again. They use programs that can do this millions of guesses per seconds. 32 | 33 | Hashing alone isn't enough. Hackers create what is known as a Rainbow Table. 
It is essentially a table with every possible password hashed with the same algorthim the server is using. (there are many other kinds of brute force attacks) 34 | 35 | One of the things we can do is to try to intentionally slow down our code. We can introduce a `time complexity`. Essentially hash-ception. This makes the Rainbow Tables take so long, the become unworth the trouble. 36 | 37 | 100,000,000 hashes at 2 milliseconds per hash = 55 hours 38 | 100,000,000 hashes at 2 seconds per hash = 6 years 39 | 40 | ## Key Derivation Function 41 | 42 | Hash + Time = New Hash 43 | -------------------------------------------------------------------------------- /Postgres/001-types-of-databases.md: -------------------------------------------------------------------------------- 1 | # Types of Databases Postgress Creates 2 | 3 | When you set up your postgres database, 3 databases were created: 4 | 1. postgres 5 | - The default database that is created when you setup postgres (initdb). The default user you get with a postgres installation is `postgres` which creates a database called postgres. Postgres will assume a connection to a database with the same name as the user if no database is supplied. 6 | 2. template0 7 | - This is the template that is used to create template1 - do NOT change it. It's a backup for template1. 8 | 3. template1 9 | - Used to create new databases. Any changes made to template1 will be applied to all new databases. When you run the create database command, it will use template1 unless you specify differently. Because template1 is the default template, if it is being accessed or connected to, no new databses can be created until the connection is closed. 
10 | 11 | 12 | -------------------------------------------------------------------------------- /Postgres/002-database-organization-aka-schemas.md: -------------------------------------------------------------------------------- 1 | # Database Organization 2 | 3 | Schemas allow us orgainze our tables, views, indexes, etc. 4 | 5 | Each database gets a public schema by default. 6 | 7 | Unless you specify a schema, the default is assumed to be public so: 8 | 9 | ```sql 10 | SELECT * FROM employees 11 | -- is the same as 12 | SELECT * FROM public.employees 13 | ``` 14 | 15 | There is a command to show all schemas: 16 | (when in postgres land in the terminal) 17 | ```node 18 | postgres=# \dn; 19 | // results in: 20 | 21 | List of schemas 22 | Name | Owner 23 | --------+---------- 24 | public | postgres 25 | (1 row) 26 | ``` 27 | where postgres is the database we are currently in. 28 | 29 | Using schemas: 30 | - allows many users to use one database without intefering with each other 31 | - different schemas can have tables with the same name, which wouldn't be possible with only one schema 32 | - allows us to organize database ojects into logical groups to make them more manageable 33 | - allows third party applications to be put into separate schemas so they do not collide with the name of other objects -------------------------------------------------------------------------------- /Postgres/003-roles.md: -------------------------------------------------------------------------------- 1 | # Roles 2 | 3 | Roles are all about security! Roles determine what someone is allowed or not allowed to do. 4 | 5 | A role can be an individual user, or a group of individuals. 6 | 7 | Roles have the ability to grant membership to another role, depending on how the role is set up. 
8 | 9 | Roles have attributes and privileges 10 | - Attributes define privileges, but there are privileges that aren't attributes 11 | - you can have an attribute of super user that gives you the privilege to do whatever you like, but you can give individual privileges that have nothing to do with an attribute 12 | 13 | ## Role Attribute and Creation 14 | 15 | When a role is created, it is given certain attributes, which can determine the privileges of that role. 16 | 17 | Attributes can be things like whether a user can create a db, create a role, whether they are a superuser or whether they can login. 18 | 19 | Login privilege - a role with the LOGIN attribute ccan be considered the same thing as a "database user" 20 | 21 | Superuser status - superusers bypass all permission checks 22 | 23 | Database creation - a role must be given explicit permission to create databases 24 | 25 | Role creation - a role must be given explicit permission to create more roles 26 | 27 | Passord - only signifcant if given LOGIN privilege 28 | 29 | **ALWAYS ENCRYPT WHEN STORING A ROLE THAT CAN LOGIN** 30 | ```node 31 | CREATE ROLE readonly WITH LOGIN ENCRYPTED PASSWORD 'readonly'; 32 | 33 | // we are creating a role called `readonly` and we are giving it LOGIN ability with an encrypted password of "readonly" 34 | ``` 35 | 36 | To view roles that are available to us, use `\du` (in the command line when in postgres land) 37 | 38 | By default, only the creator of the database or a superuser has access to its objects, anyone else needs to be granted access. 
-------------------------------------------------------------------------------- /Postgres/004-users-and-login-config.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreedomWriter/node-boilerplate/5c65b197de16378b4c0f0eb1f2872b5ca09459dd/Postgres/004-users-and-login-config.md -------------------------------------------------------------------------------- /Postgres/01-create-a-database.md: -------------------------------------------------------------------------------- 1 | # Creating a database 2 | 3 | In the command line, get into postgres: 4 | ```node 5 | psql -U 6 | ``` 7 | -U indicates I'm going to give you a user 8 | 9 | Create a database with default options: 10 | 11 | ```node 12 | CREATE DATABASE ; -- semi colon not optional! 13 | ``` 14 | To specify different options, we use the `WITH` command: 15 | 16 | ```node 17 | CREATE DATABASE 18 | [ [ WITH ] [ OWNER [=] user_name -- default is the current user] 19 | [ TEMPLATE [=] template ] -- default is template1 20 | [ ENCODING [=] encoding ] -- default is UTF8 21 | [ LC_COLLATE [=] lc_collate ] 22 | [ LC_CTYPE [=] lc_type ] 23 | [ TABLESPACE [=] tablespace ] 24 | [ CONNECTION LIMIT [=] connlimit] -- default is 100 ] 25 | ``` -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | My notes on the steps I take to create a Web API Server and database using: 2 | 3 | - express 4 | - cors 5 | - morgan 6 | - helmet 7 | - sqlite3 8 | - dotenv 9 | - knex 10 | - bcryptjs 11 | - jsonwebtoken 12 | 13 | With additional notes on Sessions using 14 | 15 | - express-session 16 | 17 | I'm a noob so if you see something I missed or something I should be looking deeper into, let me know! 18 | 19 | UPDATE: Adding notes on a deeper dive into SQL and broad level database management. These can be found in the SQL directory. 
20 | -------------------------------------------------------------------------------- /SQL/000-common-SELECT- mistakes.md: -------------------------------------------------------------------------------- 1 | # Common SELECT Mistakes 2 | 3 | - misspelling commands 4 | - using `;` or `,` incorrectly 5 | - `;` is a terminates the statement in SQL 6 | - using `"` instead of `'` 7 | - single quotes are for writing text in sql 8 | - double quotes are for tables in sql 9 | - invalid column names 10 | 11 | ## Misc 12 | -------------------------------------------------------------------------------- /SQL/1-What-is-a-database.md: -------------------------------------------------------------------------------- 1 | # What is a database? 2 | 3 | tl;dr: 4 | 5 | A database is an organized collection of data, generally stored and accessed electronically from a computer system 6 | 7 | ## Before databases there was the File Processing System. 8 | 9 | Essentially resources were stored in files and had no connection to each other. In the case of an order processing system, instead of tables for each resource, there would be a file that existed for each resource. None of those files stored references to other files, so if something changed in one file, it would not be reflected in the rest of the system. Every one was building their own systems - no standardization. This meant every system was specific to a particular data, resulting in a user needing to learn a new system each time then needed to use a new FPS. Systems couldn't talk to each other, data was stored redundantly, it was difficult! 10 | 11 | ## Dabase Management System 12 | 13 | From [techopedia]("https://www.techopedia.com/definition/24361/database-management-systems-dbms") 14 | 15 | A database management system (DBMS) is a software package designed to define, manipulate, retrieve and manage data in a database. A DBMS generally manipulates the data itself, the data format, field names, record structure and file structure. 
It also defines rules to validate and manipulate this data. 16 | 17 | Database management systems are set up on specific data handling concepts, as the practice of administrating a database evolves. The earliest databases only handled individual single pieces of specially formatted data. Today’s more evolved systems can handle different kinds of less formatted data and tie them together in more elaborate ways. 18 | 19 | ### Relation Database Management System (RDBMS) 20 | 21 | A subset of DBMS. 22 | 23 | The most common type of databases 24 | 25 | ## Database Model 26 | 27 | A way to orgainze and store data 28 | 29 | Some examples include: 30 | 31 | - Hierarchical 32 | - Networking 33 | - Entity-Relationship 34 | - Relaional 35 | - Object Oriented 36 | - Flat 37 | - Semi-Structured 38 | - And the list goes on... 39 | 40 | ## Relational Model 41 | 42 | From [wiki](<"https://en.wikipedia.org/wiki/Relational_model#:~:text=The%20relational%20model%20(RM)%20for,of%20tuples%2C%20grouped%20into%20relations.">) 43 | 44 | The relational model (RM) for database management is an approach to managing data using a structure and language consistent with first-order predicate logic, first described in 1969 by English computer scientist Edgar F. Codd,[1][2] where all data is represented in terms of tuples, grouped into relations. A database organized in terms of the relational model is a relational database. 45 | 46 | The table structure was introduced with the relational model. 
47 | -------------------------------------------------------------------------------- /SQL/10-comparison-operators.md: -------------------------------------------------------------------------------- 1 | # Comparison Operators 2 | 3 | We have access to the same comparison operators we are familiar with: 4 | 5 | - 1 < 2 // true 6 | - 1 > 2 // false 7 | - 1 <= 2 // true 8 | - 1 >= 2 // false 9 | - 1 = 2 // false \*note we have a single = to denote equality 10 | - 1 != 2 // true 11 | 12 | We also have another way to denote `not equal`: 13 | 14 | - 1 <> 2 // true 15 | 16 | These comparison operators are better used for numbers vs text. Text comparisons using these operators can result in some unexpected results. 17 | 18 | For ex: 19 | 20 | ```sql 21 | /* 22 | Is `a` greater than `a`? -- no 23 | Is 'b' greater than `c`? -- this is processed alphabetically and because b` comes before `c`, it is considered `greater than` in SQL 24 | */ 25 | 'abc' > 'ace' -- true 26 | ``` 27 | -------------------------------------------------------------------------------- /SQL/11-operator-precedence.md: -------------------------------------------------------------------------------- 1 | # Order of operations for a query 2 | 3 | Before diving into operator precedence, let's quickly think about the order a query is executed. 4 | 5 | FROM -- Which table are we dealing with?
6 | WHERE -- What conditions should be met?
7 | SELECT -- What fields should be returned?
8 | 9 | ## Operator Precedence 10 | 11 | A statement that has muliple operators, is evaluated based on the priority of the operators. That priority at a high level looks like this, starting from the first thing to be evaluated (`highest priority`) to the last thing to be evalutate (`lowest priority`) 12 | 13 | - Parentheses 14 | - Multiplication/Division 15 | - Subraction/Addition 16 | - NOT 17 | - AND 18 | - OR 19 | 20 | If operators have `equal priority`, (suppose there are multiple AND conditions) the operators will be evaluated directionally, from left to right or right to left. There are specific rules dictating when the direction will be from right to left or left to right. 21 | 22 | The Postgres [docs]("https://www.postgresql.org/docs/7.2/sql-precedence.html") have a table that lists the associativity, which is another way of saying the direction it will be read in. 23 | 24 | AND and OR both have left associativity. This means that the following: 25 | 26 | ```sql 27 | SELECT state, gender FROM customers 28 | WHERE gender = 'F' 29 | AND state = 'OR' 30 | OR state = 'NY' 31 | ``` 32 | 33 | would not return the results we might expect it do which is a list of states and gender from all of the females living in New York or Oregon, 34 | 35 | What we have actually asked for is state and gender from customers who are female from Oregon, or from New York (regardless of gender). 36 | 37 | There are a couple of ways to fix this. We could add parenthesis: 38 | 39 | ```sql 40 | SELECT state, gender FROM customers 41 | WHERE gender = 'F' 42 | AND (state = 'OR' 43 | OR state = 'NY') 44 | ``` 45 | 46 | Or we can be more verbose: 47 | 48 | ```sql 49 | SELECT state, gender FROM customers 50 | WHERE gender = 'F' 51 | AND state = 'OR' 52 | OR state = 'NY' 53 | AND gender = 'F' 54 | ``` 55 | 56 | However the more verbose method is ineficient in comparison to using parenthesis. The verbose method will first look for all the females who live in Oregon. 
Then it would look for all of the females who live in New York, but it would look at the females who live in Oregon again, just to make sure they arent' New Yorkers, and of course, we would look at both the male and female records again. 57 | 58 | When we use the parenthesis, we are first selecting only the records from Oregon or New York, and then looking for females from that subset. 59 | 60 | Another example: 61 | 62 | ```sql 63 | SELECT state, gender, salary, state 64 | FROM customers 65 | WHERE age > 20 66 | AND salary > 1000 67 | AND gender = 'F' 68 | AND NOT state = 'NY' 69 | ``` 70 | 71 | 1. NOT has the highest priority, so `AND NOT state = 'NY"` is going to be the first thing executed. NOT reads to the right of itself 72 | 73 | AND reads to the left of itself so it will "chain up" 74 | 75 | 2. `age > 20` 76 | 3. `salary > 1000` 77 | 4. `gender = 'F' 78 | 79 | One more: 80 | 81 | ```sql 82 | SELECT state, gender, salary, state 83 | FROM customers 84 | WHERE age > 20 85 | OR age < 30 86 | AND salary > 1000 87 | AND gender = 'F' 88 | AND NOT state = 'NY' 89 | AND NOT state = 'OR' 90 | ``` 91 | 92 | We will have 2 filters happening: 93 | 94 | Filter 1: 95 | 96 | 1. Younger than 30 97 | 2. Not from New York 98 | 3. Not from Oregon 99 | 4. Salary greater than 100 100 | 101 | Filter 2: 102 | 103 | 1. Older than 20 104 | 105 | This would not give us a very useful data set. 
We would have nicely filtered data from the first filter, but the second filter would essentially undo all of that by dumping anyone older than 20 into our results 106 | 107 | A more complex example: 108 | 109 | ```sql 110 | SELECT state, gender, salary, state 111 | FROM customers 112 | WHERE 113 | ( 114 | salary > 10000 AND state = 'NY' 115 | OR ( 116 | (age > 20 AND age < 30) 117 | AND salary <= 20000 118 | ) 119 | ) 120 | AND gender = 'F' 121 | ``` 122 | 123 | This example says, give me anyone lives in New York and make more than 10000 or are older than 20 and younger then 30 and make more than or equal to 20000, once those conditions are met, give me the results that are female. 124 | -------------------------------------------------------------------------------- /SQL/2-tables-columns-rows.md: -------------------------------------------------------------------------------- 1 | # Tables, columns, and rows 2 | 3 | ## What is a table? 4 | 5 | - A table is a represntation of an object or resource. 6 | 7 | - Each table has a name that relates to the data being stored. 8 | 9 | - Each table has columns and rows. 10 | 11 | - These rows and columns form tables much in the way that spreadsheets have columns and rows that belong to them. In a relational model tables are more nuanced than a spreadsheet, but it can be helpful to think of them this way. 12 | 13 | ## What are columns aka Attributes? 14 | 15 | - Each column represents a specific type of data. 16 | 17 | - Each table should have an id column. 18 | 19 | - Each column has a header that describes the data it is concerned with. 20 | 21 | - Each column is unique. 22 | 23 | - A collection of columns is called the `Degree` of the relation. 24 | 25 | - Given a table with the columns: id, lastName, firstName, dob we could say "The degree of the relation is id, firstName, lastName, dob" 26 | 27 | - When we talk about what a column can store, we call that the `domain` or the `constraint`. 
So we can say in dob, only store dates - that is the contraint/domain. 28 | 29 | - We might say my table has < attributes > with < constraints >. 30 | 31 | ## What is a row aka tuple? 32 | 33 | - Rows represent an entry in a table. 34 | 35 | - When we start inserting data, we are creating rows. 36 | 37 | - Rows will include entries for columns that belong to a single entry. 38 | 39 | - Each row must follow the column contraints 40 | 41 | - When we talk about all of the rows (data) in a table, we call that the cardinality. 42 | -------------------------------------------------------------------------------- /SQL/3-primary-and-foreing-keys.md: -------------------------------------------------------------------------------- 1 | # Primary and foreign keys 2 | 3 | We need to be able to link our tables, we use primary and foreign keys 4 | 5 | Each table should have an id column. The id needs to be unique. The `id` is the `Primary Key` that `uniquely` identifies each `row`. 6 | 7 | Once we have a Primary key, we can reference data in a given table A to table B by using the Primary key of table A as a foreign key of table B. 8 | 9 | This is done by injecting a new column into table B whose sole purpose is to manage the relationship between the two tables. The Primary Key from table a is now the foreign key in table B. 
10 | -------------------------------------------------------------------------------- /SQL/4-importing-data.md: -------------------------------------------------------------------------------- 1 | # Importing Data 2 | 3 | In the command line: 4 | 5 | ```node 6 | psql -h localhost -U postgres -d DatabaseName < filetoimport.sql 7 | ``` 8 | 9 | DatabaseName is to be replaced with the name of your database 10 | 11 | filetoimport.sql should be replaced with the file you are looking to import 12 | -------------------------------------------------------------------------------- /SQL/5-renaming-columns.md: -------------------------------------------------------------------------------- 1 | # Renaming Columns 2 | 3 | ```sql 4 | SELECT column as ' 5 | ``` 6 | 7 | Simply by selecting the column name and declaring a new name enclosed in quotes, we are able to rename a column. This allows us to present our data in a more friendly, human readable form. 8 | 9 | ```sql 10 | SELECT emp_no AS "Employe #", birth_date AS "Birthday", first_name AS "First name" 11 | FROM employees 12 | ``` 13 | 14 | Old Column Name --> New Column Name 15 | 16 | `emp_no` --> `Employee #` 17 | 18 | `birth_date` --> `Birthday` 19 | 20 | `first_name` --> `First name` 21 | 22 | NOTE: This does not alter the tables, only the way the data is returned from the query. 23 | -------------------------------------------------------------------------------- /SQL/6-column-concatenation.md: -------------------------------------------------------------------------------- 1 | # Column Concatenation 2 | 3 | Sometimes, when we present our data, we want to combine multiple columns into a single column. This can be acheived using the `Concat function`. 4 | 5 | NOTE: When we are refering text, we use single quotes, as we did in the concat function to include a space between the two columns. Double quotes are for selecting a column. 
6 | 7 | ```sql 8 | SELECT Concat(emp_no, ' ', title) AS "Employee Title" 9 | FROM "public"."titles" 10 | ``` 11 | 12 | This will return a single column named `Employee Title` with the value of `10001 Senior Engineer` which is a combination of the emp_no: 10001 and title: Senior Engineer, with a single space between them. 13 | 14 | Concat is a `Scalar` function, which does a "forEach" and operates on each record independantly 15 | -------------------------------------------------------------------------------- /SQL/7-aggregate-functions.md: -------------------------------------------------------------------------------- 1 | # Aggregate functions 2 | 3 | In database management an aggregate function is a function where the values of multiple rows are grouped together as input on certain criteria to form a single value of more significant meaning. 4 | 5 | Some common examples: 6 | 7 | - AVG() -- calculates the average of a set of values. 8 | - COUNT() -- counts rows in a specified table or view. 9 | - MIN() -- gets the minimum value in a set of values. 10 | - MAX() -- gets the maximum value in a set of values. 11 | - SUM() -- calculates the sum of values. 12 | 13 | ## Examples 14 | 15 | ## COUNT 16 | 17 | ```sql 18 | SELECT count(emp_no) 19 | from employees 20 | ``` 21 | 22 | In this case it didn't matter which field we selected to count, we just needed something that would be present in each row. 23 | 24 | ## MIN 25 | 26 | ```sql 27 | SELECT min(emp_no) 28 | from employees 29 | ``` 30 | 31 | This would give us the smallest employee number. 32 | 33 | ## MAX 34 | 35 | ```sql 36 | SELECT max(emp_no) 37 | from employees 38 | ``` 39 | 40 | This would give us the largest employee number. 41 | 42 | ## SUM 43 | 44 | ```sql 45 | SELECT max(emp_no) 46 | from employees 47 | ``` 48 | 49 | This would give us a sum of all of the employee numbers. 
50 | 51 | ## AVERAGE 52 | 53 | ```sql 54 | SELECT avg(emp_no) 55 | from employees 56 | ``` 57 | 58 | This would give us an average of all of the employee numbers. 59 | -------------------------------------------------------------------------------- /SQL/8-comments.md: -------------------------------------------------------------------------------- 1 | # Comments 2 | 3 | ```sql 4 | -- Comment all the things 5 | -- We can use single line comments 6 | 7 | /* 8 | Multi line COMMENTS 9 | are also a thing 10 | */ 11 | 12 | -- Don't forget the single quote must be used to get the space between the fields 13 | select concat(first_name, ' ', last_name) 14 | -- The alias for the concated fields needs to be before the FROM statement 15 | as "Full Name" 16 | from employees 17 | Where first_name = 'Mayumi' 18 | and last_name = 'Schueller' -- filter on Mayumi Schuller 19 | ``` 20 | 21 | Your code does NOT explain itself. 22 | -------------------------------------------------------------------------------- /SQL/9-AND-OR-NOT-keywords.md: -------------------------------------------------------------------------------- 1 | # The AND, OR, and NOT keywords 2 | 3 | Get all female customers who live in Oregon or New York: 4 | 5 | ## AND 6 | 7 | Using `AND` means that both conditions must be met, in order for a row to be included in the returned data. 8 | 9 | So given: 10 | 11 | ```sql 12 | select count(customerid) 13 | from customers 14 | where gender = 'F' 15 | AND state = 'OR' 16 | ``` 17 | 18 | We will get a count of all of the customerid's in the customer table where the gender is 'F' and the state is 'OR'. If a customer is male, or lives in a state beides Oregon, their record will not be included in our total count. 19 | 20 | So we could say, if `both` boolean expressions are `true`, the results will be returned. 21 | 22 | ## OR 23 | 24 | Using `OR` allows us to select data based on whether the first condition is met or the second condition is met. 
25 | 26 | So given: 27 | 28 | ```sql 29 | select count(customerid) 30 | from customers 31 | where state = 'OR' 32 | OR state = 'NY' 33 | ``` 34 | 35 | We will get a count of all customers who live in either the state of Oregon or New York. 36 | 37 | So we could say if `either` boolean expression is `true` then the results will be returned. 38 | 39 | ## Combining AND and OR 40 | 41 | We can use these two filters together. If we wanted a count of all the customers who were female and lived in either Oregon or New York, our query would look like this: 42 | 43 | ```sql 44 | select count(customerid) 45 | from customers 46 | where gender = 'F' 47 | -- note the parenthesis that wraps the OR statements 48 | AND (state = 'OR' 49 | or state = 'NY') 50 | ``` 51 | 52 | ## NOT 53 | 54 | We can also exlude data using NOT: 55 | 56 | ```sql 57 | select count(customerid) 58 | from customers 59 | where NOT zip = 88654 60 | AND gender = 'F' 61 | AND (state = 'OR' 62 | or state = 'NY') 63 | ``` 64 | 65 | This will return a data set containing all female customers who live in Oregon or New York but not in zip code 88645. 66 | 67 | ## To note: 68 | 69 | There is an order of operations 70 | OR creates a new branch of the filter, the following query will not consider anything before it. 71 | 72 | So we could say, when using `NOT`, if the boolean expression is `false` the data will be returned. 73 | --------------------------------------------------------------------------------