--------------------------------------------------------------------------------
/.vitepress/theme/ToggleDark.vue:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.vitepress/theme/dialect.js:
--------------------------------------------------------------------------------
1 | import { watch, ref, nextTick, inject } from "vue";
2 |
3 | export function createDialect(app) {
4 | const dialect = ref('mysql')
5 |
6 | if (!import.meta.env.SSR) { // browser only: localStorage is unavailable during SSR
7 | watch(dialect, (value) => {
8 | localStorage.setItem("sql-dialect", value);
9 | })
10 | nextTick(() => {
11 | const value = localStorage.getItem("sql-dialect");
12 | if (value) {
13 | dialect.value = value;
14 | }
15 | })
16 | }
17 |
18 | // provide for later inject
19 | app.provide('dialect', dialect)
20 |
21 | // expose $dialect to templates
22 | Object.defineProperty(app.config.globalProperties, '$dialect', {
23 | get() {
24 | return dialect.value
25 | }
26 | })
27 |
28 | return {
29 | dialect
30 | }
31 | }
32 |
33 | export function useDialect() {
34 | const dialect = inject('dialect')
35 |
36 | return {
37 | dialect
38 | }
39 | }
--------------------------------------------------------------------------------
/.vitepress/theme/index.js:
--------------------------------------------------------------------------------
1 | import defaultTheme from "vitepress/theme";
2 | import Layout from "./Layout.vue";
3 | import { createDialect } from "./dialect";
4 | import SqlOutput from "./SqlOutput.vue";
5 | import "./styles.css";
6 |
7 | // @todo: hack, the vite.config.ts define option seems not to work
8 | globalThis.process = globalThis.process || {
9 | env: {}
10 | }
11 |
12 | export default {
13 | Layout,
14 | NotFound: defaultTheme.NotFound,
15 |
16 | enhanceApp({ app }) {
17 | createDialect(app)
18 | app.component("SqlOutput", SqlOutput);
19 | },
20 | };
21 |
--------------------------------------------------------------------------------
/.vitepress/theme/styles.css:
--------------------------------------------------------------------------------
1 | @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;700&family=Source+Code+Pro&display=swap');
2 |
3 | :root {
4 | --c-brand: #d26b38;
5 | --c-white-dark: #f5f5f5;
6 | --c-white-darker: #c5bab5;
7 | --c-brand-light: #ff8144;
8 | --font-family-base: 'Poppins', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif;
9 | --font-family-mono: 'Source Code Pro', source-code-pro, Menlo, Monaco, Consolas, 'Courier New', monospace;
10 |
11 | --code-font-size: 14px;
12 | --code-line-height: 22px;
13 | /* --c-text-light-1: #c5bab5; */
14 | --c-text-light-2: #cd7244;
15 | --c-text-light-3: #d26a38;
16 | --c-text-dark-1: #7f7f7f;
17 | --code-bg-color: #362f2d;
18 | --code-inline-bg-color: rgb(68 52 47 / 5%);
19 | }
20 | html {
21 | font-size: 14px;
22 | }
23 | .dark {
24 | --c-white: #2a2420;
25 | --c-white-dark: #342c27;
26 | --c-white-darker: #201d1c;
27 | --c-black: #ffffff;
28 | --c-text-light-1: #c5bab5;
29 | --c-text-light-2: #dfa486;
30 | --c-text-light-3: #d26a38;
31 | --c-text-dark-1: #9f9f9f;
32 | --code-bg-color: #201d1c;
33 | --code-inline-bg-color: #201d1c;
34 |
35 | color-scheme: dark;
36 | }
37 |
38 | .nav-bar .item {
39 | font-size: 1rem;
40 | }
41 | .nav-bar .nav-bar-title {
42 | font-size: 1.4rem;
43 | }
44 | .sidebar > .sidebar-links > .sidebar-link + .sidebar-link {
45 | padding-top: .3rem;
46 | }
47 | .sidebar > .sidebar-links > .sidebar-link > a.sidebar-link-item {
48 | font-weight: 600;
49 | }
50 | .sidebar > .sidebar-links > .sidebar-link > .sidebar-link-item {
51 | padding: 0.35rem 1.5rem 0.35rem 1.25rem;
52 | }
53 |
54 | a.header-anchor {
55 | float: left;
56 | margin-top: 0.125em;
57 | margin-left: -1.1em;
58 | padding-right: 0.23em;
59 | font-size: 0.85em;
60 | opacity: 0;
61 | }
62 | .custom-block.warning {
63 | border-color: var(--c-brand);
64 | color: #914926;
65 | background-color: rgb(210 106 56 / 15%);
66 | }
67 | .custom-block.warning .custom-block-title {
68 | color: #d66026;
69 | }
70 | .custom-block.info {
71 | background-color: #fbf6f4;
72 | }
73 | .dark .custom-block.info {
74 | background-color: #3a3532;
75 | }
76 | .dark .custom-block.warning {
77 | color: var(--c-text-light-1);
78 | background-color: #462414;
79 | }
80 | .dark .custom-block.warning .custom-block-title {
81 | color: #d66026;
82 | }
83 |
84 | .home-hero {
85 | max-width: 42rem;
86 | margin-left: auto !important;
87 | margin-right: auto !important;
88 | }
89 | .home-hero .image {
90 | animation: spin 5s linear infinite;
91 | animation-play-state: paused;
92 | }
93 | .home-hero:hover .image {
94 | animation-play-state: running;
95 | }
96 | .theme .container {
97 | max-width: 54rem;
98 | }
99 | .container-home {
100 | max-width: 42rem;
101 | margin: 0 auto;
102 | background: var(--c-white-dark);
103 | padding: .5rem 2rem;
104 | border-radius: 6px;
105 | }
106 | @keyframes spin {
107 | from {
108 | transform: rotate(0deg);
109 | }
110 | to {
111 | transform: rotate(360deg);
112 | }
113 | }
114 |
115 | [data-dialect]::before {
116 | content: attr(data-dialect);
117 | }
118 |
119 | .language-sql code {
120 | color: #ccc;
121 | }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # knex.js documentation
2 |
3 | > [!IMPORTANT]
4 | > The documentation has been moved to the [knex](https://github.com/knex/knex) repo under the [`docs`](https://github.com/knex/knex/tree/master/docs) folder
5 |
6 | The VitePress documentation for [http://knexjs.org](http://knexjs.org)
7 |
8 | #### Development:
9 |
10 | ```bash
11 | yarn install # or npm i
12 | yarn dev # or npm run dev
13 | ```
22 |
23 | #### Production:
24 |
25 | ```bash
26 | yarn build # or npm run build
27 | ```
28 |
29 | #### License:
30 |
31 | MIT
32 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@knex/documentation",
3 | "private": true,
4 | "version": "0.1.0",
5 | "description": "Knex Documentation Builder",
6 | "scripts": {
7 | "dev": "vitepress dev .",
8 | "build": "vitepress build .",
9 | "serve": "vitepress serve ."
10 | },
11 | "devDependencies": {
12 | "knex": "^2.4.0",
13 | "typescript": "^4.6.3",
14 | "vitepress": "^0.22.4"
15 | },
16 | "dependencies": {},
17 | "author": {
18 | "name": "Tim Griesser",
19 | "web": "https://github.com/tgriesser"
20 | },
21 | "license": "MIT"
22 | }
23 |
--------------------------------------------------------------------------------
/scripts/deploy-doc.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | # abort on errors
4 | set -e
5 |
6 | # build
7 | npm run build
8 |
9 | # navigate into the build output directory
10 | cd .vitepress/dist
11 |
12 | # if you are deploying to a custom domain
13 | # echo 'www.example.com' > CNAME
14 |
15 | git init
16 | git add -A
17 | git commit -m 'deploy'
18 |
19 | git push -f git@github.com:knex/documentation.git master:gh-pages
20 |
21 | cd -
22 |
--------------------------------------------------------------------------------
/src/faq/index.md:
--------------------------------------------------------------------------------
1 |
2 | # F.A.Q.
3 |
4 | ## How do I help contribute?
5 |
6 | Glad you asked! Pull requests and feature requests, though not always implemented, are a great way to help make Knex even better than it is now. If you're looking for something specific to help out with, there are a number of unit tests that aren't implemented yet; the library can never have too many of those. If you want to submit a fix or feature, take a look at the [Contributing](https://github.com/knex/knex/blob/master/CONTRIBUTING.md) readme on GitHub and go ahead and open a ticket.
7 |
8 | ## How do I debug?
9 |
10 | Knex is beginning to make use of the [debug](https://github.com/visionmedia/debug) module internally, so you can set the `DEBUG` environment variable to `knex:*` to see all debugging, or select individual namespaces with `DEBUG=knex:query,knex:tx` to constrain it a bit.
11 |
12 | If you pass `{debug: true}` as one of the options in your initialize settings, you can see all of the query calls being made. Sometimes you need to dive a bit further into the various calls and see what all is going on behind the scenes. I'd recommend [node-inspector](https://github.com/dannycoates/node-inspector), which allows you to debug code with `debugger` statements like you would in the browser.
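
For example, a sketch of enabling it at initialization (connection details elided):

```js
const knex = require('knex')({
  client: 'pg',
  connection: {/*...*/},
  debug: true // print every query this instance runs
});
```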
13 |
14 | Adding an `unhandledRejection` listener at the start of your application code will catch any errors not otherwise caught in the normal promise chain handlers, which is very helpful in debugging.
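
For example, a minimal sketch of such a listener:

```js
// register this early, before any queries run, so no rejection slips through
process.on('unhandledRejection', (reason) => {
  console.error('Unhandled promise rejection:', reason);
});
```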
15 |
16 | ## How do I run the test suite?
17 |
18 | The test suite looks for an environment variable called `KNEX_TEST` for the path to the database configuration. If you run the following command:
19 |
20 | ```bash
21 | $ export KNEX_TEST='/path/to/your/knex_config.js'
22 | $ npm test
23 | ```
24 |
25 | replacing the example path with the path to your config file, then (provided the config file is valid) the test suite should run properly.
26 |
27 | ## My tests are failing because of a slow DB connection and short test timeouts! How do I extend the test timeouts?
28 |
29 | Sometimes, e.g. when running CI on Travis, the test suite's default timeout of 5 seconds might be too short. In such cases an alternative test timeout value in milliseconds can be specified using the `KNEX_TEST_TIMEOUT` environment variable.
30 |
31 | ```bash
32 | $ export KNEX_TEST_TIMEOUT=30000
33 | $ npm test
34 | ```
35 |
36 | ## I found something broken with Amazon Redshift! Can you help?
37 |
38 | Because there is no testing platform available for Amazon Redshift, be aware that it is included as a dialect but is unsupported. With that said, please file an issue if something is found to be broken that is not noted in the documentation, and we will do our best.
39 |
--------------------------------------------------------------------------------
/src/faq/recipes.md:
--------------------------------------------------------------------------------
1 | # Recipes
2 |
3 | ## Using a non-standard database that is compatible with the PostgreSQL wire protocol (such as CockroachDB)
4 |
5 | Specify the PostgreSQL version that the database you are using is protocol-compatible with, using the `version` option, e.g.:
6 |
7 | ```js
8 | const knex = require('knex')({
9 | client: 'pg',
10 | version: '7.2',
11 | connection: {
12 | host: '127.0.0.1',
13 | user: 'your_database_user',
14 | password: 'your_database_password',
15 | database: 'myapp_test'
16 | }
17 | });
18 | ```
19 |
20 | Note that the value of the `version` option should be not the version of the database that you are using, but the version of PostgreSQL that most closely matches the functionality of that database. If not provided by the database vendor, try using '7.2' as a baseline and keep increasing (within the range of existing PostgreSQL versions) until it starts (or stops) working.
21 |
22 | There are also known incompatibilities with migrations for databases that do not support `SELECT ... FOR UPDATE`. See https://github.com/tgriesser/knex/issues/2002 for a workaround.
23 |
24 | ## Connecting to MSSQL on Azure SQL Database
25 |
26 | `{encrypt: true}` should be included in the `options` branch of the connection configuration:
27 |
28 |
29 | ```js
30 | knex({
31 | client : 'mssql',
32 | connection: {
33 | database: 'mydatabase',
34 | server: 'myserver.database.windows.net',
35 | user: 'myuser',
36 | password: 'mypass',
37 | port: 1433,
38 | connectionTimeout: 30000,
39 | options: {
40 | encrypt: true
41 | }
42 | }
43 | });
44 | ```
45 |
46 | [See all of node-mssql's connection options](https://github.com/tediousjs/node-mssql#configuration-1)
47 |
48 | ## Adding a full-text index for PostgreSQL
49 |
50 | ```js
51 | exports.up = (knex) => {
52 | return knex.schema.createTable('foo', (table) => {
53 | table.increments('id');
54 | table.specificType('fulltext', 'tsvector');
55 | table.index('fulltext', null, 'gin');
56 | })
57 | };
58 | ```
59 |
60 | ## DB access using SQLite and SQLCipher
61 |
62 | After you build the SQLCipher source and the npm SQLite3 package, and encrypt your DB (look elsewhere for these things), then any time you open your database you need to provide your encryption key using the SQL statement:
63 |
64 | ```sql
65 | PRAGMA KEY = 'secret'
66 | ```
67 |
68 | This PRAGMA is documented more completely on the SQLCipher site. When working with Knex, this is best done when opening the DB, via the following:
69 |
70 | ```js
71 | const myDBConfig = {
72 | client: "sqlite3",
73 | connection: {
74 | filename: "myEncryptedSQLiteDbFile.db"
75 | },
76 | pool: {
77 | afterCreate: function(conn, done) {
78 | conn.run("PRAGMA KEY = 'secret'");
79 | done();
80 | }
81 | }
82 | };
83 | const knex = require('knex')(myDBConfig);
84 | ```
85 |
86 | Of course embedding the key value in your code is a poor security practice. Instead, retrieve the 'secret' from elsewhere.
87 |
88 | The key thing to note here is the `afterCreate` function. It is documented on the knexjs.org site, but is not in the table of contents at this time, so do a browser find when on the site to get to it. It allows auto-updating DB settings when creating any new pool connections (of which there will only ever be one per file for Knex-SQLite).
89 |
90 | If you don't use the `afterCreate` configuration, then you will need to run a `knex.raw` statement with each and every SQL statement you execute, something like the following:
91 |
92 | ```js
93 | return knex.raw("PRAGMA KEY = 'secret'")
94 | .then(() => knex('some_table')
95 | .select()
96 | .on('query-error', function(ex, obj) {
97 | console.log(
98 | "KNEX select from some_table ERR ex:",
99 | ex,
100 | "obj:",
101 | obj
102 | );
103 | })
104 | );
105 | ```
106 |
107 | ## Maintaining changelog for seeds (version >= 0.16.0-next1)
108 |
109 | If you would like to use the Knex.js changelog functionality to ensure your environments are only seeded once, but don't want to mix seed files with migration files, you can specify multiple directories as a source for your migrations:
110 |
111 | ```ts
112 | await knex.migrate.latest({
113 | directory: [
114 | 'src/services/orders/database/migrations',
115 | 'src/services/orders/database/seeds'
116 | ],
117 | sortDirsSeparately: true,
118 | tableName: 'orders_migrations',
119 | schemaName: 'orders',
120 | })
121 | ```
122 |
123 | ## Using explicit transaction management together with async code
124 |
125 | ```ts
126 | await knex.transaction(trx => {
127 | async function stuff() {
128 | trx.rollback(new Error('Foo'));
129 | };
130 | stuff()
131 | .then(() => {
132 | // do something
133 | });
134 | });
135 | ```
136 |
137 | Or alternatively:
138 |
139 | ```ts
140 | try {
141 | await knex.transaction(trx => {
142 | async function stuff() {
143 | trx.rollback(new Error('always explicit rollback this time'));
144 | }
145 | stuff();
146 | });
147 | // transaction was committed
148 | } catch (err) {
149 | // transaction was rolled back
150 | }
151 | ```
152 | (note that the promise for `knex.transaction` resolves after the transaction is rolled back or committed)
153 |
154 | ## Using parentheses with AND operator
155 |
156 | In order to generate a query along the lines of
157 |
158 | ```sql
159 | SELECT "firstName", "lastName", "status"
160 | FROM "userInfo"
161 | WHERE "status" = 'active'
162 | AND ("firstName" ILIKE '%Ali%' OR "lastName" ILIKE '%Ali%');
163 | ```
164 |
165 | you need to use the following approach:
166 |
167 | ```js
168 | queryBuilder
169 | .where('status', status.uuid)
170 | .andWhere((qB) => qB
171 | .where('firstName', 'ilike', `%${q}%`)
172 | .orWhere('lastName', 'ilike', `%${q}%`)
173 | )
174 | ```
175 |
176 | ## Calling an Oracle stored procedure with bindout variables
177 |
178 | How to call and retrieve output from an Oracle stored procedure:
179 |
180 | ```ts
181 | const oracle = require('oracledb');
182 | const bindVars = {
183 | input_var1: 6,
184 | input_var2: 7,
185 | output_var: {
186 | dir: oracle.BIND_OUT
187 | },
188 | output_message: {
189 | dir: oracle.BIND_OUT
190 | }
191 | };
192 |
193 | const sp = 'BEGIN MULTIPLY_STORED_PROCEDURE(:input_var1, :input_var2, :output_var, :output_message); END;';
194 | const results = await knex.raw(sp, bindVars);
195 | console.log(results[0]); // 42
196 | console.log(results[1]); // 6 * 7 is the answer to life
197 | ```
198 |
199 | ## Node instance doesn't stop after using knex
200 |
201 | Make sure to destroy the knex instance after execution to avoid the Node process hanging due to open connections:
202 |
203 | ```js
204 | async function migrate() {
205 | try {
206 | await knex.migrate.latest({/**config**/})
207 | } catch (e) {
208 | process.exit(1)
209 | } finally {
210 | try {
211 | await knex.destroy()
212 | } catch (e) {
213 | // ignore
214 | }
215 | }
216 | }
217 |
218 | migrate()
219 | ```
220 |
221 | ## Manually Closing Streams
222 |
223 | When using Knex's [stream interface](/guide/interfaces#streams), you can typically just `pipe` the return stream to any writable stream. However, with [`HTTPIncomingMessage`](http://nodejs.org/api/http.html#http_http_incomingmessage), you'll need to take special care to handle aborted requests.
224 |
225 | An `HTTPIncomingMessage` object is typically called `request`. This is the first argument in `'request'` events emitted on `http.Server` instances. [Express's `req`](http://expressjs.com/4x/api.html#request) implements a compatible interface and Hapi exposes this object on [its request objects](http://hapijs.com/api#request-object) as `request.raw.req`.
226 |
227 | You need to explicitly handle the case where an `HTTPIncomingMessage` is closed prematurely when streaming from a database with Knex. The easiest way to cause this is:
228 |
229 | 1. Visit an endpoint that takes several seconds to fully transmit a response
230 | 2. Close the browser window immediately after beginning the request
231 |
232 | When this happens while you are streaming a query to a client, you need to manually tell Knex that it can release the database connection in use back to the connection pool.
233 |
234 | ```js
235 | server.on('request', function (request, response) {
236 | const stream = knex.select('*').from('items').stream();
237 | request.on('close', stream.end.bind(stream));
238 | });
239 | ```
240 |
241 |
--------------------------------------------------------------------------------
/src/faq/support.md:
--------------------------------------------------------------------------------
1 | # Support
2 |
3 | Have questions about the library? Come join us in the [#bookshelf freenode IRC](http://webchat.freenode.net/?channels=bookshelf) channel for support on knex.js and [bookshelf.js](http://bookshelfjs.org), or post an issue on [Stack Overflow](http://stackoverflow.com/questions/tagged/knex.js) or in the GitHub [issue tracker](https://github.com/knex/knex/issues).
4 |
--------------------------------------------------------------------------------
/src/guide/extending.md:
--------------------------------------------------------------------------------
1 | # Extending
2 |
3 | To extend knex's builders, we have the following methods:
4 |
5 | ```js
6 | knex.SchemaBuilder.extend("functionName", function() {
7 | console.log('Custom Schema Builder Function');
8 | return this;
9 | });
10 | knex.TableBuilder.extend("functionName", function() {
11 | console.log('Custom Table Builder Function');
12 | return this;
13 | });
14 | knex.ViewBuilder.extend("functionName", function() {
15 | console.log('Custom View Builder Function');
16 | return this;
17 | });
18 | knex.ColumnBuilder.extend("functionName", function() {
19 | console.log('Custom Column Builder Function');
20 | return this;
21 | });
22 | ```
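
Once extended, the custom method can be chained like any built-in builder method. A minimal sketch using the `functionName` placeholder registered above:

```js
// functionName() logs and returns `this`, so normal chaining keeps working
await knex.schema
  .functionName()
  .createTable('users', (table) => {
    table.increments('id');
    table.string('name');
  });
```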
23 |
24 |
25 | To add TypeScript support, you can add the following declaration file (`.d.ts`):
26 | ```ts
27 | import "knex";
28 | declare module "knex" {
29 | namespace Knex {
30 | interface SchemaBuilder {
31 | functionName (): Knex.SchemaBuilder;
32 | }
33 | interface TableBuilder {
34 | functionName (): Knex.TableBuilder;
35 | }
36 | interface ViewBuilder {
37 | functionName (): Knex.ViewBuilder;
38 | }
39 | interface ColumnBuilder {
40 | functionName (): Knex.ColumnBuilder;
41 | }
42 | }
43 | }
44 | ```
--------------------------------------------------------------------------------
/src/guide/index.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 |
4 | Knex can be used as an SQL query builder in both Node.js and the browser, limited to WebSQL's constraints (like the inability to drop tables or read schemas). Composing SQL queries in the browser for execution on the server is highly discouraged, as this can be the cause of serious security vulnerabilities. The browser builds outside of WebSQL are primarily for learning purposes - for example, you can pop open the console and build queries on this page using the **knex** object.
5 |
6 | ## Node.js
7 |
8 | The primary target environment for Knex is Node.js. You will need to install the `knex` library, and then install the appropriate database library: [`pg`](https://github.com/brianc/node-postgres) for PostgreSQL, CockroachDB and Amazon Redshift, [`pg-native`](https://github.com/brianc/node-pg-native) for PostgreSQL with native C++ `libpq` bindings (requires PostgreSQL installed to link against), [`mysql`](https://github.com/felixge/node-mysql) for MySQL or MariaDB, [`sqlite3`](https://github.com/mapbox/node-sqlite3) for SQLite3, or [`tedious`](https://github.com/tediousjs/tedious) for MSSQL.
9 |
10 | ```bash
11 | $ npm install knex --save
12 |
13 | # Then add one of the following (adding a --save flag):
14 | $ npm install pg
15 | $ npm install pg-native
16 | $ npm install sqlite3
17 | $ npm install better-sqlite3
18 | $ npm install mysql
19 | $ npm install mysql2
20 | $ npm install oracledb
21 | $ npm install tedious
22 | ```
23 |
24 | _If you want to use a CockroachDB or Redshift instance, you can use the `pg` driver._
25 |
26 | _If you want to use a MariaDB instance, you can use the `mysql` driver._
27 |
28 | ## Browser
29 |
30 | Knex can be built using a JavaScript build tool such as [browserify](http://browserify.org/) or [webpack](https://github.com/webpack/webpack). In fact, this documentation uses a webpack build which [includes knex](https://github.com/knex/documentation/blob/a4de1b2eb50d6699f126be8d134f3d1acc4fc69d/components/Container.jsx#L3). View source on this page to see the browser build in-action (the global `knex` variable).
31 |
32 | ## Configuration Options
33 |
34 | The `knex` module is itself a function which takes a configuration object for Knex, accepting a few parameters. The `client` parameter is required and determines which client adapter will be used with the library.
35 |
36 | ```js
37 | const knex = require('knex')({
38 | client: 'mysql',
39 | connection: {
40 | host : '127.0.0.1',
41 | port : 3306,
42 | user : 'your_database_user',
43 | password : 'your_database_password',
44 | database : 'myapp_test'
45 | }
46 | });
47 | ```
48 |
49 | The connection options are passed directly to the appropriate database client to create the connection, and may be either an object, a connection string, or a function returning an object:
50 |
51 | ::: info PostgreSQL
52 | Knex's PostgreSQL client allows you to set the initial search path for each connection automatically using an additional option "searchPath" as shown below.
53 |
54 | ```js
55 | const pg = require('knex')({
56 | client: 'pg',
57 | connection: process.env.PG_CONNECTION_STRING,
58 | searchPath: ['knex', 'public'],
59 | });
60 | ```
61 | :::
62 |
63 | When using the PostgreSQL driver, another usage pattern for instantiating the Knex configuration object is to use a `connection: {}` object to specify various details such as enabling SSL, a connection string, and individual connection configuration fields all in the same object. Consider the following example:
64 |
65 | ::: info PostgreSQL
66 | If provided, `connectionString` takes the highest priority. If left unspecified, the connection details will be determined using the individual connection fields (`host`, `port`, etc.), and finally an SSL configuration will be enabled based on a truthy value of `config["DB_SSL"]`, which will also accept self-signed certificates.
67 |
68 | ```js
69 | const pg = require('knex')({
70 | client: 'pg',
71 | connection: {
72 | connectionString: config.DATABASE_URL,
73 | host: config["DB_HOST"],
74 | port: config["DB_PORT"],
75 | user: config["DB_USER"],
76 | database: config["DB_NAME"],
77 | password: config["DB_PASSWORD"],
78 | ssl: config["DB_SSL"] ? { rejectUnauthorized: false } : false,
79 | }
80 | });
81 | ```
82 | :::
83 |
84 | The following are SQLite usage patterns for instantiating the Knex configuration object:
85 |
86 | ::: info SQLite3 or Better-SQLite3
87 | When you use the SQLite3 or Better-SQLite3 adapter, a filename is required rather than a network connection. For example:
88 |
89 | ```js
90 | const knex = require('knex')({
91 | client: 'sqlite3', // or 'better-sqlite3'
92 | connection: {
93 | filename: "./mydb.sqlite"
94 | }
95 | });
96 | ```
97 |
98 | You can also run either SQLite3 or Better-SQLite3 with an in-memory database by providing `:memory:` as the filename. For example:
99 |
100 | ```js
101 | const knex = require('knex')({
102 | client: 'sqlite3', // or 'better-sqlite3'
103 | connection: {
104 | filename: ":memory:"
105 | }
106 | });
107 | ```
108 | :::
109 |
110 | ::: info SQLite3
111 | When you use the SQLite3 adapter, you can set flags used to open the connection. For example:
112 |
113 | ```js
114 | const knex = require('knex')({
115 | client: 'sqlite3',
116 | connection: {
117 | filename: "file:memDb1?mode=memory&cache=shared",
118 | flags: ['OPEN_URI', 'OPEN_SHAREDCACHE']
119 | }
120 | });
121 | ```
122 | :::
123 |
124 |
125 | ::: info Better-SQLite3
126 | With the Better-SQLite3 adapter, you can use `options.nativeBinding` to specify the location of the adapter's compiled C++ addon. This can be useful when your build system does a lot of transformation/relocation of files.
127 |
128 | Example use:
129 |
130 | ```js
131 | const knex = require('knex')({
132 | client: 'better-sqlite3',
133 | connection: {
134 | filename: ":memory:",
135 | options: {
136 | nativeBinding: "/path/to/better_sqlite3.node",
137 | },
138 | },
139 | });
140 | ```
141 |
142 | Additionally, you can open the database in read-only mode using `options.readonly`:
143 |
144 | ```js
145 | const knex = require('knex')({
146 | client: 'better-sqlite3',
147 | connection: {
148 | filename: "/path/to/db.sqlite3",
149 | options: {
150 | readonly: true,
151 | },
152 | },
153 | });
154 | ```
155 |
156 | For more information, see the [Better-SQLite3 documentation](https://github.com/WiseLibs/better-sqlite3/blob/master/docs/api.md#new-databasepath-options) on database connection options.
157 |
158 | :::
159 |
160 | ::: info MSSQL
161 | When you use the MSSQL client, you can define a `mapBinding` function to provide your own logic for mapping knex query parameters to `tedious` types.
162 | Returning `undefined` from the function will fall back to the default mapping.
163 | ```js
164 | import { TYPES } from 'tedious';
165 |
166 | const knex = require('knex')({
167 | client: 'mssql',
168 | connection: {
169 | options: {
170 | mapBinding: value => {
171 | // bind all strings to varchar instead of nvarchar
172 | if (typeof value === 'string') {
173 | return {
174 | type: TYPES.VarChar,
175 | value
176 | };
177 | }
178 |
179 | // allow devs to pass tedious type at query time
180 | if (value != null && value.type) {
181 | return {
182 | type: value.type,
183 | value: value.value
184 | };
185 | }
186 |
187 | // undefined is returned; falling back to default mapping function
188 | }
189 | }
190 | }
191 | });
192 | ```
193 | :::
194 |
195 | ::: info
196 | The database version can be added to the knex configuration when you use the PostgreSQL adapter to connect to a non-standard database.
197 |
198 | ```js
199 | const knex = require('knex')({
200 | client: 'pg',
201 | version: '7.2',
202 | connection: {
203 | host : '127.0.0.1',
204 | port : 5432,
205 | user : 'your_database_user',
206 | password : 'your_database_password',
207 | database : 'myapp_test'
208 | }
209 | });
210 | ```
211 |
212 | ```js
213 | const knex = require('knex')({
214 | client: 'mysql',
215 | version: '5.7',
216 | connection: {
217 | host : '127.0.0.1',
218 | port : 3306,
219 | user : 'your_database_user',
220 | password : 'your_database_password',
221 | database : 'myapp_test'
222 | }
223 | });
224 | ```
225 | :::
226 |
227 | ::: info
228 | When using a custom PostgreSQL client like `knex-aurora-data-api-client`, you can explicitly state whether it supports jsonb column types:
229 |
230 | ```js
231 | const knex = require('knex')({
232 | client: require('knex-aurora-data-api-client').postgres,
233 | connection: { resourceArn, secretArn, database: `mydb` },
234 | version: 'data-api',
235 | jsonbSupport: true
236 | })
237 | ```
238 | :::
239 |
240 | A function can be used to determine the connection configuration dynamically. This function receives no parameters, and returns either a configuration object or a promise for a configuration object.
241 |
242 | ```js
243 | const knex = require('knex')({
244 | client: 'sqlite3',
245 | connection: () => ({
246 | filename: process.env.SQLITE_FILENAME
247 | })
248 | });
249 | ```
250 |
251 | By default, the configuration object received via a function is cached and reused for all connections. To change this behavior, an `expirationChecker` function can be returned as part of the configuration object. The `expirationChecker` is consulted before trying to create new connections, and in case it returns `true`, a new configuration object is retrieved. For example, to work with an authentication token that has a limited lifespan:
252 |
253 | ```js
254 | const knex = require('knex')({
255 | client: 'postgres',
256 | connection: async () => {
257 | const {
258 | token,
259 | tokenExpiration
260 | } = await someCallToGetTheToken();
261 |
262 | return {
263 | host : 'your_host',
264 | port : 5432,
265 | user : 'your_database_user',
266 | password : token,
267 | database : 'myapp_test',
268 | expirationChecker: () => {
269 | return tokenExpiration <= Date.now();
270 | }
271 | };
272 | }
273 | });
274 | ```
275 |
276 | You can also connect via a unix domain socket, which will ignore host and port.
277 |
278 | ```js
279 | const knex = require('knex')({
280 | client: 'mysql',
281 | connection: {
282 | socketPath : '/path/to/socket.sock',
283 | user : 'your_database_user',
284 | password : 'your_database_password',
285 | database : 'myapp_test'
286 | }
287 | });
288 | ```
289 |
290 | `userParams` is an optional parameter that allows you to pass arbitrary parameters which will be accessible via the `knex.userParams` property:
291 |
292 | ```js
293 | const knex = require('knex')({
294 | client: 'mysql',
295 | connection: {
296 | host : '127.0.0.1',
297 | port : 3306,
298 | user : 'your_database_user',
299 | password : 'your_database_password',
300 | database : 'myapp_test'
301 | },
302 | userParams: {
303 | userParam1: '451'
304 | }
305 | });
306 | ```
307 |
308 | Initializing the library should normally only ever happen once in your application, as it creates a connection pool for the current database. You should use the instance returned from the initialize call throughout your library.
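
A common way to achieve this is to create the instance in one module and import it everywhere else; a minimal sketch (the file name and connection settings are illustrative):

```js
// db.js - initialize the single shared instance for the whole application
const knex = require('knex')({
  client: 'pg',
  connection: process.env.DATABASE_URL
});

module.exports = knex;

// other modules then require('./db') instead of calling require('knex')(...) again
```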
309 |
310 | Specify the client for the particular flavour of SQL you are interested in.
311 |
312 | ```js
313 | const pg = require('knex')({client: 'pg'});
314 |
315 | knex('table')
316 | .insert({a: 'b'})
317 | .returning('*')
318 | .toString();
319 | // "insert into "table" ("a") values ('b')"
320 |
321 | pg('table')
322 | .insert({a: 'b'})
323 | .returning('*')
324 | .toString();
325 | // "insert into "table" ("a") values ('b') returning *"
326 | ```
327 |
328 | ### withUserParams
329 |
330 | You can call the method `withUserParams` on a Knex instance if you want to get a copy (with the same connections) with custom parameters (e.g. to execute the same migrations with different parameters):
331 |
332 | ```js
333 | const knex = require('knex')({
334 | // Params
335 | });
336 |
337 | const knexWithParams = knex.withUserParams({
338 | customUserParam: 'table1'
339 | });
340 | const customUserParam = knexWithParams
341 | .userParams
342 | .customUserParam;
343 | ```
344 |
345 | ### debug
346 |
347 | Passing a `debug: true` flag on your initialization object will turn on [debugging](/guide/query-builder.html#debug) for all queries.
348 |
349 | ### asyncStackTraces
350 |
351 | Passing an `asyncStackTraces: true` flag on your initialization object will turn on stack trace capture for all query builders, raw queries and schema builders. When a DB driver returns an error, this previously captured stack trace is thrown instead of a new one. This helps to mitigate the default behaviour of `await` in Node.js/V8, which blows the stack away. This has a small performance overhead, so it is advised to use it only for development. It is turned off by default.
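
For instance, a sketch of enabling the flag outside production only (the environment check is illustrative):

```js
const knex = require('knex')({
  client: 'pg',
  connection: {/*...*/},
  // capture async stack traces only in development to avoid the overhead
  asyncStackTraces: process.env.NODE_ENV !== 'production'
});
```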
352 |
353 | ### pool
354 |
355 | The client created by the configuration initializes a connection pool, using the [tarn.js](https://github.com/vincit/tarn.js) library. This connection pool has a default setting of a `min: 2, max: 10` for the MySQL and PG libraries, and a single connection for sqlite3 (due to issues with utilizing multiple connections on a single file). To change the config settings for the pool, pass a `pool` option as one of the keys in the initialize block.
356 |
357 | Note that the default value of `min` is 2 only for historical reasons. It can result in problems with stale connections, despite tarn's default idle connection timeout of 30 seconds, which is only applied when there are more than `min` active connections. It is recommended to set `min: 0` so all idle connections can be terminated.
358 |
359 | Check out the [tarn.js](https://github.com/vincit/tarn.js) library for more information.
360 |
361 | ```js
362 | const knex = require('knex')({
363 | client: 'mysql',
364 | connection: {
365 | host : '127.0.0.1',
366 | port : 3306,
367 | user : 'your_database_user',
368 | password : 'your_database_password',
369 | database : 'myapp_test'
370 | },
371 | pool: { min: 0, max: 7 }
372 | });
373 | ```
374 |
375 | If you ever need to explicitly tear down the connection pool, you may use `knex.destroy([callback])`. You may use `knex.destroy` by passing a callback, or by chaining as a promise, just not both. To manually initialize a destroyed connection pool, you may use `knex.initialize([config])`; if no config is passed, it will use the first knex configuration used.
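
A minimal sketch of the teardown / re-initialize cycle described above:

```js
// promise form: resolves once all pooled connections have been closed
await knex.destroy();

// later, revive the pool; with no config it reuses the original configuration
knex.initialize();
```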
376 |
377 | ### afterCreate
378 |
379 | The `afterCreate` callback, `(rawDriverConnection, done)`, is called when the pool acquires a new connection from the database server. The `done(err, connection)` callback must be called for `knex` to be able to decide if the connection is OK or if it should be discarded from the pool right away.
380 |
381 | ```js
382 | const knex = require('knex')({
383 | client: 'pg',
384 | connection: {/*...*/},
385 | pool: {
386 | afterCreate: function (conn, done) {
387 | // in this example we use pg driver's connection API
388 | conn.query('SET timezone="UTC";', function (err) {
389 | if (err) {
390 | // first query failed,
391 | // return error and don't try to make next query
392 | done(err, conn);
393 | } else {
394 | // do the second query...
395 | conn.query(
396 | 'SELECT set_limit(0.01);',
397 | function (err) {
398 | // if err is not falsy,
399 | // connection is discarded from pool
400 | // if connection acquire was triggered by a
401 | // query the error is passed to query promise
402 | done(err, conn);
403 | });
404 | }
405 | });
406 | }
407 | }
408 | });
409 | ```
410 |
411 | ### acquireConnectionTimeout
412 |
413 | `acquireConnectionTimeout` defaults to 60000ms and is used to determine how long knex should wait before throwing a timeout error when acquiring a connection is not possible. The most common cause for this is using up all the pool for transaction connections and then attempting to run queries outside of transactions while the pool is still full. The error thrown will provide information on the query the connection was for, to simplify the job of locating the culprit.
414 |
415 | ```js
416 | const knex = require('knex')({
417 | client: 'pg',
418 | connection: {/*...*/},
419 | pool: {/*...*/},
420 | acquireConnectionTimeout: 10000
421 | });
422 | ```
423 |
424 | ### fetchAsString
425 |
426 | Utilized by Oracledb. An array of types. The valid types are 'DATE', 'NUMBER' and 'CLOB'. When any column having one of the specified types is queried, the column data is returned as a string instead of the default representation.
427 |
428 | ```js
429 | const knex = require('knex')({
430 | client: 'oracledb',
431 | connection: {/*...*/},
432 | fetchAsString: [ 'number', 'clob' ]
433 | });
434 | ```
435 |
436 | ### migrations
437 |
438 | For convenience, any migration configuration may be specified when initializing the library. Read the [Migrations](/guide/migrations.html) section for more information and a full list of configuration options.
439 |
440 | ```js
441 | const knex = require('knex')({
442 | client: 'mysql',
443 | connection: {
444 | host : '127.0.0.1',
445 | port : 3306,
446 | user : 'your_database_user',
447 | password : 'your_database_password',
448 | database : 'myapp_test'
449 | },
450 | migrations: {
451 | tableName: 'migrations'
452 | }
453 | });
454 | ```
455 |
456 | ### postProcessResponse
457 |
458 | Hook for modifying returned rows before passing them on to the user. One can, for example, do snake_case -> camelCase conversion for returned columns with this hook. The `queryContext` is only available if configured for a query builder instance via [queryContext](/guide/schema-builder.html#querycontext).
459 |
460 | ```js
461 | const knex = require('knex')({
462 | client: 'mysql',
463 | // overly simplified snake_case -> camelCase converter
464 | postProcessResponse: (result, queryContext) => {
465 | // TODO: add special case for raw results
466 | // (depends on dialect)
467 | if (Array.isArray(result)) {
468 | return result.map(row => convertToCamel(row));
469 | } else {
470 | return convertToCamel(result);
471 | }
472 | }
473 | });
474 | ```
475 |
476 | ### wrapIdentifier
477 |
478 | Knex supports transforming identifier names automatically to quoted versions for each dialect. For example, `'Table.columnName as foo'` for PostgreSQL is converted to `"Table"."columnName" as "foo"`.
479 |
480 | With `wrapIdentifier` one may override the way identifiers are transformed. It can be used to override the default functionality and, for example, to help with `camelCase` -> `snake_case` conversion.
481 |
482 | The conversion function `wrapIdentifier(value, dialectImpl, context): string` gets each part of the identifier as a single `value`, the original conversion function from the dialect implementation, and the `queryContext`, which is only available if configured for a query builder instance via [builder.queryContext](/guide/query-builder.html#querycontext), and for schema builder instances via [schema.queryContext](/guide/schema-builder.html#querycontext) or [table.queryContext](/guide/schema-builder.html#querycontext-1). For example, with the query builder, `knex('table').withSchema('foo').select('table.field as otherName').where('id', 1)` will call the `wrapIdentifier` converter for the following values: `'table'`, `'foo'`, `'table'`, `'field'`, `'otherName'` and `'id'`.
483 |
484 | ```js
485 | const knex = require('knex')({
486 | client: 'mysql',
487 | // overly simplified camelCase -> snake_case converter
488 | wrapIdentifier: (
489 | value,
490 | origImpl,
491 | queryContext
492 | ) => origImpl(convertToSnakeCase(value))
493 | });
494 | ```
495 |
496 | ### log
497 |
498 | Knex contains some internal log functions for printing warnings, errors, deprecations, and debug information when applicable. These log functions typically log to the console, but can be overwritten using the log option and providing alternative functions. Different log functions can be used for separate knex instances.
499 |
500 | ```js
501 | const knex = require('knex')({
502 | log: {
503 | warn(message) {
504 | },
505 | error(message) {
506 | },
507 | deprecate(message) {
508 | },
509 | debug(message) {
510 | },
511 | }
512 | });
513 | ```
514 |
515 | ### compileSqlOnError
516 |
517 | Knex builds an error message in case of query error. By default Knex adds compiled SQL (`SELECT * FROM users WHERE password = 'myPassword'`) to the error message. This can be changed to parameterized SQL (`SELECT * FROM users WHERE password = ?`) by setting `compileSqlOnError` to `false`.
518 |
519 | ```js
520 | const knex = require('knex')({
521 | compileSqlOnError: false
522 | });
523 | ```
524 |
525 | ## TypeScript
526 |
527 | While knex is written in JavaScript, officially supported TypeScript bindings are available (within the knex npm package).
528 |
529 | However, it should be noted that TypeScript support is currently best-effort. Knex has a very flexible API and not all usage patterns can be type-checked, and in most such cases we err on the side of flexibility. In particular, a lack of type errors doesn't currently guarantee that the generated queries will be correct, and therefore writing tests for them is recommended even if you are using TypeScript.
530 |
531 | Many of the APIs accept `TRecord` and `TResult` type parameters, using which we can specify the type of a row in the database table and the type of the result of the query respectively. This is helpful for auto-completion when using TypeScript-aware editors like VSCode.
532 |
533 | To reduce boilerplate and add inferred types, you can augment the `Tables` interface in the `'knex/types/tables'` module.
534 |
535 | ```ts
536 | import { Knex } from 'knex';
537 |
538 | declare module 'knex/types/tables' {
539 | interface User {
540 | id: number;
541 | name: string;
542 | created_at: string;
543 | updated_at: string;
544 | }
545 |
546 | interface Tables {
547 | // This is same as specifying `knex('users')`
548 | users: User;
549 | // For more advanced types, you can specify separate type
550 | // for base model, "insert" type and "update" type.
551 | // But first: notice that if you choose to use this,
552 | // the basic typing showed above can be ignored.
553 | // So, this is like specifying
554 | // knex
555 | // .insert<{ name: string }>({ name: 'name' })
556 | // .into<{ name: string, id: number }>('users')
557 | users_composite: Knex.CompositeTableType<
558 | // This interface will be used for return type and
559 | // `where`, `having` etc where full type is required
560 | User,
561 | // Specifying "insert" type will also make sure
562 | // data matches interface in full. Meaning
563 | // if interface is `{ a: string, b: string }`,
564 | // `insert({ a: '' })` will complain about missing fields.
565 | //
566 | // For example, this will require only "name" field when inserting
567 | // and make created_at and updated_at optional.
568 | // And "id" can't be provided at all.
569 | // Defaults to "base" type.
570 | Pick<User, 'name'> & Partial<Pick<User, 'created_at' | 'updated_at'>>,
571 | // This interface is used for "update()" calls.
572 | // As opposed to regular specifying interface only once,
573 | // when specifying separate update interface, user will be
574 | // required to match it exactly. So it's recommended to
575 | // provide partial interfaces for "update". Unless you want to always
576 | // require some field (e.g., `Partial<User> & { updated_at: string }`
577 | // will allow updating any field for User but require updated_at to be
578 | // always provided as well.
579 | //
580 | // For example, this will allow updating all fields except "id".
581 | // "id" will still be usable for `where` clauses so
582 | // knex('users_composite')
583 | // .update({ name: 'name2' })
584 | // .where('id', 10)`
585 | // will still work.
586 | // Defaults to Partial "insert" type
587 | Partial<Omit<User, 'id'>>
588 | >;
589 | }
590 | }
591 | ```
592 |
593 | When TypeScript is configured to use a modern module resolution setting (`node16`, `nodenext`, etc.), the compiler expects that the declared module name ends with a `.js` file type. You will need to declare your inferred types as follows instead:
594 |
595 | ```ts
596 | // The trailing `.js` is required by the TypeScript compiler in certain configs:
597 | declare module 'knex/types/tables.js' { // <----- Different module path!!!
598 | interface Tables {
599 | // ...
600 | }
601 | }
602 | ```
603 |
--------------------------------------------------------------------------------
/src/guide/interfaces.md:
--------------------------------------------------------------------------------
1 | # Interfaces
2 |
3 | Knex.js provides several options to deal with query output. The following methods are present on the query builder, schema builder, and the raw builder:
4 |
5 | ## Promises
6 |
7 | [Promises](https://github.com/petkaantonov/bluebird#what-are-promises-and-why-should-i-use-them) are the preferred way of dealing with queries in knex, as they allow you to return values from a fulfillment handler, which in turn become the value of the promise. The main benefit of promises is the ability to catch thrown errors without crashing the node app, making your code behave like a **.try / .catch / .finally** in synchronous code.
8 |
9 | ```js
10 | knex.select('name')
11 | .from('users')
12 | .where('id', '>', 20)
13 | .andWhere('id', '<', 200)
14 | .limit(10)
15 | .offset(x)
16 | .then(function(rows) {
17 | return _.pluck(rows, 'name');
18 | })
19 | .then(function(names) {
20 | return knex.select('id')
21 | .from('nicknames')
22 | .whereIn('nickname', names);
23 | })
24 | .then(function(rows) {
25 | console.log(rows);
26 | })
27 | .catch(function(error) {
28 | console.error(error)
29 | });
30 | ```
31 |
32 | ### then
33 |
34 | **.then(onFulfilled, [onRejected])**
35 |
36 | Coerces the current query builder chain into a promise state, accepting the resolve and reject handlers as specified by the Promises/A+ spec. As stated in the spec, more than one call to the then method for the current query chain will resolve with the same value, in the order they were called; the query will not be executed multiple times.
37 |
38 | ```js
39 | knex.select('*')
40 | .from('users')
41 | .where({name: 'Tim'})
42 | .then(function(rows) {
43 | return knex
44 | .insert({user_id: rows[0].id, name: 'Test'}, 'id')
45 | .into('accounts');
46 | })
47 | .then(function(id) {
48 | console.log('Inserted Account ' + id);
49 | })
50 | .catch(function(error) { console.error(error); });
51 | ```
52 |
53 | ### catch
54 |
55 | **.catch(onRejected)**
56 |
57 | Coerces the current query builder into a promise state, catching any error thrown by the query, the same as calling `.then(null, onRejected)`.
58 |
59 | ```js
60 | return knex.insert({id: 1, name: 'Test'}, 'id')
61 | .into('accounts')
62 | .catch(function(error) {
63 | console.error(error);
64 | })
65 | .then(function() {
66 | return knex.select('*')
67 | .from('accounts')
68 | .where('id', 1);
69 | })
70 | .then(function(rows) {
71 | console.log(rows[0]);
72 | })
73 | .catch(function(error) {
74 | console.error(error);
75 | });
76 | ```
77 |
78 | ## Callbacks
79 |
80 | ### asCallback
81 |
82 | **.asCallback(callback)**
83 |
84 | If you'd prefer a callback interface over promises, the asCallback function accepts a standard node style callback for executing the query chain. Note that as with the then method, subsequent calls to the same query chain will return the same result.
85 |
86 | ```js
87 | knex.select('name').from('users')
88 | .where('id', '>', 20)
89 | .andWhere('id', '<', 200)
90 | .limit(10)
91 | .offset(x)
92 | .asCallback(function(err, rows) {
93 | if (err) return console.error(err);
94 | knex.select('id')
95 | .from('nicknames')
96 | .whereIn('nickname', _.pluck(rows, 'name'))
97 | .asCallback(function(err, rows) {
98 | if (err) return console.error(err);
99 | console.log(rows);
100 | });
101 | });
102 | ```
103 |
104 | ## Streams
105 |
106 | Streams are a powerful way of piping data through as it comes in, rather than all at once. You can read more about streams [here at substack's stream handbook](https://github.com/substack/stream-handbook). See the following for example uses of stream & pipe. If you wish to use streams with PostgreSQL, you must also install the [pg-query-stream](https://github.com/brianc/node-pg-query-stream) module. If you wish to use streams with the `pgnative` dialect, please be aware that the results will not be streamed as they are received, but rather streamed after the entire result set has returned. On an HTTP server, make sure to [manually close your streams](https://github.com/knex/knex/wiki/Manually-Closing-Streams) if a request is aborted.
107 |
108 | ### stream
109 |
110 | **.stream([options], [callback])**
111 |
112 | If called with a callback, the callback is passed the stream and a promise is returned. Otherwise, the readable stream is returned.
113 | When the stream is consumed as an [iterator](https://nodejs.org/api/stream.html#readablesymbolasynciterator), if the loop terminates with a `break`, `return`, or a `throw`, the stream will be destroyed. In other words, iterating over a stream will consume it fully.
114 |
115 | ```js
116 | // Retrieve the stream:
117 | const stream = knex.select('*')
118 | .from('users')
119 | .stream();
120 | stream.pipe(writableStream);
121 | ```
122 |
123 | ```js
124 | // With options:
125 | const stream = knex.select('*')
126 | .from('users')
127 | .stream({highWaterMark: 5});
128 | stream.pipe(writableStream);
129 | ```
130 |
131 | ```js
132 | // Use as an iterator
133 | const stream = knex.select('*')
134 | .from('users')
135 | .stream();
136 |
137 | for await (const row of stream) {
138 | /* ... */
139 | }
140 | ```
141 |
142 | ```js
143 | // Use as a promise:
144 | const stream = knex.select('*')
145 | .from('users')
146 | .where(knex.raw('id = ?', [1]))
147 | .stream(function(stream) {
148 | stream.pipe(writableStream);
149 | })
150 | .then(function() { /* ... */ })
151 | .catch(function(e) { console.error(e); });
152 | ```
153 |
154 | ### pipe
155 |
156 | **.pipe(writableStream)**
157 |
158 | Pipe a stream for the current query to a writableStream.
159 |
160 | ```js
161 | const stream = knex.select('*')
162 | .from('users')
163 | .pipe(writableStream);
164 | ```
165 |
166 | ## Events
167 |
168 | ### query
169 |
170 | A query event is fired just before a query takes place, providing data about the query, including the connection's `__knexUid` / `__knexTxId` properties and any other information about the query as described in toSQL. Useful for logging all queries throughout your application.
171 |
172 | ```js
173 | knex.select('*')
174 | .from('users')
175 | .on('query', function(data) {
176 | app.log(data);
177 | })
178 | .then(function() {
179 | // ...
180 | });
181 | ```
182 |
183 | ### query-error
184 |
185 | A query-error event is fired when an error occurs while running a query, providing the error object and data about the query, including the connection's `__knexUid` / `__knexTxId` properties and any other information about the query as described in toSQL. Useful for logging all query errors throughout your application.
186 |
187 | ```js
188 | knex.select(['NonExistentColumn'])
189 | .from('users')
190 | .on('query-error', function(error, obj) {
191 | app.log(error);
192 | })
193 | .then(function() { /* ... */ })
194 | .catch(function(error) {
195 | // Same error object as the query-error event provides.
196 | });
197 | ```
198 |
199 | ### query-response
200 |
201 | A query-response event is fired when a successful query has been run, providing the response of the query and data about the query, including the connection's `__knexUid` / `__knexTxId` properties and any other information about the query as described in toSQL, and finally the query builder used for the query.
202 |
203 | ```js
204 | knex.select('*')
205 | .from('users')
206 | .on('query-response', function(response, obj, builder) {
207 | // ...
208 | })
209 | .then(function(response) {
210 | // Same response as the emitted event
211 | })
212 | .catch(function(error) { });
213 | ```
214 |
215 | ### start
216 |
217 | A `start` event is fired right before a query-builder is compiled.
218 |
219 | ::: info
220 | While this event can be used to alter a builder's state prior to compilation, it is not recommended. Future goals include ways of doing this in a different manner, such as hooks.
221 | :::
222 |
223 | ```js
224 | knex.select('*')
225 | .from('users')
226 | .on('start', function(builder) {
227 | builder
228 | .where('IsPrivate', 0)
229 | })
230 | .then(function(Rows) {
231 | //Only contains Rows where IsPrivate = 0
232 | })
233 | .catch(function(error) { });
234 | ```
235 |
236 | ## Other
237 |
238 | ### toString
239 |
240 | **.toString()**
241 |
242 | Returns the query as a string, filled out with the correct values based on bindings, etc. Useful for debugging, but should not be used to create queries to run against the DB.
243 |
244 | ```js
245 | const toStringQuery = knex.select('*')
246 | .from('users')
247 | .where('id', 1)
248 | .toString();
249 |
250 | // console.log(toStringQuery);
251 | // Outputs: select * from "users" where "id" = 1
252 | ```
253 |
254 | ### toSQL
255 |
256 | **.toSQL()**
257 | **.toSQL().toNative()**
258 |
259 | Returns an object containing the query string and bindings, useful for debugging and for building queries to run manually with the DB driver. `.toSQL().toNative()` outputs an object with the SQL string and bindings in the dialect's format, in the same way that knex internally sends them to the underlying DB driver.
260 |
261 | ```js
262 | knex.select('*')
263 | .from('users')
264 | .where(knex.raw('id = ?', [1]))
265 | .toSQL()
266 | // Outputs:
267 | // {
268 | // bindings: [1],
269 | // method: 'select',
270 | // sql: 'select * from "users" where id = ?',
271 | // options: undefined,
272 | // toNative: function () {}
273 | // }
274 |
275 | knex.select('*')
276 | .from('users')
277 | .where(knex.raw('id = ?', [1]))
278 | .toSQL()
279 | .toNative()
280 | // Outputs for postgresql dialect:
281 | // {
282 | // bindings: [1],
283 | // sql: 'select * from "users" where id = $1',
284 | // }
285 | ```
286 |
--------------------------------------------------------------------------------
/src/guide/migrations.md:
--------------------------------------------------------------------------------
1 | # Migrations
2 |
3 | Migrations allow you to define sets of schema changes, so upgrading a database is a breeze.
4 |
5 | ## Migration CLI
6 |
7 | The migration CLI is bundled with the knex install, and is driven by the [node-liftoff](https://github.com/tkellen/node-liftoff) module. To install globally, run:
8 |
9 | ```bash
10 | $ npm install knex -g
11 | ```
12 |
13 | The migration CLI accepts the following general command-line options. You can view help text and additional options for each command using `--help`. E.g. `knex migrate:latest --help`.
14 |
15 | - `--debug`: Run with debugging
16 | - `--knexfile [path]`: Specify the knexfile path
17 | - `--knexpath [path]`: Specify the path to the knex instance
18 | - `--cwd [path]`: Specify the working directory
19 | - `--client [name]`: Set the DB client
20 | - `--connection [address]`: Set the DB connection
21 | - `--migrations-table-name`: Set the migration table name
22 | - `--migrations-directory`: Set the migrations directory
23 | - `--env`: environment, default: `process.env.NODE_ENV || development`
24 | - `--esm`: [Enables ESM module interoperability](#ecmascript-modules-esm-interoperability)
25 | - `--help`: Display help text for a particular command and exit.
26 |
27 | Migrations use a **knexfile**, which specifies various configuration settings for the module. To create a new knexfile, run the following:
28 |
29 | ```bash
30 | $ knex init
31 |
32 | # or for .ts
33 |
34 | $ knex init -x ts
35 | ```
36 |
37 | This will create a sample knexfile.js - the file which contains our various database configurations. Once you have a knexfile.js, you can use the migration tool to create migration files in the specified directory (default `migrations`). Creating new migration files can be achieved by running:
38 |
39 | ```bash
40 | $ knex migrate:make migration_name
41 |
42 | # or for .ts
43 |
44 | $ knex migrate:make migration_name -x ts
45 | ```
46 |
47 | - you can also create your migration using a specific stub file, which serves as a migration template to speed up development for common migration operations
48 | - if the --stub option is not passed, the CLI will use either the knex default stub for the chosen extension, or the config.stub file
49 |
50 | ```bash
51 | $ knex migrate:make --stub <path>
52 |
53 | # or
54 |
55 | $ knex migrate:make --stub <name>
56 | ```
57 |
58 | - if a stub path is provided, it must be relative to the knexfile.\[js, ts, etc\] location
59 | - if a <name> is used, the stub is selected by its file name. The CLI will look for this file in the config.migrations.directory folder. If config.migrations.directory is not defined, this operation will fail
60 |
61 | Once you have finished writing the migrations, you can update the database matching your `NODE_ENV` by running:
62 |
63 | ```bash
64 | $ knex migrate:latest
65 | ```
66 |
67 | You can also pass the `--env` flag or set `NODE_ENV` to select an alternative environment:
68 |
69 | ```bash
70 | $ knex migrate:latest --env production
71 |
72 | # or
73 |
74 | $ NODE_ENV=production knex migrate:latest
75 | ```
76 |
77 | To rollback the last batch of migrations:
78 |
79 | ```bash
80 | $ knex migrate:rollback
81 | ```
82 |
83 | To rollback all the completed migrations:
84 |
85 | ```bash
86 | $ knex migrate:rollback --all
87 | ```
88 |
89 | To run the next migration that has not yet been run:
90 |
91 | ```bash
92 | $ knex migrate:up
93 | ```
94 |
95 | To run the specified migration that has not yet been run:
96 |
97 | ```bash
98 | $ knex migrate:up 001_migration_name.js
99 | ```
100 |
101 | To undo the last migration that was run:
102 |
103 | ```bash
104 | $ knex migrate:down
105 | ```
106 |
107 | To undo the specified migration that was run:
108 |
109 | ```bash
110 | $ knex migrate:down 001_migration_name.js
111 | ```
112 |
113 | To list both completed and pending migrations:
114 |
115 | ```bash
116 | $ knex migrate:list
117 | ```
118 |
119 | ## Seed files
120 |
121 | Seed files allow you to populate your database with test or seed data independent of your migration files.
122 |
123 | ### Seed CLI
124 |
125 | To create a seed file, run:
126 |
127 | ```bash
128 | $ knex seed:make seed_name
129 | ```
130 |
131 | Seed files are created in the directory specified in your knexfile.js for the current environment. A sample seed configuration looks like:
132 |
133 | ```js
134 | module.exports = {
135 | // ...
136 | development: {
137 | client: {/* ... */},
138 | connection: {/* ... */},
139 | seeds: {
140 | directory: './seeds/dev'
141 | }
142 | }
143 | // ...
144 | }
145 | ```
146 |
147 | If no `seeds.directory` is defined, files are created in `./seeds`. Note that the seed directory needs to be a relative path. Absolute paths are not supported (nor are they good practice).
148 |
149 | To run seed files, execute:
150 |
151 | ```bash
152 | $ knex seed:run
153 | ```
154 |
155 | Seed files are executed in alphabetical order. Unlike migrations, _every_ seed file will be executed when you run the command. You should design your seed files to reset tables as needed before inserting data.
156 |
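157 | For example, a minimal seed file following this advice might look like the sketch below (the `users` table and its rows are illustrative placeholders):
158 | 
159 | ```js
160 | exports.seed = async function (knex) {
161 |   // Reset the table first, since every seed file runs on each `knex seed:run`
162 |   await knex('users').del();
163 | 
164 |   await knex('users').insert([
165 |     { id: 1, name: 'Alice' },
166 |     { id: 2, name: 'Bob' }
167 |   ]);
168 | };
169 | ```
170 | 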
157 | To run specific seed files, execute:
158 |
159 | ```bash
160 | $ knex seed:run --specific=seed-filename.js --specific=another-seed-filename.js
161 | ```
162 |
163 | ## knexfile.js
164 |
165 | A knexfile.js generally contains all of the configuration for your database. It can optionally provide different configuration for different environments. You may pass a `--knexfile` option to any of the command line statements to specify an alternate path to your knexfile.
166 |
167 | ### Basic configuration
168 |
169 | ```js
170 | module.exports = {
171 | client: 'pg',
172 | connection: process.env.DATABASE_URL || {
173 | user: 'me',
174 | database: 'my_app'
175 | }
176 | };
177 | ```
178 |
179 | You can also use an async function to get connection details for your configuration. This is useful when you need to fetch credentials from a secure location like Vault.
180 |
181 | ```js
182 | const getPassword = async () => {
183 | // TODO: implement me
184 | return 'my_pass'
185 | }
186 |
187 | module.exports = {
188 | client: 'pg',
189 | connection: async () => {
190 | const password = await getPassword()
191 | return { user: 'me', password }
192 | },
193 | migrations: {}
194 | };
195 | ```
196 |
197 | ### Environment configuration
198 |
199 | ```js
200 | module.exports = {
201 | development: {
202 | client: 'pg',
203 | connection: { user: 'me', database: 'my_app' }
204 | },
205 | production: {
206 | client: 'pg',
207 | connection: process.env.DATABASE_URL
208 | }
209 | };
210 | ```
211 |
212 | ### Custom migration
213 |
214 | You may provide a custom migration stub to be used in place of the default option.
215 |
216 | ```js
217 | module.exports = {
218 | client: 'pg',
219 | migrations: {
220 | stub: 'migration.stub'
221 | }
222 | };
223 | ```
224 |
225 | ### Custom migration name
226 |
227 | You may provide a custom migration name to be used in place of the default option.
228 |
229 | ```js
230 | module.exports = {
231 | client: 'pg',
232 | migrations: {
233 | getNewMigrationName: (name) => {
234 | return `${+new Date()}-${name}.js`;
235 | }
236 | }
237 | };
238 | ```
239 |
240 | ### Generated migration extension
241 |
242 | You can control extension of generated migrations.
243 |
244 | ```js
245 | module.exports = {
246 | client: 'pg',
247 | migrations: {
248 | extension: 'ts'
249 | }
250 | };
251 | ```
252 |
253 | ### Knexfile in other languages
254 |
255 | Knex uses [Liftoff](https://github.com/js-cli/js-liftoff) to support knexfiles written in other compile-to-js languages.
256 |
257 | Depending on the language, this may require you to install additional dependencies. The complete list of dependencies for each supported language can be found [here](https://github.com/gulpjs/interpret#extensions).
258 |
259 | The most common cases are TypeScript (for which the [typescript](https://www.npmjs.com/package/typescript) and [ts-node](https://www.npmjs.com/package/ts-node) packages are recommended) and CoffeeScript (for which the [coffeescript](https://www.npmjs.com/package/coffeescript) dependency is required).
260 |
261 | If you don't specify the extension explicitly, the extension of generated migration/seed files will be inferred from the knexfile extension.
262 |
263 | ## Migration API
264 |
265 | `knex.migrate` is the class utilized by the knex migrations CLI.
266 |
267 | Each method takes an optional `config` object, which may specify the following properties:
268 |
269 | - `directory`: a relative path to the directory containing the migration files. Can be an array of paths (default `./migrations`)
270 | - `extension`: the file extension used for the generated migration files (default `js`)
271 | - `tableName`: the table name used for storing the migration state (default `knex_migrations`)
272 | - `schemaName`: the schema name used for storing the table with migration state (optional parameter, only works on DBs that support multiple schemas in a single DB, such as PostgreSQL)
273 | - `disableTransactions`: don't run migrations inside transactions (default `false`)
274 | - `disableMigrationsListValidation`: do not validate that all the already executed migrations are still present in migration directories (default `false`)
275 | - `sortDirsSeparately`: if true and multiple directories are specified, all migrations from a single directory will be executed before executing migrations in the next folder (default `false`)
276 | - `loadExtensions`: array of file extensions which knex will treat as migrations. For example, if you have typescript transpiled into javascript in the same folder, you want to execute only javascript migrations. In this case, set `loadExtensions` to `['.js']` (Notice the dot!) (default `['.co', '.coffee', '.eg', '.iced', '.js', '.litcoffee', '.ls', '.ts']`)
277 | - `migrationSource`: specify a custom migration source, see [Custom Migration Source](#custom-migration-sources) for more info (default filesystem)
278 |
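279 | For example, a hypothetical invocation passing some of these properties explicitly (the values simply restate the defaults) might look like:
280 | 
281 | ```js
282 | knex.migrate.latest({
283 |   directory: './migrations',
284 |   tableName: 'knex_migrations',
285 |   disableTransactions: false
286 | });
287 | ```
288 | 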
279 | ### Transactions in migrations
280 |
281 | By default, each migration is run inside a transaction. Whenever needed, one can disable transactions for all migrations via the common migration config option `config.disableTransactions` or per-migration, via exposing a boolean property `config.transaction` from a migration file:
282 |
283 | ```js
284 | exports.up = function(knex) {
285 | return knex.schema
286 | .createTable('users', function (table) {
287 | table.increments('id');
288 | table.string('first_name', 255).notNullable();
289 | table.string('last_name', 255).notNullable();
290 | })
291 | .createTable('products', function (table) {
292 | table.increments('id');
293 | table.decimal('price').notNullable();
294 | table.string('name', 1000).notNullable();
295 | });
296 | };
297 |
298 | exports.down = function(knex) {
299 | return knex.schema
300 | .dropTable("products")
301 | .dropTable("users");
302 | };
303 |
304 | exports.config = { transaction: false };
305 | ```
306 |
307 | The same config property can be used for enabling transaction per-migration in case the common configuration has `disableTransactions: true`.
308 |
309 | ### make
310 |
311 | **knex.migrate.make(name, [config])**
312 |
313 | Creates a new migration file, with the given name.
314 |
315 | ### latest
316 |
317 | **knex.migrate.latest([config])**
318 |
319 | Runs all migrations that have not yet been run.
320 |
321 | If you need to run something only after all migrations have finished their execution, you can do something like this:
322 |
323 | ```js
324 | knex.migrate.latest()
325 | .then(function() {
326 | return knex.seed.run();
327 | })
328 | .then(function() {
329 | // migrations are finished
330 | });
331 | ```
332 |
333 | ### rollback
334 |
335 | **knex.migrate.rollback([config], [all])**
336 |
337 | Rolls back the latest migration group. If the `all` parameter is truthy, all applied migrations will be rolled back instead of just the last batch. The default value for this parameter is `false`.
338 |
339 | ### up
340 |
341 | **knex.migrate.up([config])**
342 |
343 | Runs the specified migration (provided via the `config.name` parameter) or the next chronological migration that has not yet been run.
344 |
345 | ### down
346 |
347 | **knex.migrate.down([config])**
348 |
349 | Undoes the specified migration (provided via the `config.name` parameter) or the last migration that was run.
350 |
351 | ### currentVersion
352 |
353 | **knex.migrate.currentVersion([config])**
354 |
355 | Retrieves and returns the current migration version, as a promise. If no migrations have been run yet, returns "none" as the value for the currentVersion.
356 |
357 | ### list
358 |
359 | **knex.migrate.list([config])**
360 |
361 | Returns a list of completed and pending migrations.
362 |
363 | ### unlock
364 |
365 | **knex.migrate.forceFreeMigrationsLock([config])**
366 |
367 | Forcibly unlocks the migrations lock table, and ensures that there is only one row in it.
368 |
369 | ## Notes about locks
370 |
371 | A lock system is there to prevent multiple processes from running the same migration batch at the same time. When a batch of migrations is about to be run, the migration system first tries to get a lock using a `SELECT ... FOR UPDATE` statement (preventing race conditions from happening). If it can get a lock, the migration batch will run. If it can't, it will wait until the lock is released.
372 |
373 | Please note that if your process unfortunately crashes, the lock will have to be _manually_ removed with `knex migrate:unlock` in order to let migrations run again.
374 |
375 | The locks are saved in a table called "`tableName`\_lock"; it has a column called `is_locked` that `knex migrate:unlock` sets to `0` in order to release the lock. The `index` column in the lock table exists for compatibility with some database clusters that require a primary key, but is otherwise unused. There must be only one row in this table, or an error will be thrown when running migrations: "Migration table is already locked". Run `knex migrate:unlock` to ensure that there is only one row in the table.
376 |
377 | ## Custom migration sources
378 |
379 | Knex supports custom migration sources, allowing you full control of where your migrations come from. This can be useful for custom folder structures, when bundling with webpack/browserify and other scenarios.
380 |
381 | ```js
382 | // Create a custom migration source class
383 | class MyMigrationSource {
384 | // Must return a Promise containing a list of migrations.
385 | // Migrations can be whatever you want,
386 | // they will be passed as arguments to getMigrationName
387 | // and getMigration
388 | getMigrations() {
389 | // In this example we are just returning migration names
390 | return Promise.resolve(['migration1'])
391 | }
392 |
393 | getMigrationName(migration) {
394 | return migration;
395 | }
396 |
397 | getMigration(migration) {
398 | switch(migration) {
399 | case 'migration1':
400 | return {
401 | up(knex) { /* ... */ },
402 | down(knex) { /* ... */ },
403 | }
404 | }
405 | }
406 | }
407 |
408 | // pass an instance of your migration source as knex config
409 | knex.migrate.latest({
410 | migrationSource: new MyMigrationSource()
411 | })
412 | ```
413 |
414 | ### Webpack migration source example
415 |
416 | An example of how to create a migration source where migrations are included in a webpack bundle.
417 |
418 | ```js
419 | const path = require('path')
420 |
421 | class WebpackMigrationSource {
422 | constructor(migrationContext) {
423 | this.migrationContext = migrationContext
424 | }
425 |
426 | getMigrations() {
427 | return Promise.resolve(
428 | this.migrationContext.keys().sort()
429 | )
430 | }
431 |
432 | getMigrationName(migration) {
433 | return path.parse(migration).base
434 | }
435 |
436 | getMigration(migration) {
437 | return this.migrationContext(migration)
438 | }
439 | }
440 |
441 | // pass an instance of your migration source as knex config
442 | knex.migrate.latest({
443 | migrationSource: new WebpackMigrationSource(
444 | require.context('./migrations', false, /.js$/)
445 | )
446 | })
447 |
448 | // with webpack >=5, require.context will add
449 | // both the relative and absolute paths to the context
450 | // to avoid duplicate migration errors, you'll need
451 | // to filter out one or the other this example filters
452 | // out absolute paths, leaving only the relative
453 | // ones(./migrations/*.js):
454 | knex.migrate.latest({
455 | migrationSource: new WebpackMigrationSource(
456 | require.context('./migrations', false, /^\.\/.*\.js$/)
457 | )
458 | })
459 | ```
460 |
461 | ## ECMAScript modules (ESM) Interoperability
462 |
463 | ECMAScript module support for the knex CLI's configuration, migrations and seeds
464 | is enabled by the `--esm` flag. ECMAScript interoperability is provided by the [_'esm'_](https://github.com/standard-things/esm) module.
465 | You can find more information about 'esm' superpowers [here](https://github.com/standard-things/esm).
466 |
467 | Node 'mjs' files are handled by Node.js's own import mechanics
468 | and do not require the use of the '--esm' flag,
469 | but you might need it anyway for Node v10 under certain scenarios.
470 | You can find details about Node.js ECMAScript modules [here](https://nodejs.org/api/esm.html).
471 |
472 | While it is possible to mix and match different module formats (extensions)
473 | between your knexfile, seeds and migrations,
474 | some format combinations will require specific Node.js versions.
475 | _Notably, mjs/cjs files will follow Node.js import and require restrictions._
476 | You can see [here](https://github.com/knex/knex/blob/master/test/cli/esm-interop.spec.js) many possible scenarios,
477 | and [here](https://github.com/knex/knex/tree/master/test/jake-util/knexfile-imports) some sample configurations.
478 |
479 | Node v10.x requires the use of the '--experimental-modules' flag in order to use the 'mjs' or 'cjs' extension.
480 |
481 | ```bash
482 | # launching knex on Node v10 to use mjs/cjs modules
483 | node --experimental-modules ./node_modules/.bin/knex $@
484 | ```
485 |
486 | When using migration and seed files with '.cjs' or '.mjs' extensions, you will need to specify that explicitly:
487 |
488 | ```ts
489 | /**
490 | * knexfile.mjs
491 | */
492 | export default {
493 | migrations: {
494 | // ... client, connection, etc. ...
495 | directory: './migrations',
496 | loadExtensions: ['.mjs'] // load only '.mjs' migration files
497 | }
498 | }
499 | ```
500 |
501 | When using '.mjs' extensions for your knexfile and '.js' for the seeds/migrations, you will need to specify that explicitly.
502 |
503 | ```ts
504 | /**
505 | * knexfile.mjs
506 | */
507 | export default {
508 | migrations: {
509 | // ... client, connection,etc ....
510 | directory: './migrations',
511 | loadExtensions: ['.js'] // knex will search for '.mjs' files by default
512 | }
513 | }
514 | ```
515 |
516 | For the knexfile you can use a default export;
517 | it will take precedence over named exports.
518 |
519 | ```ts
520 | /**
521 | * filename: knexfile.js
522 | * For the knexfile you can use a default export
523 | **/
524 | export default {
525 | client: 'sqlite3',
526 | connection: {
527 | filename: '../test.sqlite3',
528 | },
529 | migrations: {
530 | directory: './migrations',
531 | },
532 | seeds: {
533 | directory: './seeds',
534 | },
535 | }
536 |
537 | /**
538 | * filename: knexfile.js
539 | * Let knex find the configuration by providing named exports,
540 |  * but if a default is exported, it will take precedence and will be used instead
541 | **/
542 | const config = {
543 | client: 'sqlite3',
544 | connection: {
545 | filename: '../test.sqlite3',
546 | },
547 | migrations: {
548 | directory: './migrations',
549 | },
550 | seeds: {
551 | directory: './seeds',
552 | },
553 | };
554 | /** this will be used, it has precedence over named export */
555 | export default config;
556 | /** Named exports, will be used if you didn't provide a default export */
557 | export const { client, connection, migrations, seeds } = config;
558 | ```
559 |
560 | Seed and migration files need to follow Knex conventions:
561 |
562 | ```ts
563 | // file: seed.js
564 | /**
565 | * Same as with the CommonJS modules
566 | * You will need to export a "seed" named function.
567 | * */
568 | export function seed(knex) {
569 | // ... seed logic here
570 | }
571 |
572 | // file: migration.js
573 | /**
574 |  * Same as the CommonJS version, the migration file should export
575 | * "up" and "down" named functions
576 | */
577 | export function up(knex) {
578 | // ... migration logic here
579 | }
580 | export function down(knex) {
581 | // ... migration logic here
582 | }
583 | ```
584 |
585 | ## Seed API
586 |
587 | `knex.seed` is the class utilized by the knex seed CLI.
588 |
589 | Each method takes an optional `config` object, which may specify the following properties:
590 |
591 | - `directory`: a relative path to the directory containing the seed files. Can be an array of paths (default `./seeds`)
592 | - `loadExtensions`: array of file extensions which knex will treat as seeds. For example, if you have typescript transpiled into javascript in the same folder, you want to execute only javascript seeds. In this case, set `loadExtensions` to `['.js']` (Notice the dot!) (default `['.co', '.coffee', '.eg', '.iced', '.js', '.litcoffee', '.ls', '.ts']`)
593 | - `recursive`: if true, will find seed files recursively in the directory / directories specified
594 | - `specific`: a specific seed file or an array of seed files to run from the seeds directory, if its value is `undefined` it will run all the seeds (default `undefined`). If an array is specified, seed files will be run in the same order as the array
595 | - `sortDirsSeparately`: if true and multiple directories are specified, all seeds from a single directory will be executed before executing seeds in the next folder (default `false`)
596 | - `seedSource`: specify a custom seed source, see [Custom Seed Source](#custom-seed-sources) for more info (default filesystem)
597 | - `extension`: extension to be used for newly generated seeds (default `js`)
598 | - `timestampFilenamePrefix`: whether timestamp should be added as a prefix for newly generated seeds (default `false`)
599 |
600 |
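601 | For example, a hypothetical call running only specific seed files from a custom directory:
602 | 
603 | ```js
604 | knex.seed.run({
605 |   directory: './seeds/dev',
606 |   specific: ['01-users.js', '02-accounts.js']
607 | });
608 | ```
609 | 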
601 | ### make
602 |
603 | **knex.seed.make(name, [config])**
604 |
605 | Creates a new seed file, with the given name. If the seed directory config is an array of paths, the seed file will be generated in the last one specified.
606 |
607 | ### run
608 |
609 | **knex.seed.run([config])**
610 |
611 | Runs all seed files for the current environment.
612 |
613 | ## Custom seed sources
614 |
615 | Knex supports custom seed sources, allowing you full control of where your seeds come from. This can be useful for custom folder structures, when bundling with webpack/browserify and other scenarios.
616 |
617 | ```js
618 | // Create a custom seed source class
619 | class MySeedSource {
620 | // Must return a Promise containing a list of seeds.
621 | // Seeds can be whatever you want, they will be passed as
622 | // arguments to getSeed
623 | getSeeds() {
624 | // In this example we are just returning seed names
625 | return Promise.resolve(['seed1'])
626 | }
627 |
628 | getSeed(seed) {
629 | switch(seed) {
630 | case 'seed1':
631 | return (knex) => { /* ... */ }
632 | }
633 | }
634 | }
635 |
636 | // pass an instance of your seed source as knex config
637 | knex.seed.run({ seedSource: new MySeedSource() })
638 | ```
639 |
--------------------------------------------------------------------------------
/src/guide/raw.md:
--------------------------------------------------------------------------------
1 | # Raw
2 |
3 |
4 | Sometimes you may need to use a raw expression in a query. A raw query object may be injected pretty much anywhere you want, and using proper bindings ensures your values are escaped properly, preventing SQL-injection attacks.
5 |
6 | ## Raw Parameter Binding
7 |
8 | One can parameterize the sql given to `knex.raw(sql, bindings)`. Parameters can be positional or named. One can also choose whether a parameter should be treated as a value or as an sql identifier, e.g. in the case of a `'TableName.ColumnName'` reference.
9 |
10 | ```js
11 | knex('users')
12 | .select(knex.raw('count(*) as user_count, status'))
13 | .where(knex.raw(1))
14 | .orWhere(knex.raw('status <> ?', [1]))
15 | .groupBy('status')
16 | ```
17 |
18 | Positional bindings `?` are interpreted as values and `??` are interpreted as identifiers.
19 |
20 | ```js
21 | knex('users').where(knex.raw('?? = ?', ['user.name', 1]))
22 | ```
23 |
24 | Named bindings such as `:name` are interpreted as values and `:name:` interpreted as identifiers. Named bindings are processed so long as the value is anything other than `undefined`.
25 |
26 | ```js
27 | const raw = ':name: = :thisGuy or :name: = :otherGuy or :name: = :undefinedBinding'
28 |
29 | knex('users')
30 | .where(
31 | knex.raw(raw, {
32 | name: 'users.name',
33 | thisGuy: 'Bob',
34 | otherGuy: 'Jay',
35 | undefinedBinding: undefined
36 | }))
37 | ```
38 |
39 | For simpler queries where one only has a single binding, `.raw` can accept said binding as its second parameter.
40 |
41 | ```js
42 | knex('users')
43 | .where(
44 | knex.raw('LOWER("login") = ?', 'knex')
45 | )
46 | .orWhere(
47 | knex.raw('accesslevel = ?', 1)
48 | )
49 | .orWhere(
50 | knex.raw('updtime = ?', '01-01-2016')
51 | )
52 | ```
53 |
54 | Since there is no unified syntax for array bindings, you instead need to treat them as multiple values by adding `?` placeholders directly in your query.
55 |
56 | ```js
57 | const myArray = [1,2,3]
58 | knex.raw('select * from users where id in (' + myArray.map(_ => '?').join(',') + ')', [...myArray]);
59 | ```
60 | 
61 | The query will become:
62 |
63 | ```sql
64 | select * from users where id in (?, ?, ?) /* with bindings [1,2,3] */
65 | ```
66 |
67 | To prevent replacement of `?` one can use the escape sequence `\\?`.
68 |
69 | ```js
70 | knex.select('*')
71 | .from('users')
72 | .where('id', '=', 1)
73 | .whereRaw('?? \\? ?', ['jsonColumn', 'jsonKey'])
74 | ```
75 |
76 | To prevent replacement of named bindings one can use the escape sequence `\\:`.
77 |
78 | ```js
79 | knex.select('*')
80 | .from('users')
81 | .whereRaw(":property: = '\\:value' OR \\:property: = :value", {
82 | property: 'name',
83 | value: 'Bob'
84 | })
85 | ```
86 |
87 | ## Raw Expressions
88 |
89 | Raw expressions are created by using `knex.raw(sql, [bindings])` and passing this as a value for any value in the query chain.
90 |
91 | ```js
92 | knex('users').select(knex.raw('count(*) as user_count, status'))
93 | .where(knex.raw(1))
94 | .orWhere(knex.raw('status <> ?', [1]))
95 | .groupBy('status')
96 | ```
97 |
98 | ## Raw Queries
99 |
100 | The `knex.raw` may also be used to build a full query and execute it, as a standard query builder query would be executed. The benefit of this is that it uses the connection pool and provides a standard interface for the different client libraries.
101 |
102 | ```js
103 | knex.raw('select * from users where id = ?', [1])
104 | .then(function(resp) { /*...*/ });
105 | ```
106 |
107 | Note that the response will be whatever the underlying sql library would typically return on a normal query, so you may need to look at the documentation for the base library the queries are executing against to determine how to handle the response.
108 |
109 | ## Wrapped Queries
110 |
111 | The raw query builder also comes with a `wrap` method, which allows wrapping the query in a value:
112 |
113 | ```js
114 | const subcolumn = knex.raw(
115 | 'select avg(salary) from employee where dept_no = e.dept_no'
116 | )
117 | .wrap('(', ') avg_sal_dept');
118 |
119 | knex.select('e.lastname', 'e.salary', subcolumn)
120 | .from('employee as e')
121 | .whereRaw('dept_no = e.dept_no')
122 | ```
123 |
124 | Note that the example above can be achieved more easily using the [as](/guide/query-builder#as) method.
125 |
126 | ```js
127 | const subcolumn = knex.avg('salary')
128 | .from('employee')
129 | .whereRaw('dept_no = e.dept_no')
130 | .as('avg_sal_dept');
131 |
132 | knex.select('e.lastname', 'e.salary', subcolumn)
133 | .from('employee as e')
134 | .whereRaw('dept_no = e.dept_no')
135 | ```
136 |
--------------------------------------------------------------------------------
/src/guide/ref.md:
--------------------------------------------------------------------------------
1 | # Ref
2 |
3 |
4 | Can be used to create references in a query, such as column or table names. This is a good and shorter alternative to using `knex.raw('??', 'tableName.columnName')`, which essentially does the same thing.
5 |
6 | ## Usage
7 |
8 | `knex.ref` can be used essentially anywhere in a build-chain. Here is an example:
9 |
10 | ```js
11 | knex(knex.ref('Users').withSchema('TenantId'))
12 | .where(knex.ref('Id'), 1)
13 | .orWhere(knex.ref('Name'), 'Admin')
14 | .select(['Id', knex.ref('Name').as('Username')])
15 | ```
16 |
17 |
21 |
22 | ### withSchema
23 |
24 | The Ref function supports schema using `.withSchema(string)`:
25 |
26 | ```js
27 | knex(knex.ref('users').withSchema('TenantId')).select()
28 | ```
29 |
30 | ### alias
31 |
32 | Alias is supported using `.alias(string)`
33 |
34 | ```js
35 | knex('users')
36 | .select(knex.ref('Id').as('UserId'))
37 | ```
38 |
39 |
41 |
--------------------------------------------------------------------------------
/src/guide/schema-builder.md:
--------------------------------------------------------------------------------
1 |
2 | # Schema Builder
3 |
4 | The `knex.schema` is a **getter function**, which returns a stateful object containing the query. Therefore be sure to obtain a new instance of `knex.schema` for every query. These methods return [promises](/guide/interfaces.html#promises).
5 |
6 | ## Essentials
7 |
8 | ### withSchema
9 |
10 | **knex.schema.withSchema([schemaName])**
11 |
12 | Specifies the schema to be used when using the schema-building commands.
13 |
14 | ```js
15 | knex.schema.withSchema('public').createTable('users', function (table) {
16 | table.increments();
17 | })
18 | ```
19 |
20 | ### createTable
21 |
22 | **knex.schema.createTable(tableName, callback)**
23 |
24 | Creates a new table on the database, with a callback function to modify the table's structure, using the schema-building commands.
25 |
26 | ```js
27 | knex.schema.createTable('users', function (table) {
28 | table.increments();
29 | table.string('name');
30 | table.timestamps();
31 | })
32 | ```
33 |
34 | ### createTableLike
35 |
36 | **knex.schema.createTableLike(tableName, tableNameToCopy, [callback])**
37 |
38 | Creates a new table on the database based on another table. It copies only the structure (columns, keys and indexes; except on SQL Server, which copies only the columns) and not the data. A callback function can be specified to add columns to the duplicated table.
39 |
40 | ```js
41 | knex.schema.createTableLike('new_users', 'users')
42 |
43 | // "new_users" table contains columns
44 | // of users and two new columns 'age' and 'last_name'.
45 | knex.schema.createTableLike('new_users', 'users', (table) => {
46 | table.integer('age');
47 | table.string('last_name');
48 | })
49 | ```
50 |
51 | ### dropTable
52 |
53 | **knex.schema.dropTable(tableName)**
54 |
55 | Drops a table, specified by tableName.
56 |
57 | ```js
58 | knex.schema.dropTable('users')
59 | ```
60 |
61 | ### dropTableIfExists
62 |
63 | **knex.schema.dropTableIfExists(tableName)**
64 |
65 | Drops a table conditionally if the table exists, specified by tableName.
66 |
67 | ```js
68 | knex.schema.dropTableIfExists('users')
69 | ```
70 |
71 | ### renameTable
72 |
73 | **knex.schema.renameTable(from, to)**
74 |
75 | Renames a table from its current tableName to another.
76 |
77 | ```js
78 | knex.schema.renameTable('old_users', 'users')
79 | ```
80 |
81 | ### hasTable
82 |
83 | **knex.schema.hasTable(tableName)**
84 |
85 | Checks for a table's existence by tableName, resolving with a boolean to signal if the table exists.
86 |
87 | ```js
88 | knex.schema.hasTable('users').then(function(exists) {
89 | if (!exists) {
90 | return knex.schema.createTable('users', function(t) {
91 | t.increments('id').primary();
92 | t.string('first_name', 100);
93 | t.string('last_name', 100);
94 | t.text('bio');
95 | });
96 | }
97 | });
98 | ```
99 |
100 | ### hasColumn
101 |
102 | **knex.schema.hasColumn(tableName, columnName)**
103 |
104 | Checks if a column exists in the current table. Resolves the promise with a boolean: true if the column exists, false otherwise.
105 |
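106 | For example, mirroring the hasTable example above (the table and column names are illustrative):
107 | 
108 | ```js
109 | knex.schema.hasColumn('users', 'first_name').then(function (exists) {
110 |   if (!exists) {
111 |     return knex.schema.table('users', function (t) {
112 |       t.string('first_name', 100);
113 |     });
114 |   }
115 | });
116 | ```
117 | 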
106 | ### table
107 |
108 | **knex.schema.table(tableName, callback)**
109 |
110 | Chooses a database table, and then modifies the table, using the Schema Building functions inside of the callback.
111 |
112 | ```js
113 | knex.schema.table('users', function (table) {
114 | table.dropColumn('name');
115 | table.string('first_name');
116 | table.string('last_name');
117 | })
118 | ```
119 |
120 | ### alterTable
121 |
122 | **knex.schema.alterTable(tableName, callback)**
123 |
124 | Chooses a database table, and then modifies the table, using the Schema Building functions inside of the callback.
125 |
126 | ```js
127 | knex.schema.alterTable('users', function (table) {
128 | table.dropColumn('name');
129 | table.string('first_name');
130 | table.string('last_name');
131 | })
132 | ```
133 |
134 | ### createView
135 |
136 | **knex.schema.createView(tableName, callback)**
137 |
138 | Creates a new view on the database, with a callback function to modify the view's structure, using the schema-building commands.
139 |
140 | ```js
141 | knex.schema.createView('users_view', function (view) {
142 | view.columns(['first_name']);
143 | view.as(knex('users').select('first_name').where('age','>', '18'));
144 | })
145 | ```
146 |
147 | ### createViewOrReplace
148 |
149 | **knex.schema.createViewOrReplace(tableName, callback)**
150 |
151 | Creates a new view on the database, or replaces an existing one, with a callback function to modify the view's structure, using the schema-building commands. You need to specify at least the same columns in the same order (you can add extra columns). In SQLite, this function generates drop/create view queries (so the view columns can be different).
152 |
153 | ```js
154 | knex.schema.createViewOrReplace('users_view', function (view) {
155 | view.columns(['first_name']);
156 | view.as(knex('users').select('first_name').where('age','>', '18'));
157 | })
158 | ```
159 |
160 | ### createMaterializedView
161 |
162 | **knex.schema.createMaterializedView(viewName, callback)**
163 |
164 | Creates a new materialized view on the database, with a callback function to modify the view's structure, using the schema-building commands. Only on PostgreSQL, CockroachDB, Redshift and Oracle.
165 |
166 | ```js
167 | knex.schema.createMaterializedView('users_view', function (view) {
168 | view.columns(['first_name']);
169 | view.as(knex('users').select('first_name').where('age','>', '18'));
170 | })
171 | ```
172 |
173 | ### refreshMaterializedView
174 |
175 | **knex.schema.refreshMaterializedView(viewName)**
176 |
177 | Refreshes a materialized view on the database. Only on PostgreSQL, CockroachDB, Redshift and Oracle.
178 |
179 | ```js
180 | knex.schema.refreshMaterializedView('users_view')
181 | ```
182 |
183 | ### dropView
184 |
185 | **knex.schema.dropView(viewName)**
186 |
187 | Drops a view from the database.
188 |
189 | ```js
190 | knex.schema.dropView('users_view')
191 | ```
192 |
193 | ### dropViewIfExists
194 |
195 | **knex.schema.dropViewIfExists(viewName)**
196 |
197 | Drops a view from the database if it exists.
198 |
199 | ```js
200 | knex.schema.dropViewIfExists('users_view')
201 | ```
202 |
203 | ### dropMaterializedView
204 |
205 | **knex.schema.dropMaterializedView(viewName)**
206 |
207 | Drops a materialized view from the database. Only on PostgreSQL, CockroachDB, Redshift and Oracle.
208 |
209 | ```js
210 | knex.schema.dropMaterializedView('users_view')
211 | ```
212 |
213 | ### dropMaterializedViewIfExists
214 |
215 | **knex.schema.dropMaterializedViewIfExists(viewName)**
216 |
217 | Drops a materialized view from the database if it exists. Only on PostgreSQL, CockroachDB, Redshift and Oracle.
218 |
219 | ```js
220 | knex.schema.dropMaterializedViewIfExists('users_view')
221 | ```
222 |
223 | ### renameView
224 |
225 | **knex.schema.renameView(viewName)**
226 |
227 | Renames an existing view in the database. Not supported by Oracle and SQLite.
228 |
229 | ```js
230 | knex.schema.renameView('users_view')
231 | ```
232 |
233 | ### alterView
234 |
235 | **knex.schema.alterView(viewName)**
236 |
237 | Alters a view to rename columns or change default values. Only available on PostgreSQL, MSSQL and Redshift.
238 |
239 | ```js
240 | knex.schema.alterView('view_test', function (view) {
241 | view.column('first_name').rename('name_user');
242 | view.column('bio').defaultTo('empty');
243 | })
244 | ```
245 |
246 | ### generateDdlCommands
247 |
248 | **knex.schema.generateDdlCommands()**
249 |
250 | Generates complete SQL commands for applying the described schema changes, without executing anything. Useful when knex is being used purely as a query builder. Generally produces the same result as .toSQL(), with the notable exception of SQLite, which relies on asynchronous calls to the database for building part of its schema modification statements.
251 |
252 | ```js
253 | const ddlCommands = knex.schema.alterTable(
254 | 'users',
255 | (table) => {
256 | table
257 | .foreign('companyId')
258 | .references('company.companyId')
259 | .withKeyName('fk_fkey_company');
260 | }
261 | ).generateDdlCommands();
262 | ```
263 |
264 | ### raw
265 |
266 | **knex.schema.raw(statement)**
267 |
268 | Run an arbitrary sql query in the schema builder chain.
269 |
270 | ```js
271 | knex.schema.raw("SET sql_mode='TRADITIONAL'")
272 | .table('users', function (table) {
273 | table.dropColumn('name');
274 | table.string('first_name');
275 | table.string('last_name');
276 | })
277 | ```
278 |
279 | ### queryContext
280 |
281 | **knex.schema.queryContext(context)**
282 |
283 | Allows configuring a context to be passed to the [wrapIdentifier](/guide/#wrapidentifier) hook. The context can be any kind of value and will be passed to `wrapIdentifier` without modification.
284 |
285 | ```js
286 | knex.schema.queryContext({ foo: 'bar' })
287 | .table('users', function (table) {
288 | table.string('first_name');
289 | table.string('last_name');
290 | })
291 | ```
292 |
293 | The context configured will be passed to `wrapIdentifier` for each identifier that needs to be formatted, including the table and column names. However, a different context can be set for the column names via [table.queryContext](/guide/query-builder#querycontext).
294 |
295 | Calling `queryContext` with no arguments will return any context configured for the schema builder instance.
296 |
297 | ### dropSchema
298 |
299 | **knex.schema.dropSchema(schemaName, [cascade])**
300 |
301 | Drops a schema, specified by the schema's name, with an optional cascade option (defaults to false). Only supported by PostgreSQL.
302 |
303 | ```js
304 | //drop schema 'public'
305 | knex.schema.dropSchema('public')
306 | //drop schema 'public' cascade
307 | knex.schema.dropSchema('public', true)
308 | ```
309 |
310 | ### dropSchemaIfExists
311 |
312 | **knex.schema.dropSchemaIfExists(schemaName, [cascade])**
313 |
314 | Drops a schema conditionally if the schema exists, specified by the schema's name, with an optional cascade option (defaults to false). Only supported by PostgreSQL.
315 |
316 | ```js
317 | //drop schema if exists 'public'
318 | knex.schema.dropSchemaIfExists('public')
319 | //drop schema if exists 'public' cascade
320 | knex.schema.dropSchemaIfExists('public', true)
321 | ```
322 |
323 | ## Schema Building
324 |
325 | ### dropColumn
326 |
327 | **table.dropColumn(name)**
328 |
329 | Drops a column, specified by the column's name
330 |
331 | ### dropColumns
332 |
333 | **table.dropColumns(columns)**
334 |
335 | Drops multiple columns, taking a variable number of column names.
336 |
337 | ### renameColumn
338 |
339 | **table.renameColumn(from, to)**
340 |
341 | Renames a column from one name to another.
342 |
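343 | A short sketch combining the three operations above inside an alterTable call (the column names are illustrative):
344 | 
345 | ```js
346 | knex.schema.alterTable('users', function (table) {
347 |   table.dropColumn('middle_name');
348 |   table.dropColumns('legacy_a', 'legacy_b');
349 |   table.renameColumn('surname', 'last_name');
350 | });
351 | ```
352 | 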
343 | ### increments
344 |
345 | **table.increments(name, options={[primaryKey: boolean = true]})**
346 |
347 | Adds an auto incrementing column. In PostgreSQL this is a serial; in Amazon Redshift an integer identity(1,1). This will be used as the primary key for the table if the column isn't part of another primary key. A bigIncrements variant is also available if you wish to add a bigint incrementing number (in PostgreSQL, a bigserial). Note that a primary key is created by default if the column isn't part of a primary key (defined with the primary function), but you can override this behaviour by passing the `primaryKey` option. If you use this function together with the primary function, the column is added to the composite primary key. With SQLite, an autoincrement column needs to be a primary key, so if the primary function is used, the primary key is transformed into a unique index. MySQL doesn't support autoincrement columns without a primary key, so multiple queries are generated to create an int column, add the increments column to the composite primary key, then modify the column into an autoincrement column.
348 |
349 | ```js
350 | // create table 'users'
351 | // with a primary key using 'increments()'
352 | knex.schema.createTable('users', function (table) {
353 | table.increments('userId');
354 | table.string('name');
355 | });
356 |
357 | // create table 'users'
358 | // with a composite primary key ('userId', 'name').
359 | // increments doesn't generate primary key.
360 | knex.schema.createTable('users', function (table) {
361 | table.primary(['userId', 'name']);
362 | table.increments('userId');
363 | table.string('name');
364 | });
365 |
366 | // reference the 'users' primary key in new table 'posts'
367 | knex.schema.createTable('posts', function (table) {
368 | table.integer('author').unsigned().notNullable();
369 | table.string('title', 30);
370 | table.string('content');
371 |
372 | table.foreign('author').references('userId').inTable('users');
373 | });
374 | ```
375 |
376 | A primaryKey option may be passed, to disable the automatic primary key creation:
377 |
378 | ```js
379 | // create table 'users'
380 | // with a primary key using 'increments()'
381 | // but also increments field 'other_id'
382 | // that does not need primary key
383 | knex.schema.createTable('users', function (table) {
384 | table.increments('id');
385 | table.increments('other_id', { primaryKey: false });
386 | });
387 | ```
388 |
389 | ### integer
390 |
391 | **table.integer(name, length)**
392 |
393 | Adds an integer column. On PostgreSQL you cannot adjust the length; you need to use another option such as bigInteger, etc.
394 |
395 | ### bigInteger
396 |
397 | **table.bigInteger(name)**
398 |
399 | In MySQL or PostgreSQL, adds a bigint column, otherwise adds a normal integer. Note that bigint data is returned as a string in queries because JavaScript may be unable to parse them without loss of precision.
400 |
401 | ### tinyint
402 |
403 | **table.tinyint(name, length)**
404 |
405 | Adds a tinyint column
406 |
407 | ### smallint
408 |
409 | **table.smallint(name)**
410 |
411 | Adds a smallint column
412 |
413 | ### mediumint
414 |
415 | **table.mediumint(name)**
416 |
417 | Adds a mediumint column
418 |
419 | ### bigint
420 |
421 | **table.bigint(name)**
422 |
423 | Adds a bigint column
424 |
425 | ### text
426 |
427 | **table.text(name, [textType])**
428 |
429 | Adds a text column, with optional textType for the MySQL text datatype preference. textType may be mediumtext or longtext; otherwise it defaults to text.
430 |
431 | ### string
432 |
433 | **table.string(name, [length])**
434 |
435 | Adds a string column, with optional length defaulting to 255.
436 |
437 | ### float
438 |
439 | **table.float(column, [precision], [scale])**
440 |
441 | Adds a float column, with optional precision (defaults to 8) and scale (defaults to 2).
442 |
443 | ### double
444 |
445 | **table.double(column, [precision], [scale])**
446 |
447 | Adds a double column, with optional precision (defaults to 8) and scale (defaults to 2). In SQLite/MSSQL this is a float with no precision/scale; In PostgreSQL this is a double precision; In Oracle this is a number with matching precision/scale.
448 |
449 | ### decimal
450 |
451 | **table.decimal(column, [precision], [scale])**
452 |
453 | Adds a decimal column, with optional precision (defaults to 8) and scale (defaults to 2). Specifying NULL as precision creates a decimal column that can store numbers of any precision and scale. (Only supported for Oracle, SQLite, Postgres)
454 |
455 | ### boolean
456 |
457 | **table.boolean(name)**
458 |
459 | Adds a boolean column.
460 |
461 | ### date
462 |
463 | **table.date(name)**
464 |
465 | Adds a date column.
466 |
467 | ### datetime
468 |
469 | **table.datetime(name, options={[useTz: boolean], [precision: number]})**
470 |
471 | Adds a datetime column. By default PostgreSQL creates the column with timezone (timestamptz type). This behaviour can be overridden by passing the useTz option (which is true by default for PostgreSQL). MySQL and MSSQL do not have a useTz option.
472 |
473 | A precision option may be passed:
474 |
475 | ```js
476 | table.datetime('some_time', { precision: 6 }).defaultTo(knex.fn.now(6))
477 | ```
478 |
479 | ### time
480 |
481 | **table.time(name, [precision])**
482 |
483 | Adds a time column, with optional precision for MySQL. Not supported on Amazon Redshift.
484 |
485 | In MySQL a precision option may be passed:
486 |
487 | ```js
488 | table.time('some_time', { precision: 6 })
489 | ```
490 |
491 | ### timestamp
492 |
493 | **table.timestamp(name, options={[useTz: boolean], [precision: number]})**
494 |
495 | Adds a timestamp column. By default PostgreSQL creates the column with timezone (timestamptz type) and MSSQL does not (datetime2). This behaviour can be overridden by passing the useTz option (which is false by default for MSSQL and true for PostgreSQL). MySQL does not have a useTz option.
496 |
497 | ```js
498 | table.timestamp('created_at').defaultTo(knex.fn.now());
499 | ```
500 |
501 | In PostgreSQL and MySQL a precision option may be passed:
502 |
503 | ```js
504 | table.timestamp('created_at', { precision: 6 }).defaultTo(knex.fn.now(6));
505 | ```
506 |
507 | In PostgreSQL and MSSQL a timezone option may be passed:
508 |
509 | ```js
510 | table.timestamp('created_at', { useTz: true });
511 | ```
512 |
513 | ### timestamps
514 |
515 | **table.timestamps([useTimestamps], [defaultToNow], [useCamelCase])**
516 |
517 | Adds created\_at and updated\_at columns on the database, setting each to datetime types. When true is passed as the first argument, a timestamp type is used instead. Both columns default to being not null and to the current timestamp when true is passed as the second argument. Note that on MySQL the .timestamps() columns only have seconds precision; to get better precision, use the .datetime or .timestamp methods directly with the precision option. If useCamelCase is true, the columns are named createdAt and updatedAt.
518 |
519 | ::: info
520 | PostgreSQL `updated_at` field will not automatically be updated. Please see this [issue](https://github.com/knex/knex/issues/1928 "issue") for details
521 | :::
522 |
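523 | For example, a minimal sketch using timestamp columns that are not null and default to the current time:
524 | 
525 | ```js
526 | knex.schema.createTable('posts', function (table) {
527 |   table.increments();
528 |   // use a timestamp type, not null, defaulting to now
529 |   table.timestamps(true, true);
530 | });
531 | ```
532 | 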
523 | ### dropTimestamps
524 |
525 | **table.dropTimestamps([useCamelCase])**
526 |
527 | Drops the created\_at and updated\_at columns from the table, which can be created via timestamps. If useCamelCase is true, the columns are named createdAt and updatedAt.
528 |
529 | ### binary
530 |
531 | **table.binary(name, [length])**
532 |
533 | Adds a binary column, with optional length argument for MySQL.
534 |
535 | ### enum / enu
536 |
537 | **table.enu(col, values, [options])**
538 |
539 | Adds an enum column (aliased to enu, as enum is a reserved word in JavaScript). Implemented as an unchecked varchar(255) on Amazon Redshift. Note that the second argument is an array of values. Example:
540 |
541 | ```js
542 | table.enu('column', ['value1', 'value2'])
543 | ```
544 |
545 | For Postgres, an additional options argument can be provided to specify whether or not to use Postgres's native TYPE:
546 |
547 | ```js
548 | table.enu('column', ['value1', 'value2'], { useNative: true, enumName: 'foo_type' })
549 | ```
550 |
551 | It will use the values provided to generate the appropriate TYPE. Example:
552 |
553 | ```sql
554 | CREATE TYPE "foo_type" AS ENUM ('value1', 'value2');
555 | ```
556 |
557 | To use an existing native type across columns, specify 'existingType' in the options (this assumes the type has already been created):
558 |
559 | ::: info
560 | Since the enum values aren't utilized for a native and existing type, the argument passed in for values is immaterial.
561 | :::
562 |
563 | ```js
564 | table.enu('column', null, { useNative: true, existingType: true, enumName: 'foo_type' })
565 | ```
566 |
567 | If you want to use existing enums from a schema different from the schema of your current table, specify 'schemaName' in the options:
568 |
569 | ```js
570 | table.enu('column', null, { useNative: true, existingType: true, enumName: 'foo_type', schemaName: 'public' })
571 | ```
572 |
573 | Knex does not provide any way to alter enumerations after creation. To change an enumeration later on, you must use Knex.raw with the appropriate command for your database.
574 |
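575 | For example, on PostgreSQL with a native type, adding a value to the `foo_type` enum created above might look like this sketch:
576 | 
577 | ```js
578 | // PostgreSQL-specific DDL; adjust the statement for your database
579 | knex.raw("ALTER TYPE foo_type ADD VALUE 'value3'");
580 | ```
581 | 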
575 | ### json
576 |
577 | **table.json(name)**
578 |
579 | Adds a json column, using the built-in json type in PostgreSQL, MySQL and SQLite, defaulting to a text column in older versions or in unsupported databases.
580 |
581 | For PostgreSQL, due to incompatibility between native array and json types, when setting an array (or a value that could be an array) as the value of a json or jsonb column, you should use JSON.stringify() to convert your value to a string prior to passing it to the query builder, e.g.
582 |
583 | ```js
584 | knex.table('users')
585 | .where({id: 1})
586 | .update({json_data: JSON.stringify(mightBeAnArray)});
587 | ```
588 |
589 | ### jsonb
590 |
591 | **table.jsonb(name)**
592 |
593 | Adds a jsonb column. Works similarly to table.json(), but uses the native jsonb type if possible.
594 |
595 | ### uuid
596 |
597 | **table.uuid(name, options={[useBinaryUuid: boolean], [primaryKey: boolean]})**
598 |
599 | Adds a uuid column - this uses the built-in uuid type in PostgreSQL, and falls back to a char(36) in other databases by default.
600 | If useBinaryUuid is true, binary(16) is used. See the uuidToBin function to convert a uuid to binary before inserting, and binToUuid to convert a binary uuid back to a uuid.
601 | If primaryKey is true, then for PostgreSQL the field will be configured as `uuid primary key`; for CockroachDB an additional `default gen_random_uuid()` is set on the type.
602 |
603 | You may set the default value to the uuid helper function. Not supported by Redshift.
604 |
605 | ```js
606 | knex.schema.createTable(tblName, (table) => {
607 | table.uuid('uuidColumn').defaultTo(knex.fn.uuid());
608 | });
609 | ```
610 |
611 | ### geometry
612 |
613 | **table.geometry(name)**
614 |
615 | Adds a geometry column. Supported by SQLite, MSSQL and PostgreSQL.
616 |
617 | ```js
618 | knex.schema.createTable(tblName, (table) => {
619 | table.geometry('geometryColumn');
620 | });
621 | ```
622 |
623 | ### geography
624 |
625 | **table.geography(name)**
626 |
627 | Adds a geography column. Supported by SQLite, MSSQL and PostgreSQL (in PostGIS extension).
628 |
629 | ```js
630 | knex.schema.createTable(tblName, (table) => {
631 | table.geography('geographyColumn');
632 | });
633 | ```
634 |
635 | ### point
636 |
637 | **table.point(name)**
638 |
639 | Adds a point column. Not supported by CockroachDB and MSSQL.
640 |
641 | ```js
642 | knex.schema.createTable(tblName, (table) => {
643 | table.point('pointColumn');
644 | });
645 | ```
646 |
647 | ### comment
648 |
649 | **table.comment(value)**
650 |
651 | Sets the comment for a table.
652 |
653 | ### engine
654 |
655 | **table.engine(val)**
656 |
657 | Sets the engine for the database table, only available within a createTable call, and only applicable to MySQL.
658 |
659 | ### charset
660 |
661 | **table.charset(val)**
662 |
663 | Sets the charset for the database table, only available within a createTable call, and only applicable to MySQL.
664 |
665 | ### collate
666 |
667 | **table.collate(val)**
668 |
669 | Sets the collation for the database table, only available within a createTable call, and only applicable to MySQL.
670 |
671 | ### inherits
672 |
673 | **table.inherits(val)**
674 |
675 | Sets the tables that this table inherits from, only available within a createTable call, and only applicable to PostgreSQL.
676 |
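677 | A sketch combining the table-level options above in a single createTable call (engine, charset and collate are MySQL-only; the values are illustrative):
678 | 
679 | ```js
680 | knex.schema.createTable('users', function (table) {
681 |   table.engine('InnoDB');
682 |   table.charset('utf8mb4');
683 |   table.collate('utf8mb4_unicode_ci');
684 |   table.comment('Application users');
685 |   table.increments();
686 | });
687 | ```
688 | 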
677 | ### specificType
678 |
679 | **table.specificType(name, type)**
680 |
681 | Sets a specific type for the column creation, if you'd like to add a column type that isn't supported here.
682 |
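683 | For example (the 'inet' type is PostgreSQL-specific and used purely as an illustration):
684 | 
685 | ```js
686 | knex.schema.createTable('servers', function (table) {
687 |   table.increments();
688 |   table.specificType('ip_address', 'inet');
689 | });
690 | ```
691 | 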
683 | ### index
684 |
685 | **table.index(columns, [indexName], options=({[indexType: string], [storageEngineIndexType: 'btree'|'hash'], [predicate: QueryBuilder]}))**
686 |
687 | Adds an index to a table over the given columns. A default index name using the columns is used unless indexName is specified. In MySQL, the storage engine index type may be 'btree' or 'hash'; more info in the Index Options section: [https://dev.mysql.com/doc/refman/8.0/en/create-index.html](https://dev.mysql.com/doc/refman/8.0/en/create-index.html). The indexType can be optionally specified for PostgreSQL and MySQL. Amazon Redshift does not allow creating an index. In PostgreSQL, SQLite and MSSQL a partial index can be specified by setting a 'where' predicate.
688 |
689 | ```js
690 | knex.table('users', function (table) {
691 | table.index(['name', 'last_name'], 'idx_name_last_name', {
692 | indexType: 'FULLTEXT',
693 | storageEngineIndexType: 'hash',
694 | predicate: knex.whereNotNull('email'),
695 | });
696 | });
697 | ```
698 |
699 | ### dropIndex
700 |
701 | **table.dropIndex(columns, [indexName])**
702 |
703 | Drops an index from a table. A default index name using the columns is used unless indexName is specified (in which case columns is ignored). Amazon Redshift does not allow creating an index.
704 |
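705 | For example, dropping the index created in the index example above by its name (the columns are ignored when indexName is given):
706 | 
707 | ```js
708 | knex.schema.alterTable('users', function (table) {
709 |   table.dropIndex(['name', 'last_name'], 'idx_name_last_name');
710 | });
711 | ```
712 | 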
705 | ### setNullable
706 |
707 | **table.setNullable(column)**
708 |
709 | Makes table column nullable.
710 |
711 | ### dropNullable
712 |
713 | **table.dropNullable(column)**
714 |
715 | Makes table column not nullable. Note that this operation will fail if there are already null values in this column.
716 |
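717 | A short sketch of both operations (the column names are illustrative):
718 | 
719 | ```js
720 | knex.schema.alterTable('users', function (table) {
721 |   // allow NULL values in 'middle_name'
722 |   table.setNullable('middle_name');
723 |   // disallow NULL values in 'email'; fails if NULLs are already present
724 |   table.dropNullable('email');
725 | });
726 | ```
727 | 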
717 | ### primary
718 |
719 | **table.primary(columns, options={[constraintName: string], [deferrable: 'not deferrable'|'deferred'|'immediate']})**
720 |
721 | Creates a primary key constraint on the table using the input `columns`. If you need to create a composite primary key, pass an array of columns to `columns`. The constraint name defaults to `tablename_pkey` unless `constraintName` is specified. On Amazon Redshift, all columns included in a primary key must be not nullable. Deferrable primary constraints are supported on Postgres and Oracle and can be set by passing the deferrable option in the options object.
722 |
723 | ```js
724 | knex.schema.alterTable('users', function(t) {
725 | t.primary('email')
726 | })
727 | knex.schema.alterTable('job', function(t) {
728 | t.primary('email',{constraintName:'users_primary_key',deferrable:'deferred'})
729 | })
730 | ```
731 |
732 | ::: info
733 | If you want to chain primary() while creating new column you can use [primary](#primary-1)
734 | :::
735 |
736 | ### unique
737 |
738 | **table.unique(columns, options={[indexName: string], [deferrable:'not deferrable'|'immediate'|'deferred'], [storageEngineIndexType:'btree'|'hash'], [useConstraint:true|false], [predicate: QueryBuilder]})**
739 |
740 | Adds a unique index to a table over the given `columns`. In MySQL, the storage engine index type may be 'btree' or 'hash'; more info in the Index Options section: [https://dev.mysql.com/doc/refman/8.0/en/create-index.html](https://dev.mysql.com/doc/refman/8.0/en/create-index.html). A default index name using the columns is used unless indexName is specified. If you need to create a composite index, pass an array of columns to `columns`. Deferrable unique constraints are supported on Postgres and Oracle and can be set by passing the deferrable option in the options object. In MSSQL and Postgres, you can set the `useConstraint` option to true to create a unique constraint instead of a unique index (defaults to false for MSSQL, true for Postgres without `predicate`, false for Postgres with `predicate`). In PostgreSQL, SQLite and MSSQL a partial unique index can be specified by setting a 'where' predicate.
741 |
742 | ```js
743 | knex.schema.alterTable('users', function(t) {
744 | t.unique('email')
745 | })
746 | knex.schema.alterTable('job', function(t) {
747 | t.unique(['account_id', 'program_id'], {indexName: 'job_composite_index', deferrable: 'deferred', storageEngineIndexType: 'hash'})
748 | })
749 | knex.schema.alterTable('job', function(t) {
750 | t.unique(['account_id', 'program_id'], {indexName: 'job_composite_index', useConstraint: true})
751 | })
752 | knex.schema.alterTable('job', function(t) {
753 | t.unique(['account_id', 'program_id'], {indexName: 'job_composite_index', predicate: knex.whereNotNull('account_id')})
754 | })
755 | ```
756 |
757 | ::: info
758 | If you want to chain unique() while creating new column you can use [unique](#unique-1)
759 | :::
760 |
761 | ### foreign
762 |
763 | **table.foreign(columns, [foreignKeyName])[.onDelete(statement).onUpdate(statement).withKeyName(foreignKeyName).deferrable(type)]**
764 |
765 | Adds a foreign key constraint to a table for an existing column using `table.foreign(column).references(column)` or multiple columns using `table.foreign(columns).references(columns).inTable(table)`.
766 |
767 | A default key name using the columns is used unless `foreignKeyName` is specified.
768 |
769 | You can also chain `onDelete()` and/or `onUpdate()` to set the reference option (RESTRICT, CASCADE, SET NULL, NO ACTION) for the operation. You can also chain `withKeyName()` to override the default key name that is generated from the table and column names (the result is identical to specifying the second parameter to the `foreign()` function).
770 |
771 | Deferrable foreign constraints are supported on Postgres and Oracle and can be set by chaining `.deferrable(type)`.
772 |
773 | Note that using `foreign()` is the same as `column.references(column)` but it works for existing columns.
774 |
775 | ```js
776 | knex.schema.table('users', function (table) {
777 | table.integer('user_id').unsigned()
778 | table.foreign('user_id').references('Items.user_id_in_items').deferrable('deferred')
779 | })
780 | ```
781 |
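782 | The reference options and the key name can also be set by chaining. A minimal sketch, assuming hypothetical `orders` and `users` tables:
783 | 
784 | ```js
785 | knex.schema.table('orders', function (table) {
786 |   table.integer('user_id').unsigned()
787 |   table.foreign('user_id')
788 |     .references('users.id')
789 |     .withKeyName('fk_orders_user')
790 |     .onDelete('CASCADE')
791 |     .onUpdate('CASCADE')
792 | })
793 | ```
794 | 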
782 | ### dropForeign
783 |
784 | **table.dropForeign(columns, [foreignKeyName])**
785 |
786 | Drops a foreign key constraint from a table. A default foreign key name using the columns is used unless foreignKeyName is specified (in which case columns is ignored).
787 |
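788 | For example, a sketch that drops the hypothetical foreign key created above:
789 | 
790 | ```js
791 | knex.schema.alterTable('orders', function (table) {
792 |   // by column name; the default generated key name is used
793 |   table.dropForeign('user_id')
794 |   // or by explicit key name (the columns argument is then ignored):
795 |   // table.dropForeign('user_id', 'fk_orders_user')
796 | })
797 | ```
798 | 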
788 | ### dropUnique
789 |
790 | **table.dropUnique(columns, [indexName])**
791 |
792 | Drops a unique key constraint from a table. A default unique key name using the columns is used unless indexName is specified (in which case columns is ignored).
793 |
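794 | A sketch based on the composite unique index created earlier:
795 | 
796 | ```js
797 | knex.schema.alterTable('job', function (table) {
798 |   table.dropUnique(['account_id', 'program_id'], 'job_composite_index')
799 | })
800 | ```
801 | 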
794 | ### dropPrimary
795 |
796 | **table.dropPrimary([constraintName])**
797 |
798 | Drops the primary key constraint on a table. Defaults to tablename\_pkey unless constraintName is specified.
799 |
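800 | For example:
801 | 
802 | ```js
803 | knex.schema.alterTable('users', function (table) {
804 |   // drops 'users_pkey'; pass a name for a custom constraint
805 |   table.dropPrimary()
806 | })
807 | ```
808 | 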
800 | ### queryContext
801 |
802 | **table.queryContext(context)**
803 |
804 | Allows configuring a context to be passed to the [wrapIdentifier](/guide/#wrapidentifier) hook for formatting table builder identifiers. The context can be any kind of value and will be passed to `wrapIdentifier` without modification.
805 |
806 | ```js
807 | knex.schema.table('users', function (table) {
808 | table.queryContext({ foo: 'bar' });
809 | table.string('first_name');
810 | table.string('last_name');
811 | })
812 | ```
813 |
814 | This method also enables overwriting the context configured for a schema builder instance via [schema.queryContext](/guide/schema-builder#querycontext):
815 |
816 | ```js
817 | knex.schema.queryContext('schema context')
818 | .table('users', function (table) {
819 | table.queryContext('table context');
820 | table.string('first_name');
821 | table.string('last_name');
822 | })
823 | ```
824 |
825 | Note that it's also possible to overwrite the table builder context for any column in the table definition:
826 |
827 | ```js
828 | knex.schema.queryContext('schema context')
829 | .table('users', function (table) {
830 | table.queryContext('table context');
831 | table.string('first_name').queryContext('first_name context');
832 | table.string('last_name').queryContext('last_name context');
833 | })
834 | ```
835 |
836 | Calling `queryContext` with no arguments will return any context configured for the table builder instance.
837 |
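838 | A minimal sketch of reading the context back:
839 | 
840 | ```js
841 | knex.schema.table('users', function (table) {
842 |   table.queryContext({ foo: 'bar' });
843 |   const context = table.queryContext(); // { foo: 'bar' }
844 | })
845 | ```
846 | 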
838 | ## Chainable Methods
839 |
840 | The following methods may be chained on the schema building methods, as modifiers to the column.
841 |
842 | ### alter
843 |
844 | **column.alter(options={[alterNullable: boolean = true], [alterType: boolean = true]})**
845 |
846 | Marks the column as an alter / modify, instead of the default add.
847 |
848 | ::: warning
849 | This only works in .alterTable() and is not supported by SQLite or Amazon Redshift. Alter is _not_ done incrementally over the older column type, so if you want to add `notNullable` and keep the old default value, the alter statement must contain both: `.notNullable().defaultTo(1).alter()`. If you just try to add `.notNullable().alter()`, the old default value will be dropped. Nullable alterations are done only if `alterNullable` is true. Type alterations are done only if `alterType` is true.
850 | :::
851 |
852 | ```js
853 | knex.schema.alterTable('user', function(t) {
854 | t.increments().primary(); // add
855 | // drops previous default value from column,
856 | // change type to string and add not nullable constraint
857 | t.string('username', 35).notNullable().alter();
858 | // drops both not null constraint and the default value
859 | t.integer('age').alter();
860 | // if alterNullable is false, drops only the default value
861 | t.integer('age').alter({alterNullable : false});
862 | // if alterType is false, type of column is not altered.
863 | t.integer('age').alter({alterType : false});
864 | });
865 | ```
866 |
867 | ### index
868 |
869 | **column.index([indexName], options={[indexType: string], [storageEngineIndexType: 'btree'|'hash'], [predicate: QueryBuilder]})**
870 |
871 | Specifies a field as an index. If an `indexName` is specified, it is used in place of the standard index naming convention of tableName\_columnName. In MySQL, the storage engine index type may be 'btree' or 'hash'; more info in the Index Options section: [https://dev.mysql.com/doc/refman/8.0/en/create-index.html](https://dev.mysql.com/doc/refman/8.0/en/create-index.html). The `indexType` can be optionally specified for PostgreSQL and MySQL. This is a no-op if chained off a field that cannot be indexed. In PostgreSQL, SQLite and MSSQL a partial index can be specified by setting a 'where' `predicate`.
872 |
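873 | A minimal sketch, assuming a hypothetical `places` table:
874 | 
875 | ```js
876 | knex.schema.table('places', function (table) {
877 |   table.string('name').index()
878 |   // custom name plus a PostgreSQL-specific index type
879 |   table.specificType('coordinates', 'point')
880 |     .index('places_coordinates_idx', { indexType: 'gist' })
881 | })
882 | ```
883 | 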
873 | ### primary
874 |
875 | **column.primary(options={[constraintName: string], [deferrable: 'not deferrable'|'deferred'|'immediate']})**
876 |
877 | Sets a primary key constraint on `column`. The constraint name defaults to `tablename_pkey` unless `constraintName` is specified. On Amazon Redshift, all columns included in a primary key must be not nullable. Deferrable primary key constraints are supported on PostgreSQL and Oracle and can be set by passing the `deferrable` option in the options object.
878 |
879 | ```js
880 | knex.schema.table('users', function (table) {
881 | table.integer('user_id').primary({constraintName: 'users_primary_key', deferrable: 'deferred'})
882 | })
883 | ```
884 |
885 | ::: info
886 | If you want to create a primary key constraint on an existing column, use [primary](#primary).
887 | :::
888 |
889 | ### unique
890 |
891 | **column.unique(options={[indexName:string],[deferrable:'not deferrable'|'immediate'|'deferred']})**
892 |
893 | Sets the `column` as unique. On Amazon Redshift, this constraint is not enforced, but it is used by the query planner. Deferrable unique constraints are supported on PostgreSQL and Oracle and can be set by passing the `deferrable` option in the options object.
894 |
895 | ```js
896 | knex.schema.table('users', function (table) {
897 | table.integer('user_id').unique({indexName:'user_unique_id', deferrable:'immediate'})
898 | })
899 | ```
900 |
901 | ::: info
902 | If you want to create a unique constraint on an existing column, use [unique](#unique).
903 | :::
904 |
905 | ### references
906 |
907 | **column.references(column)**
908 |
909 | Sets the "column" that the current column references as a foreign key. "column" can either use the "tableName.columnName" syntax, or be just the column name followed up with a call to `inTable` to specify the table.
910 |
911 | ### inTable
912 |
913 | **column.inTable(table)**
914 |
915 | Sets the "table" where the foreign key column is located after calling column.references.
916 |
917 | ### onDelete
918 |
919 | **column.onDelete(command)**
920 |
921 | Sets the SQL command to be run "onDelete".
922 |
923 | ### onUpdate
924 |
925 | **column.onUpdate(command)**
926 |
927 | Sets the SQL command to be run "onUpdate".
928 |
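929 | Taken together, these let a new column declare its foreign key inline. A minimal sketch, assuming hypothetical `users` and `accounts` tables:
930 | 
931 | ```js
932 | knex.schema.createTable('accounts', function (table) {
933 |   table.increments('id')
934 |   table.integer('user_id')
935 |     .unsigned()
936 |     .references('id')
937 |     .inTable('users')
938 |     .onDelete('CASCADE')
939 |     .onUpdate('CASCADE')
940 | })
941 | ```
942 | 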
929 | ### defaultTo
930 |
931 | **column.defaultTo(value, options={[constraintName: string = undefined]})**
932 |
933 | Sets the default value for the column on an insert.
934 |
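935 | For example:
936 | 
937 | ```js
938 | table.boolean('is_admin').notNullable().defaultTo(false)
939 | ```
940 | 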
935 | In MSSQL a constraintName option may be passed to ensure a specific constraint name:
936 |
937 | ```js
938 | column.defaultTo('value', { constraintName: 'df_table_value' });
939 | ```
940 |
941 | ### unsigned
942 |
943 | **column.unsigned()**
944 |
945 | Specifies a number as unsigned. Only for numeric values.
946 |
947 | ### notNullable
948 |
949 | **column.notNullable()**
950 |
951 | Adds a not null constraint to the current column being created.
952 |
953 | ### nullable
954 |
955 | **column.nullable()**
956 |
957 | This is the default on column creation; it explicitly sets a field to be nullable.
958 |
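959 | A small sketch combining the three modifiers above (the `products` table is hypothetical):
960 | 
961 | ```js
962 | knex.schema.createTable('products', function (table) {
963 |   table.integer('quantity').unsigned().notNullable()
964 |   table.string('description').nullable() // explicit, though nullable is the default
965 | })
966 | ```
967 | 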
959 | ### first
960 |
961 | **column.first()**
962 |
963 | Sets the column to be inserted in the first position; only used in MySQL alter tables.
964 |
965 | ### after
966 |
967 | **column.after(field)**
968 |
969 | Sets the column to be inserted after another column; only used in MySQL alter tables.
970 |
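971 | A MySQL-only sketch, assuming a hypothetical `users` table:
972 | 
973 | ```js
974 | knex.schema.alterTable('users', function (table) {
975 |   table.string('nickname').first()
976 |   table.string('middle_name').after('first_name')
977 | })
978 | ```
979 | 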
971 | ### comment
972 |
973 | **column.comment(value)**
974 |
975 | Sets the comment for a column.
976 |
977 | ```js
978 | knex.schema.createTable('accounts', function(t) {
979 | t.increments().primary();
980 | t.string('email').unique().comment('This is the email field');
981 | });
982 | ```
983 |
984 | ### collate
985 |
986 | **column.collate(collation)**
987 |
988 | Sets the collation for a column (only works in MySQL). Here is a list of all available collations: [https://dev.mysql.com/doc/refman/5.5/en/charset-charsets.html](https://dev.mysql.com/doc/refman/5.5/en/charset-charsets.html)
989 |
990 | ```js
991 | knex.schema.createTable('users', function(t) {
992 | t.increments();
993 | t.string('email').unique().collate('utf8_unicode_ci');
994 | });
995 | ```
996 |
997 | ## View
998 |
999 | ### columns
1000 |
1001 | **view.columns([columnNames])**
1002 |
1003 | Specify the columns of the view.
1004 |
1005 | ```js
1006 | knex.schema.createView('users_view', function (view) {
1007 | view.columns(['first_name', 'last_name']);
1008 | view.as(knex('users').select('first_name', 'last_name').where('age', '>', '18'));
1009 | });
1010 | ```
1011 |
1012 | ### as
1013 |
1014 | **view.as(selectQuery)**
1015 |
1016 | Specify the select query of the view.
1017 |
1018 | ### checkOption
1019 |
1020 | **view.checkOption()**
1021 |
1022 | Adds a check option to the view definition. Available on OracleDb, MySQL, PostgreSQL and Redshift.
1023 |
1024 | ### localCheckOption
1025 |
1026 | **view.localCheckOption()**
1027 |
1028 | Adds a local check option to the view definition. Available on MySQL, PostgreSQL and Redshift.
1029 |
1030 | ### cascadedCheckOption
1031 |
1032 | **view.cascadedCheckOption()**
1033 |
1034 | Adds a cascaded check option to the view definition. Available on MySQL, PostgreSQL and Redshift.
1035 |
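1036 | For example, a sketch of an updatable view guarded by a check option (the `adults` view is hypothetical):
1037 | 
1038 | ```js
1039 | knex.schema.createView('adults', function (view) {
1040 |   view.as(knex('users').select('name', 'age').where('age', '>=', 18));
1041 |   // rejects writes through the view that would violate the predicate
1042 |   view.checkOption();
1043 | });
1044 | ```
1045 | 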
1036 | ## Checks
1037 |
1038 | ### check
1039 |
1040 | **table.check(checkPredicate, [bindings], [constraintName])**
1041 |
1042 | Specifies a check constraint on a table or column with a raw predicate.
1043 |
1044 | ```js
1045 | knex.schema.createTable('product', function (table) {
1046 | table.integer('price_min');
1047 | table.integer('price');
1048 | table.check('?? >= ??', ['price', 'price_min']);
1049 | })
1050 | ```
1051 |
1052 | ### checkPositive
1053 |
1054 | **column.checkPositive([constraintName])**
1055 |
1056 | Specifies a check on the column that tests whether its value is positive.
1057 |
1058 | ```js
1059 | knex.schema.createTable('product', function (table) {
1060 | table.integer('price').checkPositive();
1061 | })
1062 | ```
1063 |
1064 | ### checkNegative
1065 |
1066 | **column.checkNegative([constraintName])**
1067 |
1068 | Specifies a check on the column that tests whether its value is negative.
1069 |
1070 | ```js
1071 | knex.schema.createTable('product', function (table) {
1072 | table.integer('price_decrease').checkNegative();
1073 | })
1074 | ```
1075 |
1076 | ### checkIn
1077 |
1078 | **column.checkIn(values, [constraintName])**
1079 |
1080 | Specifies a check on the column that tests whether its value is contained in a set of specified values.
1081 |
1082 | ```js
1083 | knex.schema.createTable('product', function (table) {
1084 | table.string('type').checkIn(['table', 'chair', 'sofa']);
1085 | })
1086 | ```
1087 |
1088 | ### checkNotIn
1089 |
1090 | **column.checkNotIn(values, [constraintName])**
1091 |
1092 | Specifies a check on the column that tests whether its value is not contained in a set of specified values.
1093 |
1094 | ```js
1095 | knex.schema.createTable('product', function (table) {
1096 | table.string('type').checkNotIn(['boot', 'shoe']);
1097 | })
1098 | ```
1099 |
1100 | ### checkBetween
1101 |
1102 | **column.checkBetween(values, [constraintName])**
1103 |
1104 | Specifies a check on the column that tests whether its value is within a range of values.
1105 |
1106 | ```js
1107 | knex.schema.createTable('product', function (table) {
1108 | table.integer('price').checkBetween([0, 100]);
1109 | })
1110 | // You can add checks on multiple intervals
1111 | knex.schema.createTable('product', function (table) {
1112 | table.integer('price').checkBetween([ [0, 20], [30,40] ]);
1113 | })
1114 | ```
1115 |
1116 | ### checkLength
1117 |
1118 | **column.checkLength(operator, length, [constraintName])**
1119 |
1120 | Specifies a check on the column that tests whether the length of a string matches the predicate.
1121 |
1122 | ```js
1123 | knex.schema.createTable('product', function (table) {
1124 | // operator can be =, !=, <=, >=, <, >
1125 | table.string('phone').checkLength('=', 8);
1126 | })
1127 | ```
1128 |
1129 | ### checkRegex
1130 |
1131 | **column.checkRegex(regex, [constraintName])**
1132 |
1133 | Specifies a check on the column that tests whether the value matches the specified regular expression. In MSSQL only simple pattern matching is supported, not full regex syntax.
1134 |
1135 | ```js
1136 | knex.schema.createTable('product', function (table) {
1137 | table.string('phone').checkRegex('[0-9]{8}');
1138 | // In MSSQL, the {8} syntax doesn't work;
1139 | // you need to duplicate [0-9].
1140 | table.string('phone').checkRegex('[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]');
1141 | })
1142 | ```
1143 |
1144 | ### dropChecks
1145 |
1146 | **table.dropChecks([checkConstraintNames])**
1147 |
1148 | Drops check constraints given an array of constraint names.
1149 |
1150 | ```js
1151 | knex.schema.createTable('product', function (table) {
1152 | table.integer('price').checkPositive('price_check')
1153 | table.integer('price_proportion').checkBetween([0, 100],'price_proportion_check')
1154 | table.dropChecks(['price_check', 'price_proportion_check']);
1155 | })
1156 | ```
1157 |
--------------------------------------------------------------------------------
/src/guide/transactions.md:
--------------------------------------------------------------------------------
1 | # Transactions
2 |
3 | Transactions are an important feature of relational databases, as they allow correct recovery from failures and keep a database consistent even in cases of system failure. All queries within a transaction are executed on the same database connection, and the entire set of queries is run as a single unit of work. Any failure means the database will roll back any queries executed on that connection to the pre-transaction state.
4 |
5 | Transactions are handled by passing a handler function into `knex.transaction`. The handler function accepts a single argument, an object which may be used in two ways:
6 |
7 | 1. As the "promise aware" knex connection
8 | 2. As an object passed into a query with [transacting](/guide/query-builder#transacting), on which commit or rollback is eventually called.
9 |
10 | Consider these two examples:
11 |
12 | ```js
13 | // Using trx as a query builder:
14 | knex.transaction(function(trx) {
15 |
16 | const books = [
17 | {title: 'Canterbury Tales'},
18 | {title: 'Moby Dick'},
19 | {title: 'Hamlet'}
20 | ];
21 |
22 | return trx
23 | .insert({name: 'Old Books'}, 'id')
24 | .into('catalogues')
25 | .then(function(ids) {
26 | books.forEach((book) => book.catalogue_id = ids[0]);
27 | return trx('books').insert(books);
28 | });
29 | })
30 | .then(function(inserts) {
31 | console.log(inserts.length + ' new books saved.');
32 | })
33 | .catch(function(error) {
34 | // If we get here, that means that
35 | // neither the 'Old Books' catalogues insert,
36 | // nor any of the books inserts will have taken place.
37 | console.error(error);
38 | });
39 | ```
40 |
41 | And then this example:
42 |
43 | ```js
44 | // Using trx as a transaction object:
45 | knex.transaction(function(trx) {
46 |
47 | const books = [
48 | {title: 'Canterbury Tales'},
49 | {title: 'Moby Dick'},
50 | {title: 'Hamlet'}
51 | ];
52 |
53 | knex.insert({name: 'Old Books'}, 'id')
54 | .into('catalogues')
55 | .transacting(trx)
56 | .then(function(ids) {
57 | books.forEach((book) => book.catalogue_id = ids[0]);
58 | return knex('books').insert(books).transacting(trx);
59 | })
60 | .then(trx.commit)
61 | .catch(trx.rollback);
62 | })
63 | .then(function(inserts) {
64 | console.log(inserts.length + ' new books saved.');
65 | })
66 | .catch(function(error) {
67 | // If we get here, that means that
68 | // neither the 'Old Books' catalogues insert,
69 | // nor any of the books inserts will have taken place.
70 | console.error(error);
71 | });
72 | ```
73 |
74 | Same example as above using await/async:
75 |
76 | ```ts
77 | try {
78 | await knex.transaction(async trx => {
79 |
80 | const books = [
81 | {title: 'Canterbury Tales'},
82 | {title: 'Moby Dick'},
83 | {title: 'Hamlet'}
84 | ];
85 |
86 | const ids = await trx('catalogues')
87 | .insert({
88 | name: 'Old Books'
89 | }, 'id')
90 |
91 | books.forEach((book) => book.catalogue_id = ids[0])
92 | const inserts = await trx('books').insert(books)
93 |
94 | console.log(inserts.length + ' new books saved.')
95 | })
96 | } catch (error) {
97 | // If we get here, that means that neither the 'Old Books' catalogues insert,
98 | // nor any of the books inserts will have taken place.
99 | console.error(error);
100 | }
101 | ```
102 |
103 | Same example as above using another await/async approach:
104 |
105 | ```ts
106 | try {
107 | await knex.transaction(async trx => {
108 |
109 | const books = [
110 | {title: 'Canterbury Tales'},
111 | {title: 'Moby Dick'},
112 | {title: 'Hamlet'}
113 | ];
114 |
115 | const ids = await knex('catalogues')
116 | .insert({
117 | name: 'Old Books'
118 | }, 'id')
119 | .transacting(trx)
120 |
121 | books.forEach(book => book.catalogue_id = ids[0])
122 | const inserts = await knex('books')
123 | .insert(books)
124 | .transacting(trx)
125 |
126 | console.log(inserts.length + ' new books saved.')
127 | })
128 | } catch (error) {
129 | console.error(error);
130 | }
131 | ```
132 |
133 | Throwing an error directly from the transaction handler function automatically rolls back the transaction, same as returning a rejected promise.
134 |
135 | Note that if a promise is not returned from the handler, it is up to you to ensure `trx.commit` or `trx.rollback` is called, otherwise the transaction connection will hang.
136 |
137 | Calling `trx.rollback` will return a rejected Promise. If you don't pass any argument to `trx.rollback`, a generic `Error` object will be created and passed in to ensure the Promise always rejects with something.
138 |
139 | Note that Amazon Redshift does not support savepoints in transactions.
140 |
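141 | On the other dialects, savepoints are exposed as nested transactions: calling `transaction` on a transaction object creates a savepoint that can be rolled back without aborting the outer transaction. A minimal sketch (the `books` table is hypothetical):
142 | 
143 | ```ts
144 | await knex.transaction(async trx => {
145 |   await trx('books').insert({title: 'Canterbury Tales'});
146 |   try {
147 |     // the nested transaction becomes a savepoint on the same connection
148 |     await trx.transaction(async inner => {
149 |       await inner('books').insert({title: 'Mistake'});
150 |       throw new Error('roll back only the inner insert');
151 |     });
152 |   } catch (err) {
153 |     // the outer transaction is still usable here
154 |   }
155 |   await trx('books').insert({title: 'Moby Dick'});
156 | });
157 | ```
158 | 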
141 | In some cases you may prefer to create a transaction but only execute statements in it later. In that case, call the `transaction` method without a handler function:
142 |
143 | ```ts
144 | // Using trx as a transaction object:
145 | const trx = await knex.transaction();
146 |
147 | const books = [
148 | {title: 'Canterbury Tales'},
149 | {title: 'Moby Dick'},
150 | {title: 'Hamlet'}
151 | ];
152 |
153 | trx('catalogues')
154 | .insert({name: 'Old Books'}, 'id')
155 | .then(function(ids) {
156 | books.forEach((book) => book.catalogue_id = ids[0]);
157 | return trx('books').insert(books);
158 | })
159 | .then(trx.commit)
160 | .catch(trx.rollback);
161 | ```
162 |
163 | If you want to create a reusable transaction instance, but do not want to actually start it until it is used, you can create a transaction provider instance. It will start the transaction when called for the first time, and will return the same transaction on subsequent calls:
164 |
165 | ```ts
166 | // Does not start a transaction yet
167 | const trxProvider = knex.transactionProvider();
168 |
169 | const books = [
170 | {title: 'Canterbury Tales'},
171 | {title: 'Moby Dick'},
172 | {title: 'Hamlet'}
173 | ];
174 |
175 | // Starts a transaction
176 | const trx = await trxProvider();
177 | const ids = await trx('catalogues')
178 | .insert({name: 'Old Books'}, 'id')
179 | books.forEach((book) => book.catalogue_id = ids[0]);
180 | await trx('books').insert(books);
181 |
182 | // Reuses same transaction
183 | const sameTrx = await trxProvider();
184 | const ids2 = await sameTrx('catalogues')
185 | .insert({name: 'New Books'}, 'id')
186 | books.forEach((book) => book.catalogue_id = ids2[0]);
187 | await sameTrx('books').insert(books);
188 | ```
189 |
190 | With either way of creating a transaction, you can access its `executionPromise` field: a promise that resolves once the transaction is committed or explicitly rolled back by the user, and rejects if it is rolled back by the database itself:
191 |
192 | ```ts
193 | const trxProvider = knex.transactionProvider();
194 | const trx = await trxProvider();
195 | const trxPromise = trx.executionPromise;
196 |
197 | const trx2 = await knex.transaction();
198 | const trx2Promise = trx2.executionPromise;
199 |
200 | const trxInitPromise = new Promise((resolve, reject) => {
201 | knex.transaction((transaction) => {
202 | resolve(transaction);
203 | });
204 | });
205 | const trx3 = await trxInitPromise;
206 | const trx3Promise = trx3.executionPromise;
207 | ```
208 |
209 | You can check if a transaction has been committed or rolled back with the method `isCompleted`:
210 |
211 | ```ts
212 | const trx = await knex.transaction();
213 | trx.isCompleted(); // false
214 | await trx.commit();
215 | trx.isCompleted(); // true
216 |
217 | const trx2Provider = knex.transactionProvider();
218 | const trx2 = await trx2Provider();
219 | await trx2.rollback();
220 | trx2.isCompleted(); // true
220 | ```
221 |
222 | You can check the property `knex.isTransaction` to see if the current knex instance you are working with is a transaction.
223 |
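224 | This is useful in helpers that accept either a plain knex instance or a transaction. A sketch (the `saveBook` helper is hypothetical):
225 | 
226 | ```ts
227 | async function saveBook(db, book) {
228 |   // reuse an ongoing transaction, otherwise start one
229 |   if (db.isTransaction) {
230 |     return db('books').insert(book);
231 |   }
232 |   return db.transaction(trx => trx('books').insert(book));
233 | }
234 | ```
235 | 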
224 | ## Transaction Modes
225 |
226 | In case you need to specify an isolation level for your transaction, you can use the config parameter `isolationLevel`. It is not supported by Oracle and SQLite; the options are `read uncommitted`, `read committed`, `repeatable read`, `snapshot` (MSSQL only) and `serializable`.
227 |
228 | ```ts
229 | // Simple read skew example
230 | const isolationLevel = 'read committed';
231 | const trx = await knex.transaction({isolationLevel});
232 | const result1 = await trx(tableName).select();
233 | await knex(tableName).insert({ id: 1, value: 1 });
234 | const result2 = await trx(tableName).select();
235 | await trx.commit();
236 | // result1 may or may not deep equal result2 depending on isolation level
237 | ```
238 |
239 | You may also set the transaction mode as `read only` using the `readOnly` config parameter. It is currently only supported on MySQL, PostgreSQL, and Redshift.
240 |
241 | ```ts
242 | const trx = await knex.transaction({ readOnly: true });
243 | // 💥 Cannot `INSERT` while inside a `READ ONLY` transaction
244 | const result = await trx(tableName).insert({ id: 1, foo: 'bar' });
245 | ```
246 |
--------------------------------------------------------------------------------
/src/guide/utility.md:
--------------------------------------------------------------------------------
1 | # Utility
2 |
3 | A collection of utilities that the knex library provides for convenience.
4 |
5 | ## batchInsert
6 | **knex.batchInsert(tableName, rows, [chunkSize])**
7 |
8 | The `batchInsert` utility will insert a batch of rows wrapped inside a transaction _(which is automatically created unless explicitly given a transaction using [transacting](/guide/query-builder#transacting))_, at a given `chunkSize`.
9 |
10 | It's primarily designed to be used when you have thousands of rows to insert into a table.
11 |
12 | By default, the `chunkSize` is set to 1000.
13 |
14 | BatchInsert also allows for [returning values](/guide/query-builder#returning) and supplying transactions using [transacting](/guide/query-builder#transacting).
15 |
16 | ```js
17 | const rows = [{/*...*/}, {/*...*/}];
18 | const chunkSize = 30;
19 | knex.batchInsert('TableName', rows, chunkSize)
20 | .returning('id')
21 | .then(function(ids) { /*...*/ })
22 | .catch(function(error) { /*...*/ });
23 |
24 | knex.transaction(function(tr) {
25 | return knex.batchInsert('TableName', rows, chunkSize)
26 | .transacting(tr)
27 | })
28 | .then(function() { /*...*/ })
29 | .catch(function(error) { /*...*/ });
30 | ```
31 |
32 | ## now
33 |
34 | **knex.fn.now(precision)**
35 |
36 | Returns the current timestamp, with an optional precision.
37 |
38 | ```js
39 | table.datetime('some_time', { precision: 6 }).defaultTo(knex.fn.now(6))
40 | ```
41 |
42 | ## uuid
43 |
44 | **knex.fn.uuid()**
45 |
46 | Returns a uuid generation function. Not supported by Redshift.
47 |
48 | ```js
49 | table.uuid('uuid').defaultTo(knex.fn.uuid())
50 | ```
51 |
52 | ## uuidToBin
53 |
54 | **knex.fn.uuidToBin(uuid)**
55 |
56 | Converts a string uuid (char(36)) to a binary uuid (binary(16)).
57 |
58 | ```js
59 | knex.schema.createTable('uuid_table', (t) => {
60 | t.uuid('uuid_col_binary', { useBinaryUuid: true });
61 | });
62 | knex('uuid_table').insert({
63 | uuid_col_binary: knex.fn.uuidToBin('3f06af63-a93c-11e4-9797-00505690773f'),
64 | });
65 | ```
66 |
67 | ## binToUuid
68 |
69 | **knex.fn.binToUuid(binaryUuid)**
70 |
71 | Converts a binary uuid (binary(16)) to a string uuid (char(36)).
72 |
73 | ```js
74 | const res = await knex('uuid_table').select('uuid_col_binary');
75 | knex.fn.binToUuid(res[0].uuid_col_binary)
76 | ```
77 |
--------------------------------------------------------------------------------
/src/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | home: true
3 | heroImage: /knex-logo.png
4 | heroAlt: Logo knex
5 | heroText: Knex.js
6 | tagline: SQL query builder
7 | actionText: View guide
8 | actionLink: /guide/
9 | altActionText: Star on GitHub
10 | altActionLink: https://github.com/knex/knex
11 | title: SQL Query Builder for Javascript
12 | ---
13 |
14 |
15 |
16 | **Knex.js** (pronounced [/kəˈnɛks/](https://youtu.be/19Av0Lxml-I?t=521)) is a "batteries included" SQL query builder for **PostgreSQL**, **CockroachDB**, **MSSQL**, **MySQL**, **MariaDB**, **SQLite3**, **Better-SQLite3**, **Oracle**, and **Amazon Redshift** designed to be flexible, portable, and fun to use.
17 |
18 | It features both traditional node style [callbacks](/guide/interfaces#callbacks) as well as a [promise](/guide/interfaces#promises) interface for cleaner async flow control, [a stream interface](/guide/interfaces#streams), full-featured [query](/guide/query-builder) and [schema](/guide/schema-builder) builders, [**transaction support (with savepoints)**](/guide/transactions), connection [pooling](/guide/#pool) and standardized responses between different query clients and dialects.
19 |
20 |