├── .eslintrc.js ├── .github └── workflows │ ├── main.yml │ └── master.yml ├── .gitignore ├── .gitmodules ├── HOWTO_RELEASE.md ├── LICENSE ├── LOGGING.md ├── NEWS.md ├── README.md ├── app.js ├── carto-package.json ├── config └── environments │ ├── config.js │ ├── development.js.example │ ├── production.js.example │ ├── staging.js.example │ └── test.js.example ├── docs ├── examples │ └── 01-example.md ├── guides │ ├── 01-introduction.md │ ├── 02-authentication.md │ ├── 03-batch-queries.md │ ├── 04-creating-tables.md │ ├── 05-handling-geospatial-data.md │ ├── 06-metrics.md │ ├── 07-query-optimizations.md │ ├── 08-making-calls.md │ └── 09-copy-queries.md ├── reference │ └── swagger.yaml └── support │ ├── 01-support-options.md │ ├── 02-contribute.md │ ├── 03-version-number.md │ ├── 04-libraries-support.md │ ├── 05-rate-limiting.md │ ├── 06-timeout-limiting.md │ └── 07-tips-and-tricks.md ├── lib ├── api │ ├── api-router.js │ ├── health-check-controller.js │ ├── jobs-wip-controller.js │ ├── middlewares │ │ ├── access-validator.js │ │ ├── affected-tables.js │ │ ├── authorization.js │ │ ├── body-parser.js │ │ ├── cache-channel.js │ │ ├── cache-control.js │ │ ├── cancel-on-client-abort.js │ │ ├── client-header.js │ │ ├── connection-params.js │ │ ├── content.js │ │ ├── cors.js │ │ ├── db-quota.js │ │ ├── error.js │ │ ├── formatter.js │ │ ├── last-modified.js │ │ ├── log-query.js │ │ ├── log-req-res.js │ │ ├── logger.js │ │ ├── params.js │ │ ├── profiler.js │ │ ├── pubsub-metrics.js │ │ ├── query-may-write.js │ │ ├── rate-limit.js │ │ ├── served-by-host-header.js │ │ ├── socket-timeout.js │ │ ├── surrogate-key.js │ │ ├── tag.js │ │ ├── timeout-limits.js │ │ └── user.js │ ├── sql │ │ ├── copy-controller.js │ │ ├── job-controller.js │ │ ├── query-controller.js │ │ └── sql-router.js │ └── version-controller.js ├── auth │ ├── apikey.js │ ├── auth-api.js │ └── oauth.js ├── batch │ ├── README.md │ ├── batch.js │ ├── index.js │ ├── job-backend.js │ ├── job-canceller.js │ ├── job-queue.js │ ├── job-runner.js │ ├── job-service.js │ ├── job-status.js │ ├── leader │ │ ├── locker.js │ │ └── provider │ │ │ └── redis-distlock.js │ ├── maintenance │ │ ├── host-user-queue-mover.js │ │ └── remove-old-batch-jobs.js │ ├── models │ │ ├── job-base.js │ │ ├── job-factory.js │ │ ├── job-fallback.js │ │ ├── job-multiple.js │ │ ├── job-simple.js │ │ ├── job-state-machine.js │ │ └── query │ │ │ ├── fallback.js │ │ │ ├── main-fallback.js │ │ │ ├── query-base.js │ │ │ ├── query-factory.js │ │ │ ├── query-fallback.js │ │ │ └── query.js │ ├── pubsub │ │ ├── channel.js │ │ ├── job-publisher.js │ │ └── job-subscriber.js │ ├── query-runner.js │ ├── scheduler │ │ ├── capacity │ │ │ ├── fixed.js │ │ │ ├── http-load.js │ │ │ └── http-simple.js │ │ ├── host-scheduler.js │ │ └── scheduler.js │ ├── user-database-metadata-service.js │ └── util │ │ ├── debug.js │ │ └── forever.js ├── models │ ├── bin-encoder.js │ ├── cartodb-request.js │ └── formats │ │ ├── README │ │ ├── index.js │ │ ├── ogr.js │ │ ├── ogr │ │ ├── csv.js │ │ ├── geopackage.js │ │ ├── kml.js │ │ ├── shp.js │ │ └── spatialite.js │ │ ├── pg.js │ │ └── pg │ │ ├── arraybuffer.js │ │ ├── geojson.js │ │ ├── json.js │ │ ├── svg.js │ │ └── topojson.js ├── monitoring │ └── health-check.js ├── postgresql │ └── error-codes.js ├── server-options.js ├── server.js ├── services │ ├── error-handler-factory.js │ ├── error-handler.js │ ├── logger.js │ ├── pg-entities-access-validator.js │ ├── pubsub-metrics.js │ ├── stream-copy-metrics.js │ ├── stream-copy.js │ ├── throttler-stream.js │ ├── 
user-database-service.js │ └── user-limits.js ├── stats │ ├── client.js │ └── profiler-proxy.js └── utils │ ├── content-disposition.js │ ├── date-to-json.js │ ├── filename-sanitizer.js │ ├── logger.js │ ├── md5.js │ ├── query-info.js │ └── query-may-write.js ├── metro ├── config.json ├── index.js ├── log-collector.js ├── metrics-collector.js └── metro.js ├── package-lock.json ├── package.json └── test ├── acceptance ├── app-auth-test.js ├── app-configuration-test.js ├── app-test.js ├── auth-api-test.js ├── backend-crash-test.js ├── batch │ ├── batch-drain-test.js │ ├── batch-limits-test.js │ ├── batch-multiquery-test.js │ ├── batch-test.js │ ├── batch-wip-test.js │ ├── job-callback-template-test.js │ ├── job-fallback-test.js │ ├── job-query-limit-test.js │ ├── job-query-order-test.js │ ├── job-query-timeout-test.js │ ├── job-test.js │ ├── job-timing-test.js │ ├── leader-job-query-order-test.js │ ├── leader-multiple-users-query-order-test.js │ ├── queued-jobs-limit-test.js │ ├── scheduler-basic-test.js │ └── use-cases-test.js ├── cache-headers-test.js ├── client-headers-test.js ├── copy-abort-test.js ├── copy-endpoints-test.js ├── copy-statements-test.js ├── copy-throttle-test.js ├── custom-middlewares-test.js ├── export │ ├── arraybuffer-test.js │ ├── csv-test.js │ ├── folder-test.js │ ├── geojson-test.js │ ├── geopackage-test.js │ ├── kml-test.js │ ├── shapefile-test.js │ ├── spatialite-test.js │ ├── svg-test.js │ ├── timeout-test.js │ └── topojson-test.js ├── frontend-abort-test.js ├── handle-query-test.js ├── health-check-test.js ├── last-modified-header-test.js ├── oauth │ └── oauth_test.py ├── pagination-test.js ├── pg-entities-access-validator-test.js ├── pg-types-test.js ├── query-float-values-test.js ├── query-multipart-test.js ├── query-returning-test.js ├── rate-limit-test.js ├── regressions-test.js ├── skipfields-test.js ├── stream-responses-test.js ├── surrogate-key-test.js ├── system-queries-test.js ├── timeout-test.js ├── transaction-test.js └── x-cache-channel-test.js ├── helper.js ├── index.js ├── integration ├── batch │ ├── job-backend-test.js │ ├── job-canceller-test.js │ ├── job-publisher-test.js │ ├── job-queue-test.js │ ├── job-runner-test.js │ ├── job-service-test.js │ ├── locker-test.js │ └── scheduler-test.js ├── pubsub-metrics-test.js └── stream-copy-test.js ├── support ├── .gitignore ├── assert.js ├── batch-test-client.js ├── csv │ ├── copy_test_table.csv │ └── copy_test_table.csv.gz ├── db_utils.js ├── libredis_cell.dylib ├── libredis_cell.so ├── middlewares │ ├── teapot-headers.js │ └── teapot-response.js ├── redis-utils.js ├── sql │ ├── populated_places_simple_reduced.sql │ ├── quota_mock.sql │ └── test.sql └── test-client.js └── unit ├── batch ├── job-publisher-test.js ├── job-queue-test.js └── job-subscriber-test.js ├── error-handler-factory-test.js ├── health-check-test.js ├── model └── bin-encoder-test.js ├── oauth-test.js ├── pg-entities-access-validator-test.js ├── pubsub-metrics-test.js └── query-info-test.js /.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | env: { 3 | commonjs: true, 4 | es6: true, 5 | node: true, 6 | mocha: true 7 | }, 8 | extends: [ 9 | 'standard' 10 | ], 11 | globals: { 12 | Atomics: 'readonly', 13 | SharedArrayBuffer: 'readonly' 14 | }, 15 | parserOptions: { 16 | ecmaVersion: 2018 17 | }, 18 | rules: { 19 | "indent": ["error", 4], 20 | "semi": ["error", "always"] 21 | } 22 | } 23 | -------------------------------------------------------------------------------- 
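The ESLint configuration above extends the `standard` ruleset but overrides it to require four-space indentation and mandatory semicolons, with Node.js, ES2018 and Mocha globals enabled. As a purely illustrative sketch (not a file from the repository), a module that satisfies those rules would look like this:

```javascript
'use strict';

// Hypothetical module, shown only to illustrate the style enforced above:
// 'standard' rules plus 4-space indentation and explicit semicolons.
module.exports = function formatGreeting (name) {
    const greeting = `Hello, ${name}`;

    return greeting;
};
```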
/.github/workflows/master.yml: -------------------------------------------------------------------------------- 1 | # in this workflow we don't run the tests. Only build image, tag (also latests) and upload. The tests are not run because they are run 2 | # on each pull request, and there is a branch protection that forces to have branch up to date before merging, so tests are always run 3 | # with the latest code 4 | 5 | name: master build image 6 | on: 7 | push: 8 | branches: 9 | - master 10 | 11 | env: 12 | GCLOUD_VERSION: '306.0.0' 13 | ARTIFACTS_PROJECT_ID: cartodb-on-gcp-main-artifacts 14 | 15 | jobs: 16 | build-master: 17 | runs-on: ubuntu-18.04 18 | timeout-minutes: 15 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | with: 23 | submodules: true 24 | token: ${{ secrets.CARTOFANTE_PERSONAL_TOKEN }} 25 | 26 | - name: Build image 27 | run: | 28 | echo ${GITHUB_SHA::7} 29 | echo ${GITHUB_REF##*/} 30 | docker build -f private/Dockerfile --label="org.opencontainers.image.created=$(date --rfc-3339=seconds)" --label=org.opencontainers.image.revision=${GITHUB_SHA} -t gcr.io/$ARTIFACTS_PROJECT_ID/sql-api:latest -t gcr.io/$ARTIFACTS_PROJECT_ID/sql-api:${GITHUB_REF##*/} -t gcr.io/$ARTIFACTS_PROJECT_ID/sql-api:${GITHUB_SHA::7} -t gcr.io/$ARTIFACTS_PROJECT_ID/sql-api:${GITHUB_REF##*/}--${GITHUB_SHA::7} . 31 | docker build -f private/Dockerfile.onprem --label="org.opencontainers.image.created=$(date --rfc-3339=seconds)" --label=org.opencontainers.image.revision=${GITHUB_SHA} -t gcr.io/$ARTIFACTS_PROJECT_ID/sql-api-onprem:latest -t gcr.io/$ARTIFACTS_PROJECT_ID/sql-api-onprem:${GITHUB_REF##*/} -t gcr.io/$ARTIFACTS_PROJECT_ID/sql-api-onprem:${GITHUB_SHA::7} -t gcr.io/$ARTIFACTS_PROJECT_ID/sql-api-onprem:${GITHUB_REF##*/}--${GITHUB_SHA::7} . 32 | 33 | - uses: google-github-actions/auth@v0 34 | with: 35 | credentials_json: ${{ secrets.ARTIFACTS_GCLOUD_ACCOUNT_BASE64 }} 36 | - name: Setup gcloud authentication 37 | uses: google-github-actions/setup-gcloud@v0 38 | with: 39 | version: ${{env.GCLOUD_VERSION}} 40 | 41 | - name: Configure docker 42 | run: gcloud auth configure-docker 43 | 44 | - name: Upload image 45 | run: | 46 | docker push gcr.io/$ARTIFACTS_PROJECT_ID/sql-api:${GITHUB_REF##*/} 47 | docker push gcr.io/$ARTIFACTS_PROJECT_ID/sql-api:${GITHUB_SHA::7} 48 | docker push gcr.io/$ARTIFACTS_PROJECT_ID/sql-api:${GITHUB_REF##*/}--${GITHUB_SHA::7} 49 | docker push gcr.io/$ARTIFACTS_PROJECT_ID/sql-api:latest 50 | 51 | docker push gcr.io/$ARTIFACTS_PROJECT_ID/sql-api-onprem:${GITHUB_REF##*/} 52 | docker push gcr.io/$ARTIFACTS_PROJECT_ID/sql-api-onprem:${GITHUB_SHA::7} 53 | docker push gcr.io/$ARTIFACTS_PROJECT_ID/sql-api-onprem:${GITHUB_REF##*/}--${GITHUB_SHA::7} 54 | docker push gcr.io/$ARTIFACTS_PROJECT_ID/sql-api-onprem:latest 55 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | config/environments/*.js 2 | logs 3 | pids/*.pid 4 | *.sock 5 | test/tmp/* 6 | node_modules* 7 | .idea/* 8 | .vscode/ 9 | test/redis.pid 10 | test/redis-server.log 11 | test/test.log 12 | test/acceptance/oauth/venv/* 13 | coverage/ 14 | npm-debug.log 15 | log/*.log 16 | yarn.lock 17 | .nyc_output 18 | build_resources/ 19 | .dockerignore 20 | Dockerfile 21 | docker_node_modules 22 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "private"] 2 | path = 
private 3 | url = git@github.com:CartoDB/CartoDB-SQL-API-private.git 4 | branch = master 5 | -------------------------------------------------------------------------------- /HOWTO_RELEASE.md: -------------------------------------------------------------------------------- 1 | # How to release 2 | 3 | 1. Test (npm test), fix if broken before proceeding. 4 | 2. Ensure proper version in `package.json` and `package-lock.json`. 5 | 3. Ensure NEWS section exists for the new version, review it, add release date. 6 | 4. Commit `package.json`, `package-lock.json`, NEWS. 7 | 5. Run `git tag -a Major.Minor.Patch`. Use NEWS section as content. 8 | 6. Stub NEWS/package for next version. 9 | 10 | ## Versions 11 | 12 | * Bugfix releases increment Patch component of version. 13 | * Feature releases increment Minor and set Patch to zero. 14 | * If backward compatibility is broken, increment Major and set to zero Minor and Patch. 15 | * Branches named 'b.' are kept for any critical fix that might need to be shipped before next feature release is ready. 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, CartoDB 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | 3. Neither the name of the copyright holder nor the names of its contributors 15 | may be used to endorse or promote products derived from this software without 16 | specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /LOGGING.md: -------------------------------------------------------------------------------- 1 | # Logging structured traces 2 | 3 | In order to have meaningful and useful log traces, you should follow 4 | some general guidelines described in the [Project Guidelines](http://doc-internal.cartodb.net/platform/guidelines.html#structured-logging). 5 | 6 | In this project there is a specific logger in place that takes care of 7 | format and context of the traces for you. 
Take a look at [logger.js](https://github.com/CartoDB/CartoDB-SQL-API/blob/b28835ff56a4b3a98e5273d192d48a81974f5a14/lib/utils/logger.js) 8 | (NOTE: that file will be moved soon to a common module). 9 | 10 | The logger is instantiated as part of the [server startup process](https://github.com/CartoDB/CartoDB-SQL-API/blob/b28835ff56a4b3a98e5273d192d48a81974f5a14/lib/server.js#L17), 11 | then passed to middlewares and other client classes. 12 | 13 | There are many examples of how to use the logger to generate traces 14 | throughout the code. Here are a few of them: 15 | 16 | ``` 17 | lib/api/middlewares/log-query.js: logger.info({ sql: ensureMaxQueryLength(res.locals.params.sql) }, 'Input query'); 18 | lib/api/middlewares/profiler.js: logger.info({ stats, duration: stats.response / 1000, duration_ms: stats.response }, 'Request profiling stats'); 19 | lib/batch/batch.js: self.logger.info({ job: job.toJSON() }, 'Batch query job finished'); 20 | lib/services/stream-copy-metrics.js: this.logger.info({ ingestion: logData }, 'Copy to/from query metrics'); 21 | ``` 22 | -------------------------------------------------------------------------------- /carto-package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "carto_sql_api", 3 | "current_version": { 4 | "requires": { 5 | "node": "^12.16.3", 6 | "npm": "^6.14.4", 7 | "gdal":">=1.11.0" 8 | }, 9 | "works_with": { 10 | "redis": ">=4.0.0", 11 | "postgresql": ">=10.0.0", 12 | "postgis": ">=2.4.4.5", 13 | "carto_postgresql_ext": ">=0.35.0" 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /docs/examples/01-example.md: -------------------------------------------------------------------------------- 1 | ## Example 1 -------------------------------------------------------------------------------- /docs/guides/01-introduction.md: -------------------------------------------------------------------------------- 1 | ## Introduction 2 | 3 | CARTO's SQL API allows you to interact with your tables and data inside CARTO, as if you were running SQL statements against a normal database. The database behind CARTO is PostgreSQL so if you need help with specific SQL statements or you want to learn more about it, visit the [official documentation](http://www.postgresql.org/docs/9.1/static/sql.html). 4 | 5 | There are two main situations in which you would want to use the SQL API: 6 | 7 | - You want to **insert, update** or **delete** data. For example, you would like to insert a new column with a latitude and longitude data. 8 | 9 | - You want to **select** data from public tables in order to use it on your website or in your app. For example, you need to find the 10 closest records to a particular location. 10 | 11 | Remember that in order to access, read or modify data in private tables, you will need to authenticate your requests. When a table is public, you can do non-authenticated queries that read data, but you cannot write or modify data without authentication. 12 | -------------------------------------------------------------------------------- /docs/guides/02-authentication.md: -------------------------------------------------------------------------------- 1 | ## Authentication 2 | 3 | For all access to private tables, and for write access to public tables, CARTO enforces secure API access that requires you to authorize your queries. In order to authorize queries, you need to use an API Key. 
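As a minimal illustration of what an authorized call looks like from code, the following Node.js sketch builds the documented `/api/v2/sql` URL with the `q` and `api_key` parameters; the account name, API Key and query are placeholders you would replace with your own values.

```javascript
'use strict';

const https = require('https');

// Placeholders only: substitute your account name, API Key and query.
const username = 'your-username';
const apiKey = 'your-api-key';
const sql = 'SELECT count(*) FROM your_table';

const url = `https://${username}.carto.com/api/v2/sql` +
    `?q=${encodeURIComponent(sql)}&api_key=${encodeURIComponent(apiKey)}`;

https.get(url, (res) => {
    let body = '';
    res.on('data', (chunk) => { body += chunk; });
    res.on('end', () => console.log(res.statusCode, body));
}).on('error', (err) => console.error(err));
```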
4 | 5 | ### API Key 6 | 7 | The API Key offers the simplest way to access private data, or perform writes and updates to your public data. Remember that your API Key protects access to your data, so keep it confidential and only share it if you want others to have this access. If necessary, you can reset your API Key from your CARTO dashboard. 8 | 9 | **Tip:** For details about how to access, or reset, your API Key, see [the Auth API Documentation](https://carto.com/developers/auth-api/). 10 | 11 | To use your API Key, pass it as a parameter in a URL call to the CARTO API. For example, to perform an insert into your table, you would use the following URL structure. 12 | 13 | ##### Example 14 | 15 | ```bash 16 | https://{username}.carto.com/api/v2/sql?q={SQL statement}&api_key={api_key} 17 | ``` 18 | -------------------------------------------------------------------------------- /docs/guides/04-creating-tables.md: -------------------------------------------------------------------------------- 1 | ## Creating Tables with the SQL API 2 | 3 | [Writing data to your CARTO account]({{ site.sqlapi_docs }}/guides/making-calls#write-data-to-your-carto-account) enables you to manage data through SQL queries, but it does not automatically connect tables as datasets to _Your datasets_ dashboard in CARTO. 4 | 5 | You must apply the `CDB_CartodbfyTable` function to a target table in order to create and display connected datasets in your account. This additional step of "CARTOfying" data is the process of converting an arbitrary PostgreSQL table into a valid CARTO table, and registering it in the system so that it can be used in the graphical user interface, and the CARTO Engine, to generate maps and analysis. 6 | 7 | ### Create Tables 8 | 9 | To create a visible table in CARTO, run the following SQL query with the SQL API: 10 | 11 | ```bash 12 | CREATE TABLE {table_name} 13 | ( 14 | {column1} {data type}, 15 | {column2} {data type}, 16 | {column3} {data type}, 17 | ... 18 | ); 19 | ``` 20 | 21 | While this begins the process of creating the structure for the table, it is still not visible in your dashboard. Run the following request to make the table visible. 22 | 23 | ```bash 24 | SELECT cdb_cartodbfytable({table_name}); 25 | ``` 26 | 27 | **Tip:** If you belong to an organization, you must also include the username as part of the request. 28 | 29 | ```bash 30 | SELECT cdb_cartodbfytable({username}, {table_name}); 31 | ``` 32 | You will get a `Please set user quota before cartodbfying tables` error if you don't include the username as part of the request. 33 | 34 | The table is created and added as a connected dataset in _Your datasets_ dashboard. Refresh your browser to ensure that you can visualize it in your account. Once a table is connected to _Your datasets_ dashboard in CARTO, any modifications that you apply to your data through the SQL API are automatically updated. 35 | 36 | ### Rename Tables 37 | 38 | To rename a connected dataset in _Your datasets_ dashboard, run the following SQL query with the SQL API: 39 | 40 | ```bash 41 | ALTER TABLE {table_name} RENAME TO {renamed table_name}; 42 | ``` 43 | 44 | It may take a few seconds for the connected table to appear renamed. Refresh your browser to ensure that you can visualize the changes in _Your datasets_ dashboard. 45 | 46 | ### Remove a Table 47 | 48 | If you remove a table, **any maps using the connected dataset will be affected**. The deleted dataset cannot be recovered. 
Even if you create a new table with the same name as a removed table, CARTO still internalizes it as a different table. 49 | 50 | Some users and third-party libraries update the data behind a map dataset by running a _DROP TABLE_ + _CREATE TABLE_ + _INSERT_ sequence on the table. If you do that, your map will be affected unless you run the _DROP TABLE_ + _CREATE TABLE_ **inside a transaction**. Our recommendation, however, is to use _TRUNCATE TABLE_ + _INSERT_ instead. 51 | 52 | To remove a connected dataset from _Your datasets_ dashboard, run the following SQL query with the SQL API: 53 | 54 | ```bash 55 | DROP TABLE {table_name}; 56 | ``` 57 | 58 | This removes the connected table from _Your datasets_ dashboard. Refresh your browser to ensure that the connected dataset was removed. 59 | -------------------------------------------------------------------------------- /docs/guides/05-handling-geospatial-data.md: -------------------------------------------------------------------------------- 1 | ## Handling Geospatial Data 2 | 3 | Handling geospatial data through the SQL API is easy. By default, *the_geom* is returned straight from the database, in a format called Well-Known Binary. There are a handful of ways you can transform your geometries into more useful formats. 4 | 5 | The first is to use the format=GeoJSON method described above. Others can be handled through your SQL statements directly. For example, enclosing your *the_geom* column in a function called [ST_AsGeoJSON](http://www.postgis.org/documentation/manual-svn/ST_AsGeoJSON.html) will allow you to use JSON for your data but a GeoJSON string for your geometry column only. Alternatively, using the [ST_AsText](http://www.postgis.org/documentation/manual-svn/ST_AsGeoJSON.html) function will return your geometry as Well-Known Text. 6 | 7 | #### ST_AsGeoJSON 8 | 9 | ##### Call 10 | 11 | ```bash 12 | https://{username}.carto.com/api/v2/sql?q=SELECT cartodb_id,ST_AsGeoJSON(the_geom) as the_geom FROM {table_name} LIMIT 1 13 | ``` 14 | 15 | ##### Result 16 | 17 | ```javascript 18 | { 19 | time: 0.003, 20 | total_rows: 1, 21 | rows: [ 22 | { 23 | cartodb_id: 1, 24 | the_geom: "{"type":"Point","coordinates":[-97.3349,35.4979]}" 25 | } 26 | ] 27 | } 28 | ``` 29 | 30 | #### ST_AsText 31 | 32 | ##### Call 33 | 34 | ```bash 35 | https://{username}.carto.com/api/v2/sql?q=SELECT cartodb_id,ST_AsText(the_geom) FROM {table_name} LIMIT 1 36 | ``` 37 | 38 | ##### Result 39 | 40 | ```javascript 41 | { 42 | time: 0.003, 43 | total_rows: 1, 44 | rows: [ 45 | { 46 | cartodb_id: 1, 47 | the_geom: "POINT(-74.0004162 40.6920918)", 48 | } 49 | ] 50 | } 51 | ``` 52 | 53 | More advanced methods exist in the PostGIS library to extract meaningful data from your geometry. Explore the PostGIS documentation and get familiar with functions such as [ST_XMin](http://www.postgis.org/docs/ST_XMin.html), [ST_XMax](http://www.postgis.org/docs/ST_XMax.html), [ST_AsText](http://www.postgis.org/docs/ST_AsText.html), and so on. 54 | 55 | All data returned from the *the_geom* column is in WGS 84 (EPSG:4326). You can change this quickly on the fly by using SQL. For example, if you prefer geometries using the Hanoi 1972 (EPSG:4147) projection, use [ST_Transform](http://www.postgis.org/docs/ST_Transform.html): 56 | 57 | #### ST_Transform 58 | 59 | ```bash 60 | https://{username}.carto.com/api/v2/sql?q=SELECT ST_Transform(the_geom,4147) FROM {table_name} LIMIT 1 61 | ``` 62 | 63 | CARTO also stores a second geometry column, *the_geom_webmercator*. We use this internally to build your map tiles as fast as we can. 
It is hidden in the user interface, but it remains visible and available for use in your queries. In this column, we store a reprojected version of all your geometries using Web Mercator (EPSG:3857). 64 | -------------------------------------------------------------------------------- /docs/guides/06-metrics.md: -------------------------------------------------------------------------------- 1 | ## Metrics 2 | 3 | The SQL API provides you with a set of operations to handle metrics. You are able to manage timer operations that allow you to analyze the time spent in your queries, and counter operations that allow you to measure the number of successful and failed queries. 4 | 5 | ### Timers 6 | - **sqlapi.query**: time to return a query resultset from the API, split into: 7 | + **sqlapi.query.init**: time to prepare params from the request 8 | + **sqlapi.query.getDBParams**: time to retrieve the database connection params 9 | + **sqlapi.query.authenticate**: time to determine if the request is authenticated 10 | + **sqlapi.query.setDBAuth**: time to set the authenticated connection params 11 | + **sqlapi.query.queryExplain**: time to retrieve affected tables from the query 12 | + **sqlapi.query.eventedQuery**: time to prepare and execute the query (pg) 13 | + **sqlapi.query.beforeSink**: time to start sending the response 14 | + **sqlapi.query.gotRows**: time until all rows in the resultset have been processed 15 | + **sqlapi.query.generate**: time to prepare and generate a response from OGR 16 | + **sqlapi.query.finish**: time to handle an exception 17 | 18 | ### Counters 19 | - **sqlapi.query.success**: number of successful queries 20 | - **sqlapi.query.error**: number of failed queries 21 | -------------------------------------------------------------------------------- /docs/guides/07-query-optimizations.md: -------------------------------------------------------------------------------- 1 | ## Query Optimizations 2 | 3 | There are some tricks to consider when using the SQL API that might make your application a little faster. 4 | 5 | * Only request the fields you need. Selecting all columns will return a full version of your geometry in *the_geom*, as well as a reprojected version in *the_geom_webmercator* 6 | * Use PostGIS functions to simplify and filter out unneeded geometries when possible. One very handy function is [ST_Simplify](http://www.postgis.org/docs/ST_Simplify.html) 7 | * Remember to build indexes that will speed up some of your more common queries. For details, see [Creating Indexes](#creating-indexes) 8 | * Use *cartodb_id* to retrieve specific rows of your data; this is the unique key column added to every CARTO table. For a sample use case, view the [Faster data updates with CARTO](https://carto.com/blog/faster-data-updates-with-cartodb/) blog post 9 | * Check if your polygons contain an excessive number of vertices, and subdivide them if they do. Learn how in this [Subdivide All the Things](https://carto.com/blog/subdivide-all-things/) blog post. 10 | 11 | ### Creating Indexes 12 | 13 | In order to improve map performance, advanced users can use the SQL API to add custom indexes to their data. Creating indexes is useful if you have a large dataset with filtered data. By indexing select data, you are improving the performance of the map and generating the results faster. 
The index functionality is useful in the following scenarios: 14 | 15 | - If you are filtering a dataset by values in one or more columns 16 | - If you are regularly querying data through the SQL API, and filtering by one or more columns 17 | - If you are creating Torque maps on very large datasets. Since Torque maps are based on time-sensitive data (i.e. a date or numeric column), creating an index on the time data is optimal 18 | 19 | Indexed data is typically a single column representing filtered data. To create a single column index, apply this SQL query to your dataset: 20 | 21 | {% highlight bash %} 22 | CREATE INDEX idx_{DATASET_NAME}_{COLUMN_NAME} ON {DATASET_NAME} ({COLUMN_NAME}) 23 | {% endhighlight %} 24 | 25 | **Tip:** You can also apply more advanced, multi-column indexes. Please review the full documentation about [PostgreSQL Indexes](http://www.postgresql.org/docs/9.1/static/sql-createindex.html) before proceeding. 26 | 27 | **Note:** Indexes count towards the amount of data storage associated with your account. Be mindful when creating custom indexes. Note that indexes automatically generated by CARTO are _not_ counted against your quota; for example, the indexes on the `the_geom` and `cartodb_id` columns. These columns are used to index geometries for your dataset and are not associated with storage. 28 | -------------------------------------------------------------------------------- /docs/support/01-support-options.md: -------------------------------------------------------------------------------- 1 | ## Support Options 2 | 3 | Feeling stuck? There are many ways to find help. 4 | 5 | * Ask a question on [GIS StackExchange](https://gis.stackexchange.com/questions/tagged/carto) using the `CARTO` tag. 6 | * [Report an issue](https://github.com/CartoDB/cartodb/issues) on GitHub. 7 | * Engine Plan customers also have access to enterprise-level support through CARTO's support representatives. 8 | 9 | If you just want to describe an issue or share an idea, any of the channels below will work. 10 | 11 | ### Issues on GitHub 12 | 13 | If you think you may have found a bug, or if you have a feature request that you would like to share with the SQL API team, please [open an issue](https://github.com/CartoDB/CartoDB-SQL-API/issues/new). 14 | 15 | ### Community support on GIS Stack Exchange 16 | 17 | GIS Stack Exchange is the most popular community in the geospatial industry. This is a collaboratively-edited question and answer site for geospatial programmers and technicians. It is a fantastic resource for asking technical questions about developing and maintaining your application. 18 | 19 | 20 | When posting a new question, please consider the following: 21 | 22 | * Read the GIS Stack Exchange [help](https://gis.stackexchange.com/help) and [how to ask](https://gis.stackexchange.com/help/how-to-ask) pages for guidelines and tips about posting questions. 23 | * Be very clear about your question in the subject. A clear explanation helps those trying to answer your question, as well as those who may be looking for information in the future. 24 | * Be informative in your post. Details, code snippets, logs, screenshots, etc. help others to understand your problem. 25 | * Use code that demonstrates the problem. It is very hard to debug errors without sample code to reproduce the problem. 26 | 27 | ### Engine Plan Customers 28 | 29 | Engine Plan customers have additional support options beyond general community support. 
As per your account Terms of Service, you have access to enterprise-level support through CARTO's support representatives, available at [enterprise-support@carto.com](mailto:enterprise-support@carto.com). 30 | 31 | In order to speed up the resolution of your issue, provide as much information as possible (even if it is a link from community support). This allows our engineers to investigate your problem as soon as possible. 32 | 33 | If you are not yet a CARTO customer, browse our [plans & pricing](https://carto.com/pricing/) and find the right plan for you. 34 | -------------------------------------------------------------------------------- /docs/support/02-contribute.md: -------------------------------------------------------------------------------- 1 | ## Contribute 2 | 3 | The CARTO platform is an open-source ecosystem. You can read about the [fundamentals]({{site.fundamental_docs}}/components/) of the CARTO architecture and its components. 4 | We are more than happy to receive your contributions to both the code and the documentation. 5 | 6 | ## Filing a ticket 7 | 8 | If you want to open a new issue in our repository, please follow these instructions: 9 | 10 | 1. Use a descriptive title. 11 | 2. Write a good description; it always helps. 12 | 3. Specify the steps to reproduce the problem. 13 | 4. Try to add an example showing the problem. 14 | 15 | ## Contributing code 16 | 17 | The best part of open source is collaborating on the SQL API code! We like hearing from you, so if you have a bug fix or a new feature ready to be merged, these are the steps you should follow: 18 | 19 | 1. Fork the repository. 20 | 2. Create a new branch in your forked repository. 21 | 3. Commit your changes. Add new tests if necessary. 22 | 4. Open a pull request. 23 | 5. Any of the maintainers will take a look. 24 | 6. If everything works, it will be merged and released \o/. 25 | 26 | If you want more detailed information, this [GitHub guide](https://guides.github.com/activities/contributing-to-open-source/) is a must. 27 | 28 | ## Completing documentation 29 | 30 | SQL API documentation is located in `docs/`. That folder is the content that appears in the [Developer Center](https://carto.com/developers/sql-api/). Just follow the instructions described in [contributing code](#contributing-code) and, once your pull request is accepted, we will make it appear online :). 31 | 32 | **Tip:** A convenient, easy way of proposing changes in documentation is by using the GitHub editor directly on the web. You can easily create a branch with your changes and make a PR from there. 33 | 34 | ## Submitting contributions 35 | 36 | You will need to sign a Contributor License Agreement (CLA) before making a submission. [Learn more here](https://carto.com/contributions). 37 | -------------------------------------------------------------------------------- /docs/support/03-version-number.md: -------------------------------------------------------------------------------- 1 | ## API Version Number 2 | 3 | Ensure that you are using the [latest version](https://github.com/CartoDB/CartoDB-SQL-API) of our SQL API. For example, you can check that you are using **Version 2** by looking at your request URLs. 
They should all contain **/v2/** in the URLs as follows: `https://{username}.carto.com/api/v2/sql` 4 | -------------------------------------------------------------------------------- /docs/support/04-libraries-support.md: -------------------------------------------------------------------------------- 1 | ## Libraries in Different Languages 2 | 3 | To make things easier for developers, we provide client libraries for different programming languages and caching functionalities. 4 | 5 | **Note:** These libraries are externally developed and maintained. Use caution when using libraries in different languages, as some of these resources may be out-of-date. 6 | 7 | - **R** 8 | To help more researchers use CARTO to drive their geospatial data, we have released the R client library. [Fork it on GitHub!](https://github.com/Vizzuality/cartodb-r) 9 | 10 | - **Node.js** 11 | This demo app authenticates with your CARTO account and shows how to perform read and write queries using the SQL API. [Fork it on GitHub!](https://github.com/Vizzuality/cartodb-nodejs) 12 | 13 | - **PHP** 14 | The PHP library provides a wrapper around the SQL API to get PHP objects straight from SQL calls to CARTO. [Fork it on GitHub!](https://github.com/Vizzuality/cartodbclient-php) 15 | 16 | - **Python** 17 | CARTO provides access to the SQL API through [CARTOframes](https://github.com/cartodb/cartoframes/), a package built with data scientists in mind, and [CARTO Python SDK](https://github.com/cartodb/carto-python/), built to support application development. Install cartoframes with `pip install cartoframes` and the SDK with `pip install carto`. 18 | 19 | - **Java** 20 | A very basic example of how to access the CARTO SQL API. [Fork it on GitHub!](https://github.com/cartodb/cartodb-java-client) 21 | 22 | - **.NET** 23 | .NET library for authenticating with CARTO using an API Key, based on work started by [The Data Republic](http://www.thedatarepublic.com/). [Fork it on GitHub!](https://github.com/thedatarepublic/CartoDBClientDotNET) 24 | 25 | - **Clojure** 26 | Clojure library for authenticating with CARTO, maintained by [REDD Metrics](http://www.reddmetrics.com/). [Fork it on GitHub!](https://github.com/reddmetrics/cartodb-clj) 27 | 28 | - **iOS** 29 | Objective-C library for interacting with CARTO in native iOS applications. [Fork it on GitHub!](https://github.com/jmnavarro/cartodb-objectivec-client) 30 | 31 | - **Golang** 32 | A Go client for the CARTO SQL API that supports authentication using an API key. [Fork it on GitHub!](https://github.com/agonzalezro/cartodb_go) 33 | -------------------------------------------------------------------------------- /docs/support/06-timeout-limiting.md: -------------------------------------------------------------------------------- 1 | ## Timeout limit 2 | 3 | Our APIs work following a request <-> response model. While CARTO is busy performing the requested action or retrieving the requested information, part of our infrastructure is devoted to that process and is therefore unavailable for any other user. Typically this is not a problem, as most requests get serviced quickly enough. However, certain requests can take a long time to process, either by design (e.g., updating a huge table) or by mistake. To prevent these long-running queries from effectively blocking the usage of our platform resources, CARTO will discard requests that cannot be fulfilled in less than a certain amount of time. 4 | 5 | The SQL API is affected by this kind of limiting. 
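As a rough, illustrative sketch of what this means for a client (the `429` status is described under Response Codes below; the helper name and the response shape are assumptions, not part of the API contract), a caller could detect a discarded query and react to it:

```javascript
'use strict';

// Illustrative only: how a client might react when a query is discarded
// because it hit the timeout limit. `response` is assumed to be the result
// of an HTTP call against /api/v2/sql with `statusCode` and `body` fields.
function handleSqlApiResponse (response) {
    if (response.statusCode === 429) {
        // The query exceeded the timeout limit for this account.
        // Typical follow-ups: add indexes, simplify the query,
        // or move the work to the Batch API.
        throw new Error('Query was discarded by the platform timeout limit');
    }

    return JSON.parse(response.body);
}

module.exports = handleSqlApiResponse;
```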
6 | 7 | ### Per User 8 | 9 | The timeout limit is applied on a per-user basis (or, more accurately, per user access). 10 | 11 | ### How it works 12 | 13 | Every query has a statement timeout. When a query reaches that value, the request fails and the response returns an error. 14 | 15 | ### Response Codes 16 | 17 | When a query exceeds the timeout limit, the API will return an HTTP `429 Too Many Requests` error. 18 | 19 | ### Tips 20 | 21 | You can avoid the most common issues that trigger timeout limits by following these recommendations: 22 | 23 | - Always use database indexes 24 | - Try to use the Batch API to insert/update/delete data 25 | 26 | ### Timeout Limits Chart 27 | 28 | Below, you can find the values of the timeout limit by user account type. 29 | 30 | |Enterprise plans |Individual plans |Free plans | 31 | | --- | --- | --- | 32 | | 25 seconds | 15 seconds | 5 seconds | 33 | -------------------------------------------------------------------------------- /lib/api/health-check-controller.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const HealthCheckBackend = require('../monitoring/health-check'); 4 | 5 | module.exports = class HealthCheckController { 6 | constructor () { 7 | this.healthCheckBackend = new HealthCheckBackend(global.settings.disabled_file); 8 | } 9 | 10 | route (apiRouter) { 11 | apiRouter.get('/health', healthCheck({ healthCheckBackend: this.healthCheckBackend })); 12 | } 13 | }; 14 | 15 | function healthCheck ({ healthCheckBackend }) { 16 | return function healthCheckMiddleware (req, res) { 17 | const healthConfig = global.settings.health || {}; 18 | 19 | if (!healthConfig.enabled) { 20 | return res.status(200).send({ enabled: false, ok: true }); 21 | } 22 | 23 | const startTime = Date.now(); 24 | 25 | healthCheckBackend.check((err) => { 26 | const ok = !err; 27 | const response = { 28 | enabled: true, 29 | ok, 30 | elapsed: Date.now() - startTime 31 | }; 32 | 33 | if (err) { 34 | response.err = err.message; 35 | } 36 | 37 | res.status(ok ? 
200 : 503).send(response); 38 | }); 39 | }; 40 | } 41 | -------------------------------------------------------------------------------- /lib/api/jobs-wip-controller.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const bodyParser = require('./middlewares/body-parser'); 4 | const error = require('./middlewares/error'); 5 | 6 | module.exports = class JobsWipController { 7 | constructor ({ jobService }) { 8 | this.jobService = jobService; 9 | } 10 | 11 | route (apiRouter) { 12 | apiRouter.get('/jobs-wip', [ 13 | bodyParser(), 14 | listWorkInProgressJobs(this.jobService), 15 | sendResponse(), 16 | error({ logger: null }) 17 | ]); 18 | } 19 | }; 20 | 21 | function listWorkInProgressJobs (jobService) { 22 | return function listWorkInProgressJobsMiddleware (req, res, next) { 23 | jobService.listWorkInProgressJobs((err, list) => { 24 | if (err) { 25 | return next(err); 26 | } 27 | 28 | res.body = list; 29 | 30 | next(); 31 | }); 32 | }; 33 | } 34 | 35 | function sendResponse () { 36 | return function sendResponseMiddleware (req, res) { 37 | res.status(res.statusCode || 200).send(res.body); 38 | }; 39 | } 40 | -------------------------------------------------------------------------------- /lib/api/middlewares/access-validator.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const pgEntitiesAccessValidator = require('../../services/pg-entities-access-validator'); 4 | 5 | module.exports = function accessValidator () { 6 | return function accessValidatorMiddleware (req, res, next) { 7 | const { affectedTables, authorizationLevel } = res.locals; 8 | 9 | if (!pgEntitiesAccessValidator.validate(affectedTables, authorizationLevel)) { 10 | const error = new SyntaxError('system tables are forbidden'); 11 | error.http_status = 403; 12 | 13 | return next(error); 14 | } 15 | 16 | return next(); 17 | }; 18 | }; 19 | -------------------------------------------------------------------------------- /lib/api/middlewares/affected-tables.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const PSQL = require('cartodb-psql'); 4 | const queryTables = require('cartodb-query-tables').queryTables; 5 | 6 | module.exports = function affectedTables () { 7 | return function affectedTablesMiddleware (req, res, next) { 8 | const { logger } = res.locals; 9 | const { sql } = res.locals.params; 10 | const { authDbParams } = res.locals; 11 | const pg = new PSQL(authDbParams); 12 | 13 | queryTables.getQueryMetadataModel(pg, sql) 14 | .then(affectedTables => { 15 | res.locals.affectedTables = affectedTables; 16 | 17 | req.profiler.done('queryExplain'); 18 | 19 | return next(); 20 | }) 21 | .catch(err => { 22 | logger.warn({ exception: err }, 'Error on query explain'); 23 | 24 | return next(); 25 | }); 26 | }; 27 | }; 28 | -------------------------------------------------------------------------------- /lib/api/middlewares/cache-channel.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function cacheChannel () { 4 | return function cacheChannelMiddleware (req, res, next) { 5 | const { affectedTables, mayWrite } = res.locals; 6 | const skipNotUpdatedAtTables = true; 7 | 8 | if (!!affectedTables && affectedTables.getTables(skipNotUpdatedAtTables).length > 0 && !mayWrite) { 9 | res.header('X-Cache-Channel', affectedTables.getCacheChannel(skipNotUpdatedAtTables)); 10 | } 11 | 12 
| next(); 13 | }; 14 | }; 15 | -------------------------------------------------------------------------------- /lib/api/middlewares/cache-control.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const ONE_MINUTE_IN_SECONDS = 60; 4 | const THREE_MINUTE_IN_SECONDS = 60 * 3; 5 | const FIVE_MINUTES_IN_SECONDS = ONE_MINUTE_IN_SECONDS * 5; 6 | const TEN_MINUTES_IN_SECONDS = ONE_MINUTE_IN_SECONDS * 10; 7 | const FIFTEEN_MINUTES_IN_SECONDS = ONE_MINUTE_IN_SECONDS * 15; 8 | const THIRTY_MINUTES_IN_SECONDS = ONE_MINUTE_IN_SECONDS * 30; 9 | const ONE_HOUR_IN_SECONDS = ONE_MINUTE_IN_SECONDS * 60; 10 | const ONE_YEAR_IN_SECONDS = ONE_HOUR_IN_SECONDS * 24 * 365; 11 | 12 | const defaultCacheTTL = { 13 | ttl: ONE_YEAR_IN_SECONDS, 14 | fallbackTtl: FIVE_MINUTES_IN_SECONDS 15 | }; 16 | 17 | const validFallbackTTL = [ 18 | ONE_MINUTE_IN_SECONDS, 19 | THREE_MINUTE_IN_SECONDS, 20 | FIVE_MINUTES_IN_SECONDS, 21 | TEN_MINUTES_IN_SECONDS, 22 | FIFTEEN_MINUTES_IN_SECONDS, 23 | THIRTY_MINUTES_IN_SECONDS, 24 | ONE_HOUR_IN_SECONDS 25 | ]; 26 | 27 | const { ttl, fallbackTtl } = Object.assign(defaultCacheTTL, global.settings.cache); 28 | 29 | module.exports = function cacheControlHeader () { 30 | if (!validFallbackTTL.includes(fallbackTtl)) { 31 | const message = [ 32 | 'Invalid fallback TTL value for Cache-Control header.', 33 | `Got ${fallbackTtl}, expected ${validFallbackTTL.join(', ')}` 34 | ].join(' '); 35 | 36 | throw new Error(message); 37 | } 38 | 39 | return function cacheControlHeaderMiddleware (req, res, next) { 40 | const { cachePolicy } = res.locals.params; 41 | const { affectedTables, mayWrite } = res.locals; 42 | 43 | if (cachePolicy === 'persist') { 44 | res.header('Cache-Control', `public,max-age=${ONE_YEAR_IN_SECONDS}`); 45 | 46 | return next(); 47 | } 48 | 49 | if (affectedTables && affectedTables.getTables().every(table => table.updated_at !== null)) { 50 | const maxAge = mayWrite ? 
0 : ttl; 51 | res.header('Cache-Control', `no-cache,max-age=${maxAge},must-revalidate,public`); 52 | 53 | return next(); 54 | } 55 | 56 | const maxAge = fallbackTtl; 57 | res.header( 58 | 'Cache-Control', 59 | `no-cache,max-age=${computeNextTTL({ ttlInSeconds: maxAge })},must-revalidate,public` 60 | ); 61 | 62 | return next(); 63 | }; 64 | }; 65 | 66 | function computeNextTTL ({ ttlInSeconds } = {}) { 67 | const nowInSeconds = Math.ceil(Date.now() / 1000); 68 | const secondsAfterPreviousTTLStep = nowInSeconds % ttlInSeconds; 69 | const secondsToReachTheNextTTLStep = ttlInSeconds - secondsAfterPreviousTTLStep; 70 | 71 | return secondsToReachTheNextTTLStep; 72 | } 73 | -------------------------------------------------------------------------------- /lib/api/middlewares/cancel-on-client-abort.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function cancelOnClientAbort () { 4 | return function cancelOnClientAbortMiddleware (req, res, next) { 5 | req.on('aborted', () => { 6 | if (req.formatter && typeof req.formatter.cancel === 'function') { 7 | req.formatter.cancel(); 8 | } 9 | }); 10 | 11 | next(); 12 | }; 13 | }; 14 | -------------------------------------------------------------------------------- /lib/api/middlewares/client-header.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function clientHeader () { 4 | return function clientHeaderMiddleware (req, res, next) { 5 | const { client } = req.query; 6 | 7 | if (client) { 8 | res.set('Carto-Client', client); 9 | } 10 | 11 | return next(); 12 | }; 13 | }; 14 | -------------------------------------------------------------------------------- /lib/api/middlewares/connection-params.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function connectionParams (userDatabaseService) { 4 | return function connectionParamsMiddleware (req, res, next) { 5 | const { user, api_key: apikeyToken, authorizationLevel } = res.locals; 6 | 7 | userDatabaseService.getConnectionParams(user, apikeyToken, authorizationLevel, 8 | function (err, userDbParams, authDbParams) { 9 | req.profiler.done('getConnectionParams'); 10 | 11 | if (err) { 12 | return next(err); 13 | } 14 | 15 | res.locals.userDbParams = userDbParams; 16 | res.locals.authDbParams = authDbParams; 17 | 18 | next(); 19 | }); 20 | }; 21 | }; 22 | -------------------------------------------------------------------------------- /lib/api/middlewares/content.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const getContentDisposition = require('../../utils/content-disposition'); 4 | 5 | module.exports = function content () { 6 | return function contentMiddleware (req, res, next) { 7 | const { filename } = res.locals.params; 8 | const { formatter } = req; 9 | const useInline = !req.query.format && !req.body.format && !req.query.filename && !req.body.filename; 10 | 11 | res.header('Content-Disposition', getContentDisposition(formatter, filename, useInline)); 12 | res.header('Content-Type', formatter.getContentType()); 13 | 14 | next(); 15 | }; 16 | }; 17 | -------------------------------------------------------------------------------- /lib/api/middlewares/cors.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function cors () { 4 | return 
function (req, res, next) { 5 | res.header('Access-Control-Allow-Origin', '*'); 6 | res.header('Access-Control-Allow-Headers', '*'); 7 | res.header('Access-Control-Expose-Headers', '*'); 8 | 9 | if (req.method === 'OPTIONS') { 10 | return res.send(); 11 | } 12 | 13 | next(); 14 | }; 15 | }; 16 | -------------------------------------------------------------------------------- /lib/api/middlewares/db-quota.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const PSQL = require('cartodb-psql'); 4 | 5 | const remainingQuotaQuery = 'SELECT _CDB_UserQuotaInBytes() - CDB_UserDataSize(current_schema()) AS remaining_quota'; 6 | 7 | module.exports = function dbQuota () { 8 | return function dbQuotaMiddleware (req, res, next) { 9 | const { userDbParams } = res.locals; 10 | const pg = new PSQL(userDbParams); 11 | pg.connect((err, client, done) => { 12 | if (err) { 13 | return next(err); 14 | } 15 | client.query(remainingQuotaQuery, (err, result) => { 16 | if (err) { 17 | return next(err); 18 | } 19 | const remainingQuota = result.rows[0].remaining_quota; 20 | res.locals.dbRemainingQuota = remainingQuota; 21 | done(); 22 | next(); 23 | }); 24 | }); 25 | }; 26 | }; 27 | -------------------------------------------------------------------------------- /lib/api/middlewares/error.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const errorHandlerFactory = require('../../services/error-handler-factory'); 4 | 5 | module.exports = function error ({ logger }) { 6 | return function errorMiddleware (err, req, res, next) { 7 | const errorHandler = errorHandlerFactory(err); 8 | const errorResponse = errorHandler.getResponse(); 9 | const errorLogger = res.locals.logger || logger; 10 | 11 | errorLogger.error({ exception: err }, 'Error while handling the request'); 12 | 13 | // Force inline content disposition 14 | res.header('Content-Disposition', 'inline'); 15 | 16 | res.header('Content-Type', 'application/json; charset=utf-8'); 17 | res.status(getStatusError(errorHandler, req)); 18 | 19 | if (req.query && req.query.callback) { 20 | res.jsonp(errorResponse); 21 | } else { 22 | res.json(errorResponse); 23 | } 24 | 25 | return next(); 26 | }; 27 | }; 28 | 29 | function getStatusError (errorHandler, req) { 30 | let statusError = errorHandler.http_status; 31 | 32 | // JSONP has to return 200 status error 33 | if (req && req.query && req.query.callback) { 34 | statusError = 200; 35 | } 36 | 37 | return statusError; 38 | } 39 | -------------------------------------------------------------------------------- /lib/api/middlewares/formatter.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const formats = require('../../models/formats'); 4 | 5 | module.exports = function formatter () { 6 | return function formatterMiddleware (req, res, next) { 7 | const { format } = res.locals.params; 8 | 9 | const FormatClass = formats[format]; 10 | req.formatter = new FormatClass(); 11 | 12 | next(); 13 | }; 14 | }; 15 | -------------------------------------------------------------------------------- /lib/api/middlewares/last-modified.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function lastModified () { 4 | return function lastModifiedMiddleware (req, res, next) { 5 | const { affectedTables } = res.locals; 6 | 7 | if (affectedTables) { 8 | const lastUpdatedAt = 
affectedTables.getLastUpdatedAt(Date.now()); 9 | res.header('Last-Modified', new Date(lastUpdatedAt).toUTCString()); 10 | } 11 | 12 | next(); 13 | }; 14 | }; 15 | -------------------------------------------------------------------------------- /lib/api/middlewares/log-query.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const MAX_SQL_LENGTH = (global.settings.logQueries && global.settings.maxQueriesLogLength) || 1024; 4 | 5 | module.exports = function logQuery () { 6 | if (!global.settings.logQueries) { 7 | return function noopLogQuery (req, res, next) { 8 | return next(); 9 | }; 10 | } 11 | 12 | return function logQueryMiddleware (req, res, next) { 13 | const { logger } = res.locals; 14 | 15 | logger.info({ sql: ensureMaxQueryLength(res.locals.params.sql) }, 'Input query'); 16 | 17 | return next(); 18 | }; 19 | }; 20 | 21 | function ensureMaxQueryLength (sql, length = MAX_SQL_LENGTH) { 22 | return sql.substring(0, length); 23 | } 24 | -------------------------------------------------------------------------------- /lib/api/middlewares/log-req-res.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function logReqRes ({ logOnEvent = 'finish' } = {}) { 4 | return function logReqResMiddleware (req, res, next) { 5 | const { logger } = res.locals; 6 | logger.info({ client_request: req }, 'Incoming request'); 7 | res.on(logOnEvent, () => logger.info({ server_response: res, status: res.statusCode }, 'Response sent')); 8 | res.on('close', () => res.locals.logger.info({ end: true }, 'Request done')); 9 | next(); 10 | }; 11 | }; 12 | -------------------------------------------------------------------------------- /lib/api/middlewares/logger.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const uuid = require('uuid'); 4 | 5 | module.exports = function initLogger ({ logger }) { 6 | return function initLoggerMiddleware (req, res, next) { 7 | const requestId = req.get('X-Request-Id') || uuid.v4(); 8 | res.locals.logger = logger.child({ request_id: requestId }); 9 | next(); 10 | }; 11 | }; 12 | -------------------------------------------------------------------------------- /lib/api/middlewares/profiler.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const Profiler = require('../../stats/profiler-proxy'); 4 | const { name: prefix } = require('../../../package.json'); 5 | 6 | module.exports = function profiler ({ statsClient, logOnEvent = 'finish' }) { 7 | return function profilerMiddleware (req, res, next) { 8 | const start = new Date(); 9 | const { logger } = res.locals; 10 | 11 | req.profiler = new Profiler({ 12 | profile: global.settings.useProfiler, 13 | statsd_client: statsClient 14 | }); 15 | 16 | req.profiler.start(prefix); 17 | 18 | res.on(logOnEvent, () => { 19 | req.profiler.add({ response: new Date() - start }); 20 | req.profiler.end(); 21 | const stats = req.profiler.toJSON(); 22 | logger.info({ stats, duration: stats.response / 1000, duration_ms: stats.response }, 'Request profiling stats'); 23 | 24 | try { 25 | req.profiler.sendStats(); 26 | } catch (err) { 27 | logger.warn({ exception: err }, 'Could not send stats to StatsD'); 28 | } 29 | }); 30 | 31 | next(); 32 | }; 33 | }; 34 | -------------------------------------------------------------------------------- /lib/api/middlewares/pubsub-metrics.js: 
-------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const EVENT_VERSION = '1'; 4 | const MAX_LENGTH = 100; 5 | 6 | function pubSubMetrics (pubSubMetricsService) { 7 | if (!pubSubMetricsService.isEnabled()) { 8 | return function pubSubMetricsDisabledMiddleware (req, res, next) { next(); }; 9 | } 10 | 11 | return function pubSubMetricsMiddleware (req, res, next) { 12 | const data = getEventData(req, res); 13 | 14 | if (data.event) { 15 | pubSubMetricsService.sendEvent(data.event, data.attributes); 16 | } 17 | 18 | return next(); 19 | }; 20 | } 21 | 22 | function getEventData (req, res) { 23 | const event = normalizedField(req.get('Carto-Event')); 24 | const eventSource = normalizedField(req.get('Carto-Event-Source')); 25 | const eventGroupId = normalizedField(req.get('Carto-Event-Group-Id')); 26 | 27 | if (!event || !eventSource) { 28 | return [undefined, undefined]; 29 | } 30 | 31 | const attributes = { 32 | event_source: eventSource, 33 | user_id: res.locals.userId, 34 | response_code: res.statusCode.toString(), 35 | source_domain: req.hostname, 36 | event_time: new Date().toISOString(), 37 | event_version: EVENT_VERSION 38 | }; 39 | 40 | if (eventGroupId) { 41 | attributes.event_group_id = eventGroupId; 42 | } 43 | 44 | const responseTime = getResponseTime(res); 45 | 46 | if (responseTime) { 47 | attributes.response_time = responseTime.toString(); 48 | } 49 | 50 | return { event, attributes }; 51 | } 52 | 53 | function normalizedField (field) { 54 | if (!field) { 55 | return undefined; 56 | } 57 | 58 | return field.toString().trim().substr(0, MAX_LENGTH); 59 | } 60 | 61 | function getResponseTime (res) { 62 | const profiler = res.get('X-SQLAPI-Profiler'); 63 | let stats; 64 | 65 | try { 66 | stats = JSON.parse(profiler); 67 | } catch (e) { 68 | return undefined; 69 | } 70 | 71 | return stats.total; 72 | } 73 | 74 | module.exports = pubSubMetrics; 75 | -------------------------------------------------------------------------------- /lib/api/middlewares/query-may-write.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const queryMayWrite = require('../../utils/query-may-write'); 4 | 5 | module.exports = function mayWrite () { 6 | return function mayWriteMiddleware (req, res, next) { 7 | const { sql } = res.locals.params; 8 | res.locals.mayWrite = queryMayWrite(sql); 9 | 10 | next(); 11 | }; 12 | }; 13 | -------------------------------------------------------------------------------- /lib/api/middlewares/rate-limit.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const RATE_LIMIT_ENDPOINTS_GROUPS = { 4 | QUERY: 'query', 5 | JOB_CREATE: 'job_create', 6 | JOB_GET: 'job_get', 7 | JOB_DELETE: 'job_delete', 8 | COPY_FROM: 'copy_from', 9 | COPY_TO: 'copy_to' 10 | }; 11 | 12 | function rateLimit (userLimits, endpointGroup = null) { 13 | if (!isRateLimitEnabled(endpointGroup)) { 14 | return function rateLimitDisabledMiddleware (req, res, next) { next(); }; 15 | } 16 | 17 | return function rateLimitMiddleware (req, res, next) { 18 | userLimits.getRateLimit(res.locals.user, endpointGroup, function (err, userRateLimit) { 19 | if (err) { 20 | return next(err); 21 | } 22 | 23 | if (!userRateLimit) { 24 | return next(); 25 | } 26 | 27 | const [isBlocked, limit, remaining, retry, reset] = userRateLimit; 28 | 29 | res.set({ 30 | 'Carto-Rate-Limit-Limit': limit, 31 | 'Carto-Rate-Limit-Remaining': remaining, 32 | 
'Carto-Rate-Limit-Reset': reset 33 | }); 34 | 35 | if (isBlocked) { 36 | // retry is floor rounded in seconds by redis-cell 37 | res.set('Retry-After', retry + 1); 38 | 39 | const rateLimitError = new Error( 40 | 'You are over platform\'s limits. Please contact us to know more details' 41 | ); 42 | rateLimitError.http_status = 429; 43 | rateLimitError.context = 'limit'; 44 | rateLimitError.detail = 'rate-limit'; 45 | return next(rateLimitError); 46 | } 47 | 48 | return next(); 49 | }); 50 | }; 51 | } 52 | 53 | function isRateLimitEnabled (endpointGroup) { 54 | return global.settings.ratelimits.rateLimitsEnabled && 55 | endpointGroup && 56 | global.settings.ratelimits.endpoints[endpointGroup]; 57 | } 58 | 59 | module.exports = rateLimit; 60 | module.exports.RATE_LIMIT_ENDPOINTS_GROUPS = RATE_LIMIT_ENDPOINTS_GROUPS; 61 | -------------------------------------------------------------------------------- /lib/api/middlewares/served-by-host-header.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const os = require('os'); 4 | 5 | module.exports = function servedByHostHeader () { 6 | const hostname = global.settings.api_hostname || os.hostname().split('.')[0]; 7 | 8 | return function servedByHostHeaderMiddleware (req, res, next) { 9 | res.set('X-Served-By-Host', hostname); 10 | 11 | next(); 12 | }; 13 | }; 14 | -------------------------------------------------------------------------------- /lib/api/middlewares/socket-timeout.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function socketTimeout () { 4 | if (!Object.prototype.hasOwnProperty.call(global.settings, 'node_socket_timeout')) { 5 | return function dummySocketTimeoutMiddleware (req, res, next) { 6 | next(); 7 | }; 8 | } 9 | 10 | const timeout = parseInt(global.settings.node_socket_timeout); 11 | 12 | return function socketTimeoutMiddleware (req, res, next) { 13 | // Set connection timeout 14 | req.connection.setTimeout(timeout); 15 | 16 | next(); 17 | }; 18 | }; 19 | -------------------------------------------------------------------------------- /lib/api/middlewares/surrogate-key.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function surrogateKey () { 4 | return function surrogateKeyMiddleware (req, res, next) { 5 | const { affectedTables, mayWrite } = res.locals; 6 | const skipNotUpdatedAtTables = true; 7 | 8 | if (!!affectedTables && affectedTables.getTables(skipNotUpdatedAtTables).length > 0 && !mayWrite) { 9 | res.header('Surrogate-Key', affectedTables.key(skipNotUpdatedAtTables).join(' ')); 10 | } 11 | 12 | next(); 13 | }; 14 | }; 15 | -------------------------------------------------------------------------------- /lib/api/middlewares/tag.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function tag ({ tags, logOnEvent = 'finish' }) { 4 | if (!Array.isArray(tags) || !tags.every((tag) => typeof tag === 'string')) { 5 | throw new Error('Required "tags" option must be a valid Array: [string, string, ...]'); 6 | } 7 | 8 | return function tagMiddleware (req, res, next) { 9 | const { logger } = res.locals; 10 | res.locals.tags = tags; 11 | res.on(logOnEvent, () => logger.info({ tags: res.locals.tags }, 'Request tagged')); 12 | 13 | next(); 14 | }; 15 | }; 16 | -------------------------------------------------------------------------------- 
/lib/api/middlewares/timeout-limits.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function timeoutLimits (metadataBackend) { 4 | return function timeoutLimitsMiddleware (req, res, next) { 5 | const { user, authorizationLevel } = res.locals; 6 | 7 | metadataBackend.getUserTimeoutRenderLimits(user, function (err, timeoutRenderLimit) { 8 | req.profiler.done('getUserTimeoutLimits'); 9 | 10 | if (err) { 11 | return next(err); 12 | } 13 | 14 | const userLimits = { 15 | timeout: (authorizationLevel === 'master') ? timeoutRenderLimit.render : timeoutRenderLimit.renderPublic 16 | }; 17 | 18 | res.locals.userLimits = userLimits; 19 | 20 | next(); 21 | }); 22 | }; 23 | }; 24 | -------------------------------------------------------------------------------- /lib/api/middlewares/user.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const CdbRequest = require('../../models/cartodb-request'); 4 | 5 | module.exports = function user (metadataBackend) { 6 | const cdbRequest = new CdbRequest(); 7 | 8 | return function userMiddleware (req, res, next) { 9 | try { 10 | res.locals.user = getUserNameFromRequest(req, cdbRequest); 11 | res.set('Carto-User', res.locals.user); 12 | } catch (err) { 13 | return next(err); 14 | } 15 | 16 | metadataBackend.getUserId(res.locals.user, (err, userId) => { 17 | if (err || !userId) { 18 | const error = new Error('Unauthorized'); 19 | error.type = 'auth'; 20 | error.subtype = 'user-not-found'; 21 | error.http_status = 404; 22 | error.message = errorUserNotFoundMessageTemplate(res.locals.user); 23 | 24 | return next(error); 25 | } 26 | 27 | res.locals.userId = userId; 28 | res.set('Carto-User-Id', `${userId}`); 29 | res.locals.logger = res.locals.logger.child({ 'cdb-user': res.locals.user }); 30 | return next(); 31 | }); 32 | }; 33 | }; 34 | 35 | function getUserNameFromRequest (req, cdbRequest) { 36 | return cdbRequest.userByReq(req); 37 | } 38 | 39 | function errorUserNotFoundMessageTemplate (user) { 40 | return `Sorry, we can't find CARTO user '${user}'. 
Please check that you have entered the correct domain.`; 41 | } 42 | -------------------------------------------------------------------------------- /lib/api/version-controller.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const versions = { 4 | cartodb_sql_api: require('./../../package.json').version 5 | }; 6 | 7 | module.exports = class VersionController { 8 | route (apiRouter) { 9 | apiRouter.get('/version', version()); 10 | } 11 | }; 12 | 13 | function version () { 14 | return function versionMiddleware (req, res) { 15 | res.send(versions); 16 | }; 17 | } 18 | -------------------------------------------------------------------------------- /lib/auth/apikey.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /** 4 | * this module allows to auth user using an pregenerated api key 5 | */ 6 | function ApikeyAuth (req, metadataBackend, username, apikeyToken) { 7 | this.req = req; 8 | this.metadataBackend = metadataBackend; 9 | this.username = username; 10 | this.apikeyToken = apikeyToken; 11 | } 12 | 13 | module.exports = ApikeyAuth; 14 | 15 | function usernameMatches (basicAuthUsername, requestUsername) { 16 | return !(basicAuthUsername && (basicAuthUsername !== requestUsername)); 17 | } 18 | 19 | ApikeyAuth.prototype.verifyCredentials = function (callback) { 20 | this.metadataBackend.getApikey(this.username, this.apikeyToken, (err, apikey) => { 21 | if (err) { 22 | err.http_status = 500; 23 | err.message = 'Unexpected error'; 24 | 25 | return callback(err); 26 | } 27 | 28 | if (isApiKeyFound(apikey)) { 29 | if (!usernameMatches(apikey.user, this.username)) { 30 | const usernameError = new Error('Forbidden'); 31 | usernameError.type = 'auth'; 32 | usernameError.subtype = 'api-key-username-mismatch'; 33 | usernameError.http_status = 403; 34 | 35 | return callback(usernameError); 36 | } 37 | 38 | if (!apikey.grantsSql) { 39 | const forbiddenError = new Error('forbidden'); 40 | forbiddenError.http_status = 403; 41 | 42 | return callback(forbiddenError); 43 | } 44 | 45 | return callback(null, getAuthorizationLevel(apikey)); 46 | } else { 47 | const apiKeyNotFoundError = new Error('Unauthorized'); 48 | apiKeyNotFoundError.type = 'auth'; 49 | apiKeyNotFoundError.subtype = 'api-key-not-found'; 50 | apiKeyNotFoundError.http_status = 401; 51 | 52 | return callback(apiKeyNotFoundError); 53 | } 54 | }); 55 | }; 56 | 57 | ApikeyAuth.prototype.hasCredentials = function () { 58 | return !!this.apikeyToken; 59 | }; 60 | 61 | ApikeyAuth.prototype.getCredentials = function () { 62 | return this.apikeyToken; 63 | }; 64 | 65 | function getAuthorizationLevel (apikey) { 66 | return apikey.type; 67 | } 68 | 69 | function isApiKeyFound (apikey) { 70 | return apikey.type !== null && 71 | apikey.user !== null && 72 | apikey.databasePassword !== null && 73 | apikey.databaseRole !== null; 74 | } 75 | -------------------------------------------------------------------------------- /lib/auth/auth-api.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var ApiKeyAuth = require('./apikey'); 4 | var OAuthAuth = require('./oauth'); 5 | 6 | function AuthApi (req, requestParams) { 7 | this.req = req; 8 | this.authBackend = getAuthBackend(req, requestParams); 9 | 10 | this._hasCredentials = null; 11 | } 12 | 13 | AuthApi.prototype.getType = function () { 14 | if (this.authBackend instanceof ApiKeyAuth) { 15 | return 'apiKey'; 16 | } else if 
(this.authBackend instanceof OAuthAuth) { 17 | return 'oAuth'; 18 | } 19 | }; 20 | 21 | AuthApi.prototype.hasCredentials = function () { 22 | if (this._hasCredentials === null) { 23 | this._hasCredentials = this.authBackend.hasCredentials(); 24 | } 25 | return this._hasCredentials; 26 | }; 27 | 28 | AuthApi.prototype.getCredentials = function () { 29 | return this.authBackend.getCredentials(); 30 | }; 31 | 32 | AuthApi.prototype.verifyCredentials = function (callback) { 33 | if (this.hasCredentials()) { 34 | this.authBackend.verifyCredentials(callback); 35 | } else { 36 | callback(null, false); 37 | } 38 | }; 39 | 40 | function getAuthBackend (req, requestParams) { 41 | if (requestParams.api_key) { 42 | return new ApiKeyAuth(req, requestParams.metadataBackend, requestParams.user, requestParams.api_key); 43 | } else { 44 | return new OAuthAuth(req, requestParams.metadataBackend); 45 | } 46 | } 47 | 48 | module.exports = AuthApi; 49 | -------------------------------------------------------------------------------- /lib/batch/README.md: -------------------------------------------------------------------------------- 1 | # Batch Queries 2 | 3 | This document describes the features of Batch Queries and details some internals that might be useful for maintainers 4 | and developers. 5 | 6 | 7 | ## Redis data structures 8 | 9 | ### Jobs definition 10 | 11 | Redis Hash: `batch:jobs:{UUID}`. 12 | 13 | Redis DB: global.settings.batch_db || 5. 14 | 15 | It stores the job definition, the user, and some metadata like the final status, the failure reason, and so on. 16 | 17 | ### Job queues 18 | 19 | Redis List: `batch:queue:{username}`. 20 | 21 | Redis DB: global.settings.batch_db || 5. 22 | 23 | It stores a pending list of jobs per user. It points to a job definition with the `{UUID}`. 24 | 25 | ### Job notifications 26 | 27 | Redis Pub/Sub channel: `batch:users`. 28 | 29 | Redis DB: 0. 30 | 31 | In order to notify new jobs, it uses a Pub/Sub channel where the username for the queued job is published. 32 | 33 | 34 | ## Job types 35 | 36 | The format of each currently supported query type, and what each is still missing in terms of features. 37 | 38 | ### Simple 39 | 40 | ```json 41 | { 42 | "query": "update ..." 43 | } 44 | ``` 45 | 46 | Does not support main fallback queries. Ideally it should support something like: 47 | 48 | ```json 49 | { 50 | "query": "update ...", 51 | "onsuccess": "select 'general success fallback'", 52 | "onerror": "select 'general error fallback'" 53 | } 54 | ``` 55 | 56 | ### Multiple 57 | 58 | ```json 59 | { 60 | "query": [ 61 | "update ...", 62 | "select ... into ..." 63 | ] 64 | } 65 | ``` 66 | 67 | Does not support main fallback queries. Ideally it should support something like: 68 | 69 | ```json 70 | { 71 | "query": [ 72 | "update ...", 73 | "select ... into ..." 74 | ], 75 | "onsuccess": "select 'general success fallback'", 76 | "onerror": "select 'general error fallback'" 77 | } 78 | ``` 79 | 80 | ### Fallback 81 | 82 | ```json 83 | { 84 | "query": { 85 | "query": [ 86 | { 87 | "query": "select 1", 88 | "onsuccess": "select 'success fallback query 1'", 89 | "onerror": "select 'error fallback query 1'" 90 | }, 91 | { 92 | "query": "select 2", 93 | "onerror": "select 'error fallback query 2'" 94 | } 95 | ], 96 | "onsuccess": "select 'general success fallback'", 97 | "onerror": "select 'general error fallback'" 98 | } 99 | } 100 | ``` 101 | 102 | It's weird to have two nested `query` attributes. Also, it's not possible to mix _plain_ with _fallback_ ones.
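For reference, the three payload shapes above can be told apart by the shape of their `query` attribute, roughly as in the sketch below. This is illustrative only and not code from this repository; the actual checks live in the `JobSimple`, `JobMultiple` and `JobFallback` models under `lib/batch/models/`.

```javascript
// Minimal sketch: classify a batch job payload by the shape of its "query" attribute.
function jobType (query) {
    if (typeof query === 'string') {
        return 'simple';    // a single SQL string
    }

    if (Array.isArray(query)) {
        return 'multiple';  // an array of SQL strings
    }

    if (query && Array.isArray(query.query)) {
        return 'fallback';  // nested "query" array plus onsuccess/onerror hooks
    }

    return 'unknown';
}

console.log(jobType('update ...')); // simple
console.log(jobType(['update ...', 'select ... into ...'])); // multiple
console.log(jobType({ query: [{ query: 'select 1' }], onerror: "select 'general error fallback'" })); // fallback
```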
103 | Ideally it should support something like: 104 | 105 | ```json 106 | { 107 | "query": [ 108 | { 109 | "query": "select 1", 110 | "onsuccess": "select 'success fallback query 1'", 111 | "onerror": "select 'error fallback query 1'" 112 | }, 113 | "select 2" 114 | ], 115 | "onsuccess": "select 'general success fallback'", 116 | "onerror": "select 'general error fallback'" 117 | } 118 | } 119 | ``` 120 | 121 | Where you don't need a nested `query` attribute, it's just an array as in Multiple job type, and you can mix objects and 122 | plain queries. 123 | -------------------------------------------------------------------------------- /lib/batch/index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var JobRunner = require('./job-runner'); 4 | var QueryRunner = require('./query-runner'); 5 | var JobCanceller = require('./job-canceller'); 6 | var JobSubscriber = require('./pubsub/job-subscriber'); 7 | var UserDatabaseMetadataService = require('./user-database-metadata-service'); 8 | var JobPublisher = require('./pubsub/job-publisher'); 9 | var JobQueue = require('./job-queue'); 10 | var JobBackend = require('./job-backend'); 11 | var JobService = require('./job-service'); 12 | var Batch = require('./batch'); 13 | 14 | module.exports = function batchFactory (metadataBackend, redisPool, name, statsdClient, logger) { 15 | var userDatabaseMetadataService = new UserDatabaseMetadataService(metadataBackend); 16 | 17 | var jobSubscriber = new JobSubscriber(redisPool); 18 | var jobPublisher = new JobPublisher(redisPool); 19 | 20 | var jobQueue = new JobQueue(metadataBackend, jobPublisher, logger); 21 | var jobBackend = new JobBackend(metadataBackend, jobQueue, logger); 22 | var queryRunner = new QueryRunner(userDatabaseMetadataService, logger); 23 | var jobCanceller = new JobCanceller(); 24 | var jobService = new JobService(jobBackend, jobCanceller, logger); 25 | var jobRunner = new JobRunner(jobService, jobQueue, queryRunner, metadataBackend, statsdClient); 26 | 27 | return new Batch( 28 | name, 29 | userDatabaseMetadataService, 30 | jobSubscriber, 31 | jobQueue, 32 | jobRunner, 33 | jobService, 34 | redisPool, 35 | logger 36 | ); 37 | }; 38 | -------------------------------------------------------------------------------- /lib/batch/job-canceller.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var PSQL = require('cartodb-psql'); 4 | 5 | function JobCanceller () { 6 | } 7 | 8 | module.exports = JobCanceller; 9 | 10 | JobCanceller.prototype.cancel = function (job, callback) { 11 | const dbConfiguration = { 12 | host: job.data.host, 13 | port: job.data.port, 14 | dbname: job.data.dbname, 15 | user: job.data.dbuser, 16 | pass: job.data.pass 17 | }; 18 | 19 | doCancel(job.data.job_id, dbConfiguration, callback); 20 | }; 21 | 22 | function doCancel (jobId, dbConfiguration, callback) { 23 | var pg = new PSQL(dbConfiguration); 24 | 25 | getQueryPID(pg, jobId, function (err, pid) { 26 | if (err) { 27 | return callback(err); 28 | } 29 | 30 | if (!pid) { 31 | return callback(); 32 | } 33 | 34 | doCancelQuery(pg, pid, function (err, isCancelled) { 35 | if (err) { 36 | return callback(err); 37 | } 38 | 39 | if (!isCancelled) { 40 | return callback(new Error('Query has not been cancelled')); 41 | } 42 | 43 | callback(); 44 | }); 45 | }); 46 | } 47 | 48 | function getQueryPID (pg, jobId, callback) { 49 | var getPIDQuery = "SELECT pid FROM pg_stat_activity WHERE query LIKE '/* " + jobId + 
" */%'"; 50 | 51 | pg.query(getPIDQuery, function (err, result) { 52 | if (err) { 53 | return callback(err); 54 | } 55 | 56 | if (!result.rows[0] || !result.rows[0].pid) { 57 | // query is not running actually, but we have to callback w/o error to cancel the job anyway. 58 | return callback(); 59 | } 60 | 61 | callback(null, result.rows[0].pid); 62 | }); 63 | } 64 | 65 | function doCancelQuery (pg, pid, callback) { 66 | var cancelQuery = 'SELECT pg_cancel_backend(' + pid + ')'; 67 | 68 | pg.query(cancelQuery, function (err, result) { 69 | if (err) { 70 | return callback(err); 71 | } 72 | 73 | var isCancelled = result.rows[0].pg_cancel_backend; 74 | 75 | callback(null, isCancelled); 76 | }); 77 | } 78 | -------------------------------------------------------------------------------- /lib/batch/job-status.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var JOB_STATUS_ENUM = { 4 | PENDING: 'pending', 5 | RUNNING: 'running', 6 | DONE: 'done', 7 | CANCELLED: 'cancelled', 8 | FAILED: 'failed', 9 | SKIPPED: 'skipped', 10 | UNKNOWN: 'unknown' 11 | }; 12 | 13 | module.exports = JOB_STATUS_ENUM; 14 | 15 | var finalStatus = [ 16 | JOB_STATUS_ENUM.CANCELLED, 17 | JOB_STATUS_ENUM.DONE, 18 | JOB_STATUS_ENUM.FAILED, 19 | JOB_STATUS_ENUM.UNKNOWN 20 | ]; 21 | module.exports.isFinal = function (status) { 22 | return finalStatus.indexOf(status) !== -1; 23 | }; 24 | -------------------------------------------------------------------------------- /lib/batch/leader/locker.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var RedisDistlockLocker = require('./provider/redis-distlock'); 4 | var debug = require('../util/debug')('leader-locker'); 5 | var EventEmitter = require('events').EventEmitter; 6 | var util = require('util'); 7 | 8 | var LOCK = { 9 | TTL: 5000 10 | }; 11 | 12 | function Locker (locker, ttl) { 13 | EventEmitter.call(this); 14 | this.locker = locker; 15 | this.ttl = (Number.isFinite(ttl) && ttl > 0) ? 
ttl : LOCK.TTL; 16 | this.renewInterval = this.ttl / 5; 17 | this.intervalIds = {}; 18 | } 19 | util.inherits(Locker, EventEmitter); 20 | 21 | module.exports = Locker; 22 | 23 | Locker.prototype.lock = function (resource, callback) { 24 | var self = this; 25 | debug('Locker.lock(%s, %d)', resource, this.ttl); 26 | this.locker.lock(resource, this.ttl, function (err, lock) { 27 | if (!err) { 28 | self.startRenewal(resource); 29 | } 30 | return callback(err, lock); 31 | }); 32 | }; 33 | 34 | Locker.prototype.unlock = function (resource, callback) { 35 | var self = this; 36 | debug('Locker.unlock(%s)', resource); 37 | this.locker.unlock(resource, function (err) { 38 | self.stopRenewal(resource); 39 | return callback(err); 40 | }); 41 | }; 42 | 43 | Locker.prototype.startRenewal = function (resource) { 44 | var self = this; 45 | if (!Object.prototype.hasOwnProperty.call(this.intervalIds, resource)) { 46 | this.intervalIds[resource] = setInterval(function () { 47 | debug('Trying to extend lock resource=%s', resource); 48 | self.locker.lock(resource, self.ttl, function (err, _lock) { 49 | if (err) { 50 | self.emit('error', err, resource); 51 | return self.stopRenewal(resource); 52 | } 53 | if (_lock) { 54 | debug('Extended lock resource=%s', resource); 55 | } 56 | }); 57 | }, this.renewInterval); 58 | } 59 | }; 60 | 61 | Locker.prototype.stopRenewal = function (resource) { 62 | if (Object.prototype.hasOwnProperty.call(this.intervalIds, resource)) { 63 | clearInterval(this.intervalIds[resource]); 64 | delete this.intervalIds[resource]; 65 | } 66 | }; 67 | 68 | module.exports.create = function createLocker (type, config) { 69 | if (type !== 'redis-distlock') { 70 | throw new Error('Invalid type Locker type. Valid types are: "redis-distlock"'); 71 | } 72 | var locker = new RedisDistlockLocker(config.pool); 73 | return new Locker(locker, config.ttl); 74 | }; 75 | -------------------------------------------------------------------------------- /lib/batch/models/job-factory.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var JobSimple = require('./job-simple'); 4 | var JobMultiple = require('./job-multiple'); 5 | var JobFallback = require('./job-fallback'); 6 | 7 | var Models = [JobSimple, JobMultiple, JobFallback]; 8 | 9 | function JobFactory () { 10 | } 11 | 12 | module.exports = JobFactory; 13 | 14 | JobFactory.create = function (data) { 15 | if (!data.query) { 16 | throw new Error('You must indicate a valid SQL'); 17 | } 18 | 19 | for (var i = 0; i < Models.length; i++) { 20 | if (Models[i].is(data.query)) { 21 | return new Models[i](data); 22 | } 23 | } 24 | 25 | throw new Error('there is no job class for the provided query'); 26 | }; 27 | -------------------------------------------------------------------------------- /lib/batch/models/job-simple.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var util = require('util'); 4 | var JobBase = require('./job-base'); 5 | var jobStatus = require('../job-status'); 6 | 7 | function JobSimple (jobDefinition) { 8 | JobBase.call(this, jobDefinition); 9 | 10 | if (!this.data.status) { 11 | this.data.status = jobStatus.PENDING; 12 | } 13 | } 14 | util.inherits(JobSimple, JobBase); 15 | 16 | module.exports = JobSimple; 17 | 18 | JobSimple.is = function (query) { 19 | return typeof query === 'string'; 20 | }; 21 | 22 | JobSimple.prototype.getNextQuery = function () { 23 | if (this.isPending()) { 24 | return this.data.query; 25 | } 26 | }; 
27 | 28 | JobSimple.prototype.setQuery = function (query) { 29 | if (!JobSimple.is(query)) { 30 | throw new Error('You must indicate a valid SQL'); 31 | } 32 | 33 | JobSimple.super_.prototype.setQuery.call(this, query); 34 | }; 35 | 36 | JobSimple.prototype.toJSON = function () { 37 | return { 38 | class: this.constructor.name, 39 | id: this.data.job_id, 40 | username: this.data.user, 41 | status: this.data.status, 42 | failed_reason: this.data.failed_reason, 43 | created: this.data.created_at, 44 | updated: this.data.updated_at, 45 | elapsed: elapsedTime(this.data.created_at, this.data.updated_at), 46 | dbhost: this.data.host 47 | }; 48 | }; 49 | 50 | function elapsedTime (startedAt, endedAt) { 51 | if (!startedAt || !endedAt) { 52 | return; 53 | } 54 | 55 | var start = new Date(startedAt); 56 | var end = new Date(endedAt); 57 | return end.getTime() - start.getTime(); 58 | } 59 | -------------------------------------------------------------------------------- /lib/batch/models/job-state-machine.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var assert = require('assert'); 4 | var JobStatus = require('../job-status'); 5 | var validStatusTransitions = [ 6 | [JobStatus.PENDING, JobStatus.RUNNING], 7 | [JobStatus.PENDING, JobStatus.CANCELLED], 8 | [JobStatus.PENDING, JobStatus.UNKNOWN], 9 | [JobStatus.PENDING, JobStatus.SKIPPED], 10 | [JobStatus.RUNNING, JobStatus.DONE], 11 | [JobStatus.RUNNING, JobStatus.FAILED], 12 | [JobStatus.RUNNING, JobStatus.CANCELLED], 13 | [JobStatus.RUNNING, JobStatus.PENDING], 14 | [JobStatus.RUNNING, JobStatus.UNKNOWN] 15 | ]; 16 | 17 | function JobStateMachine () { 18 | } 19 | 20 | module.exports = JobStateMachine; 21 | 22 | JobStateMachine.prototype.isValidTransition = function (initialStatus, finalStatus) { 23 | var transition = [initialStatus, finalStatus]; 24 | 25 | for (var i = 0; i < validStatusTransitions.length; i++) { 26 | try { 27 | assert.deepStrictEqual(transition, validStatusTransitions[i]); 28 | return true; 29 | } catch (e) { 30 | continue; 31 | } 32 | } 33 | 34 | return false; 35 | }; 36 | 37 | JobStateMachine.prototype.isFinalStatus = function (status) { 38 | return JobStatus.isFinal(status); 39 | }; 40 | -------------------------------------------------------------------------------- /lib/batch/models/query/fallback.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var util = require('util'); 4 | var QueryBase = require('./query-base'); 5 | var jobStatus = require('../../job-status'); 6 | 7 | function Fallback (index) { 8 | QueryBase.call(this, index); 9 | } 10 | util.inherits(Fallback, QueryBase); 11 | 12 | module.exports = Fallback; 13 | 14 | Fallback.is = function (query) { 15 | if (query.onsuccess || query.onerror) { 16 | return true; 17 | } 18 | return false; 19 | }; 20 | 21 | Fallback.prototype.getNextQuery = function (job) { 22 | if (this.hasOnSuccess(job)) { 23 | return this.getOnSuccess(job); 24 | } 25 | if (this.hasOnError(job)) { 26 | return this.getOnError(job); 27 | } 28 | }; 29 | 30 | Fallback.prototype.getOnSuccess = function (job) { 31 | if (job.query.query[this.index].status === jobStatus.DONE && 32 | job.query.query[this.index].fallback_status === jobStatus.PENDING) { 33 | var onsuccessQuery = job.query.query[this.index].onsuccess; 34 | if (onsuccessQuery) { 35 | onsuccessQuery = onsuccessQuery.replace(/<%=\s*job_id\s*%>/g, job.job_id); 36 | } 37 | return onsuccessQuery; 38 | } 39 | }; 40 | 41 | 
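// Note: getOnSuccess() above and getOnError() below substitute the <%= job_id %> placeholder
// (and, for onerror, <%= error_message %>) in the fallback SQL before returning it.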
Fallback.prototype.hasOnSuccess = function (job) { 42 | return !!this.getOnSuccess(job); 43 | }; 44 | 45 | Fallback.prototype.getOnError = function (job) { 46 | if (job.query.query[this.index].status === jobStatus.FAILED && 47 | job.query.query[this.index].fallback_status === jobStatus.PENDING) { 48 | var onerrorQuery = job.query.query[this.index].onerror; 49 | if (onerrorQuery) { 50 | onerrorQuery = onerrorQuery.replace(/<%=\s*job_id\s*%>/g, job.job_id); 51 | onerrorQuery = onerrorQuery.replace(/<%=\s*error_message\s*%>/g, job.query.query[this.index].failed_reason); 52 | } 53 | return onerrorQuery; 54 | } 55 | }; 56 | 57 | Fallback.prototype.hasOnError = function (job) { 58 | return !!this.getOnError(job); 59 | }; 60 | 61 | Fallback.prototype.setStatus = function (status, job, errorMessage) { 62 | var isValid = false; 63 | 64 | isValid = this.isValidTransition(job.query.query[this.index].fallback_status, status); 65 | 66 | if (isValid) { 67 | job.query.query[this.index].fallback_status = status; 68 | if (status === jobStatus.FAILED && errorMessage) { 69 | job.query.query[this.index].failed_reason = errorMessage; 70 | } 71 | } 72 | 73 | return isValid; 74 | }; 75 | 76 | Fallback.prototype.getStatus = function (job) { 77 | return job.query.query[this.index].fallback_status; 78 | }; 79 | -------------------------------------------------------------------------------- /lib/batch/models/query/main-fallback.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var util = require('util'); 4 | var QueryBase = require('./query-base'); 5 | var jobStatus = require('../../job-status'); 6 | 7 | function MainFallback () { 8 | QueryBase.call(this); 9 | } 10 | util.inherits(MainFallback, QueryBase); 11 | 12 | module.exports = MainFallback; 13 | 14 | MainFallback.is = function (job) { 15 | if (job.query.onsuccess || job.query.onerror) { 16 | return true; 17 | } 18 | return false; 19 | }; 20 | 21 | MainFallback.prototype.getNextQuery = function (job) { 22 | if (this.hasOnSuccess(job)) { 23 | return this.getOnSuccess(job); 24 | } 25 | 26 | if (this.hasOnError(job)) { 27 | return this.getOnError(job); 28 | } 29 | }; 30 | 31 | MainFallback.prototype.getOnSuccess = function (job) { 32 | if (job.status === jobStatus.DONE && job.fallback_status === jobStatus.PENDING) { 33 | return job.query.onsuccess; 34 | } 35 | }; 36 | 37 | MainFallback.prototype.hasOnSuccess = function (job) { 38 | return !!this.getOnSuccess(job); 39 | }; 40 | 41 | MainFallback.prototype.getOnError = function (job) { 42 | if (job.status === jobStatus.FAILED && job.fallback_status === jobStatus.PENDING) { 43 | return job.query.onerror; 44 | } 45 | }; 46 | 47 | MainFallback.prototype.hasOnError = function (job) { 48 | return !!this.getOnError(job); 49 | }; 50 | 51 | MainFallback.prototype.setStatus = function (status, job, previous) { 52 | var isValid = false; 53 | var appliedToFallback = false; 54 | 55 | if (previous.isValid && !previous.appliedToFallback) { 56 | if (this.isFinalStatus(status) && !this.hasNextQuery(job)) { 57 | isValid = this.isValidTransition(job.fallback_status, jobStatus.SKIPPED); 58 | 59 | if (isValid) { 60 | job.fallback_status = jobStatus.SKIPPED; 61 | appliedToFallback = true; 62 | } 63 | } 64 | } else if (!previous.isValid) { 65 | isValid = this.isValidTransition(job.fallback_status, status); 66 | 67 | if (isValid) { 68 | job.fallback_status = status; 69 | appliedToFallback = true; 70 | } 71 | } 72 | 73 | return { isValid: isValid, appliedToFallback: 
appliedToFallback }; 74 | }; 75 | -------------------------------------------------------------------------------- /lib/batch/models/query/query-base.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var util = require('util'); 4 | var JobStateMachine = require('../job-state-machine'); 5 | 6 | function QueryBase (index) { 7 | JobStateMachine.call(this); 8 | 9 | this.index = index; 10 | } 11 | util.inherits(QueryBase, JobStateMachine); 12 | 13 | module.exports = QueryBase; 14 | 15 | // should be implemented 16 | QueryBase.prototype.setStatus = function () { 17 | throw new Error('Unimplemented method'); 18 | }; 19 | 20 | // should be implemented 21 | QueryBase.prototype.getNextQuery = function () { 22 | throw new Error('Unimplemented method'); 23 | }; 24 | 25 | QueryBase.prototype.hasNextQuery = function (job) { 26 | return !!this.getNextQuery(job); 27 | }; 28 | 29 | QueryBase.prototype.getStatus = function () { 30 | throw new Error('Unimplemented method'); 31 | }; 32 | -------------------------------------------------------------------------------- /lib/batch/models/query/query-factory.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var QueryFallback = require('./query-fallback'); 4 | 5 | function QueryFactory () { 6 | } 7 | 8 | module.exports = QueryFactory; 9 | 10 | QueryFactory.create = function (job, index) { 11 | if (QueryFallback.is(job.query.query[index])) { 12 | return new QueryFallback(job, index); 13 | } 14 | 15 | throw new Error('there is no query class for the provided query'); 16 | }; 17 | -------------------------------------------------------------------------------- /lib/batch/models/query/query-fallback.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var util = require('util'); 4 | var QueryBase = require('./query-base'); 5 | var Query = require('./query'); 6 | var Fallback = require('./fallback'); 7 | var jobStatus = require('../../job-status'); 8 | 9 | function QueryFallback (job, index) { 10 | QueryBase.call(this, index); 11 | 12 | this.init(job, index); 13 | } 14 | 15 | util.inherits(QueryFallback, QueryBase); 16 | 17 | QueryFallback.is = function (query) { 18 | if (Query.is(query)) { 19 | return true; 20 | } 21 | return false; 22 | }; 23 | 24 | QueryFallback.prototype.init = function (job, index) { 25 | this.query = new Query(index); 26 | 27 | if (Fallback.is(job.query.query[index])) { 28 | this.fallback = new Fallback(index); 29 | } 30 | }; 31 | 32 | QueryFallback.prototype.getNextQuery = function (job) { 33 | if (this.query.hasNextQuery(job)) { 34 | return this.query.getNextQuery(job); 35 | } 36 | 37 | if (this.fallback && this.fallback.hasNextQuery(job)) { 38 | return this.fallback.getNextQuery(job); 39 | } 40 | }; 41 | 42 | QueryFallback.prototype.setStatus = function (status, job, previous, errorMesssage) { 43 | var isValid = false; 44 | var appliedToFallback = false; 45 | 46 | if (previous.isValid && !previous.appliedToFallback) { 47 | if (status === jobStatus.FAILED || status === jobStatus.CANCELLED) { 48 | this.query.setStatus(jobStatus.SKIPPED, job, errorMesssage); 49 | 50 | if (this.fallback) { 51 | this.fallback.setStatus(jobStatus.SKIPPED, job); 52 | } 53 | } 54 | } else if (!previous.isValid) { 55 | isValid = this.query.setStatus(status, job, errorMesssage); 56 | 57 | if (this.fallback) { 58 | if (!isValid) { 59 | isValid = this.fallback.setStatus(status, job, 
errorMesssage); 60 | appliedToFallback = true; 61 | } else if (isValid && this.isFinalStatus(status) && !this.fallback.hasNextQuery(job)) { 62 | this.fallback.setStatus(jobStatus.SKIPPED, job); 63 | } 64 | } 65 | } 66 | 67 | return { isValid: isValid, appliedToFallback: appliedToFallback }; 68 | }; 69 | 70 | QueryFallback.prototype.getStatus = function (job) { 71 | return this.query.getStatus(job); 72 | }; 73 | 74 | module.exports = QueryFallback; 75 | -------------------------------------------------------------------------------- /lib/batch/models/query/query.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var util = require('util'); 4 | var QueryBase = require('./query-base'); 5 | var jobStatus = require('../../job-status'); 6 | 7 | function Query (index) { 8 | QueryBase.call(this, index); 9 | } 10 | util.inherits(Query, QueryBase); 11 | 12 | module.exports = Query; 13 | 14 | Query.is = function (query) { 15 | if (query.query && typeof query.query === 'string') { 16 | return true; 17 | } 18 | 19 | return false; 20 | }; 21 | 22 | Query.prototype.getNextQuery = function (job) { 23 | if (job.query.query[this.index].status === jobStatus.PENDING) { 24 | var query = { 25 | query: job.query.query[this.index].query 26 | }; 27 | if (Number.isFinite(job.query.query[this.index].timeout)) { 28 | query.timeout = job.query.query[this.index].timeout; 29 | } 30 | return query; 31 | } 32 | }; 33 | 34 | Query.prototype.setStatus = function (status, job, errorMesssage) { 35 | var isValid = false; 36 | 37 | isValid = this.isValidTransition(job.query.query[this.index].status, status); 38 | 39 | if (isValid) { 40 | job.query.query[this.index].status = status; 41 | if (status === jobStatus.RUNNING) { 42 | job.query.query[this.index].started_at = new Date().toISOString(); 43 | } 44 | if (this.isFinalStatus(status)) { 45 | job.query.query[this.index].ended_at = new Date().toISOString(); 46 | } 47 | if (status === jobStatus.FAILED && errorMesssage) { 48 | job.query.query[this.index].failed_reason = errorMesssage; 49 | } 50 | } 51 | 52 | return isValid; 53 | }; 54 | 55 | Query.prototype.getStatus = function (job) { 56 | return job.query.query[this.index].status; 57 | }; 58 | -------------------------------------------------------------------------------- /lib/batch/pubsub/channel.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | DB: 0, 3 | NAME: 'batch:users' 4 | }; 5 | -------------------------------------------------------------------------------- /lib/batch/pubsub/job-publisher.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var Channel = require('./channel'); 4 | var debug = require('./../util/debug')('pubsub:publisher'); 5 | var error = require('./../util/debug')('pubsub:publisher:error'); 6 | 7 | function JobPublisher (pool) { 8 | this.pool = pool; 9 | } 10 | 11 | JobPublisher.prototype.publish = function (user) { 12 | var self = this; 13 | 14 | this.pool.acquire(Channel.DB) 15 | .then(client => { 16 | client.publish(Channel.NAME, user, function (err) { 17 | self.pool.release(Channel.DB, client) 18 | .then(() => { 19 | if (err) { 20 | return error('Error publishing to ' + Channel.NAME + ':' + user + ', ' + err.message); 21 | } 22 | 23 | debug('publish to ' + Channel.NAME + ':' + user); 24 | }) 25 | .catch(err => error('Error releasing redis client: ' + err.message)); 26 | }); 27 | }) 28 | .catch(err => error('Error 
acquiring redis client: ' + err.message)); 29 | }; 30 | 31 | module.exports = JobPublisher; 32 | -------------------------------------------------------------------------------- /lib/batch/pubsub/job-subscriber.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var Channel = require('./channel'); 4 | var debug = require('./../util/debug')('pubsub:subscriber'); 5 | var error = require('./../util/debug')('pubsub:subscriber:error'); 6 | 7 | function JobSubscriber (pool) { 8 | this.pool = pool; 9 | } 10 | 11 | module.exports = JobSubscriber; 12 | 13 | JobSubscriber.prototype.subscribe = function (onJobHandler, callback) { 14 | var self = this; 15 | 16 | self.pool.acquire(Channel.DB) 17 | .then(client => { 18 | self.client = client; 19 | client.removeAllListeners('message'); 20 | client.unsubscribe(Channel.NAME); 21 | client.subscribe(Channel.NAME); 22 | client.on('message', function (channel, user) { 23 | debug('message received in channel=%s from user=%s', channel, user); 24 | onJobHandler(user); 25 | }); 26 | 27 | client.on('error', function () { 28 | self.unsubscribe(); 29 | self.pool.release(Channel.DB, client) 30 | .catch(err => error('Error releasing redis client: ' + err.message)); 31 | self.subscribe(onJobHandler); 32 | }); 33 | 34 | if (callback) { 35 | callback(); 36 | } 37 | }) 38 | .catch(err => { 39 | error('Error acquiring redis client: ' + err.message); 40 | if (callback) { 41 | return callback(err); 42 | } 43 | }); 44 | }; 45 | 46 | JobSubscriber.prototype.unsubscribe = function (callback) { 47 | if (this.client && this.client.connected) { 48 | this.client.unsubscribe(Channel.NAME, callback); 49 | } else { 50 | if (callback) { 51 | return callback(null); 52 | } 53 | } 54 | }; 55 | -------------------------------------------------------------------------------- /lib/batch/query-runner.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var PSQL = require('cartodb-psql'); 4 | 5 | function QueryRunner (userDatabaseMetadataService, logger) { 6 | this.userDatabaseMetadataService = userDatabaseMetadataService; 7 | this.logger = logger; 8 | } 9 | 10 | module.exports = QueryRunner; 11 | 12 | function hasDBParams (dbparams) { 13 | return (dbparams.user && dbparams.host && dbparams.port && dbparams.dbname && dbparams.pass); 14 | } 15 | 16 | QueryRunner.prototype.run = function (jobId, sql, user, timeout, dbparams, callback) { 17 | if (hasDBParams(dbparams)) { 18 | return this._run(dbparams, jobId, sql, timeout, callback); 19 | } 20 | 21 | const dbConfigurationError = new Error('Batch Job DB misconfiguration'); 22 | 23 | return callback(dbConfigurationError); 24 | }; 25 | 26 | QueryRunner.prototype._run = function (dbparams, jobId, sql, timeout, callback) { 27 | var pg = new PSQL(dbparams); 28 | this.logger.debug('Running query [timeout=%d] %s', timeout, sql); 29 | pg.query(`/* ${jobId} */ ${sql}`, callback, false, timeout); 30 | }; 31 | -------------------------------------------------------------------------------- /lib/batch/scheduler/capacity/fixed.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | function FixedCapacity (capacity) { 4 | this.capacity = Math.max(1, capacity); 5 | } 6 | 7 | module.exports = FixedCapacity; 8 | 9 | FixedCapacity.prototype.getCapacity = function (callback) { 10 | return callback(null, this.capacity); 11 | }; 12 |
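// All capacity strategies in this directory (fixed, http-simple, http-load) share the same contract:
// getCapacity(callback) invokes callback(err, capacity) with a capacity of at least 1; the HTTP-based
// strategies fall back to a capacity of 1 when the remote endpoint cannot be reached.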
-------------------------------------------------------------------------------- /lib/batch/scheduler/capacity/http-load.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var util = require('util'); 4 | var debug = require('../../util/debug')('capacity-http-load'); 5 | var HttpSimpleCapacity = require('./http-simple'); 6 | 7 | function HttpLoadCapacity (host, capacityEndpoint) { 8 | HttpSimpleCapacity.call(this, host, capacityEndpoint); 9 | } 10 | util.inherits(HttpLoadCapacity, HttpSimpleCapacity); 11 | 12 | module.exports = HttpLoadCapacity; 13 | 14 | HttpLoadCapacity.prototype.getCapacity = function (callback) { 15 | this.getResponse(function (err, values) { 16 | var capacity = 1; 17 | 18 | if (err) { 19 | return callback(null, capacity); 20 | } 21 | 22 | var cores = parseInt(values.cores, 10); 23 | var relativeLoad = parseFloat(values.relative_load); 24 | 25 | capacity = Math.max(1, Math.floor(((1 - relativeLoad) * cores) - 1)); 26 | 27 | capacity = Number.isFinite(capacity) ? capacity : 1; 28 | 29 | debug('host=%s, capacity=%s', this.host, capacity); 30 | return callback(null, capacity); 31 | }.bind(this)); 32 | }; 33 | -------------------------------------------------------------------------------- /lib/batch/scheduler/capacity/http-simple.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var request = require('request'); 4 | var debug = require('../../util/debug')('capacity-http-simple'); 5 | 6 | function HttpSimpleCapacity (host, capacityEndpoint) { 7 | this.host = host; 8 | this.capacityEndpoint = capacityEndpoint; 9 | 10 | this.lastResponse = null; 11 | this.lastResponseTime = 0; 12 | } 13 | 14 | module.exports = HttpSimpleCapacity; 15 | 16 | HttpSimpleCapacity.prototype.getCapacity = function (callback) { 17 | this.getResponse(function (err, values) { 18 | var capacity = 1; 19 | 20 | if (err) { 21 | return callback(null, capacity); 22 | } 23 | 24 | var availableCores = parseInt(values.available_cores, 10); 25 | 26 | capacity = Math.max(availableCores, 1); 27 | capacity = Number.isFinite(capacity) ? capacity : 1; 28 | 29 | debug('host=%s, capacity=%s', this.host, capacity); 30 | return callback(null, capacity); 31 | }.bind(this)); 32 | }; 33 | 34 | HttpSimpleCapacity.prototype.getResponse = function (callback) { 35 | var requestParams = { 36 | method: 'POST', 37 | url: this.capacityEndpoint, 38 | timeout: 2000, 39 | json: true 40 | }; 41 | debug('getCapacity(%s)', this.host); 42 | 43 | // throttle requests for 500 ms 44 | var now = Date.now(); 45 | if (this.lastResponse !== null && ((now - this.lastResponseTime) < 500)) { 46 | return callback(null, this.lastResponse); 47 | } 48 | 49 | request.post(requestParams, function (err, res, jsonRes) { 50 | if (err) { 51 | return callback(err); 52 | } 53 | if (jsonRes && jsonRes.retcode === 0) { 54 | this.lastResponse = jsonRes.return_values || {}; 55 | // We could go more aggressive by updating lastResponseTime on failures. 
56 | this.lastResponseTime = now; 57 | 58 | return callback(null, this.lastResponse); 59 | } 60 | return callback(new Error('Could not retrieve information from endpoint')); 61 | }.bind(this)); 62 | }; 63 | -------------------------------------------------------------------------------- /lib/batch/user-database-metadata-service.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | function UserDatabaseMetadataService (metadataBackend) { 4 | this.metadataBackend = metadataBackend; 5 | } 6 | 7 | UserDatabaseMetadataService.prototype.getUserMetadata = function (username, callback) { 8 | var self = this; 9 | 10 | this.metadataBackend.getAllUserDBParams(username, function (err, userDatabaseMetadata) { 11 | if (err) { 12 | return callback(err); 13 | } 14 | 15 | callback(null, self.parseMetadataToDatabase(userDatabaseMetadata)); 16 | }); 17 | }; 18 | 19 | UserDatabaseMetadataService.prototype.parseMetadataToDatabase = function (userDatabaseMetadata) { 20 | var dbParams = userDatabaseMetadata; 21 | 22 | var dbopts = {}; 23 | 24 | dbopts.port = dbParams.dbport || global.settings.db_batch_port || global.settings.db_port; 25 | dbopts.host = dbParams.dbhost; 26 | dbopts.dbname = dbParams.dbname; 27 | 28 | return dbopts; 29 | }; 30 | 31 | module.exports = UserDatabaseMetadataService; 32 | -------------------------------------------------------------------------------- /lib/batch/util/debug.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var debug = require('debug'); 4 | 5 | module.exports = function batchDebug (ns) { 6 | return debug(['batch', ns].join(':')); 7 | }; 8 | -------------------------------------------------------------------------------- /lib/batch/util/forever.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function forever (fn, done) { 4 | function next (err) { 5 | if (err) { 6 | return done(err); 7 | } 8 | fn(next); 9 | } 10 | next(); 11 | }; 12 | -------------------------------------------------------------------------------- /lib/models/cartodb-request.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = class CdbRequest { 4 | constructor () { 5 | // would extract "strk" from "strk.cartodb.com" 6 | this.RE_USER_FROM_HOST = new RegExp(global.settings.user_from_host || '^([^\\.]+)\\.'); 7 | } 8 | 9 | userByReq (req) { 10 | const host = req.headers.host || ''; 11 | 12 | if (req.params.user) { 13 | return req.params.user; 14 | } 15 | 16 | const mat = host.match(this.RE_USER_FROM_HOST); 17 | 18 | if (!mat || mat.length !== 2) { 19 | throw new Error(`No username found in hostname '${host}'`); 20 | } 21 | 22 | return mat[1]; 23 | } 24 | }; 25 | -------------------------------------------------------------------------------- /lib/models/formats/README: -------------------------------------------------------------------------------- 1 | Format classes are required to expose a constructor with no arguments, 2 | a getFileExtension() and a sendResponse(opts, callback) method. 
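For illustration only, a hypothetical format implementing that contract directly could look like the
sketch below. The formats shipped in this directory instead extend the Ogr and Pg base classes rather
than implementing the contract from scratch, and ``EchoFormat`` is not a real format; the ``opts``
fields used here are described next.

    function EchoFormat () {}                        // constructor with no arguments

    EchoFormat.prototype.getFileExtension = function () {
        return 'txt';
    };

    EchoFormat.prototype.sendResponse = function (opts, callback) {
        // A real format would run opts.sql against opts.database and stream
        // the formatted rows; this sketch just echoes the query to the sink.
        opts.sink.write('-- result for: ' + opts.sql + '\n');
        opts.sink.end();
        callback();                                  // pass an Error argument on failure
    };

    module.exports = EchoFormat;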
3 | 4 | The ``opts`` parameter contains: 5 | 6 | sink Output stream to send the response to 7 | sql SQL query requested by the user 8 | skipfields Comma-separated list of fields to skip from output 9 | really only needed with "SELECT *" queries 10 | gn Name of the geometry column (for formats requiring one) 11 | dp Number of decimal points of precision for geometries (if used) 12 | database Name of the database to connect to 13 | user_id Identifier of the user 14 | filename Name to use for attachment disposition 15 | 16 | The ``callback`` parameter is a function that is invoked when the 17 | format object has finished sending the result to the sink. 18 | If an error occurs the callback is invoked with an Error argument. 19 | -------------------------------------------------------------------------------- /lib/models/formats/index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var fs = require('fs'); 4 | const path = require('path'); 5 | var formats = {}; 6 | 7 | function formatFilesWithPath (dir) { 8 | var formatDir = path.join(__dirname, dir); 9 | return fs.readdirSync(formatDir).map(function (formatFile) { 10 | return path.join(formatDir, formatFile); 11 | }); 12 | } 13 | 14 | var formatFilesPaths = [] 15 | .concat(formatFilesWithPath('ogr')) 16 | .concat(formatFilesWithPath('pg')); 17 | 18 | formatFilesPaths.forEach(function (file) { 19 | var format = require(file); 20 | formats[format.prototype.id] = format; 21 | }); 22 | 23 | module.exports = formats; 24 | -------------------------------------------------------------------------------- /lib/models/formats/ogr/csv.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var Ogr = require('./../ogr'); 4 | 5 | function CsvFormat () {} 6 | 7 | CsvFormat.prototype = new Ogr('csv'); 8 | 9 | CsvFormat.prototype._contentType = 'text/csv; charset=utf-8; header=present'; 10 | CsvFormat.prototype._fileExtension = 'csv'; 11 | 12 | CsvFormat.prototype.generate = function (options, callback) { 13 | this.toOGR_SingleFile(options, 'CSV', callback); 14 | }; 15 | 16 | module.exports = CsvFormat; 17 | -------------------------------------------------------------------------------- /lib/models/formats/ogr/geopackage.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var Ogr = require('./../ogr'); 4 | 5 | function GeoPackageFormat () {} 6 | 7 | GeoPackageFormat.prototype = new Ogr('gpkg'); 8 | 9 | GeoPackageFormat.prototype._contentType = 'application/x-sqlite3; charset=utf-8'; 10 | GeoPackageFormat.prototype._fileExtension = 'gpkg'; 11 | // As of GDAL 1.10.1 SRID detection is bogus, so we use 12 | // our own method.
See: 13 | // http://trac.osgeo.org/gdal/ticket/5131 14 | // http://trac.osgeo.org/gdal/ticket/5287 15 | // http://github.com/CartoDB/CartoDB-SQL-API/issues/110 16 | // http://github.com/CartoDB/CartoDB-SQL-API/issues/116 17 | // Bug was fixed in GDAL 1.10.2 18 | GeoPackageFormat.prototype._needSRS = true; 19 | 20 | GeoPackageFormat.prototype.generate = function (options, callback) { 21 | options.cmd_params = ['-lco', 'FID=cartodb_id']; 22 | this.toOGR_SingleFile(options, 'GPKG', callback); 23 | }; 24 | 25 | module.exports = GeoPackageFormat; 26 | -------------------------------------------------------------------------------- /lib/models/formats/ogr/kml.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var Ogr = require('./../ogr'); 4 | 5 | function KmlFormat () {} 6 | 7 | KmlFormat.prototype = new Ogr('kml'); 8 | 9 | KmlFormat.prototype._contentType = 'application/kml; charset=utf-8'; 10 | KmlFormat.prototype._fileExtension = 'kml'; 11 | // As of GDAL 1.10.1 SRID detection is bogus, so we use 12 | // our own method. See: 13 | // http://trac.osgeo.org/gdal/ticket/5131 14 | // http://trac.osgeo.org/gdal/ticket/5287 15 | // http://github.com/CartoDB/CartoDB-SQL-API/issues/110 16 | // http://github.com/CartoDB/CartoDB-SQL-API/issues/116 17 | // Bug was fixed in GDAL 1.10.2 18 | KmlFormat.prototype._needSRS = true; 19 | 20 | KmlFormat.prototype.generate = function (options, callback) { 21 | this.toOGR_SingleFile(options, 'KML', callback); 22 | }; 23 | 24 | module.exports = KmlFormat; 25 | -------------------------------------------------------------------------------- /lib/models/formats/ogr/spatialite.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var Ogr = require('./../ogr'); 4 | 5 | function SpatiaLiteFormat () {} 6 | 7 | SpatiaLiteFormat.prototype = new Ogr('spatialite'); 8 | 9 | SpatiaLiteFormat.prototype._contentType = 'application/x-sqlite3; charset=utf-8'; 10 | SpatiaLiteFormat.prototype._fileExtension = 'sqlite'; 11 | // As of GDAL 1.10.1 SRID detection is bogus, so we use 12 | // our own method. 
See: 13 | // http://trac.osgeo.org/gdal/ticket/5131 14 | // http://trac.osgeo.org/gdal/ticket/5287 15 | // http://github.com/CartoDB/CartoDB-SQL-API/issues/110 16 | // http://github.com/CartoDB/CartoDB-SQL-API/issues/116 17 | // Bug was fixed in GDAL 1.10.2 18 | SpatiaLiteFormat.prototype._needSRS = true; 19 | 20 | SpatiaLiteFormat.prototype.generate = function (options, callback) { 21 | this.toOGR_SingleFile(options, 'SQLite', callback); 22 | options.cmd_params = ['SPATIALITE=yes']; 23 | }; 24 | 25 | module.exports = SpatiaLiteFormat; 26 | -------------------------------------------------------------------------------- /lib/models/formats/pg/arraybuffer.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var _ = require('underscore'); 4 | 5 | var Pg = require('./../pg'); 6 | var ArrayBufferSer = require('../../bin-encoder'); 7 | 8 | function BinaryFormat () {} 9 | 10 | BinaryFormat.prototype = new Pg('arraybuffer'); 11 | 12 | BinaryFormat.prototype._contentType = 'application/octet-stream'; 13 | 14 | BinaryFormat.prototype._extractTypeFromName = function (name) { 15 | var g = name.match(/.*__(uintclamp|uint|int|float)(8|16|32)/i); 16 | if (g && g.length === 3) { 17 | var typeName = g[1] + g[2]; 18 | return ArrayBufferSer.typeNames[typeName]; 19 | } 20 | }; 21 | 22 | BinaryFormat.prototype.transform = function (result, options, callback) { 23 | var totalRows = result.rowCount; 24 | var rows = result.rows; 25 | 26 | // get headers 27 | if (!totalRows) { 28 | callback(null, Buffer.alloc(0)); 29 | return; 30 | } 31 | 32 | var headersNames = Object.keys(rows[0]); 33 | var headerTypes = []; 34 | 35 | if (_.contains(headersNames, 'the_geom')) { 36 | callback(new Error('geometry types are not supported'), null); 37 | return; 38 | } 39 | 40 | try { 41 | var i; 42 | var r; 43 | var n; 44 | var t; 45 | // get header types (and guess from name) 46 | for (i = 0; i < headersNames.length; ++i) { 47 | r = rows[0]; 48 | n = headersNames[i]; 49 | if (typeof (r[n]) === 'string') { 50 | headerTypes.push(ArrayBufferSer.STRING); 51 | } else if (typeof (r[n]) === 'object') { 52 | t = this._extractTypeFromName(n); 53 | t = t || ArrayBufferSer.FLOAT32; 54 | headerTypes.push(ArrayBufferSer.BUFFER + t); 55 | } else { 56 | t = this._extractTypeFromName(n); 57 | headerTypes.push(t || ArrayBufferSer.FLOAT32); 58 | } 59 | } 60 | 61 | // pack the data 62 | var header = new ArrayBufferSer(ArrayBufferSer.STRING, headersNames); 63 | var data = [header]; 64 | for (i = 0; i < headersNames.length; ++i) { 65 | var d = []; 66 | n = headersNames[i]; 67 | for (r = 0; r < totalRows; ++r) { 68 | var row = rows[r][n]; 69 | if (headerTypes[i] > ArrayBufferSer.BUFFER) { 70 | row = new ArrayBufferSer(headerTypes[i] - ArrayBufferSer.BUFFER, row); 71 | } 72 | d.push(row); 73 | } 74 | var b = new ArrayBufferSer(headerTypes[i], d); 75 | data.push(b); 76 | } 77 | 78 | // create the final buffer 79 | var all = new ArrayBufferSer(ArrayBufferSer.BUFFER, data); 80 | 81 | callback(null, all.buffer); 82 | } catch (e) { 83 | callback(e, null); 84 | } 85 | }; 86 | 87 | module.exports = BinaryFormat; 88 | -------------------------------------------------------------------------------- /lib/monitoring/health-check.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var step = require('step'); 4 | var fs = require('fs'); 5 | 6 | function HealthCheck (disableFile) { 7 | this.disableFile = disableFile; 8 | } 9 | 10 | module.exports = 
HealthCheck; 11 | 12 | HealthCheck.prototype.check = function (callback) { 13 | var self = this; 14 | 15 | step( 16 | function getManualDisable () { 17 | fs.readFile(self.disableFile, this); 18 | }, 19 | function handleDisabledFile (err, data) { 20 | var next = this; 21 | if (err) { 22 | return next(); 23 | } 24 | if (data) { 25 | err = new Error(data); 26 | err.http_status = 503; 27 | throw err; 28 | } 29 | }, 30 | function handleResult (err) { 31 | callback(err); 32 | } 33 | ); 34 | }; 35 | -------------------------------------------------------------------------------- /lib/server-options.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const Logger = require('./utils/logger'); 4 | const logger = new Logger(); 5 | 6 | module.exports = function getServerOptions () { 7 | const defaults = { 8 | routes: { 9 | // Each entry corresponds to an Express router. 10 | // You must define at least one path. However, middlewares are optional. 11 | api: [{ 12 | // Required: paths where other "routers" or "controllers" will be attached. 13 | paths: [ 14 | // In case the path has a :user param, the username will be the one specified in the URL; 15 | // otherwise it will fall back to extracting the username from the host header. 16 | '/api/:version', 17 | '/user/:user/api/:version' 18 | ], 19 | // Optional: attach middlewares at the beginning of the router 20 | // to perform custom operations. 21 | middlewares: [], 22 | sql: [{ 23 | // Required 24 | paths: [ 25 | '/sql' 26 | ], 27 | // Optional 28 | middlewares: [] 29 | }] 30 | }] 31 | }, 32 | logger 33 | }; 34 | 35 | return Object.assign({}, defaults, global.settings); 36 | }; 37 | -------------------------------------------------------------------------------- /lib/server.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const express = require('express'); 4 | const fs = require('fs'); 5 | const RedisPool = require('redis-mpool'); 6 | const cartodbRedis = require('cartodb-redis'); 7 | const ApiRouter = require('./api/api-router'); 8 | const batchFactory = require('./batch'); 9 | const getServerOptions = require('./server-options'); 10 | 11 | process.env.PGAPPNAME = process.env.PGAPPNAME || 'cartodb_sqlapi'; 12 | 13 | // override Date.toJSON 14 | require('./utils/date-to-json'); 15 | 16 | module.exports = function createServer (statsClient) { 17 | const { routes, logger } = getServerOptions(); 18 | const app = express(); 19 | const redisPool = new RedisPool({ 20 | name: 'sql-api', 21 | host: global.settings.redis_host, 22 | port: global.settings.redis_port, 23 | max: global.settings.redisPool, 24 | idleTimeoutMillis: global.settings.redisIdleTimeoutMillis, 25 | reapIntervalMillis: global.settings.redisReapIntervalMillis 26 | }); 27 | const metadataBackend = cartodbRedis({ pool: redisPool }); 28 | 29 | // Set default configuration 30 | global.settings.db_pubuser = global.settings.db_pubuser || 'publicuser'; 31 | global.settings.bufferedRows = global.settings.bufferedRows || 1000; 32 | global.settings.ratelimits = Object.assign( 33 | { 34 | rateLimitsEnabled: false, 35 | endpoints: { 36 | query: false, 37 | job_create: false, 38 | job_get: false, 39 | job_delete: false, 40 | copy_from: false, 41 | copy_to: false 42 | } 43 | }, 44 | global.settings.ratelimits 45 | ); 46 | 47 | // TODO: it's here for testing purposes; try to move it to the top level 48 | global.settings.tmpDir = global.settings.tmpDir || '/tmp'; 49 | if
(!fs.existsSync(global.settings.tmpDir)) { 50 | fs.mkdirSync(global.settings.tmpDir, { recursive: true }); 51 | } 52 | 53 | app.enable('jsonp callback'); 54 | app.set('trust proxy', true); 55 | app.disable('x-powered-by'); 56 | app.disable('etag'); 57 | 58 | const apiRouter = new ApiRouter({ 59 | redisPool, 60 | metadataBackend, 61 | statsClient, 62 | logger 63 | }); 64 | apiRouter.route(app, routes.api); 65 | 66 | const isBatchProcess = process.argv.indexOf('--no-batch') === -1; 67 | 68 | if (global.settings.environment !== 'test' && isBatchProcess) { 69 | const batchName = global.settings.api_hostname || 'batch'; 70 | 71 | app.batch = batchFactory(metadataBackend, redisPool, batchName, statsClient, logger); 72 | app.batch.start(); 73 | } 74 | 75 | return app; 76 | }; 77 | -------------------------------------------------------------------------------- /lib/services/error-handler-factory.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const ErrorHandler = require('./error-handler'); 4 | const { codeToCondition } = require('../postgresql/error-codes'); 5 | 6 | module.exports = function ErrorHandlerFactory (err) { 7 | if (isTimeoutError(err)) { 8 | return createTimeoutError(); 9 | } else { 10 | return createGenericError(err); 11 | } 12 | }; 13 | 14 | function isTimeoutError (err) { 15 | return err.message && ( 16 | err.message.indexOf('statement timeout') > -1 || 17 | err.message.indexOf('RuntimeError: Execution of function interrupted by signal') > -1 || 18 | err.message.indexOf('canceling statement due to user request') > -1 19 | ); 20 | } 21 | 22 | function createTimeoutError () { 23 | return new ErrorHandler({ 24 | message: 'You are over platform\'s limits: SQL query timeout error.' + 25 | ' Refactor your query before running again or contact CARTO support for more details.', 26 | context: 'limit', 27 | detail: 'datasource', 28 | httpStatus: 429 29 | }); 30 | } 31 | 32 | function createGenericError (err) { 33 | return new ErrorHandler({ 34 | message: err.message, 35 | context: err.context, 36 | detail: err.detail, 37 | hint: err.hint, 38 | httpStatus: err.http_status, 39 | name: codeToCondition[err.code] || err.name 40 | }); 41 | } 42 | -------------------------------------------------------------------------------- /lib/services/error-handler.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | class ErrorHandler extends Error { 4 | constructor ({ message, context, detail, hint, httpStatus, name }) { 5 | super(message); 6 | 7 | this.http_status = this.getHttpStatus(httpStatus); 8 | this.context = context; 9 | this.detail = detail; 10 | this.hint = hint; 11 | 12 | if (name) { 13 | this.name = name; 14 | } 15 | } 16 | 17 | getResponse () { 18 | const serialized = { 19 | error: [this.message], 20 | context: this.context, 21 | detail: this.detail, 22 | hint: this.hint 23 | }; 24 | 25 | if (global.settings.environment === 'development') { 26 | serialized.stack = this.stack; 27 | } 28 | 29 | return serialized; 30 | } 31 | 32 | getHttpStatus (httpStatus = 400) { 33 | if (this.message.includes('permission denied')) { 34 | return 403; 35 | } 36 | 37 | return httpStatus; 38 | } 39 | } 40 | 41 | module.exports = ErrorHandler; 42 | -------------------------------------------------------------------------------- /lib/services/logger.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const bunyan = require('bunyan'); 4 | 
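// Usage sketch for this bunyan-backed logger; the log file path, logger name
// and log fields below are hypothetical placeholders, not values taken from
// the project configuration. Passing a falsy `path` makes it log to stdout.
//
//   const Logger = require('./logger');
//   const fileLogger = new Logger('/tmp/sql-api.log', 'sql-api');
//   fileLogger.info({ job_id: 'hypothetical-id' }, 'job started');
//   fileLogger.end(() => process.exit(0)); // flush the file stream before exiting
//
//   const stdoutLogger = new Logger(null, 'sql-api'); // no path: logs to stdout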
5 | class Logger { 6 | constructor (path, name) { 7 | const env = process.env.NODE_ENV; 8 | const logLevel = process.env.LOG_LEVEL; 9 | const stream = { 10 | level: logLevel || ((env === 'test') ? 'fatal' : (env === 'development') ? 'debug' : 'info') 11 | }; 12 | 13 | if (path) { 14 | stream.path = path; 15 | } else { 16 | stream.stream = process.stdout; 17 | } 18 | 19 | this.path = path; 20 | this.logger = bunyan.createLogger({ 21 | name, 22 | streams: [stream] 23 | }); 24 | } 25 | 26 | fatal (...args) { 27 | this.logger.fatal(...args); 28 | } 29 | 30 | error (...args) { 31 | this.logger.error(...args); 32 | } 33 | 34 | warn (...args) { 35 | this.logger.warn(...args); 36 | } 37 | 38 | info (...args) { 39 | this.logger.info(...args); 40 | } 41 | 42 | debug (...args) { 43 | this.logger.debug(...args); 44 | } 45 | 46 | trace (...args) { 47 | this.logger.trace(...args); 48 | } 49 | 50 | reopenFileStreams () { 51 | this.logger.reopenFileStreams(); 52 | } 53 | 54 | // Ensures that the writable stream is flushed. 55 | // Use this function before exiting the process to not lose log entries 56 | // 57 | // See: https://github.com/trentm/node-bunyan/issues/37 58 | // See: https://github.com/trentm/node-bunyan/issues/73 59 | end (callback) { 60 | // process.stdout cannot be closed 61 | if (!this.path) { 62 | return callback(); 63 | } 64 | 65 | this.logger.streams[0].stream.on('finish', callback); 66 | this.logger.streams[0].stream.end(); // close stream, flush buffer to disk 67 | } 68 | } 69 | 70 | module.exports = Logger; 71 | -------------------------------------------------------------------------------- /lib/services/pg-entities-access-validator.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const FORBIDDEN_ENTITIES = { 4 | carto: ['*'], 5 | cartodb: [ 6 | 'cdb_analysis_catalog', 7 | 'cdb_conf', 8 | 'cdb_tablemetadata' 9 | ], 10 | pg_catalog: ['*'], 11 | information_schema: ['*'], 12 | public: ['spatial_ref_sys'], 13 | topology: [ 14 | 'layer', 15 | 'topology' 16 | ] 17 | }; 18 | 19 | const Validator = { 20 | validate (affectedTables, authorizationLevel) { 21 | let hardValidationResult = true; 22 | let softValidationResult = true; 23 | 24 | if (!!affectedTables && affectedTables.tables) { 25 | if (global.settings.validatePGEntitiesAccess) { 26 | hardValidationResult = this.hardValidation(affectedTables.tables); 27 | } 28 | 29 | if (authorizationLevel !== 'master') { 30 | softValidationResult = this.softValidation(affectedTables.tables); 31 | } 32 | } 33 | 34 | return hardValidationResult && softValidationResult; 35 | }, 36 | 37 | hardValidation (tables) { 38 | for (const table of tables) { 39 | if (FORBIDDEN_ENTITIES[table.schema_name] && FORBIDDEN_ENTITIES[table.schema_name].length && 40 | ( 41 | FORBIDDEN_ENTITIES[table.schema_name][0] === '*' || 42 | FORBIDDEN_ENTITIES[table.schema_name].includes(table.table_name) 43 | ) 44 | ) { 45 | return false; 46 | } 47 | } 48 | 49 | return true; 50 | }, 51 | 52 | softValidation (tables) { 53 | for (const table of tables) { 54 | if (table.table_name.match(/\bpg_/)) { 55 | return false; 56 | } 57 | } 58 | 59 | return true; 60 | } 61 | }; 62 | 63 | module.exports = Validator; 64 | -------------------------------------------------------------------------------- /lib/services/pubsub-metrics.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const { PubSub } = require('@google-cloud/pubsub'); 4 | 5 | /** 6 | * PubSubMetricsService 7 | */ 
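// Usage sketch, assuming global.settings.pubSubMetrics is configured with
// { enabled: true, project_id, credentials, topic }; the event name and
// attributes below are hypothetical placeholders.
//
//   const PubSubMetricsService = require('./pubsub-metrics');
//   const metricsService = PubSubMetricsService.build();
//   if (metricsService.isEnabled()) {
//       metricsService.sendEvent('hypothetical-event', { user_id: 'some-user-id' });
//   }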
8 | class PubSubMetricsService { 9 | static build () { 10 | if (!global.settings.pubSubMetrics || !global.settings.pubSubMetrics.enabled) { 11 | return new PubSubMetricsService(undefined, false); 12 | } 13 | 14 | const pubsub = PubSubMetricsService.createPubSub(); 15 | 16 | return new PubSubMetricsService(pubsub, true); 17 | } 18 | 19 | static createPubSub () { 20 | const projectId = global.settings.pubSubMetrics.project_id; 21 | const credentials = global.settings.pubSubMetrics.credentials; 22 | const config = {}; 23 | 24 | if (projectId) { 25 | config.projectId = projectId; 26 | } 27 | if (credentials) { 28 | config.keyFilename = credentials; 29 | } 30 | return new PubSub(config); 31 | } 32 | 33 | constructor (pubSub, enabled) { 34 | this.pubsub = pubSub; 35 | this.enabled = enabled; 36 | } 37 | 38 | isEnabled () { 39 | return this.enabled; 40 | } 41 | 42 | _getTopic () { 43 | const topicName = global.settings.pubSubMetrics.topic; 44 | 45 | return this.pubsub.topic(topicName); 46 | } 47 | 48 | sendEvent (event, attributes) { 49 | if (!this.enabled) { 50 | return; 51 | } 52 | 53 | const data = Buffer.from(event); 54 | const topic = this._getTopic(); 55 | 56 | topic.publish(data, attributes) 57 | .then(() => { 58 | console.log(`PubSubTracker: event '${event}' published to '${topic.name}'`); 59 | }) 60 | .catch((error) => { 61 | console.error(`ERROR: pubsub middleware failed to publish event '${event}': ${error.message}`); 62 | }); 63 | } 64 | } 65 | 66 | module.exports = PubSubMetricsService; 67 | -------------------------------------------------------------------------------- /lib/services/stream-copy-metrics.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const { getFormatFromCopyQuery } = require('../utils/query-info'); 4 | 5 | module.exports = class StreamCopyMetrics { 6 | constructor (logger, type, sql, user, isGzip = false) { 7 | this.logger = logger; 8 | 9 | this.type = type; 10 | this.format = getFormatFromCopyQuery(sql); 11 | this.sql = sql; 12 | this.isGzip = isGzip; 13 | this.username = user; 14 | this.size = 0; 15 | this.gzipSize = 0; 16 | this.rows = 0; 17 | 18 | this.startTime = new Date(); 19 | this.endTime = null; 20 | this.time = null; 21 | 22 | this.success = true; 23 | this.error = null; 24 | 25 | this.ended = false; 26 | } 27 | 28 | addSize (size) { 29 | this.size += size; 30 | } 31 | 32 | addGzipSize (size) { 33 | this.gzipSize += size; 34 | } 35 | 36 | end (rows = null, error = null) { 37 | if (this.ended) { 38 | return; 39 | } 40 | 41 | this.ended = true; 42 | 43 | if (Number.isInteger(rows)) { 44 | this.rows = rows; 45 | } 46 | 47 | if (error instanceof Error) { 48 | this.error = error; 49 | } 50 | 51 | this.endTime = new Date(); 52 | this.time = (this.endTime.getTime() - this.startTime.getTime()) / 1000; 53 | 54 | this._log( 55 | this.startTime.toISOString(), 56 | this.isGzip && this.gzipSize ? this.gzipSize : null, 57 | this.error ? 
this.error.message : null 58 | ); 59 | } 60 | 61 | _log (timestamp, gzipSize = null, errorMessage = null) { 62 | const logData = { 63 | type: this.type, 64 | format: this.format, 65 | size: this.size, 66 | rows: this.rows, 67 | gzip: this.isGzip, 68 | username: this.username, 69 | time: this.time, 70 | timestamp, 71 | sql: this.sql 72 | }; 73 | 74 | if (gzipSize) { 75 | logData.gzipSize = gzipSize; 76 | } 77 | 78 | if (errorMessage) { 79 | logData.error = errorMessage; 80 | this.success = false; 81 | } 82 | 83 | logData.success = this.success; 84 | 85 | this.logger.info({ 'cdb-user': this.username, ingestion: logData }, 'Copy to/from query metrics'); 86 | } 87 | }; 88 | -------------------------------------------------------------------------------- /lib/services/throttler-stream.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const { Transform } = require('stream'); 4 | 5 | module.exports = class Throttler extends Transform { 6 | constructor (pgstream, ...args) { 7 | super(...args); 8 | 9 | this.pgstream = pgstream; 10 | 11 | this.sampleSeconds = global.settings.copy_from_maximum_slow_input_speed_interval || 15; 12 | this.minimunBytesPerSampleThreshold = global.settings.copy_from_minimum_input_speed || 0; 13 | this.byteCount = 0; 14 | 15 | this._interval = setInterval(this._updateMetrics.bind(this), this.sampleSeconds * 1000); 16 | } 17 | 18 | _updateMetrics () { 19 | if (this.byteCount < this.minimunBytesPerSampleThreshold) { 20 | clearInterval(this._interval); 21 | this.pgstream.emit('error', new Error('Connection closed by server: input data too slow')); 22 | } 23 | this.byteCount = 0; 24 | } 25 | 26 | _transform (chunk, encoding, callback) { 27 | this.byteCount += chunk.length; 28 | callback(null, chunk); 29 | } 30 | 31 | _flush (callback) { 32 | clearInterval(this._interval); 33 | callback(); 34 | } 35 | }; 36 | -------------------------------------------------------------------------------- /lib/services/user-limits.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /** 4 | * UserLimits 5 | * @param {cartodb-redis} metadataBackend 6 | * @param {object} options 7 | */ 8 | class UserLimits { 9 | constructor (metadataBackend, options = {}) { 10 | this.metadataBackend = metadataBackend; 11 | this.options = options; 12 | 13 | this.preprareRateLimit(); 14 | } 15 | 16 | preprareRateLimit () { 17 | if (this.options.limits.rateLimitsEnabled) { 18 | this.metadataBackend.loadRateLimitsScript(); 19 | } 20 | } 21 | 22 | getRateLimit (user, endpointGroup, callback) { 23 | this.metadataBackend.getRateLimit(user, 'sql', endpointGroup, callback); 24 | } 25 | } 26 | 27 | module.exports = UserLimits; 28 | -------------------------------------------------------------------------------- /lib/stats/client.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var _ = require('underscore'); 4 | var debug = require('debug')('windshaft:stats_client'); 5 | var StatsD = require('node-statsd').StatsD; 6 | 7 | module.exports = { 8 | /** 9 | * Returns an StatsD instance or an stub object that replicates the StatsD public interface so there is no need to 10 | * keep checking if the stats_client is instantiated or not. 11 | * 12 | * The first call to this method implies all future calls will use the config specified in the very first call. 13 | * 14 | * TODO: It's far from ideal to use make this a singleton, improvement desired. 
15 | * We proceed this way to be able to use StatsD from several places sharing one single StatsD instance. 16 | * 17 | * @param config Configuration for StatsD, if undefined it will return an stub 18 | * @returns {StatsD|Object} 19 | */ 20 | getInstance: function (config) { 21 | if (!this.instance) { 22 | var instance; 23 | 24 | if (config) { 25 | instance = new StatsD(config); 26 | instance.last_error = { msg: '', count: 0 }; 27 | instance.socket.on('error', function (err) { 28 | var lastErr = instance.last_error; 29 | var lastMsg = lastErr.msg; 30 | var thisMsg = '' + err; 31 | if (thisMsg !== lastMsg) { 32 | debug('statsd client socket error: ' + err); 33 | instance.last_error.count = 1; 34 | instance.last_error.msg = thisMsg; 35 | } else { 36 | ++lastErr.count; 37 | if (!lastErr.interval) { 38 | instance.last_error.interval = setInterval(function () { 39 | var count = instance.last_error.count; 40 | if (count > 1) { 41 | debug('last statsd client socket error repeated ' + count + ' times'); 42 | instance.last_error.count = 1; 43 | clearInterval(instance.last_error.interval); 44 | instance.last_error.interval = null; 45 | } 46 | }, 1000); 47 | } 48 | } 49 | }); 50 | } else { 51 | var stubFunc = function (stat, value, sampleRate, callback) { 52 | if (_.isFunction(callback)) { 53 | callback(null, 0); 54 | } 55 | }; 56 | instance = { 57 | timing: stubFunc, 58 | increment: stubFunc, 59 | decrement: stubFunc, 60 | gauge: stubFunc, 61 | unique: stubFunc, 62 | set: stubFunc, 63 | sendAll: stubFunc, 64 | send: stubFunc 65 | }; 66 | } 67 | 68 | this.instance = instance; 69 | } 70 | 71 | return this.instance; 72 | } 73 | }; 74 | -------------------------------------------------------------------------------- /lib/stats/profiler-proxy.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var Profiler = require('step-profiler'); 4 | 5 | /** 6 | * Proxy to encapsulate node-step-profiler module so there is no need to check if there is an instance 7 | */ 8 | function ProfilerProxy (opts) { 9 | this.profile = !!opts.profile; 10 | 11 | this.profiler = null; 12 | if (opts.profile) { 13 | this.profiler = new Profiler({ statsd_client: opts.statsd_client }); 14 | } 15 | } 16 | 17 | ProfilerProxy.prototype.done = function (what) { 18 | if (this.profile) { 19 | this.profiler.done(what); 20 | } 21 | }; 22 | 23 | ProfilerProxy.prototype.end = function () { 24 | if (this.profile) { 25 | this.profiler.end(); 26 | } 27 | }; 28 | 29 | ProfilerProxy.prototype.start = function (what) { 30 | if (this.profile) { 31 | this.profiler.start(what); 32 | } 33 | }; 34 | 35 | ProfilerProxy.prototype.add = function (what) { 36 | if (this.profile) { 37 | this.profiler.add(what || {}); 38 | } 39 | }; 40 | 41 | ProfilerProxy.prototype.sendStats = function () { 42 | if (this.profile) { 43 | this.profiler.sendStats(); 44 | } 45 | }; 46 | 47 | ProfilerProxy.prototype.toString = function () { 48 | return this.profile ? this.profiler.toString() : ''; 49 | }; 50 | 51 | ProfilerProxy.prototype.toJSONString = function () { 52 | return this.profile ? this.profiler.toJSONString() : '{}'; 53 | }; 54 | 55 | ProfilerProxy.prototype.toJSON = function () { 56 | return this.profile ? 
JSON.parse(this.profiler.toJSONString()) : {}; 57 | }; 58 | 59 | module.exports = ProfilerProxy; 60 | -------------------------------------------------------------------------------- /lib/utils/content-disposition.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = function getContentDisposition (formatter, filename, inline) { 4 | var ext = formatter.getFileExtension(); 5 | var time = new Date().toUTCString(); 6 | return (inline ? 'inline' : 'attachment') + '; filename=' + filename + '.' + ext + '; ' + 7 | 'modification-date="' + time + '";'; 8 | }; 9 | -------------------------------------------------------------------------------- /lib/utils/date-to-json.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | function pad (n) { 4 | return n < 10 ? '0' + n : n; 5 | } 6 | 7 | /* eslint-disable no-extend-native */ 8 | Date.prototype.toJSON = function () { 9 | var s = this.getFullYear() + '-' + pad(this.getMonth() + 1) + '-' + pad(this.getDate()) + 'T' + 10 | pad(this.getHours()) + ':' + pad(this.getMinutes()) + ':' + pad(this.getSeconds()); 11 | var offset = this.getTimezoneOffset(); 12 | if (offset === 0) { 13 | s += 'Z'; 14 | } else { 15 | s += (offset < 0 ? '+' : '-') + pad(Math.abs(offset / 60)) + pad(Math.abs(offset % 60)); 16 | } 17 | return s; 18 | }; 19 | /* eslint-enable no-extend-native */ 20 | -------------------------------------------------------------------------------- /lib/utils/filename-sanitizer.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var path = require('path'); 4 | 5 | module.exports = function sanitizeFilename (filename) { 6 | filename = path.basename(filename, path.extname(filename)); 7 | filename = filename.replace(/[;()\[\]<>'"\s]/g, '_'); // eslint-disable-line no-useless-escape 8 | return filename; 9 | }; 10 | -------------------------------------------------------------------------------- /lib/utils/logger.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const pino = require('pino'); 4 | const { req: requestSerializer, res: responseSerializer, err, wrapErrorSerializer } = pino.stdSerializers; 5 | const DEV_ENVS = ['test', 'development']; 6 | 7 | module.exports = class Logger { 8 | constructor () { 9 | const { LOG_LEVEL, NODE_ENV } = process.env; 10 | const logLevelFromNodeEnv = NODE_ENV === 'test' ? 'fatal' : 'info'; 11 | const errorSerializer = DEV_ENVS.includes(NODE_ENV) ? err : wrapErrorSerializer(err => { 12 | err.stack = err.stack.split('\n').slice(0, 3).join('\n'); 13 | return err; 14 | }); 15 | const options = { 16 | base: null, // Do not bind hostname, pid and friends by default 17 | level: LOG_LEVEL || logLevelFromNodeEnv, 18 | formatters: { 19 | level (label) { 20 | if (label === 'warn') { 21 | return { levelname: 'warning' }; 22 | } 23 | 24 | return { levelname: label }; 25 | } 26 | }, 27 | messageKey: 'event_message', 28 | timestamp: () => `,"timestamp":"${new Date(Date.now()).toISOString()}"`, 29 | serializers: { 30 | client_request: requestSerializer, 31 | server_response: responseSerializer, 32 | exception: (err) => Array.isArray(err) ? 
err.map((err) => errorSerializer(err)) : [errorSerializer(err)] 33 | } 34 | }; 35 | const dest = pino.destination({ sync: false }); // stdout 36 | 37 | this._logger = pino(options, dest); 38 | } 39 | 40 | trace (...args) { 41 | this._logger.trace(...args); 42 | } 43 | 44 | debug (...args) { 45 | this._logger.debug(...args); 46 | } 47 | 48 | info (...args) { 49 | this._logger.info(...args); 50 | } 51 | 52 | warn (...args) { 53 | this._logger.warn(...args); 54 | } 55 | 56 | error (...args) { 57 | this._logger.error(...args); 58 | } 59 | 60 | fatal (...args) { 61 | this._logger.fatal(...args); 62 | } 63 | 64 | child (...args) { 65 | return this._logger.child(...args); 66 | } 67 | 68 | finish (callback) { 69 | return pino.final(this._logger, callback); 70 | } 71 | }; 72 | -------------------------------------------------------------------------------- /lib/utils/md5.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var crypto = require('crypto'); 4 | 5 | module.exports = function generateMD5 (data) { 6 | var hash = crypto.createHash('md5'); 7 | hash.update(data); 8 | return hash.digest('hex'); 9 | }; 10 | -------------------------------------------------------------------------------- /lib/utils/query-info.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const COPY_FORMATS = ['TEXT', 'CSV', 'BINARY']; 4 | 5 | const regex = /\bFORMAT\s+(\w+)/; 6 | 7 | module.exports = { 8 | getFormatFromCopyQuery (copyQuery) { 9 | let format = 'TEXT'; // Postgres default format 10 | 11 | copyQuery = copyQuery.toUpperCase(); 12 | 13 | if (!copyQuery.startsWith('COPY ')) { 14 | return false; 15 | } 16 | 17 | if (copyQuery.includes(' WITH') && copyQuery.includes('FORMAT ')) { 18 | const result = regex.exec(copyQuery); 19 | 20 | if (result && result.length === 2) { 21 | if (COPY_FORMATS.includes(result[1])) { 22 | format = result[1]; 23 | format = format.toUpperCase(); 24 | } else { 25 | format = false; 26 | } 27 | } 28 | } 29 | 30 | return format; 31 | } 32 | }; 33 | -------------------------------------------------------------------------------- /lib/utils/query-may-write.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var sqlQueryMayWriteRegex = new RegExp('\\b(alter|insert|update|delete|create|drop|reindex|truncate|refresh)\\b', 'i'); 4 | 5 | /** 6 | * This is a fuzzy check, the return could be true even if the query doesn't really write anything. But you can be 7 | * pretty sure of a false return. 
8 | * 9 | * @param sql The SQL statement to check against 10 | * @returns {boolean} Returns true if the given query may write to the database 11 | */ 12 | module.exports = function queryMayWrite (sql) { 13 | return sqlQueryMayWriteRegex.test(sql); 14 | }; 15 | -------------------------------------------------------------------------------- /metro/index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const metro = require('./metro'); 4 | const path = require('path'); 5 | const fs = require('fs'); 6 | 7 | const { CONFIG_PATH = path.resolve(__dirname, './config.json') } = process.env; 8 | const existsConfigFile = fs.existsSync(CONFIG_PATH); 9 | 10 | if (!existsConfigFile) { 11 | exit(4)(new Error(`Wrong path for CONFIG_PATH env variable: ${CONFIG_PATH} no such file`)); 12 | } 13 | 14 | let config; 15 | 16 | if (existsConfigFile) { 17 | config = fs.readFileSync(CONFIG_PATH); 18 | try { 19 | config = JSON.parse(config); 20 | } catch (e) { 21 | exit(5)(new Error('Wrong config format: invalid JSON')); 22 | } 23 | } 24 | 25 | metro({ metrics: config && config.metrics }) 26 | .then(exit(0)) 27 | .catch(exit(1)); 28 | 29 | process.on('uncaughtException', exit(2)); 30 | process.on('unhandledRejection', exit(3)); 31 | 32 | function exit (code = 1) { 33 | return function (err) { 34 | if (err) { 35 | console.error(err); 36 | } 37 | 38 | process.exit(code); 39 | }; 40 | } 41 | -------------------------------------------------------------------------------- /metro/metrics-collector.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const http = require('http'); 4 | const { Counter, Histogram, register } = require('prom-client'); 5 | const flatten = require('flat'); 6 | const { Transform, PassThrough } = require('stream'); 7 | const DEV_ENVS = ['test', 'development']; 8 | 9 | const factory = { 10 | counter: Counter, 11 | histogram: Histogram 12 | }; 13 | 14 | module.exports = class MetricsCollector { 15 | constructor ({ port = 0, definitions } = {}) { 16 | this._port = port; 17 | this._definitions = definitions; 18 | this._server = null; 19 | this._stream = createTransformStream(this._definitions); 20 | } 21 | 22 | get stream () { 23 | return this._stream; 24 | } 25 | 26 | start () { 27 | return new Promise((resolve, reject) => { 28 | this._server = http.createServer((req, res) => { 29 | res.writeHead(200, { 'Content-Type': register.contentType }); 30 | res.end(register.metrics()); 31 | }); 32 | 33 | this._server.once('error', err => reject(err)); 34 | this._server.once('listening', () => resolve()); 35 | this._server.listen(this._port); 36 | }); 37 | } 38 | 39 | stop () { 40 | return new Promise((resolve) => { 41 | register.clear(); 42 | if (!this._server) { 43 | return resolve(); 44 | } 45 | 46 | this._server.once('close', () => { 47 | this._server = null; 48 | resolve(); 49 | }); 50 | 51 | this._server.close(); 52 | }); 53 | }; 54 | }; 55 | 56 | function createTransformStream (definitions) { 57 | if (typeof definitions !== 'object') { 58 | return new PassThrough(); 59 | } 60 | 61 | const metrics = []; 62 | 63 | for (const { type, options, valuePath, labelPaths, shouldMeasure, measure } of definitions) { 64 | metrics.push({ 65 | instance: new factory[type](options), 66 | valuePath, 67 | labelPaths, 68 | shouldMeasure: eval(shouldMeasure), // eslint-disable-line no-eval 69 | measure: eval(measure) // eslint-disable-line no-eval 70 | }); 71 | } 72 | 73 | return new Transform({ 74 | 
transform (chunk, enc, callback) { 75 | let entry; 76 | 77 | try { 78 | entry = JSON.parse(chunk); 79 | } catch (e) { 80 | if (DEV_ENVS.includes(process.env.NODE_ENV)) { 81 | this.push(chunk + '\n'); 82 | } 83 | return callback(); 84 | } 85 | 86 | const flatEntry = flatten(entry); 87 | 88 | for (const metric of metrics) { 89 | const value = flatEntry[metric.valuePath]; 90 | const labels = Array.isArray(metric.labelPaths) && metric.labelPaths.map(path => flatEntry[path]); 91 | 92 | if (metric.shouldMeasure({ labels, value })) { 93 | metric.measure({ metric: metric.instance, labels, value }); 94 | } 95 | } 96 | 97 | this.push(`${JSON.stringify(entry)}\n`); 98 | 99 | return callback(); 100 | } 101 | }); 102 | } 103 | -------------------------------------------------------------------------------- /metro/metro.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const util = require('util'); 4 | const stream = require('stream'); 5 | const pipeline = util.promisify(stream.pipeline); 6 | const split = require('split2'); 7 | const logCollector = require('./log-collector'); 8 | const MetricsCollector = require('./metrics-collector'); 9 | 10 | module.exports = async function metro ({ input = process.stdin, output = process.stdout, metrics = {} } = {}) { 11 | const metricsCollector = new MetricsCollector(metrics); 12 | const { stream: metricsStream } = metricsCollector; 13 | 14 | try { 15 | await metricsCollector.start(); 16 | await pipeline(input, split(), logCollector(), metricsStream, output); 17 | } finally { 18 | await metricsCollector.stop(); 19 | } 20 | }; 21 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "private": true, 3 | "name": "cartodb-sql-api", 4 | "version": "6.0.0", 5 | "description": "High speed SQL API for CARTO", 6 | "keywords": [ 7 | "carto", 8 | "sql", 9 | "batch", 10 | "import", 11 | "export", 12 | "geospatial" 13 | ], 14 | "url": "https://github.com/CartoDB/CartoDB-SQL-API", 15 | "license": "BSD-3-Clause", 16 | "repository": { 17 | "type": "git", 18 | "url": "git://github.com/CartoDB/CartoDB-SQL-API.git" 19 | }, 20 | "author": "CARTO (https://carto.com)", 21 | "contributors": [ 22 | "Simon Tokumine ", 23 | "Sandro Santilli ", 24 | "Raúl Ochoa ", 25 | "Daniel García Aubert " 26 | ], 27 | "main": "app.js", 28 | "dependencies": { 29 | "@carto/fqdn-sync": "0.2.2", 30 | "@google-cloud/pubsub": "1.5.0", 31 | "assign-deep": "^1.0.1", 32 | "basic-auth": "^2.0.0", 33 | "bintrees": "1.0.1", 34 | "bunyan": "1.8.1", 35 | "cartodb-psql": "0.14.0", 36 | "cartodb-query-tables": "^0.7.0", 37 | "cartodb-redis": "^3.0.0", 38 | "debug": "^4.1.1", 39 | "express": "^4.16.4", 40 | "gc-stats": "^1.4.0", 41 | "log4js": "cartodb/log4js-node#cdb", 42 | "lru-cache": "~2.5.0", 43 | "multer": "~1.2.0", 44 | "node-statsd": "~0.0.7", 45 | "node-uuid": "^1.4.7", 46 | "oauth-client": "0.3.0", 47 | "pg-copy-streams": "github:cartodb/node-pg-copy-streams#v2.x-carto", 48 | "pino": "^6.3.2", 49 | "prom-client": "^12.0.0", 50 | "qs": "~6.2.1", 51 | "queue-async": "~1.0.7", 52 | "redis-mpool": "^0.8.0", 53 | "redlock": "2.0.1", 54 | "request": "^2.88.0", 55 | "split2": "^3.1.1", 56 | "step": "~0.0.5", 57 | "step-profiler": "~0.3.0", 58 | "topojson": "0.0.8", 59 | "underscore": "~1.6.0", 60 | "uuid": "^8.1.0", 61 | "yargs": "^15.3.1" 62 | }, 63 | "devDependencies": { 64 | "adm-zip": "^0.4.14", 65 | "eslint": "^6.8.0", 
66 | "eslint-config-standard": "^14.1.0", 67 | "eslint-plugin-import": "^2.19.1", 68 | "eslint-plugin-node": "^10.0.0", 69 | "eslint-plugin-promise": "^4.2.1", 70 | "eslint-plugin-standard": "^4.0.1", 71 | "libxmljs": "^0.19.7", 72 | "mocha": "^7.2.0", 73 | "mockdate": "^2.0.2", 74 | "nodemon": "^2.0.6", 75 | "nyc": "^15.0.0", 76 | "pg": "github:cartodb/node-postgres#6.4.2-cdb2", 77 | "pino-pretty": "^4.0.0", 78 | "redis": "^3.1.0", 79 | "shapefile": "0.3.0", 80 | "sinon": "^9.0.0", 81 | "sqlite3": "^4.2.0" 82 | }, 83 | "scripts": { 84 | "lint:fix": "eslint --fix app.js \"lib/**/*.js\" \"test/**/*.js\"", 85 | "lint": "eslint app.js \"lib/**/*.js\" \"test/**/*.js\"", 86 | "pretest:setup": "npm run lint", 87 | "test:setup": "NODE_ENV=test node test setup", 88 | "pretest": "npm run test:setup", 89 | "test": "NODE_ENV=test TZ='Europe/Rome' mocha -t 5000 --exit --recursive test/acceptance test/integration test/unit", 90 | "posttest": "npm run test:teardown", 91 | "test:teardown": "NODE_ENV=test node test teardown", 92 | "cover": "nyc --reporter=lcov npm test", 93 | "dev": "NODE_ENV=development nodemon app.js" 94 | }, 95 | "engines": { 96 | "node": "^10.15.1", 97 | "npm": "^6.4.1" 98 | } 99 | } 100 | 101 | -------------------------------------------------------------------------------- /test/acceptance/app-auth-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var server = require('../../lib/server')(); 6 | var assert = require('../support/assert'); 7 | 8 | describe('app.auth', function () { 9 | var scenarios = [ 10 | { 11 | desc: 'no api key should fallback to default api key', 12 | url: '/api/v1/sql?q=SELECT%20*%20FROM%20untitle_table_4', 13 | statusCode: 200 14 | }, 15 | { 16 | desc: 'invalid api key should return 401', 17 | url: '/api/v1/sql?api_key=THIS_API_KEY_NOT_EXIST&q=SELECT%20*%20FROM%20untitle_table_4', 18 | statusCode: 401 19 | }, 20 | { 21 | desc: 'valid api key should allow insert in protected tables', 22 | url: "/api/v1/sql?api_key=1234&q=INSERT%20INTO%20private_table%20(name)%20VALUES%20('app_auth_test1')", 23 | statusCode: 200 24 | }, 25 | { 26 | desc: 'valid api key should allow delete in protected tables', 27 | url: "/api/v1/sql?api_key=1234&q=DELETE%20FROM%20private_table%20WHERE%20name%3d'app_auth_test1'", 28 | statusCode: 200 29 | }, 30 | { 31 | desc: 'invalid api key should NOT allow insert in protected tables', 32 | url: "/api/v1/sql?api_key=THIS_API_KEY_NOT_EXIST&q=INSERT%20INTO%20private_table%20(name)%20VALUES%20('R')", 33 | statusCode: 401 34 | }, 35 | { 36 | desc: 'no api key should NOT allow insert in protected tables', 37 | url: "/api/v1/sql?q=INSERT%20INTO%20private_table%20(name)%20VALUES%20('RAMBO')", 38 | statusCode: 403 39 | }, 40 | { 41 | desc: 'no api key should NOT allow insert in public tables', 42 | url: "/api/v1/sql?q=INSERT%20INTO%20untitle_table_4%20(name)%20VALUES%20('RAMBO')", 43 | statusCode: 403 44 | } 45 | ]; 46 | 47 | scenarios.forEach(function (scenario) { 48 | it(scenario.desc, function (done) { 49 | assert.response(server, { 50 | // view prepare_db.sh to find public table name and structure 51 | url: scenario.url, 52 | headers: { 53 | host: 'vizzuality.cartodb.com' 54 | }, 55 | method: 'GET' 56 | }, 57 | {}, 58 | function (err, res) { 59 | assert.ifError(err); 60 | assert.strictEqual(res.statusCode, scenario.statusCode, res.statusCode + ': ' + res.body); 61 | done(); 62 | } 63 | ); 64 | }); 65 | }); 66 | }); 67 | 
-------------------------------------------------------------------------------- /test/acceptance/backend-crash-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var assert = require('../support/assert'); 6 | var step = require('step'); 7 | var net = require('net'); 8 | 9 | var sqlServerPort = 5540; 10 | var sqlServer = net.createServer(function (c) { 11 | c.destroy(); 12 | sqlServer.close(function () { 13 | }); 14 | }); 15 | 16 | describe('backend crash', function () { 17 | before(function (done) { 18 | sqlServer.listen(sqlServerPort, done); 19 | }); 20 | 21 | // See https://github.com/CartoDB/CartoDB-SQL-API/issues/135 22 | it('does not hang server', function (done) { 23 | var dbHostBackup = global.settings.db_host; 24 | var dbPortBackup = global.settings.db_port; 25 | global.settings.db_host = 'localhost'; 26 | global.settings.db_port = sqlServerPort; 27 | var server = require('../../lib/server')(); 28 | step( 29 | function sendQuery () { 30 | assert.response(server, { 31 | url: '/api/v1/sql?q=SELECT+1', 32 | method: 'GET', 33 | headers: { host: 'vizzuality.localhost' } 34 | }, {}, this); 35 | }, 36 | function checkResponse (err, res) { 37 | assert.ifError(err); 38 | assert.strictEqual(res.statusCode, 500, res.statusCode + ': ' + res.body); 39 | var parsed = JSON.parse(res.body); 40 | assert.ok(parsed.error); 41 | var msg = parsed.error[0]; 42 | assert.ok(msg.match(/unexpected.*end/), msg); 43 | return null; 44 | }, 45 | function sendAnotherQuery () { 46 | assert.response(server, { 47 | url: '/api/v1/sql?q=SELECT+2', 48 | method: 'GET', 49 | headers: { host: 'vizzuality.localhost' } 50 | }, {}, this); 51 | }, 52 | function checkResponse (err, res) { 53 | assert.ifError(err); 54 | assert.strictEqual(res.statusCode, 500, res.statusCode + ': ' + res.body); 55 | var parsed = JSON.parse(res.body); 56 | assert.ok(parsed.error); 57 | var msg = parsed.error[0]; 58 | assert.ok(msg.match(/connect/), msg); 59 | return null; 60 | }, 61 | function finish (err) { 62 | global.settings.db_host = dbHostBackup; 63 | global.settings.db_port = dbPortBackup; 64 | done(err); 65 | } 66 | ); 67 | }); 68 | 69 | after(function (done) { 70 | // be sure the sqlServer is closed 71 | if (sqlServer.listening) { 72 | return sqlServer.close(done); 73 | } 74 | 75 | done(); 76 | }); 77 | }); 78 | -------------------------------------------------------------------------------- /test/acceptance/batch/batch-drain-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | var assert = require('../../support/assert'); 5 | var redisUtils = require('../../support/redis-utils'); 6 | var batchFactory = require('../../../lib/batch/index'); 7 | 8 | var Logger = require('../../../lib/utils/logger'); 9 | var JobPublisher = require('../../../lib/batch/pubsub/job-publisher'); 10 | var JobQueue = require('../../../lib/batch/job-queue'); 11 | var JobBackend = require('../../../lib/batch/job-backend'); 12 | var JobService = require('../../../lib/batch/job-service'); 13 | var JobCanceller = require('../../../lib/batch/job-canceller'); 14 | var metadataBackend = require('cartodb-redis')({ pool: redisUtils.getPool() }); 15 | 16 | const TEST_USER_ID = 1; 17 | const TEST_USER = global.settings.db_user.replace('<%= user_id %>', TEST_USER_ID); 18 | const TEST_DB = global.settings.db_base_name.replace('<%= user_id %>', TEST_USER_ID); 19 | 20 | describe('batch module', 
function () { 21 | var username = 'vizzuality'; 22 | var pool = redisUtils.getPool(); 23 | var logger = new Logger(); 24 | var jobPublisher = new JobPublisher(pool); 25 | var jobQueue = new JobQueue(metadataBackend, jobPublisher, logger); 26 | var jobBackend = new JobBackend(metadataBackend, jobQueue, logger); 27 | var jobCanceller = new JobCanceller(); 28 | var jobService = new JobService(jobBackend, jobCanceller, logger); 29 | 30 | before(function (done) { 31 | this.batch = batchFactory(metadataBackend, pool, undefined, undefined, logger); 32 | this.batch.start(); 33 | this.batch.on('ready', done); 34 | }); 35 | 36 | after(function (done) { 37 | this.batch.stop(); 38 | redisUtils.clean(global.settings.batch_db, 'batch:*', done); 39 | }); 40 | 41 | function createJob (sql, done) { 42 | var data = { 43 | user: username, 44 | query: sql, 45 | host: global.settings.db_host, 46 | dbname: TEST_DB, 47 | dbuser: TEST_USER, 48 | port: global.settings.db_batch_port, 49 | pass: global.settings.db_user_pass 50 | }; 51 | 52 | jobService.create(data, function (err, job) { 53 | if (err) { 54 | return done(err); 55 | } 56 | 57 | done(null, job.serialize()); 58 | }); 59 | } 60 | 61 | it('should drain the current job', function (done) { 62 | var self = this; 63 | createJob('select pg_sleep(3)', function (err, job) { 64 | if (err) { 65 | return done(err); 66 | } 67 | setTimeout(function () { 68 | jobBackend.get(job.job_id, function (err, job) { 69 | if (err) { 70 | done(err); 71 | } 72 | assert.strictEqual(job.status, 'running'); 73 | 74 | self.batch.drain(function () { 75 | jobBackend.get(job.job_id, function (err, job) { 76 | if (err) { 77 | done(err); 78 | } 79 | assert.strictEqual(job.status, 'pending'); 80 | done(); 81 | }); 82 | }); 83 | }); 84 | }, 50); 85 | }); 86 | }); 87 | }); 88 | -------------------------------------------------------------------------------- /test/acceptance/batch/batch-limits-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | 5 | var assert = require('../../support/assert'); 6 | var BatchTestClient = require('../../support/batch-test-client'); 7 | var JobStatus = require('../../../lib/batch/job-status'); 8 | var redisUtils = require('../../support/redis-utils'); 9 | var metadataBackend = require('cartodb-redis')({ pool: redisUtils.getPool() }); 10 | const dbUtils = require('../../support/db_utils'); 11 | 12 | describe('batch query statement_timeout limit', function () { 13 | before(function (done) { 14 | this.batchTestClient = new BatchTestClient(); 15 | this.batchQueryTimeout = global.settings.batch_query_timeout; 16 | global.settings.batch_query_timeout = 15000; 17 | metadataBackend.redisCmd(global.settings.batch_db, 'HMSET', ['limits:batch:vizzuality', 'timeout', 100], done); 18 | }); 19 | before(dbUtils.resetPgBouncerConnections); 20 | after(function (done) { 21 | global.settings.batch_query_timeout = this.batchQueryTimeout; 22 | redisUtils.clean(global.settings.batch_db, 'limits:batch:*', function () { 23 | this.batchTestClient.drain(done); 24 | }.bind(this)); 25 | }); 26 | after(dbUtils.resetPgBouncerConnections); 27 | 28 | function jobPayload (query) { 29 | return { 30 | query: query 31 | }; 32 | } 33 | 34 | it('should cancel with user statement_timeout limit', function (done) { 35 | var payload = jobPayload('select pg_sleep(10)'); 36 | this.batchTestClient.createJob(payload, function (err, jobResult) { 37 | if (err) { 38 | return done(err); 39 | } 40 | 
jobResult.getStatus(function (err, job) { 41 | if (err) { 42 | return done(err); 43 | } 44 | assert.strictEqual(job.status, JobStatus.FAILED); 45 | assert.ok(job.failed_reason.match(/statement.*timeout/)); 46 | return done(); 47 | }); 48 | }); 49 | }); 50 | }); 51 | -------------------------------------------------------------------------------- /test/acceptance/batch/job-query-order-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | var assert = require('../../support/assert'); 5 | 6 | var BatchTestClient = require('../../support/batch-test-client'); 7 | var JobStatus = require('../../../lib/batch/job-status'); 8 | 9 | describe('job query order', function () { 10 | before(function () { 11 | this.batchTestClient = new BatchTestClient(); 12 | }); 13 | 14 | after(function (done) { 15 | return this.batchTestClient.drain(done); 16 | }); 17 | 18 | function createJob (queries) { 19 | return { 20 | query: queries 21 | }; 22 | } 23 | 24 | it('should run job queries in order (single consumer)', function (done) { 25 | var jobRequest1 = createJob(['select 1', 'select 2']); 26 | var jobRequest2 = createJob(['select 3']); 27 | 28 | this.batchTestClient.createJob(jobRequest1, function (err, jobResult1) { 29 | if (err) { 30 | return done(err); 31 | } 32 | this.batchTestClient.createJob(jobRequest2, function (err, jobResult2) { 33 | if (err) { 34 | return done(err); 35 | } 36 | 37 | jobResult1.getStatus(function (err, job1) { 38 | if (err) { 39 | return done(err); 40 | } 41 | jobResult2.getStatus(function (err, job2) { 42 | if (err) { 43 | return done(err); 44 | } 45 | assert.strictEqual(job1.status, JobStatus.DONE); 46 | assert.strictEqual(job2.status, JobStatus.DONE); 47 | assert.ok( 48 | new Date(job1.updated_at).getTime() < new Date(job2.updated_at).getTime(), 49 | 'job1 (' + job1.updated_at + ') should finish before job2 (' + job2.updated_at + ')' 50 | ); 51 | done(); 52 | }); 53 | }); 54 | }); 55 | }.bind(this)); 56 | }); 57 | }); 58 | -------------------------------------------------------------------------------- /test/acceptance/batch/leader-job-query-order-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | var assert = require('../../support/assert'); 5 | 6 | var TestClient = require('../../support/test-client'); 7 | var BatchTestClient = require('../../support/batch-test-client'); 8 | var JobStatus = require('../../../lib/batch/job-status'); 9 | 10 | describe('multiple batch clients job query order', function () { 11 | before(function (done) { 12 | this.batchTestClient1 = new BatchTestClient({ name: 'consumerA' }); 13 | this.batchTestClient2 = new BatchTestClient({ name: 'consumerB' }); 14 | 15 | this.testClient = new TestClient(); 16 | this.testClient.getResult( 17 | 'drop table if exists ordered_inserts; create table ordered_inserts (status numeric)', 18 | done 19 | ); 20 | }); 21 | 22 | after(function (done) { 23 | this.batchTestClient1.drain(function (err) { 24 | if (err) { 25 | return done(err); 26 | } 27 | 28 | this.batchTestClient2.drain(done); 29 | }.bind(this)); 30 | }); 31 | 32 | function createJob (queries) { 33 | return { 34 | query: queries 35 | }; 36 | } 37 | 38 | it('should run job queries in order (multiple consumers)', function (done) { 39 | var jobRequest1 = createJob([ 40 | 'insert into ordered_inserts values(1)', 41 | 'select pg_sleep(0.25)', 42 | 'insert into ordered_inserts values(2)' 43 
| ]); 44 | var jobRequest2 = createJob([ 45 | 'insert into ordered_inserts values(3)' 46 | ]); 47 | 48 | var self = this; 49 | 50 | this.batchTestClient1.createJob(jobRequest1, function (err, jobResult1) { 51 | if (err) { 52 | return done(err); 53 | } 54 | this.batchTestClient2.createJob(jobRequest2, function (err, jobResult2) { 55 | if (err) { 56 | return done(err); 57 | } 58 | 59 | jobResult1.getStatus(function (err, job1) { 60 | if (err) { 61 | return done(err); 62 | } 63 | jobResult2.getStatus(function (err, job2) { 64 | if (err) { 65 | return done(err); 66 | } 67 | assert.strictEqual(job1.status, JobStatus.DONE); 68 | assert.strictEqual(job2.status, JobStatus.DONE); 69 | 70 | self.testClient.getResult('select * from ordered_inserts', function (err, rows) { 71 | assert.ok(!err); 72 | 73 | assert.deepStrictEqual(rows, [{ status: 1 }, { status: 2 }, { status: 3 }]); 74 | assert.ok( 75 | new Date(job1.updated_at).getTime() < new Date(job2.updated_at).getTime(), 76 | 'job1 (' + job1.updated_at + ') should finish before job2 (' + job2.updated_at + ')' 77 | ); 78 | done(); 79 | }); 80 | }); 81 | }); 82 | }); 83 | }.bind(this)); 84 | }); 85 | }); 86 | -------------------------------------------------------------------------------- /test/acceptance/batch/queued-jobs-limit-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | 5 | var assert = require('../../support/assert'); 6 | var redisUtils = require('../../support/redis-utils'); 7 | var TestClient = require('../../support/test-client'); 8 | 9 | describe('max queued jobs', function () { 10 | before(function (done) { 11 | this.batch_max_queued_jobs = global.settings.batch_max_queued_jobs; 12 | global.settings.batch_max_queued_jobs = 1; 13 | this.server = require('../../../lib/server')(); 14 | this.testClient = new TestClient(); 15 | this.testClient.getResult( 16 | 'drop table if exists max_queued_jobs_inserts; create table max_queued_jobs_inserts (status numeric)', 17 | done 18 | ); 19 | }); 20 | 21 | after(function (done) { 22 | global.settings.batch_max_queued_jobs = this.batch_max_queued_jobs; 23 | redisUtils.clean(global.settings.batch_db, 'batch:*', done); 24 | }); 25 | 26 | function createJob (server, status, callback) { 27 | assert.response( 28 | server, 29 | { 30 | url: '/api/v2/sql/job?api_key=1234', 31 | headers: { 32 | host: 'vizzuality.cartodb.com', 33 | 'Content-Type': 'application/json' 34 | }, 35 | method: 'POST', 36 | data: JSON.stringify({ 37 | query: 'insert into max_queued_jobs_inserts values (1)' 38 | }) 39 | }, 40 | { 41 | status: status 42 | }, 43 | function (err, res) { 44 | if (err) { 45 | return callback(err); 46 | } 47 | 48 | return callback(null, JSON.parse(res.body)); 49 | } 50 | ); 51 | } 52 | 53 | it('POST /api/v2/sql/job should respond with 200 and the created job', function (done) { 54 | var self = this; 55 | createJob(this.server, 201, function (err) { 56 | assert.ok(!err); 57 | 58 | createJob(self.server, 400, function (err, res) { 59 | assert.ok(!err); 60 | assert.strictEqual(res.error[0], 'Failed to create job. 
Max number of jobs (' + 61 | global.settings.batch_max_queued_jobs + ') queued reached'); 62 | done(); 63 | }); 64 | }); 65 | }); 66 | }); 67 | -------------------------------------------------------------------------------- /test/acceptance/client-headers-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const assert = require('../support/assert'); 4 | const TestClient = require('../support/test-client'); 5 | 6 | describe('SQL api metric headers', function () { 7 | const publicSQL = 'select * from untitle_table_4'; 8 | 9 | it('should get client header if client param is present', function (done) { 10 | this.testClient = new TestClient(); 11 | const params = { client: 'test' }; 12 | 13 | this.testClient.getResult(publicSQL, params, (err, result, headers) => { 14 | assert.ifError(err); 15 | assert.strictEqual(result.length, 6); 16 | assert.strictEqual(headers['carto-client'], 'test'); 17 | done(); 18 | }); 19 | }); 20 | 21 | it('should not get the client header if no client is provided', function (done) { 22 | this.testClient = new TestClient(); 23 | 24 | this.testClient.getResult(publicSQL, (err, result, headers) => { 25 | assert.ifError(err); 26 | assert.strictEqual(result.length, 6); 27 | assert.strictEqual(headers['carto-client'], undefined); 28 | done(); 29 | }); 30 | }); 31 | 32 | it('should get the user id in the response header', function (done) { 33 | this.testClient = new TestClient(); 34 | 35 | this.testClient.getResult(publicSQL, (err, result, headers) => { 36 | assert.ifError(err); 37 | assert.strictEqual(result.length, 6); 38 | assert.strictEqual(headers['carto-user-id'], '1'); 39 | done(); 40 | }); 41 | }); 42 | }); 43 | -------------------------------------------------------------------------------- /test/acceptance/copy-statements-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var server = require('../../lib/server')(); 6 | var assert = require('../support/assert'); 7 | var querystring = require('querystring'); 8 | 9 | describe('copy-statements', function () { 10 | var RESPONSE_OK = { 11 | statusCode: 200 12 | }; 13 | 14 | before(function (done) { 15 | assert.response(server, { 16 | url: '/api/v1/sql?' + querystring.stringify({ 17 | q: 'CREATE TABLE copy_test_table(a int)', 18 | api_key: 1234 19 | }), 20 | headers: { host: 'vizzuality.cartodb.com' }, 21 | method: 'GET' 22 | }, RESPONSE_OK, done); 23 | }); 24 | 25 | after(function (done) { 26 | assert.response(server, { 27 | url: '/api/v1/sql?' + querystring.stringify({ 28 | q: 'DROP TABLE IF EXISTS copy_test_table', 29 | api_key: 1234 30 | }), 31 | headers: { host: 'vizzuality.cartodb.com' }, 32 | method: 'GET' 33 | }, RESPONSE_OK, done); 34 | }); 35 | 36 | // Test effects of COPY 37 | // See https://github.com/Vizzuality/cartodb-management/issues/1502 38 | it('COPY TABLE with GET and auth', function (done) { 39 | assert.response(server, { 40 | url: '/api/v1/sql?' 
+ querystring.stringify({ 41 | q: 'COPY copy_test_table FROM stdin;', 42 | api_key: 1234 43 | }), 44 | headers: { host: 'vizzuality.cartodb.com' }, 45 | method: 'GET' 46 | }, {}, function (err, res) { 47 | assert.ifError(err); 48 | // We expect a problem, actually 49 | assert.strictEqual(res.statusCode, 400, res.statusCode + ': ' + res.body); 50 | assert.deepStrictEqual(res.headers['content-type'], 'application/json; charset=utf-8'); 51 | assert.deepStrictEqual(res.headers['content-disposition'], 'inline'); 52 | assert.deepStrictEqual(JSON.parse(res.body), { error: ['COPY from stdin failed: No source stream defined'] }); 53 | done(); 54 | }); 55 | }); 56 | 57 | it('COPY TABLE with GET and auth', function (done) { 58 | assert.response(server, { 59 | url: '/api/v1/sql?' + querystring.stringify({ 60 | q: "COPY copy_test_table to '/tmp/x';", 61 | api_key: 1234 62 | }), 63 | headers: { host: 'vizzuality.cartodb.com' }, 64 | method: 'GET' 65 | }, {}, function (err, res) { 66 | assert.ifError(err); 67 | // We expect a problem, actually 68 | assert.strictEqual(res.statusCode, 400, res.statusCode + ': ' + res.body); 69 | assert.deepStrictEqual(res.headers['content-type'], 'application/json; charset=utf-8'); 70 | assert.deepStrictEqual(res.headers['content-disposition'], 'inline'); 71 | const errorExp = /must be superuser.* to COPY.* a file/; 72 | const hintExp = /Anyone can COPY to stdout or from stdin. psql's \\copy command also works for anyone./; 73 | assert.ok(JSON.parse(res.body).error[0].match(errorExp)); 74 | assert.ok(JSON.parse(res.body).hint.match(hintExp)); 75 | done(); 76 | }); 77 | }); 78 | }); 79 | -------------------------------------------------------------------------------- /test/acceptance/export/arraybuffer-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | require('../../support/assert'); 5 | 6 | var server = require('../../../lib/server')(); 7 | var assert = require('assert'); 8 | var querystring = require('querystring'); 9 | 10 | describe('export.arraybuffer', function () { 11 | it('GET /api/v1/sql as arraybuffer ', function (done) { 12 | assert.response(server, { 13 | url: '/api/v1/sql?' + querystring.stringify({ 14 | q: 'SELECT cartodb_id,name,1::integer,187.9 FROM untitle_table_4', 15 | format: 'arraybuffer' 16 | }), 17 | headers: { host: 'vizzuality.cartodb.com' }, 18 | method: 'GET' 19 | }, { }, function (err, res) { 20 | assert.ifError(err); 21 | assert.strictEqual(res.statusCode, 200, res.body); 22 | assert.strictEqual(res.headers['content-type'], 'application/octet-stream'); 23 | done(); 24 | }); 25 | }); 26 | 27 | it('GET /api/v1/sql as arraybuffer does not support geometry types ', function (done) { 28 | assert.response(server, { 29 | url: '/api/v1/sql?' 
+ querystring.stringify({ 30 | q: 'SELECT cartodb_id, the_geom FROM untitle_table_4', 31 | format: 'arraybuffer' 32 | }), 33 | headers: { host: 'vizzuality.cartodb.com' }, 34 | method: 'GET' 35 | }, { }, function (err, res) { 36 | assert.ifError(err); 37 | assert.strictEqual(res.statusCode, 400, res.body); 38 | var result = JSON.parse(res.body); 39 | assert.strictEqual(result.error[0], 'geometry types are not supported'); 40 | 41 | done(); 42 | }); 43 | }); 44 | }); 45 | -------------------------------------------------------------------------------- /test/acceptance/export/folder-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | require('../../support/assert'); 5 | 6 | const fs = require('fs'); 7 | let server = require('../../../lib/server'); 8 | const assert = require('assert'); 9 | const querystring = require('querystring'); 10 | 11 | describe('export folder', function () { 12 | it('folder exists', function (done) { 13 | const currentTmpDir = global.settings.tmpDir; 14 | 15 | const dynamicTmpDir = `/tmp/${new Date().getTime()}/a/b/c`; 16 | global.settings.tmpDir = dynamicTmpDir; 17 | server = server(); 18 | 19 | assert.response(server, { 20 | url: '/api/v1/sql?' + querystring.stringify({ 21 | q: 'SELECT 1' 22 | }), 23 | headers: { host: 'vizzuality.cartodb.com' }, 24 | method: 'GET' 25 | }, {}, function (err, res) { 26 | assert.ifError(err); 27 | assert.ok(res.statusCode === 200); 28 | assert.ok(fs.existsSync(dynamicTmpDir)); 29 | 30 | global.settings.tmpDir = currentTmpDir; 31 | 32 | done(); 33 | }); 34 | }); 35 | }); 36 | -------------------------------------------------------------------------------- /test/acceptance/export/spatialite-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | 5 | var server = require('../../../lib/server')(); 6 | var assert = require('../../support/assert'); 7 | var sqlite = require('sqlite3'); 8 | 9 | describe('spatialite query', function () { 10 | it('returns a valid sqlite database', function (done) { 11 | assert.response(server, { 12 | url: '/api/v1/sql?q=SELECT%20*%20FROM%20untitle_table_4%20LIMIT%201&format=spatialite', 13 | headers: { host: 'vizzuality.cartodb.com' }, 14 | method: 'GET' 15 | }, { }, function (err, res) { 16 | assert.ifError(err); 17 | assert.strictEqual(res.statusCode, 200, res.body); 18 | assert.strictEqual(res.headers['content-type'], 'application/x-sqlite3; charset=utf-8'); 19 | var db = new sqlite.Database(':memory:', res.body); 20 | var qr = db.get('PRAGMA database_list', function (err) { 21 | assert.strictEqual(err, null); 22 | done(); 23 | }); 24 | assert.notEqual(qr, undefined); 25 | }); 26 | }); 27 | 28 | it('different file name', function (done) { 29 | assert.response(server, { 30 | url: '/api/v1/sql?q=SELECT%20*%20FROM%20untitle_table_4%20LIMIT%201&format=spatialite&filename=manolo', 31 | headers: { host: 'vizzuality.cartodb.com' }, 32 | method: 'GET' 33 | }, { }, function (err, res) { 34 | assert.ifError(err); 35 | assert.strictEqual(res.headers['content-type'], 'application/x-sqlite3; charset=utf-8'); 36 | assert.notEqual(res.headers['content-disposition'].indexOf('manolo.sqlite'), -1); 37 | done(); 38 | }); 39 | }); 40 | 41 | it('gets database schema', function (done) { 42 | assert.response(server, { 43 | url: '/api/v1/sql?q=SELECT%20*%20FROM%20untitle_table_4%20LIMIT%201&format=spatialite', 44 | headers: { host: 
'vizzuality.cartodb.com' }, 45 | method: 'GET' 46 | }, { }, function (err, res) { 47 | assert.ifError(err); 48 | var db = new sqlite.Database(':memory:', res.body); 49 | var schemaQuery = "SELECT name, sql FROM sqlite_master WHERE type='table' ORDER BY name"; 50 | var qr = db.get(schemaQuery, function (err) { 51 | assert.strictEqual(err, null); 52 | done(); 53 | }); 54 | assert.notEqual(qr, undefined); 55 | }); 56 | }); 57 | }); 58 | -------------------------------------------------------------------------------- /test/acceptance/frontend-abort-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var assert = require('../support/assert'); 6 | var step = require('step'); 7 | var net = require('net'); 8 | 9 | var sqlServerDataHandler; 10 | var sqlServerPort = 5556; 11 | var sqlServer = net.createServer(function (c) { 12 | c.on('data', function (d) { 13 | console.log('SQL Server got data: ' + d); 14 | if (sqlServerDataHandler) { 15 | console.log('Sending data to sqlServerDataHandler'); 16 | sqlServerDataHandler(null, d); 17 | } 18 | c.destroy(); 19 | }); 20 | }); 21 | 22 | describe('frontend abort', function () { 23 | before(function (done) { 24 | sqlServer.listen(sqlServerPort, done); 25 | }); 26 | 27 | // See https://github.com/CartoDB/CartoDB-SQL-API/issues/129 28 | it('aborts request', function (done) { 29 | // console.log("settings:"); console.dir(global.settings); 30 | var dbHostBackup = global.settings.db_host; 31 | var dbPortBackup = global.settings.db_port; 32 | global.settings.db_host = 'localhost'; 33 | global.settings.db_port = sqlServerPort; 34 | var server = require('../../lib/server')(); 35 | var timeout; 36 | step( 37 | function sendQuery () { 38 | assert.response(server, { 39 | url: '/api/v1/sql?q=SELECT+1', 40 | method: 'GET', 41 | timeout: 1, 42 | headers: { host: 'vizzuality.localhost' } 43 | }, {}, this); 44 | }, 45 | function checkResponse (err/*, res */) { 46 | assert(err); // expect timeout 47 | assert.ok(('' + err).match(/socket/), err); 48 | sqlServerDataHandler = this; 49 | var next = this; 50 | // If a call does not arrive to the sql server within 51 | // the given timeout we're confident it means the request 52 | // was successfully aborted 53 | timeout = setTimeout(function () { next(null); }, 500); 54 | }, 55 | function checkSqlServerData (err, data) { 56 | clearTimeout(timeout); 57 | assert.ok(err.message === 'ETIMEDOUT' || err.message === 'ESOCKETTIMEDOUT'); 58 | assert.ok(!data, 'SQL Server was contacted no matter client abort'); 59 | return null; 60 | }, 61 | function finish (err) { 62 | global.settings.db_host = dbHostBackup; 63 | global.settings.db_port = dbPortBackup; 64 | done(err); 65 | } 66 | ); 67 | }); 68 | 69 | after(function (done) { 70 | try { 71 | sqlServer.close(done); 72 | } catch (er) { 73 | console.log(er); 74 | done(); // error expected as server is probably closed already 75 | } 76 | }); 77 | }); 78 | -------------------------------------------------------------------------------- /test/acceptance/health-check-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | require('../support/assert'); 5 | 6 | var assert = require('assert'); 7 | var server = require('../../lib/server')(); 8 | 9 | describe('health checks', function () { 10 | beforeEach(function (done) { 11 | global.settings.health = { 12 | enabled: true 13 | // username: 'vizzuality', 14 | // query: 'select 
1::text' 15 | }; 16 | done(); 17 | }); 18 | 19 | var healthCheckRequest = { 20 | url: '/api/v1/health', 21 | method: 'GET', 22 | headers: { 23 | host: 'vizzuality.localhost' 24 | } 25 | }; 26 | 27 | it('returns 200 and ok=true with disabled configuration', function (done) { 28 | global.settings.health.enabled = false; 29 | 30 | assert.response(server, 31 | healthCheckRequest, 32 | { 33 | status: 200 34 | }, 35 | function (err, res) { 36 | assert.ok(!err); 37 | 38 | var parsed = JSON.parse(res.body); 39 | 40 | assert.strictEqual(parsed.enabled, false); 41 | assert.ok(parsed.ok); 42 | 43 | done(); 44 | } 45 | ); 46 | }); 47 | 48 | it('returns 200 and ok=true with enabled configuration', function (done) { 49 | assert.response(server, 50 | healthCheckRequest, 51 | { 52 | status: 200 53 | }, 54 | function (err, res) { 55 | assert.ok(!err); 56 | 57 | var parsed = JSON.parse(res.body); 58 | 59 | assert.ok(parsed.enabled); 60 | assert.ok(parsed.ok); 61 | 62 | done(); 63 | } 64 | ); 65 | }); 66 | }); 67 | -------------------------------------------------------------------------------- /test/acceptance/oauth/oauth_test.py: -------------------------------------------------------------------------------- 1 | # TO RUN 2 | # > virtualenv env 3 | # > . env/bin/activate 4 | # > pip install oauth2 5 | # > pip install cartodb 6 | # 7 | # FILL IN THE CREDENTIALS BELOW 8 | # > python oauth_test.py 9 | 10 | from cartodb import CartoDB, CartoDBException 11 | 12 | import httplib2 13 | import oauth2 as oauth 14 | if __name__ == '__main__': 15 | 16 | user = '' 17 | password = '' 18 | CONSUMER_KEY = '' 19 | CONSUMER_SECRET = '' 20 | cl = CartoDB(CONSUMER_KEY, CONSUMER_SECRET, user, password, 'simon') 21 | try: 22 | print(cl.sql('select * from do_not_exist')) 23 | except CartoDBException as e: 24 | print("some error occurred", e) 25 | print(cl.sql('select * from table')) 26 | 27 | -------------------------------------------------------------------------------- /test/acceptance/pg-entities-access-validator-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const assert = require('../support/assert'); 4 | const TestClient = require('../support/test-client'); 5 | 6 | describe('PG entities access validator', function () { 7 | const forbiddenQueries = [ 8 | 'select * from information_schema.tables', 9 | 'select * from pg_catalog.pg_auth_members' 10 | ]; 11 | 12 | const testClientApiKey = new TestClient({ apiKey: 1234 }); 13 | const testClientAuthorized = new TestClient({ authorization: 'vizzuality:regular1' }); 14 | 15 | const expectedResponse = { 16 | response: { 17 | status: 403 18 | } 19 | }; 20 | 21 | function assertQuery (query, testClient, done) { 22 | testClient.getResult(query, expectedResponse, (err, result) => { 23 | assert.ifError(err); 24 | assert.deepStrictEqual(result.error, ['system tables are forbidden']); 25 | done(); 26 | }); 27 | } 28 | 29 | describe('validatePGEntitiesAccess enabled', function () { 30 | before(function () { 31 | global.settings.validatePGEntitiesAccess = true; 32 | }); 33 | 34 | forbiddenQueries.forEach(query => { 35 | it(`testClientApiKey: query: ${query}`, function (done) { 36 | assertQuery(query, testClientApiKey, done); 37 | }); 38 | 39 | it(`testClientAuthorized: query: ${query}`, function (done) { 40 | assertQuery(query, testClientAuthorized, done); 41 | }); 42 | }); 43 | }); 44 | 45 | describe('validatePGEntitiesAccess disabled', function () { 46 | before(function () { 47 | global.settings.validatePGEntitiesAccess = false; 48
| }); 49 | 50 | forbiddenQueries.forEach(query => { 51 | it(`testClientApiKey: query: ${query}`, function (done) { 52 | testClientApiKey.getResult(query, err => { 53 | assert.ifError(err); 54 | done(); 55 | }); 56 | }); 57 | 58 | it(`testClientAuthorized: query: ${query}`, function (done) { 59 | testClientAuthorized.getResult(query, err => { 60 | assert.ifError(err); 61 | done(); 62 | }); 63 | }); 64 | }); 65 | }); 66 | }); 67 | -------------------------------------------------------------------------------- /test/acceptance/query-float-values-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var server = require('../../lib/server')(); 6 | var assert = require('../support/assert'); 7 | var querystring = require('querystring'); 8 | var step = require('step'); 9 | 10 | describe('special numeric (float) values', function () { 11 | var RESPONSE_OK = { 12 | statusCode: 200 13 | }; 14 | var HEADERS = { 15 | host: 'vizzuality.localhost.lan:8080' 16 | }; 17 | var METHOD = 'GET'; 18 | var URL = '/api/v1/sql?api_key=1234&'; 19 | 20 | it('should cast Infinity and NaN values properly', function (done) { 21 | step( 22 | function createTable () { 23 | var next = this; 24 | var opts = { 25 | url: URL + querystring.stringify({ 26 | q: 'drop table if exists numbers_test; create table numbers_test(val float)' 27 | }), 28 | headers: HEADERS, 29 | method: METHOD 30 | }; 31 | assert.response(server, opts, RESPONSE_OK, next); 32 | }, 33 | function insertData (err) { 34 | assert.ifError(err); 35 | var next = this; 36 | var opts = { 37 | url: URL + querystring.stringify({ 38 | q: [ 39 | 'insert into numbers_test', 40 | ' values (\'NaN\'::float), (\'infinity\'::float), (\'-infinity\'::float), (1::float)' 41 | ].join('') 42 | }), 43 | headers: HEADERS, 44 | method: METHOD 45 | }; 46 | assert.response(server, opts, RESPONSE_OK, next); 47 | }, 48 | function queryData (err) { 49 | assert.ifError(err); 50 | var next = this; 51 | var opts = { 52 | url: URL + querystring.stringify({ 53 | q: 'select * from numbers_test' 54 | }), 55 | headers: HEADERS, 56 | method: METHOD 57 | }; 58 | assert.response(server, opts, RESPONSE_OK, next); 59 | }, 60 | function assertResult (err, res) { 61 | assert.ifError(err); 62 | var result = JSON.parse(res.body); 63 | assert.ok(Array.isArray(result.rows)); 64 | assert.strictEqual(result.rows[0].val, 'NaN'); 65 | assert.strictEqual(result.rows[1].val, 'Infinity'); 66 | assert.strictEqual(result.rows[2].val, '-Infinity'); 67 | assert.strictEqual(result.rows[3].val, 1); 68 | done(); 69 | } 70 | ); 71 | }); 72 | }); 73 | -------------------------------------------------------------------------------- /test/acceptance/query-multipart-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | const server = require('../../lib/server')(); 6 | const assert = require('../support/assert'); 7 | 8 | describe('query-multipart', function () { 9 | it('make query from a multipart form', function (done) { 10 | assert.response(server, { 11 | url: '/api/v1/sql', 12 | formData: { 13 | q: 'SELECT 2 as n' 14 | }, 15 | headers: { host: 'vizzuality.cartodb.com' }, 16 | method: 'POST' 17 | }, {}, function (err, res) { 18 | assert.ifError(err); 19 | const response = JSON.parse(res.body); 20 | assert.strictEqual(typeof (response.time) !== 'undefined', true); 21 | assert.strictEqual(response.total_rows, 1); 22 | 
assert.deepStrictEqual(response.rows, [{ n: 2 }]); 23 | done(); 24 | }); 25 | }); 26 | }); 27 | -------------------------------------------------------------------------------- /test/acceptance/regressions-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var server = require('../../lib/server')(); 6 | var assert = require('../support/assert'); 7 | var qs = require('querystring'); 8 | 9 | describe('regressions', function () { 10 | it('issue #224: tables with . (dot) in name works and can be queried', function (done) { 11 | function createRequest (sqlQuery) { 12 | return { 13 | url: '/api/v1/sql?' + qs.stringify({ 14 | q: sqlQuery, 15 | api_key: 1234 16 | }), 17 | headers: { 18 | host: 'vizzuality.cartodb.com' 19 | }, 20 | method: 'GET' 21 | }; 22 | } 23 | 24 | var responseOk = { 25 | statusCode: 200 26 | }; 27 | 28 | assert.response(server, createRequest('CREATE TABLE "foo.bar" (a int);'), responseOk, 29 | function (err) { 30 | if (err) { 31 | return done(err); 32 | } 33 | 34 | assert.response(server, createRequest('INSERT INTO "foo.bar" (a) values (1), (2)'), responseOk, 35 | function (err, res) { 36 | if (err) { 37 | return done(err); 38 | } 39 | var parsedBody = JSON.parse(res.body); 40 | assert.strictEqual(parsedBody.total_rows, 2); 41 | 42 | assert.response(server, createRequest('SELECT * FROM "foo.bar"'), responseOk, 43 | function (err, res) { 44 | if (err) { 45 | return done(err); 46 | } 47 | 48 | // table should not get a cache channel as it won't get invalidated 49 | assert.ok(!Object.prototype.hasOwnProperty.call(res.headers, 'x-cache-channel')); 50 | var parsedBody = JSON.parse(res.body); 51 | assert.strictEqual(parsedBody.total_rows, 2); 52 | assert.deepStrictEqual(parsedBody.rows, [{ a: 1 }, { a: 2 }]); 53 | 54 | // delete table 55 | assert.response(server, createRequest('DROP TABLE "foo.bar"'), responseOk, done); 56 | } 57 | ); 58 | } 59 | ); 60 | } 61 | ); 62 | }); 63 | }); 64 | -------------------------------------------------------------------------------- /test/acceptance/stream-responses-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var server = require('../../lib/server')(); 6 | var assert = require('../support/assert'); 7 | var querystring = require('querystring'); 8 | 9 | describe('stream-responses', function () { 10 | function createFailingQueryRequest (format) { 11 | var params = { 12 | q: 'SELECT the_geom, 100/(cartodb_id - 3) cdb_ratio FROM untitle_table_4' 13 | }; 14 | 15 | if (format) { 16 | params.format = format; 17 | } 18 | 19 | return { 20 | url: '/api/v1/sql?' 
+ querystring.stringify(params), 21 | headers: { 22 | host: 'vizzuality.cartodb.com' 23 | }, 24 | method: 'GET' 25 | }; 26 | } 27 | 28 | var okResponse = { 29 | status: 200 30 | }; 31 | 32 | describe('format-json', function () { 33 | it('should close on error and error message must be part of the response', function (done) { 34 | assert.response( 35 | server, 36 | createFailingQueryRequest(), 37 | okResponse, 38 | function (err, res) { 39 | assert.ifError(err); 40 | var parsedBody = JSON.parse(res.body); 41 | assert.strictEqual(parsedBody.rows.length, 2); 42 | assert.deepStrictEqual(parsedBody.error, ['division by zero']); 43 | done(); 44 | } 45 | ); 46 | }); 47 | }); 48 | 49 | describe('format-geojson', function () { 50 | it('should close on error and error message must be part of the response', function (done) { 51 | assert.response( 52 | server, 53 | createFailingQueryRequest('geojson'), 54 | okResponse, 55 | function (err, res) { 56 | assert.ifError(err); 57 | var parsedBody = JSON.parse(res.body); 58 | assert.strictEqual(parsedBody.features.length, 2); 59 | assert.deepStrictEqual(parsedBody.error, ['division by zero']); 60 | done(); 61 | } 62 | ); 63 | }); 64 | }); 65 | }); 66 | -------------------------------------------------------------------------------- /test/acceptance/system-queries-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var server = require('../../lib/server')(); 6 | var assert = require('../support/assert'); 7 | var querystring = require('querystring'); 8 | 9 | describe('system-queries', function () { 10 | var systemQueriesSuitesToTest = [ 11 | { 12 | desc: 'pg_ queries work with api_key and fail otherwise', 13 | queries: [ 14 | 'SELECT * FROM pg_attribute', 15 | 'SELECT * FROM PG_attribute', 16 | 'SELECT * FROM "pg_attribute"', 17 | 'SELECT a.* FROM untitle_table_4 a,pg_attribute', 18 | 'SELECT * FROM geometry_columns' 19 | ], 20 | api_key_works: true, 21 | no_api_key_works: false 22 | }, 23 | { 24 | desc: 'Possible false positive queries will work with api_key and without it', 25 | queries: [ 26 | "SELECT 'pg_'", 27 | 'SELECT pg_attribute FROM ( select 1 as pg_attribute ) as f', 28 | 'SELECT * FROM cpg_test' 29 | ], 30 | api_key_works: true, 31 | no_api_key_works: true 32 | } 33 | ]; 34 | 35 | systemQueriesSuitesToTest.forEach(function (suiteToTest) { 36 | var apiKeyStatusErrorCode = suiteToTest.api_key_works ? 200 : 403; 37 | testSystemQueries(suiteToTest.desc + ' with api_key', suiteToTest.queries, apiKeyStatusErrorCode, '1234'); 38 | var noApiKeyStatusErrorCode = suiteToTest.no_api_key_works ? 200 : 403; 39 | testSystemQueries(suiteToTest.desc, suiteToTest.queries, noApiKeyStatusErrorCode); 40 | }); 41 | 42 | function testSystemQueries (description, queries, statusErrorCode, apiKey) { 43 | queries.forEach(function (query) { 44 | it('[' + description + '] query: ' + query, function (done) { 45 | var queryStringParams = { q: query }; 46 | if (apiKey) { 47 | queryStringParams.api_key = apiKey; 48 | } 49 | var request = { 50 | headers: { host: 'vizzuality.cartodb.com' }, 51 | method: 'GET', 52 | url: '/api/v1/sql?' 
+ querystring.stringify(queryStringParams) 53 | }; 54 | assert.response(server, request, function (err, response) { 55 | assert.ifError(err); 56 | assert.strictEqual(response.statusCode, statusErrorCode); 57 | done(); 58 | }); 59 | }); 60 | }); 61 | } 62 | }); 63 | -------------------------------------------------------------------------------- /test/acceptance/timeout-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /** 4 | * 5 | * Requires the database and tables setup in config/environments/test.js to exist 6 | * Ensure the user is present in the pgbouncer auth file too 7 | * TODO: Add OAuth tests. 8 | * 9 | * To run this test, ensure that cartodb_test_user_1_db metadata exists 10 | * in Redis for the vizzuality.cartodb.com domain 11 | * 12 | * SELECT 5 13 | * HSET rails:users:vizzuality id 1 14 | * HSET rails:users:vizzuality database_name cartodb_test_user_1_db 15 | * 16 | */ 17 | require('../helper'); 18 | 19 | var assert = require('../support/assert'); 20 | var step = require('step'); 21 | var server = require('../../lib/server'); 22 | 23 | describe('timeout', function () { 24 | // See https://github.com/CartoDB/CartoDB-SQL-API/issues/128 25 | it('after configured milliseconds', function (done) { 26 | var testTimeout = 1; 27 | var timeoutBackup = global.settings.node_socket_timeout; 28 | global.settings.node_socket_timeout = testTimeout; 29 | step( 30 | function sendLongQuery () { 31 | assert.response(server(), { 32 | url: '/api/v1/sql?q=SELECT+count(*)+FROM+generate_series(1,100000)', 33 | method: 'GET', 34 | headers: { host: 'vizzuality.localhost' } 35 | }, {}, this); 36 | }, 37 | function checkResponse (err/*, res */) { 38 | assert.ok(err); 39 | assert.ok(err.message.match(/hang up/), err); 40 | return null; 41 | }, 42 | function finish (err) { 43 | global.settings.node_socket_timeout = timeoutBackup; 44 | done(err); 45 | } 46 | ); 47 | }); 48 | 49 | // TODO: check that the query is interrupted on timeout! 50 | // See #129 51 | }); 52 | -------------------------------------------------------------------------------- /test/acceptance/transaction-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var assert = require('../support/assert'); 6 | var qs = require('querystring'); 7 | var request = require('request'); 8 | 9 | describe('transaction', function () { 10 | var SERVER_PORT = 5554; 11 | 12 | var server; 13 | before(function (done) { 14 | server = require('../../lib/server')(); 15 | this.listener = server.listen(SERVER_PORT, '127.0.0.1'); 16 | this.listener.on('listening', done); 17 | }); 18 | 19 | after(function (done) { 20 | this.listener.close(done); 21 | }); 22 | 23 | var sqlRequest = request.defaults({ 24 | headers: { host: 'vizzuality.localhost' } 25 | }); 26 | 27 | function requestUrl (query) { 28 | return 'http://127.0.0.1:' + SERVER_PORT + '/api/v1/sql?' 
+ qs.stringify({ q: query }); 29 | } 30 | 31 | var errorQuery = 'BEGIN; PREPARE _pstm AS select error; EXECUTE _pstm; COMMIT;'; 32 | 33 | it('should NOT fail to second request after error in transaction', function (done) { 34 | sqlRequest(requestUrl(errorQuery), function (err, response, body) { 35 | assert.ok(!err); 36 | assert.strictEqual(response.statusCode, 400); 37 | 38 | var parsedBody = JSON.parse(body); 39 | assert.ok(parsedBody); 40 | assert.deepStrictEqual(parsedBody, { error: ['column "error" does not exist'] }); 41 | 42 | sqlRequest(requestUrl('select 1 as foo'), function (err, response, body) { 43 | assert.ok(!err); 44 | assert.strictEqual(response.statusCode, 200); 45 | 46 | var parsedBody = JSON.parse(body); 47 | assert.ok(parsedBody); 48 | assert.deepStrictEqual(parsedBody.rows, [{ foo: 1 }]); 49 | 50 | done(); 51 | }); 52 | }); 53 | }); 54 | }); 55 | -------------------------------------------------------------------------------- /test/helper.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | let configFileName = process.env.NODE_ENV; 4 | if (process.env.CARTO_SQL_API_ENV_BASED_CONF) { 5 | // we override the file with the one with env vars 6 | configFileName = 'config'; 7 | } 8 | 9 | global.settings = require(`../config/environments/${configFileName}.js`); 10 | process.env.NODE_ENV = 'test'; 11 | -------------------------------------------------------------------------------- /test/integration/batch/job-publisher-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | 5 | var BATCH_SOURCE = '../../../lib/batch/'; 6 | 7 | var assert = require('../../support/assert'); 8 | 9 | var redisUtils = require('../../support/redis-utils'); 10 | 11 | var Channel = require(BATCH_SOURCE + 'pubsub/channel'); 12 | var JobPublisher = require(BATCH_SOURCE + 'pubsub/job-publisher'); 13 | 14 | var HOST = 'wadus'; 15 | 16 | describe('job publisher', function () { 17 | var jobPublisher = new JobPublisher(redisUtils.getPool()); 18 | 19 | it('.publish() should publish in job channel', function (done) { 20 | redisUtils.getPool().acquire(Channel.DB) 21 | .then(client => { 22 | client.subscribe(Channel.NAME); 23 | 24 | client.on('message', function (channel, host) { 25 | assert.strictEqual(host, HOST); 26 | assert.strictEqual(channel, Channel.NAME); 27 | client.unsubscribe(Channel.NAME); 28 | redisUtils.getPool().release(Channel.DB, client) 29 | .then(() => done()) 30 | .catch((err) => done(err)); 31 | }); 32 | 33 | jobPublisher.publish(HOST); 34 | }) 35 | .catch((err) => done(err)); 36 | }); 37 | }); 38 | -------------------------------------------------------------------------------- /test/integration/batch/locker-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | 5 | var assert = require('../../support/assert'); 6 | var redisUtils = require('../../support/redis-utils'); 7 | var Locker = require('../../../lib/batch/leader/locker'); 8 | 9 | describe('locker', function () { 10 | var host = 'localhost'; 11 | 12 | var TTL = 500; 13 | 14 | var config = { ttl: TTL, pool: redisUtils.getPool() }; 15 | 16 | it('should lock and unlock', function (done) { 17 | var lockerA = Locker.create('redis-distlock', config); 18 | var lockerB = Locker.create('redis-distlock', config); 19 | lockerA.lock(host, function (err, lock) { 20 | if (err) { 21 | return done(err); 22 | } 23 | 
assert.ok(lock); 24 | 25 | // others can't lock on same host 26 | lockerB.lock(host, function (err) { 27 | assert.ok(err); 28 | assert.strictEqual(err.name, 'LockError'); 29 | 30 | lockerA.unlock(host, function (err) { 31 | assert.ok(!err); 32 | // others can lock after unlock 33 | lockerB.lock(host, function (err, lock2) { 34 | assert.ok(!err); 35 | assert.ok(lock2); 36 | lockerB.unlock(host, done); 37 | }); 38 | }); 39 | }); 40 | }); 41 | }); 42 | 43 | it('should lock and keep locking until unlock', function (done) { 44 | var lockerA = Locker.create('redis-distlock', config); 45 | var lockerB = Locker.create('redis-distlock', config); 46 | lockerA.lock(host, function (err, lock) { 47 | if (err) { 48 | return done(err); 49 | } 50 | setTimeout(function () { 51 | lockerB.lock(host, function (err) { 52 | assert.ok(err); 53 | 54 | assert.ok(lock); 55 | lockerA.unlock(host, done); 56 | }); 57 | }, 2 * TTL); 58 | }); 59 | }); 60 | }); 61 | -------------------------------------------------------------------------------- /test/integration/stream-copy-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | const assert = require('assert'); 5 | 6 | const StreamCopy = require('../../lib/services/stream-copy'); 7 | 8 | describe('stream copy', function () { 9 | it('uses batch api port', function (done) { 10 | const userDbParams = { 11 | dbname: 'cartodb_test_user_1_db', 12 | dbuser: 'test_cartodb_user_1', 13 | pass: 'test_cartodb_user_1_pass', 14 | port: 'invalid_port' 15 | }; 16 | const sql = 'COPY dummy_table FROM STDIN'; 17 | const streamCopy = new StreamCopy(sql, userDbParams); 18 | assert.strictEqual(streamCopy.dbParams.port, global.settings.db_batch_port); 19 | done(); 20 | }); 21 | }); 22 | -------------------------------------------------------------------------------- /test/support/.gitignore: -------------------------------------------------------------------------------- 1 | CDB_*.sql 2 | -------------------------------------------------------------------------------- /test/support/csv/copy_test_table.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CartoDB/CartoDB-SQL-API/2d3af7f5b6000197343dc209892b68d0428f408b/test/support/csv/copy_test_table.csv.gz -------------------------------------------------------------------------------- /test/support/db_utils.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const { Client } = require('pg'); 4 | 5 | const dbConfig = { 6 | db_user: process.env.PGUSER || 'postgres', 7 | db_host: global.settings.db_host, 8 | db_port: global.settings.db_port, 9 | db_batch_port: global.settings.db_batch_port 10 | }; 11 | 12 | module.exports.resetPgBouncerConnections = function (callback) { 13 | // We assume there's no pgbouncer if db_port === db_batch_port 14 | if (dbConfig.db_port === dbConfig.db_batch_port) { 15 | return callback(); 16 | } 17 | 18 | const client = new Client({ 19 | database: 'pgbouncer', 20 | user: dbConfig.db_user, 21 | host: dbConfig.db_host, 22 | port: dbConfig.db_port 23 | }); 24 | 25 | // We just chain a PAUSE followed by a RESUME to reset internal pool connections of PgBouncer 26 | client.connect(); 27 | client.query('PAUSE', err => { 28 | if (err) { 29 | return callback(err); 30 | } 31 | client.query('RESUME', err => { 32 | client.end(); 33 | return callback(err); 34 | }); 35 | }); 36 | }; 37 | 
-------------------------------------------------------------------------------- /test/support/libredis_cell.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CartoDB/CartoDB-SQL-API/2d3af7f5b6000197343dc209892b68d0428f408b/test/support/libredis_cell.dylib -------------------------------------------------------------------------------- /test/support/libredis_cell.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CartoDB/CartoDB-SQL-API/2d3af7f5b6000197343dc209892b68d0428f408b/test/support/libredis_cell.so -------------------------------------------------------------------------------- /test/support/middlewares/teapot-headers.js: -------------------------------------------------------------------------------- 1 | exports.middlewares = [ 2 | function () { 3 | return function teapotHeaderMiddleware (req, res, next) { 4 | res.header('X-What-Am-I', 'I\'m a teapot'); 5 | return next(); 6 | }; 7 | }, 8 | function () { 9 | return function teapotAnotherHeaderMiddleware (req, res, next) { 10 | res.header('X-Again-What-Am-I', 'I\'m a teapot'); 11 | return next(); 12 | }; 13 | } 14 | ]; 15 | -------------------------------------------------------------------------------- /test/support/middlewares/teapot-response.js: -------------------------------------------------------------------------------- 1 | exports.middlewares = function () { 2 | return function teapotMiddleware (req, res) { 3 | res.status(418).send('I\'m a teapot'); 4 | }; 5 | }; 6 | -------------------------------------------------------------------------------- /test/support/redis-utils.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var RedisPool = require('redis-mpool'); 4 | 5 | var redisConfig = { 6 | host: global.settings.redis_host, 7 | port: global.settings.redis_port, 8 | max: global.settings.redisPool, 9 | idleTimeoutMillis: global.settings.redisIdleTimeoutMillis, 10 | reapIntervalMillis: global.settings.redisReapIntervalMillis 11 | }; 12 | var metadataBackend = require('cartodb-redis')(redisConfig); 13 | 14 | module.exports.clean = function clean (db, pattern, callback) { 15 | metadataBackend.redisCmd(db, 'KEYS', [pattern], function (err, keys) { 16 | if (err) { 17 | return callback(err); 18 | } 19 | 20 | if (!keys || !keys.length) { 21 | return callback(); 22 | } 23 | 24 | metadataBackend.redisCmd(db, 'DEL', keys, callback); 25 | }); 26 | }; 27 | 28 | module.exports.getConfig = function getConfig () { 29 | return redisConfig; 30 | }; 31 | 32 | var pool = new RedisPool(redisConfig); 33 | module.exports.getPool = function getPool () { 34 | return pool; 35 | }; 36 | 37 | module.exports.configureUserMetadata = function configureUserMetadata (action, params, callback) { 38 | metadataBackend.redisCmd(5, action, params, callback); 39 | }; 40 | -------------------------------------------------------------------------------- /test/support/sql/quota_mock.sql: -------------------------------------------------------------------------------- 1 | -- See https://github.com/CartoDB/cartodb-postgresql/blob/master/scripts-available/CDB_Quota.sql 2 | 3 | CREATE OR REPLACE FUNCTION _CDB_UserQuotaInBytes() 4 | RETURNS int8 AS 5 | $$ 6 | -- 250 MB 7 | SELECT (250 * 1024 * 1024)::int8; 8 | $$ LANGUAGE sql IMMUTABLE; 9 | 10 | CREATE OR REPLACE FUNCTION CDB_UserDataSize(schema_name TEXT) 11 | RETURNS bigint AS 12 | $$ 13 | BEGIN 14 | -- 100 MB 15 | 
RETURN 100 * 1024 * 1024; 16 | END; 17 | $$ LANGUAGE 'plpgsql' VOLATILE; 18 | -------------------------------------------------------------------------------- /test/unit/batch/job-publisher-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var Channel = require('../../../lib/batch/pubsub/channel'); 4 | var JobPublisher = require('../../../lib/batch/pubsub/job-publisher'); 5 | var assert = require('assert'); 6 | 7 | describe('batch API job publisher', function () { 8 | beforeEach(function () { 9 | var self = this; 10 | this.host = 'irrelevantHost'; 11 | this.redis = { 12 | createClient: function () { 13 | return this; 14 | }, 15 | publish: function () { 16 | var isValidFirstArg = arguments[0] === Channel.NAME; 17 | var isValidSecondArg = arguments[1] === self.host; 18 | self.redis.publishIsCalledWithValidArgs = isValidFirstArg && isValidSecondArg; 19 | }, 20 | on: function () {}, 21 | ping: function (cb) { 22 | cb(); 23 | } 24 | }; 25 | this.pool = { 26 | acquire: function (db) { 27 | return Promise.resolve(self.redis); 28 | } 29 | }; 30 | 31 | this.jobPublisher = new JobPublisher(this.pool); 32 | }); 33 | 34 | it('.publish() should publish new messages', function () { 35 | this.jobPublisher.publish(this.host); 36 | setImmediate(() => { 37 | assert.ok(this.redis.publishIsCalledWithValidArgs); 38 | }); 39 | }); 40 | }); 41 | -------------------------------------------------------------------------------- /test/unit/batch/job-queue-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var JobQueue = require('../../../lib/batch/job-queue'); 4 | var assert = require('assert'); 5 | 6 | describe('batch API job queue', function () { 7 | beforeEach(function () { 8 | this.metadataBackend = { 9 | redisCmd: function () { 10 | var callback = arguments[arguments.length - 1]; 11 | process.nextTick(function () { 12 | callback(null, 'irrelevantJob'); 13 | }); 14 | }, 15 | redisMultiCmd: function () { 16 | var callback = arguments[arguments.length - 1]; 17 | process.nextTick(function () { 18 | callback(null, 'irrelevantJob'); 19 | }); 20 | } 21 | }; 22 | this.jobPublisher = { 23 | publish: function () {} 24 | }; 25 | this.logger = { 26 | debug: function () {} 27 | }; 28 | this.jobQueue = new JobQueue(this.metadataBackend, this.jobPublisher, this.logger); 29 | }); 30 | 31 | it('.enqueue() should enqueue the provided job', function (done) { 32 | this.jobQueue.enqueue('irrelevantJob', 'irrelevantHost', function (err) { 33 | assert.ok(!err); 34 | done(); 35 | }); 36 | }); 37 | 38 | it('.dequeue() should dequeue the next job', function (done) { 39 | this.jobQueue.dequeue('irrelevantHost', function (err) { 40 | assert.ok(!err); 41 | done(); 42 | }); 43 | }); 44 | 45 | it('.enqueueFirst() should dequeue the next job', function (done) { 46 | this.jobQueue.enqueueFirst('irrelevantJob', 'irrelevantHost', function (err) { 47 | assert.ok(!err); 48 | done(); 49 | }); 50 | }); 51 | }); 52 | -------------------------------------------------------------------------------- /test/unit/batch/job-subscriber-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var Channel = require('../../../lib/batch/pubsub/channel'); 4 | var JobSubscriber = require('../../../lib/batch/pubsub/job-subscriber'); 5 | var assert = require('assert'); 6 | 7 | describe('batch API job subscriber', function () { 8 | beforeEach(function () { 9 | var self = this; 10 
| 11 | this.onMessageListener = function () {}; 12 | this.redis = { 13 | createClient: function () { 14 | return this; 15 | }, 16 | subscribe: function () { 17 | var isValidFirstArg = arguments[0] === Channel.NAME; 18 | self.redis.subscribeIsCalledWithValidArgs = isValidFirstArg; 19 | }, 20 | on: function () { 21 | if (arguments[0] === 'message') { 22 | self.redis.onIsCalledWithValidArgs = true; 23 | } 24 | }, 25 | unsubscribe: function () { 26 | var isValidFirstArg = arguments[0] === Channel.NAME; 27 | self.redis.unsubscribeIsCalledWithValidArgs = isValidFirstArg; 28 | }, 29 | scan: function (params, callback) { 30 | return callback(null, ['0']); 31 | }, 32 | removeAllListeners: function () { 33 | return this; 34 | }, 35 | smembers: function (key, callback) { 36 | callback(null, []); 37 | }, 38 | connected: true 39 | }; 40 | this.pool = { 41 | acquire: function () { 42 | return Promise.resolve(self.redis); 43 | }, 44 | release: function (/* db, client */) { 45 | return Promise.resolve(); 46 | } 47 | }; 48 | this.queueSeeker = { 49 | seek: function () { 50 | var callback = arguments[1]; 51 | 52 | callback(null, []); 53 | } 54 | }; 55 | 56 | this.jobSubscriber = new JobSubscriber(this.pool, this.queueSeeker); 57 | }); 58 | 59 | it('.subscribe() should listen for incoming messages', function () { 60 | this.jobSubscriber.subscribe(this.onMessageListener); 61 | setImmediate(() => { 62 | assert.ok(this.redis.onIsCalledWithValidArgs); 63 | assert.ok(this.redis.subscribeIsCalledWithValidArgs); 64 | }); 65 | }); 66 | 67 | it('.unsubscribe() should stop listening for incoming messages', function () { 68 | this.jobSubscriber.subscribe(this.onMessageListener); 69 | this.jobSubscriber.unsubscribe(); 70 | setImmediate(() => { 71 | assert.ok(this.redis.unsubscribeIsCalledWithValidArgs); 72 | }); 73 | }); 74 | }); 75 | -------------------------------------------------------------------------------- /test/unit/error-handler-factory-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const assert = require('assert'); 4 | const errorHandlerFactory = require('../../lib/services/error-handler-factory'); 5 | const ErrorHandler = require('../../lib/services/error-handler'); 6 | const { codeToCondition } = require('../../lib/postgresql/error-codes'); 7 | 8 | const rateLimitError = new Error( 9 | 'You are over platform\'s limits. 
Please contact us to know more details' 10 | ); 11 | rateLimitError.http_status = 429; 12 | rateLimitError.context = 'limit'; 13 | rateLimitError.detail = 'rate-limit'; 14 | 15 | const cases = [ 16 | { 17 | title: 'postgres error', 18 | error: new Error(codeToCondition['02000']) 19 | }, 20 | { 21 | title: 'rate limit error', 22 | error: rateLimitError 23 | } 24 | ]; 25 | 26 | describe('error-handler-factory', function () { 27 | cases.forEach(({ title, error }) => { 28 | it(title, function () { 29 | const errorHandler = errorHandlerFactory(error); 30 | const expectedError = new ErrorHandler({ 31 | message: error.message, 32 | context: error.context, 33 | detail: error.detail, 34 | hint: error.hint, 35 | httpStatus: error.http_status, 36 | name: codeToCondition[error.code] || error.name 37 | }); 38 | 39 | assert.deepStrictEqual(errorHandler, expectedError); 40 | }); 41 | }); 42 | 43 | it('timeout error', function () { 44 | const error = new Error('statement timeout'); 45 | const errorHandler = errorHandlerFactory(error); 46 | const expectedError = new ErrorHandler({ 47 | message: 'You are over platform\'s limits: SQL query timeout error.' + 48 | ' Refactor your query before running again or contact CARTO support for more details.', 49 | context: 'limit', 50 | detail: 'datasource', 51 | httpStatus: 429 52 | }); 53 | 54 | assert.deepStrictEqual(errorHandler, expectedError); 55 | }); 56 | 57 | it('permission denied error', function () { 58 | const error = new Error('permission denied'); 59 | const errorHandler = errorHandlerFactory(error); 60 | const expectedError = new ErrorHandler({ 61 | message: error.message, 62 | context: error.context, 63 | detail: error.detail, 64 | hint: error.hint, 65 | httpStatus: 403, 66 | name: codeToCondition[error.code] || error.name 67 | }); 68 | 69 | assert.deepStrictEqual(errorHandler, expectedError); 70 | }); 71 | }); 72 | -------------------------------------------------------------------------------- /test/unit/health-check-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | var assert = require('assert'); 6 | var HealthCheck = require('../../lib/monitoring/health-check'); 7 | 8 | var metadataBackend = {}; 9 | 10 | function PSQL (dbParams) { 11 | this.params = dbParams; 12 | } 13 | 14 | var healthCheck = new HealthCheck(metadataBackend, PSQL); 15 | 16 | describe('health checks', function () { 17 | it('errors if disabled file exists', function (done) { 18 | var fs = require('fs'); 19 | 20 | var readFileFn = fs.readFile; 21 | fs.readFile = function (filename, callback) { 22 | callback(null, 'Maintenance'); 23 | }; 24 | healthCheck.check(function (err) { 25 | assert.strictEqual(err.message, 'Maintenance'); 26 | assert.strictEqual(err.http_status, 503); 27 | fs.readFile = readFileFn; 28 | done(); 29 | }); 30 | }); 31 | 32 | it('does not err if disabled file does not exists', function (done) { 33 | var fs = require('fs'); 34 | 35 | var readFileFn = fs.readFile; 36 | fs.readFile = function (filename, callback) { 37 | callback(new Error('ENOENT'), null); 38 | }; 39 | healthCheck.check(function (err) { 40 | assert.strictEqual(err, undefined); 41 | fs.readFile = readFileFn; 42 | done(); 43 | }); 44 | }); 45 | }); 46 | -------------------------------------------------------------------------------- /test/unit/model/bin-encoder-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../../helper'); 4 | var 
assert = require('assert'); 5 | 6 | var ArrayBufferSer = require('../../../lib/models/bin-encoder'); 7 | 8 | describe('ArrayBufferSer', function () { 9 | it('calculate size for basic types', function () { 10 | var b = new ArrayBufferSer(ArrayBufferSer.INT16, [1, 2, 3, 4]); 11 | assert.strictEqual(4 * 2, b.getDataSize()); 12 | 13 | b = new ArrayBufferSer(ArrayBufferSer.INT8, [1, 2, 3, 4]); 14 | assert.strictEqual(4, b.getDataSize()); 15 | 16 | b = new ArrayBufferSer(ArrayBufferSer.INT32, [1, 2, 3, 4]); 17 | assert.strictEqual(4 * 4, b.getDataSize()); 18 | }); 19 | 20 | it('calculate size for arrays', function () { 21 | var b = new ArrayBufferSer(ArrayBufferSer.STRING, ['test', 'kease']); 22 | assert.strictEqual((b.headerSize + 4 + 5) * 2, b.getDataSize()); 23 | 24 | var ba = new ArrayBufferSer(ArrayBufferSer.INT16, [1, 2, 3, 4]); 25 | var bc = new ArrayBufferSer(ArrayBufferSer.INT16, [1, 4]); 26 | 27 | b = new ArrayBufferSer(ArrayBufferSer.BUFFER, [ba, bc]); 28 | assert.strictEqual((b.headerSize + 4 + 2) * 2, b.getDataSize()); 29 | assert.strictEqual(b.type, ArrayBufferSer.BUFFER); 30 | }); 31 | 32 | function assertBufferEquals (a, b) { 33 | assert.strictEqual(a.length, b.length); 34 | for (var i = 0; i < a.length; ++i) { 35 | assert.strictEqual(a[i], b[i], 'byte i ' + i + ' is different: ' + a[i] + ' != ' + b[i]); 36 | } 37 | } 38 | 39 | it('binary data is ok', function () { 40 | var b = new ArrayBufferSer(ArrayBufferSer.INT16, [1, 2, 3, 4]); 41 | var bf = Buffer.from([0, 0, 0, ArrayBufferSer.INT16, 0, 0, 0, 8, 1, 0, 2, 0, 3, 0, 4, 0]); 42 | assertBufferEquals(bf, b.buffer); 43 | }); 44 | 45 | it('binary data is ok with arrays', function () { 46 | var ba = new ArrayBufferSer(ArrayBufferSer.INT16, [1, 2, 3, 4]); 47 | var bc = new ArrayBufferSer(ArrayBufferSer.INT16, [1, 4]); 48 | 49 | var b = new ArrayBufferSer(ArrayBufferSer.BUFFER, [ba, bc]); 50 | var bf = Buffer.from([ 51 | 0, 0, 0, ArrayBufferSer.BUFFER, // type 52 | 0, 0, 0, 28, 53 | 0, 0, 0, ArrayBufferSer.INT16, 0, 0, 0, 8, 1, 0, 2, 0, 3, 0, 4, 0, 54 | 0, 0, 0, ArrayBufferSer.INT16, 0, 0, 0, 4, 1, 0, 4, 0]); 55 | assertBufferEquals(bf, b.buffer); 56 | }); 57 | 58 | it('binary data is ok with strings', function () { 59 | var s = 'test'; 60 | var b = new ArrayBufferSer(ArrayBufferSer.STRING, [s]); 61 | var bf = Buffer.from([ 62 | 0, 0, 0, ArrayBufferSer.STRING, // type 63 | 0, 0, 0, 16, 64 | 0, 0, 0, ArrayBufferSer.UINT16, 65 | 0, 0, 0, 8, 66 | s.charCodeAt(0), 0, 67 | s.charCodeAt(1), 0, 68 | s.charCodeAt(2), 0, 69 | s.charCodeAt(3), 0 70 | ]); 71 | assertBufferEquals(bf, b.buffer); 72 | }); 73 | }); 74 | -------------------------------------------------------------------------------- /test/unit/pubsub-metrics-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('../helper'); 4 | 5 | const sinon = require('sinon'); 6 | const assert = require('../support/assert'); 7 | const PubSubMetricsService = require('../../lib/services/pubsub-metrics'); 8 | 9 | const fakeTopic = { 10 | name: 'test-topic', 11 | publish: sinon.stub().returns(Promise.resolve()) 12 | }; 13 | 14 | const fakePubSub = { 15 | topic: () => fakeTopic 16 | }; 17 | 18 | const eventAttributes = { 19 | event_source: 'test', 20 | user_id: '123', 21 | event_group_id: '1', 22 | response_code: '200', 23 | source_domain: 'vizzuality.cartodb.com', 24 | event_time: new Date().toISOString(), 25 | event_version: '1' 26 | }; 27 | 28 | describe('pubsub metrics service', function () { 29 | it('should not send event if 
not enabled', function () { 30 | const pubSubMetricsService = new PubSubMetricsService(fakePubSub, false); 31 | 32 | pubSubMetricsService.sendEvent('test-event', eventAttributes); 33 | assert(fakeTopic.publish.notCalled); 34 | }); 35 | 36 | it('should send event if enabled', function () { 37 | const pubSubMetricsService = new PubSubMetricsService(fakePubSub, true); 38 | 39 | pubSubMetricsService.sendEvent('test-event', eventAttributes); 40 | assert(fakeTopic.publish.calledOnceWith(Buffer.from('test-event'), eventAttributes)); 41 | }); 42 | }); 43 | -------------------------------------------------------------------------------- /test/unit/query-info-test.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const assert = require('assert'); 4 | const queryInfo = require('../../lib/utils/query-info'); 5 | 6 | describe('query info', function () { 7 | describe('copy format', function () { 8 | describe('csv', function () { 9 | const validQueries = [ 10 | "COPY copy_endpoints_test (id, name) FROM STDIN WITH (FORMAT CSV, DELIMITER ',', HEADER true)", 11 | "COPY copy_endpoints_test (id, name) FROM STDIN WITH (FORMAT CSV, DELIMITER ',', HEADER true)", 12 | "COPY copy_endpoints_test (id, name) FROM STDIN WITH (FORMAT CSV , DELIMITER ',', HEADER true)", 13 | 'COPY copy_endpoints_test (id, name) FROM STDIN WITH (FORMAT CSV)', 14 | 'COPY copy_endpoints_test FROM STDIN WITH(FORMAT csv,HEADER true)' 15 | ]; 16 | 17 | validQueries.forEach(query => { 18 | it(query, function () { 19 | const result = queryInfo.getFormatFromCopyQuery(query); 20 | assert.strictEqual(result, 'CSV'); 21 | }); 22 | }); 23 | }); 24 | 25 | describe('text', function () { 26 | const validQueries = [ 27 | 'COPY copy_endpoints_test (id, name) FROM STDIN WITH (FORMAT TEXT)', 28 | 'COPY copy_endpoints_test (id, name) FROM STDIN' 29 | ]; 30 | 31 | validQueries.forEach(query => { 32 | it(query, function () { 33 | const result = queryInfo.getFormatFromCopyQuery(query); 34 | assert.strictEqual(result, 'TEXT'); 35 | }); 36 | }); 37 | }); 38 | 39 | describe('binary', function () { 40 | const validQueries = [ 41 | 'COPY copy_endpoints_test (id, name) FROM STDIN WITH (FORMAT BINARY)' 42 | ]; 43 | 44 | validQueries.forEach(query => { 45 | it(query, function () { 46 | const result = queryInfo.getFormatFromCopyQuery(query); 47 | assert.strictEqual(result, 'BINARY'); 48 | }); 49 | }); 50 | }); 51 | 52 | describe('should fail', function () { 53 | const invalidQueries = [ 54 | 'COPY copy_endpoints_test (id, name) FROM STDIN WITH (FORMAT ERROR)', 55 | 'SELECT * from copy_endpoints_test' 56 | ]; 57 | 58 | invalidQueries.forEach(query => { 59 | it(query, function () { 60 | const result = queryInfo.getFormatFromCopyQuery(query); 61 | assert.strictEqual(result, false); 62 | }); 63 | }); 64 | }); 65 | }); 66 | }); 67 | --------------------------------------------------------------------------------