├── .gitignore ├── Dockerfile ├── README.md ├── files.txt ├── index.js ├── lib ├── common.js ├── constants.js ├── importer.js ├── kmsCrypto.js └── s3-keys.js ├── package.json ├── run.js └── setup.js /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | deploy.env 3 | event.json 4 | node_modules 5 | .DS_Store 6 | dist 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:0.12 2 | 3 | RUN mkdir /opt/lambda-rds-loader 4 | ADD . /opt/lambda-rds-loader 5 | RUN ln -s /opt/lambda-rds-loader/run.js /usr/local/bin/lambda-rds-loader && chmod 755 /usr/local/bin/lambda-rds-loader 6 | 7 | CMD [] -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AWS Lambda RDS Database Loader 2 | 3 | Use this function to load CSV files from any S3 location into RDS tables. Tables and columns will be auto generated if they don't exist. 4 | 5 | ## Getting Started - Lambda Execution Role 6 | You also need to add an IAM policy as shown below to the role that AWS Lambda 7 | uses when it runs. Once your function is deployed, add the following policy to 8 | the `LambdaExecRole` to enable AWS Lambda to call SNS, use DynamoDB, write Manifest 9 | files to S3, perform encryption with the AWS Key Management Service. At the moment 10 | Lambda cannot communicate over VPC and so the RDS needs to allow all external 11 | connections. 
12 | 13 | ``` 14 | { 15 | "Version": "2012-10-17", 16 | "Statement": [ 17 | { 18 | "Sid": "Stmt1424787824000", 19 | "Effect": "Allow", 20 | "Action": [ 21 | "dynamodb:DeleteItem", 22 | "dynamodb:DescribeTable", 23 | "dynamodb:GetItem", 24 | "dynamodb:ListTables", 25 | "dynamodb:PutItem", 26 | "dynamodb:Query", 27 | "dynamodb:Scan", 28 | "dynamodb:UpdateItem", 29 | "sns:GetEndpointAttributes", 30 | "sns:GetSubscriptionAttributes", 31 | "sns:GetTopicAttributes", 32 | "sns:ListTopics", 33 | "sns:Publish", 34 | "sns:Subscribe", 35 | "sns:Unsubscribe", 36 | "s3:Get*", 37 | "s3:Put*", 38 | "s3:List*", 39 | "kms:Decrypt", 40 | "kms:DescribeKey", 41 | "kms:GetKeyPolicy" 42 | ], 43 | "Resource": [ 44 | "*" 45 | ] 46 | } 47 | ] 48 | } 49 | ``` 50 | 51 | ## Getting Started - Configuration 52 | In order to set up a Lambda configuration run: 53 | ``` 54 | nodejs setup.js 55 | ``` 56 | 57 | Item | Required | Notes 58 | :---- | :--------: | :----- 59 | Enter the Region for the Configuration | Y | Any AWS Region from http://docs.aws.amazon.com/general/latest/gr/rande.html, using the short name (for example us-east-1 for US East 1) 60 | Enter the S3 Bucket | Y | Bucket name will be used to lookup RDS configuration for the particular bucket. 61 | Enter a Filename Filter Regex | N | A Regular Expression used to filter files before they are processed. 62 | Enter the RDS Host | Y | Database host. 63 | Enter the RDS Port | Y | Database port. 64 | Enter the Database Name | Y | Database to use. 65 | Enter the Schema | N | Schema to use, default: public. 66 | Enter table prefix | N | Prefix newly created tables. This is recommended if files start with numbers. 67 | Enter the folder depth from bucket root to use as table name. Use negative index to select from the input file | Y | Determines names for new tables. For example: /test/path/file.csv, index of 0 would create a table named *test* and index of -1 would create a table named *file_csv*. 68 | Should the Table be Truncated before Load? 
| N | Truncate table before loading new data. 69 | Enter the Database Username | Y | Database username. 70 | Enter the Database Password | Y | Database password. 71 | Enter the CSV Delimiter | N | CSV delimiter, default: ,. 72 | 73 | Configuration will be stored in DynamoDB database LambdaRDSLoaderConfig. 74 | 75 | ## Getting Started - Running 76 | Upload the lambda function as a zip file or use node-lambda. Create an S3 watch on an S3 location. Bucket will correspond to RDS configuration tagged with such bucket name. 77 | -------------------------------------------------------------------------------- /files.txt: -------------------------------------------------------------------------------- 1 | null -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | var Importer = require('./lib/importer'); 2 | 3 | exports.handler = function(event, context) { 4 | if(!event.Records) { 5 | console.log('No records found'); 6 | context.done(); 7 | } else { 8 | var importer = new Importer(event.Records); 9 | importer.run(); 10 | } 11 | }; 12 | -------------------------------------------------------------------------------- /lib/common.js: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | 4 | Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at 5 | 6 | http://aws.amazon.com/asl/ 7 | 8 | or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and limitations under the License. 
9 | */ 10 | 11 | var async = require('async'); 12 | require('./constants'); 13 | 14 | // function which creates a string representation of now suitable for use in S3 15 | // paths 16 | exports.getFormattedDate = function(date) { 17 | if (!date) { 18 | date = new Date(); 19 | } 20 | 21 | var hour = date.getHours(); 22 | hour = (hour < 10 ? "0" : "") + hour; 23 | 24 | var min = date.getMinutes(); 25 | min = (min < 10 ? "0" : "") + min; 26 | 27 | var sec = date.getSeconds(); 28 | sec = (sec < 10 ? "0" : "") + sec; 29 | 30 | var year = date.getFullYear(); 31 | 32 | var month = date.getMonth() + 1; 33 | month = (month < 10 ? "0" : "") + month; 34 | 35 | var day = date.getDate(); 36 | day = (day < 10 ? "0" : "") + day; 37 | 38 | return year + "-" + month + "-" + day + "-" + hour + ":" + min + ":" + sec; 39 | }; 40 | 41 | /* current time as seconds */ 42 | exports.now = function() { 43 | return new Date().getTime() / 1000; 44 | }; 45 | 46 | exports.readableTime = function(epochSeconds) { 47 | var d = new Date(0); 48 | d.setUTCSeconds(epochSeconds); 49 | return exports.getFormattedDate(d); 50 | }; 51 | 52 | exports.createTables = function(dynamoDB, callback) { 53 | // processed files table spec 54 | var pfKey = 'loadFile'; 55 | var configKey = s3prefix; 56 | var configSpec = { 57 | AttributeDefinitions : [ { 58 | AttributeName : configKey, 59 | AttributeType : 'S' 60 | } ], 61 | KeySchema : [ { 62 | AttributeName : configKey, 63 | KeyType : 'HASH' 64 | } ], 65 | TableName : configTable, 66 | ProvisionedThroughput : { 67 | ReadCapacityUnits : 5, 68 | WriteCapacityUnits : 5 69 | } 70 | }; 71 | 72 | console.log("Creating Tables in Dynamo DB if Required"); 73 | dynamoDB.createTable(configSpec, function(err, data) { 74 | if (err) { 75 | if (err.code !== 'ResourceInUseException') { 76 | console.log(Object.prototype.toString.call(err).toString()); 77 | console.log(err.toString()); 78 | process.exit(ERROR); 79 | } 80 | } 81 | }); 82 | }; 83 | 84 | exports.updateConfig = 
function(setRegion, dynamoDB, updateRequest, outerCallback) { 85 | var tryNumber = 0; 86 | var writeConfigRetryLimit = 100; 87 | 88 | async.whilst(function() { 89 | // retry until the try count is hit 90 | return tryNumber < writeConfigRetryLimit; 91 | }, function(callback) { 92 | tryNumber++; 93 | 94 | dynamoDB.updateItem(updateRequest, function(err, data) { 95 | if (err) { 96 | if (err.code === 'ResourceInUseException' || err.code === 'ResourceNotFoundException') { 97 | console.log(err.code); 98 | 99 | // retry if the table is in use after 1 second 100 | setTimeout(callback(), 1000); 101 | } else { 102 | // some other error - fail 103 | console.log(JSON.stringify(updateRequest)); 104 | console.log(err); 105 | outerCallback(err); 106 | } 107 | } else { 108 | // all OK - exit OK 109 | if (data) { 110 | console.log("Configuration for " + updateRequest.Key.s3Prefix.S + " updated in " + setRegion); 111 | outerCallback(null); 112 | } 113 | } 114 | }); 115 | }, function(error) { 116 | // never called 117 | }); 118 | }; 119 | 120 | exports.writeConfig = function(setRegion, dynamoDB, dynamoConfig, outerCallback) { 121 | var tryNumber = 0; 122 | var writeConfigRetryLimit = 100; 123 | 124 | async.whilst(function() { 125 | // retry until the try count is hit 126 | return tryNumber < writeConfigRetryLimit; 127 | }, function(callback) { 128 | tryNumber++; 129 | 130 | dynamoDB.putItem(dynamoConfig, function(err, data) { 131 | if (err) { 132 | if (err.code === 'ResourceInUseException' || err.code === 'ResourceNotFoundException') { 133 | // retry if the table is in use after 1 second 134 | setTimeout(callback(), 1000); 135 | } else { 136 | // some other error - fail 137 | console.log(JSON.stringify(dynamoConfig)); 138 | console.log(JSON.stringify(err)); 139 | if (outerCallback) 140 | outerCallback(err); 141 | } 142 | } else { 143 | // all OK - exit OK 144 | if (data) { 145 | console.log("Configuration for " + dynamoConfig.Item.s3Prefix.S + " successfully written in " 146 | + 
setRegion); 147 | if (outerCallback) 148 | outerCallback(null); 149 | } 150 | } 151 | }); 152 | }, function(error) { 153 | // never called 154 | }); 155 | }; 156 | 157 | exports.dropTables = function(dynamoDB, callback) { 158 | // drop the config table 159 | dynamoDB.deleteTable({ 160 | TableName : configTable 161 | }, function(err, data) { 162 | if (err && err.code !== 'ResourceNotFoundException') { 163 | console.log(err); 164 | process.exit(ERROR); 165 | } else { 166 | console.log("All Configuration Tables Dropped"); 167 | // call the callback requested 168 | } 169 | }); 170 | }; 171 | 172 | /* validate that the given value is a number, and if so return it */ 173 | exports.getIntValue = function(value, rl) { 174 | if (!value || value === null) { 175 | rl.close(); 176 | console.log('Null Value'); 177 | process.exit(INVALID_ARG); 178 | } else { 179 | var num = parseInt(value); 180 | 181 | if (isNaN(num)) { 182 | rl.close(); 183 | console.log('Value \'' + value + '\' is not a Number'); 184 | process.exit(INVALID_ARG); 185 | } else { 186 | return num; 187 | } 188 | } 189 | }; 190 | 191 | exports.getBooleanValue = function(value) { 192 | if (value) { 193 | if ([ 'TRUE', '1', 'YES', 'Y' ].indexOf(value.toUpperCase()) > -1) { 194 | return true; 195 | } else { 196 | return false; 197 | } 198 | } else { 199 | return false; 200 | } 201 | }; 202 | 203 | /* validate that the provided value is not null/undefined */ 204 | exports.validateNotNull = function(value, message, rl) { 205 | if (!value || value === null || value === '') { 206 | rl.close(); 207 | console.log(message); 208 | process.exit(INVALID_ARG); 209 | } 210 | }; 211 | 212 | /* turn blank lines read from STDIN to Null */ 213 | exports.blank = function(value) { 214 | if (value === '') { 215 | return null; 216 | } else { 217 | return value; 218 | } 219 | }; 220 | 221 | exports.validateArrayContains = function(array, value, rl) { 222 | if (!(array.indexOf(value) > -1)) { 223 | rl.close(); 224 | console.log('Value must 
be one of ' + array.toString()); 225 | process.exit(INVALID_ARG); 226 | } 227 | }; 228 | 229 | exports.createManifestInfo = function(config) { 230 | // manifest file will be at the configuration location, with a fixed 231 | // prefix and the date plus a random value for uniqueness across all 232 | // executing functions 233 | var dateName = exports.getFormattedDate(); 234 | var rand = Math.floor(Math.random() * 10000); 235 | 236 | var manifestInfo = { 237 | manifestBucket : config.manifestBucket.S, 238 | manifestKey : config.manifestKey.S, 239 | manifestName : 'manifest-' + dateName + '-' + rand 240 | }; 241 | manifestInfo.manifestPrefix = manifestInfo.manifestKey + '/' + manifestInfo.manifestName; 242 | manifestInfo.manifestPath = manifestInfo.manifestBucket + "/" + manifestInfo.manifestPrefix; 243 | 244 | return manifestInfo; 245 | }; 246 | 247 | exports.randomInt = function(low, high) { 248 | return Math.floor(Math.random() * (high - low) + low); 249 | }; -------------------------------------------------------------------------------- /lib/constants.js: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | 4 | Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at 5 | 6 | http://aws.amazon.com/asl/ 7 | 8 | or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and limitations under the License. 
9 | */ 10 | 11 | batchId = 'batchId'; 12 | currentBatch = 'currentBatch'; 13 | s3prefix = 's3Prefix'; 14 | lastUpdate = 'lastUpdate'; 15 | complete = 'complete'; 16 | locked = 'locked'; 17 | open = 'open'; 18 | error = 'error'; 19 | entries = 'entries'; 20 | status = 'status'; 21 | configTable = 'LambdaRDSLoaderConfig'; 22 | conditionCheckFailed = 'ConditionalCheckFailedException'; 23 | provisionedThroughputExceeded = 'ProvisionedThroughputExceededException'; 24 | INVALID_ARG = -1; 25 | ERROR = -1; 26 | OK = 0; 27 | -------------------------------------------------------------------------------- /lib/importer.js: -------------------------------------------------------------------------------- 1 | var region = process.env['AWS_REGION']; 2 | if (!region || region === null || region === "") { 3 | region = "us-east-1"; 4 | console.log("AWS Lambda RDS Database Loader using default region " + region); 5 | } 6 | 7 | var aws = require('aws-sdk'); 8 | aws.config.update({ 9 | region : region 10 | }); 11 | var s3 = new aws.S3({ 12 | apiVersion : '2006-03-01', 13 | region : region 14 | }); 15 | var dynamoDB = new aws.DynamoDB({ 16 | apiVersion : '2012-08-10', 17 | region : region 18 | }); 19 | 20 | require('./constants'); 21 | var common = require('./common'); 22 | var kmsCrypto = require('./kmsCrypto'); 23 | kmsCrypto.setRegion(region); 24 | var S3S = require('s3-streams'); 25 | var pgCopy = require('pg-copy-streams').from; 26 | var pg = require('pg'); 27 | var parse = require('csv-parse'); 28 | var Q = require('q'); 29 | var pgp = require('pg-promise')({ promiseLib: Q }); 30 | var db; 31 | var _ = require('underscore'); 32 | var getS3 = Q.nbind(s3.getObject, s3); 33 | var s3Keys = require('./s3-keys'); 34 | 35 | 36 | function Importer(records) { 37 | var _this = this; 38 | _this.records = records; 39 | }; 40 | 41 | Importer.prototype.getS3Records = function(bucket, prefix) { 42 | var _this = this; 43 | var deferred = Q.defer(); 44 | var records = []; 45 | var prefix = 
prefix.replace(/^\/|\/$/g, '') + '/'; 46 | 47 | var s3ListKeys = new s3Keys(s3, 1000); 48 | s3ListKeys.listKeys({ 49 | bucket: bucket, 50 | prefix: prefix 51 | }, function (error, keys) { 52 | if (error) { 53 | deferred.reject(error); 54 | } 55 | 56 | _.each(keys, function (key) { 57 | if(_.last(key).replace(/[\W_]+/g,'').length !== 0) { 58 | records.push({ 59 | s3: { 60 | bucket: { 61 | name: bucket, 62 | }, 63 | object: { 64 | key: key 65 | } 66 | } 67 | }); 68 | } 69 | }); 70 | deferred.resolve(records); 71 | }); 72 | 73 | return deferred.promise; 74 | }, 75 | 76 | Importer.prototype.getTableName = function(key, inputInfo) { 77 | var table = key.split('/'); 78 | if(inputInfo.folderDepthLevelForTableName < 0) { 79 | table.reverse(); 80 | folderDepthLevelForTableName = (inputInfo.folderDepthLevelForTableName * -1) - 1; 81 | } 82 | return inputInfo.schema + '.' + inputInfo.tablePrefix + table[folderDepthLevelForTableName].replace(/[\W]+/g,'_'); 83 | }; 84 | 85 | Importer.prototype.getTableNames = function(inputInfo) { 86 | return new Q.promise(function(resolve, reject) { 87 | if(!inputInfo.useSingleTable) { 88 | for(var i = 0; i < inputInfo.records.length; i++) { 89 | var key = inputInfo.records[i].s3.object.key; 90 | var tableName = Importer.prototype.getTableName(key, inputInfo); 91 | if(inputInfo.tableNames.indexOf(tableName) === -1) { 92 | inputInfo.tableNames.push({ 93 | 'tableName': tableName, 94 | 'key': key 95 | }); 96 | console.log("Resolving table name to: " + tableName); 97 | } 98 | } 99 | } 100 | 101 | resolve(inputInfo); 102 | }); 103 | }; 104 | 105 | Importer.prototype.createTablesIfNotExists = function(inputInfo) { 106 | var createTables = []; 107 | var table = inputInfo.tableNames[0]; 108 | var deferred = Q.defer(); 109 | 110 | var createTableIfNotExists = function(table) { 111 | var createDeferred = Q.defer(); 112 | getS3({Bucket: inputInfo.bucket, Key: table.key}) 113 | .then(function(response) { 114 | parse(response.Body.toString(), {delimiter: 
inputInfo.delimiter}, function(error, output) { 115 | var sql = "CREATE TABLE IF NOT EXISTS " + table.tableName + "("; 116 | var columns = []; 117 | for(var i = 0; i < output[0].length; i++) { 118 | columns.push(output[0][i] + " text"); 119 | } 120 | sql += columns.join(',') + ') WITH (OIDS=FALSE);'; 121 | console.log(sql); 122 | 123 | db.query(sql) 124 | .then(function() { 125 | createDeferred.resolve(); 126 | }) 127 | .catch(function(error) { 128 | createDeferred.reject(error); 129 | }); 130 | }); 131 | }); 132 | return createDeferred.promise; 133 | }; 134 | 135 | for(var i = 0; i < inputInfo.tableNames.length; i++) { 136 | createTables.push(createTableIfNotExists(inputInfo.tableNames[i])); 137 | } 138 | 139 | Q.allSettled(createTables).then(function(response) { 140 | deferred.resolve(inputInfo); 141 | }) 142 | 143 | return deferred.promise; 144 | 145 | }; 146 | 147 | Importer.prototype.updateConfig = function(inputInfo) { 148 | console.log("Found RDS Configuration for " + inputInfo.bucket); 149 | 150 | var config = inputInfo.config.Item; 151 | var decryptMap = Q.nbind(kmsCrypto.decryptMap, kmsCrypto); 152 | var deferred = Q.defer(); 153 | var encryptedItems = { 154 | 'rdsPassword': kmsCrypto.stringToBuffer(config.loadRDS.L[0].M.connectPassword.S) 155 | }; 156 | 157 | return decryptMap(encryptedItems) 158 | .then(function(response) { 159 | inputInfo.tablePrefix = config.loadRDS.L[0].M.tablePrefix.S; 160 | inputInfo.schema = config.loadRDS.L[0].M.targetSchema.S; 161 | inputInfo.folderDepthLevelForTableName = config.folderDepthLevelForTableName.N; 162 | inputInfo.truncateTarget = config.loadRDS.L[0].M.truncateTarget.BOOL; 163 | inputInfo.useSingleTable = config.loadRDS.L[0].M.useSingleTable.BOOL; 164 | inputInfo.connectionString = 'postgres://' + config.loadRDS.L[0].M.connectUser.S + ':' + response.rdsPassword.toString() + '@' 165 | + config.loadRDS.L[0].M.rdsHost.S + ':' + config.loadRDS.L[0].M.rdsPort.N + '/' + config.loadRDS.L[0].M.rdsDB.S; 166 | 167 | db = 
pgp(inputInfo.connectionString); 168 | return inputInfo; 169 | }) 170 | }; 171 | 172 | Importer.prototype.dropTablesIfExists = function(inputInfo) { 173 | var dropTables = []; 174 | var deferred = Q.defer(); 175 | 176 | var dropTableIfExists = function(table) { 177 | var sql = "DROP TABLE IF EXISTS " + table; 178 | console.log(sql); 179 | return db.query(sql); 180 | }; 181 | 182 | if(inputInfo.truncateTarget) { 183 | for(var i = 0; i < inputInfo.tableNames.length; i++) { 184 | dropTables.push(dropTableIfExists(inputInfo.tableNames[i].tableName)); 185 | } 186 | } 187 | 188 | Q.allSettled(dropTables).then(function() { 189 | deferred.resolve(inputInfo); 190 | }); 191 | 192 | return deferred.promise; 193 | }; 194 | 195 | Importer.prototype.runImport = function(inputInfo) { 196 | console.log('Importing data.'); 197 | var deferred = Q.defer(); 198 | var _this = this; 199 | 200 | function importRecords(i, key, columns, tableName) { 201 | return new Q.promise(function(resolve, reject) { 202 | var stream = S3S.ReadStream(s3, {Bucket: inputInfo.bucket, Key: key}); 203 | pg.connect(inputInfo.connectionString, function(error, client) { 204 | if(error) { 205 | reject(error); 206 | } else { 207 | console.log('Loading file:', i+1, 'of', inputInfo.records.length, ' - ', key); 208 | var copySql = "COPY " + tableName + " (" + columns.join(',') + ") FROM STDIN WITH CSV HEADER DELIMITER '" + inputInfo.delimiter + "'"; 209 | 210 | var query = client.query(pgCopy(copySql)); 211 | stream.pipe(query) 212 | .on('end', function () { 213 | client.end(); 214 | i++; 215 | 216 | if(i < inputInfo.records.length) { 217 | fetchRecord(i); 218 | } else { 219 | resolve(inputInfo); 220 | } 221 | }) 222 | .on('error', function(error) { 223 | console.log(error); 224 | console.log(copySql); 225 | reject(error); 226 | }); 227 | } 228 | }); 229 | }); 230 | } 231 | 232 | function fetchRecord(i) { 233 | var key = inputInfo.records[i].s3.object.key; 234 | var tableName = Importer.prototype.getTableName(key, 
inputInfo); 235 | var columns = []; 236 | 237 | getS3({Bucket: inputInfo.bucket, Key: key}) 238 | .then(function(response) { 239 | parse(response.Body.toString(), { delimiter: inputInfo.delimiter, columns: true }, function(error, output) { 240 | if(error) { 241 | deferred.reject(error); 242 | } else { 243 | columns = Object.keys(output[0]); 244 | 245 | var _this = Importer.prototype; 246 | var tableName = _this.getTableName(key, inputInfo); 247 | if(i === 0 && inputInfo.useSingleTable) { 248 | console.log("Resolving table name to: " + tableName); 249 | 250 | inputInfo.tableNames.push({ 251 | 'tableName': tableName, 252 | 'key': key 253 | }); 254 | 255 | _this.dropTablesIfExists(inputInfo) 256 | .then(_this.createTablesIfNotExists) 257 | .then(function(response) { 258 | return importRecords(i, key, columns, tableName); 259 | }) 260 | .catch(function(error) { 261 | deferred.reject(error); 262 | }); 263 | 264 | } else if(inputInfo.useSingleTable) { 265 | tableName = inputInfo.tableNames[0].tableName; 266 | var newColumns = []; 267 | for(var j = 0; j < columns.length; j++) { 268 | var addColumnSql = "ALTER TABLE " + tableName + " ADD COLUMN " + columns[j] + " TEXT NULL"; 269 | newColumns.push(db.query(addColumnSql)); 270 | } 271 | 272 | Q.allSettled(newColumns) 273 | .then(function() { 274 | return importRecords(i, key, columns, tableName); 275 | }); 276 | 277 | 278 | } else { 279 | return importRecords(i, key, columns, tableName); 280 | } 281 | } 282 | }); 283 | }); 284 | } 285 | 286 | fetchRecord(0); 287 | return deferred.promise; 288 | }; 289 | 290 | Importer.prototype.getConfig = function() { 291 | var _this = this; 292 | return new Q.promise(function(resolve, reject) { 293 | var getDynamoItem = Q.nbind(dynamoDB.getItem, dynamoDB); 294 | var inputInfo = { 295 | bucket: _this.records[0].s3.bucket.name, 296 | key: _this.records[0].s3.object.key, 297 | schema: 'public', 298 | folderDepthLevelForTableName: 0, 299 | tableNames: [], 300 | connectionString: null, 301 | 
delimiter: ',', 302 | truncateTarget: false, 303 | tablePrefix: '', 304 | useSingleTable: false, 305 | sourceColumn: 'rds_loader_source_file', 306 | records: _this.records 307 | }; 308 | 309 | // load the configuration for this prefix 310 | var dynamoLookup = { 311 | Key : { 312 | s3Prefix : { 313 | S : inputInfo.bucket 314 | } 315 | }, 316 | TableName : configTable, 317 | ConsistentRead : true 318 | }; 319 | 320 | return getDynamoItem(dynamoLookup) 321 | .then(function(response) { 322 | inputInfo.config = response; 323 | resolve(inputInfo); 324 | }) 325 | .catch(function(error) { 326 | reject(error); 327 | }); 328 | }); 329 | }; 330 | 331 | Importer.prototype.run = function() { 332 | var _this = this; 333 | _this.getConfig() 334 | .then(_this.updateConfig) 335 | .then(_this.getTableNames) 336 | .then(_this.dropTablesIfExists) 337 | .then(_this.createTablesIfNotExists) 338 | .then(_this.runImport) 339 | .catch(function(error) { 340 | console.log(error.stack); 341 | context.done(error); 342 | }) 343 | .finally(function() { 344 | pgp.end(); 345 | context.done(); 346 | }); 347 | }; 348 | 349 | module.exports = Importer; 350 | -------------------------------------------------------------------------------- /lib/kmsCrypto.js: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | 4 | Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at 5 | 6 | http://aws.amazon.com/asl/ 7 | 8 | or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and limitations under the License. 
9 | */ 10 | var useRegion = undefined; 11 | var aws = require('aws-sdk'); 12 | var async = require('async'); 13 | var kms = undefined; 14 | 15 | var authContext = { 16 | module : "AWSLambdaRedshiftLoader", 17 | region : null 18 | }; 19 | 20 | // module key alias to be used for this application 21 | var moduleKeyName = "alias/LambaRedshiftLoaderKey"; 22 | 23 | var setRegion = function(region) { 24 | if (!region) { 25 | useRegion = process.env['AWS_REGION']; 26 | 27 | if (!useRegion) { 28 | useRegion = 'us-east-1'; 29 | console.log("AWS KMS using default region " + useRegion); 30 | } 31 | } else { 32 | useRegion = region; 33 | } 34 | 35 | aws.config.update({ 36 | region : useRegion 37 | }); 38 | kms = new aws.KMS({ 39 | apiVersion : '2014-11-01', 40 | region : useRegion 41 | }); 42 | authContext.region = useRegion; 43 | }; 44 | exports.setRegion = setRegion; 45 | 46 | /** 47 | * Retrieves or creates the master key metadata for this module
48 | * Parameters: 49 | *
  • callback(err,KeyMetadata) err - errors generated while getting or 50 | * creating the key
  • 51 | *
  • KeyMetadata - KMS Key Metadata including ID and ARN for this module's 52 | * master key
  • 53 | */ 54 | var getOrCreateMasterKey = function(callback) { 55 | kms.describeKey({ 56 | KeyId : moduleKeyName 57 | }, function(err, data) { 58 | if (err) { 59 | if (err.code === 'InvalidArnException' || err.code === 'NotFoundException') { 60 | // master key for the module doesn't exist, so 61 | // create it 62 | var createKeyParams = { 63 | Description : "Lambda Redshift Loader Master Encryption Key", 64 | KeyUsage : 'ENCRYPT_DECRYPT' 65 | }; 66 | 67 | // create the master key for this module and 68 | // bind an alias to it 69 | kms.createKey(createKeyParams, function(err, createKeyData) { 70 | if (err) { 71 | console.log("Error during Master Key creation"); 72 | return callback(err); 73 | } else { 74 | // create an alias for 75 | // the master key 76 | var createAliasParams = { 77 | AliasName : moduleKeyName, 78 | TargetKeyId : createKeyData.KeyMetadata.KeyId 79 | }; 80 | kms.createAlias(createAliasParams, function(err, createAliasData) { 81 | if (err) { 82 | console.log("Error during creation of Alias " + moduleKeyName + " for Master Key " + createKeyData.KeyMetadata.Arn); 83 | return callback(err); 84 | } else { 85 | // invoke 86 | // the 87 | // callback 88 | return callback(undefined, createKeyData.KeyMetadata); 89 | } 90 | }); 91 | } 92 | }); 93 | } else { 94 | // got an unknown error while describing the key 95 | console.log("Unknown Error during Customer Master Key describe"); 96 | return callback(err); 97 | } 98 | } else { 99 | // ok - we got the previously generated key, so 100 | // callback 101 | return callback(undefined, data.KeyMetadata); 102 | } 103 | }); 104 | }; 105 | exports.getOrCreateMasterKey = getOrCreateMasterKey; 106 | 107 | /** 108 | * Function which encrypts a value using the module's master key
    109 | * Parameters: 110 | *
  • toEncrypt - value to be encrypted
  • 111 | *
  • callback(err, encrypted) - function invoked once encryption is completed
  • 112 | */ 113 | var encrypt = function(toEncrypt, callback) { 114 | // get the master key 115 | getOrCreateMasterKey(function(err, keyMetadata) { 116 | if (err) { 117 | console.log("Error during resolution of Customer Master Key"); 118 | return callback(err); 119 | } else { 120 | // encrypt the data 121 | var params = { 122 | KeyId : keyMetadata.KeyId, 123 | Plaintext : new Buffer(toEncrypt), 124 | EncryptionContext : authContext 125 | }; 126 | kms.encrypt(params, function(err, encryptData) { 127 | if (err) { 128 | console.log("Error during Encryption"); 129 | return callback(err); 130 | } else { 131 | return callback(undefined, encryptData.CiphertextBlob); 132 | } 133 | }); 134 | } 135 | }); 136 | }; 137 | exports.encrypt = encrypt; 138 | 139 | /** 140 | * Function which does a blocking encryption of the array of values. Invokes the 141 | * afterEncryption callback after all values in the input array have been 142 | * encrypted
    143 | * Parameters: 144 | *
  • plaintextArray - Array of plaintext input values
  • 145 | *
  • afterDecryptionCallback - function invoked once encryption has been 146 | * completed
  • 147 | */ 148 | var encryptAll = function(plaintextArray, afterEncryptionCallback) { 149 | async.map(plaintextArray, function(item, callback) { 150 | // decrypt the value using internal decrypt 151 | encrypt(item, function(err, ciphertext) { 152 | return callback(err, ciphertext); 153 | }); 154 | }, function(err, results) { 155 | // call the after encryption callback with the result array 156 | return afterEncryptionCallback(err, results); 157 | }); 158 | }; 159 | exports.encryptAll = encryptAll; 160 | 161 | var encryptMap = function(valueMap, afterEncryptionCallback) { 162 | var encryptedValueMap = {}; 163 | 164 | async.each(Object.keys(valueMap), function(key, callback) { 165 | encrypt(valueMap[key], function(err, ciphertext) { 166 | if (err) { 167 | callback(err); 168 | } else { 169 | encryptedValueMap[key] = ciphertext; 170 | callback(); 171 | } 172 | }); 173 | }, function(err) { 174 | // call the after decryption callback with the result data 175 | return afterEncryptionCallback(err, encryptedValueMap); 176 | }); 177 | }; 178 | exports.encryptMap = encryptMap; 179 | 180 | /** 181 | * Function to decrypt a value using the module's master key
    182 | * Parameters: 183 | *
  • toDecrypt - value to be decrypted
  • 184 | *
  • callback(err, decrypted) - Callback to be invoked after decryption which 185 | * receives the decrypted value, and errors that were generated
  • 186 | */ 187 | var decrypt = function(encryptedCiphertext, callback) { 188 | var params = { 189 | CiphertextBlob : encryptedCiphertext, 190 | EncryptionContext : authContext 191 | }; 192 | 193 | kms.decrypt(params, function(err, decryptData) { 194 | if (err) { 195 | console.log("Error during Decryption"); 196 | return callback(err); 197 | } else { 198 | if (!decryptData) { 199 | console.log("Failed to decrypt ciphertext"); 200 | return callback(undefined); 201 | } else { 202 | return callback(undefined, decryptData.Plaintext); 203 | } 204 | } 205 | }); 206 | }; 207 | exports.decrypt = decrypt; 208 | 209 | /** 210 | * Function which does a blocking decryption of the array of values. Invokes the 211 | * afterDecryption callback after all values in the input array have been 212 | * decrypted
    213 | * Parameters: 214 | *
  • encryptedArray - Array of encrypted input values
  • 215 | *
  • afterDecryptionCallback - function invoked once decryption has been 216 | * completed
  • 217 | */ 218 | var decryptAll = function(encryptedArray, afterDecryptionCallback) { 219 | async.map(encryptedArray, function(item, callback) { 220 | // decrypt the value using internal decrypt 221 | decrypt(item, function(err, plaintext) { 222 | return callback(err, plaintext); 223 | }); 224 | }, function(err, results) { 225 | // call the after decryption callback with the result array 226 | return afterDecryptionCallback(err, results); 227 | }); 228 | }; 229 | exports.decryptAll = decryptAll; 230 | 231 | var decryptMap = function(encryptedValueMap, afterDecryptionCallback) { 232 | var decryptedValueMap = {}; 233 | 234 | async.each(Object.keys(encryptedValueMap), function(key, callback) { 235 | // decrypt the value using internal decrypt 236 | decrypt(encryptedValueMap[key], function(err, plaintext) { 237 | if (err) { 238 | console.log(JSON.stringify(err)); 239 | callback(err); 240 | } else { 241 | decryptedValueMap[key] = plaintext; 242 | callback(); 243 | } 244 | }); 245 | }, function(err) { 246 | // call the after decryption callback with the result data 247 | return afterDecryptionCallback(err, decryptedValueMap); 248 | }); 249 | }; 250 | exports.decryptMap = decryptMap; 251 | 252 | var bufferToString = function(buffer) { 253 | return JSON.stringify(buffer); 254 | }; 255 | exports.bufferToString = bufferToString; 256 | 257 | var stringToBuffer = function(stringBuffer) { 258 | return new Buffer(JSON.parse(stringBuffer)); 259 | }; 260 | exports.stringToBuffer = stringToBuffer; 261 | 262 | var toLambdaStringFormat = function(buffer) { 263 | // only extract the numeric array from the specified buffer 264 | // buffer implementation varies between node .10 and .12, so this method 265 | // will ensure 266 | // that the buffer is stored in a format that Lambda can rehydrate (v0.10 267 | // format) 268 | var regex = /.*(\[.*\]).*/; 269 | var stringValue = bufferToString(buffer); 270 | 271 | if (stringValue === null) { 272 | return null; 273 | } else { 274 | var 
matches = regex.exec(stringValue); 275 | if (matches && matches.length > 0) { 276 | return matches[1]; 277 | } else { 278 | return stringValue; 279 | } 280 | } 281 | }; 282 | exports.toLambdaStringFormat = toLambdaStringFormat; -------------------------------------------------------------------------------- /lib/s3-keys.js: -------------------------------------------------------------------------------- 1 | /* Work around for 1000 file limitation on getObjects */ 2 | var _ = require('underscore'); 3 | 4 | function s3Keys(s3, maxKeys) { 5 | this.s3 = s3; 6 | this.maxKeys = maxKeys; 7 | }; 8 | 9 | s3Keys.prototype.listKeys = function(options, callback) { 10 | var _this = this; 11 | var keys = []; 12 | function listKeysRecusively (marker) { 13 | options.marker = marker; 14 | _this.listKeyPage(options, 15 | function (error, nextMarker, keyset) { 16 | if (error) { 17 | return callback(error, keys); 18 | } 19 | 20 | keys = keys.concat(keyset); 21 | 22 | if (nextMarker) { 23 | listKeysRecusively(nextMarker); 24 | } else { 25 | callback(null, keys); 26 | } 27 | }); 28 | } 29 | // Start the recursive listing at the beginning, with no marker. 30 | listKeysRecusively(); 31 | }; 32 | 33 | s3Keys.prototype.listKeyPage = function(options, callback) { 34 | var _this = this; 35 | var params = { 36 | Bucket : options.bucket, 37 | Delimiter: options.delimiter, 38 | Marker : options.marker, 39 | MaxKeys : _this.maxKeys, 40 | Prefix : options.prefix 41 | }; 42 | 43 | _this.s3.listObjects(params, function (error, response) { 44 | if (error) { 45 | return callback(error); 46 | } else if (response.err) { 47 | return callback(new Error(response.err)); 48 | } 49 | 50 | // Convert the results into an array of key strings, or 51 | // common prefixes if we're using a delimiter. 
52 | var keys; 53 | if (options.delimiter) { 54 | // Note that if you set MaxKeys to 1 you can see some interesting 55 | // behavior in which the first response has no response.CommonPrefix 56 | // values, and so we have to skip over that and move on to the 57 | // next page. 58 | keys = _.map(response.CommonPrefixes, function (item) { 59 | return item.Prefix; 60 | }); 61 | } else { 62 | keys = _.map(response.Contents, function (item) { 63 | return item.Key; 64 | }); 65 | } 66 | 67 | // Check to see if there are yet more keys to be obtained, and if so 68 | // return the marker for use in the next request. 69 | var nextMarker; 70 | if (response.IsTruncated) { 71 | if (options.delimiter) { 72 | // If specifying a delimiter, the response.NextMarker field exists. 73 | nextMarker = response.NextMarker; 74 | } else { 75 | // For normal listing, there is no response.NextMarker 76 | // and we must use the last key instead. 77 | nextMarker = keys[keys.length - 1]; 78 | } 79 | } 80 | 81 | callback(null, nextMarker, keys); 82 | }); 83 | } 84 | 85 | module.exports = s3Keys; -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "lambda-rds-loader", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "async": "^1.5.0", 13 | "commander": "^2.9.0", 14 | "csv-parse": "^1.0.1", 15 | "node-uuid": "^1.4.7", 16 | "pg": "^4.4.3", 17 | "pg-copy-streams": "^0.3.0", 18 | "pg-promise": "^2.8.5", 19 | "q": "^1.4.1", 20 | "s3-streams": "^0.3.0", 21 | "underscore": "^1.8.3" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /run.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 
3 | var url = require("url"); 4 | var program = require('commander'); 5 | var Importer = require('./lib/importer'); 6 | 7 | program 8 | .version('0.0.1') 9 | .option('-p, --path', 'Enter s3 bucket and prefix in format s3://bucket/prefix') 10 | .parse(process.argv); 11 | 12 | if(program.path) { 13 | var path = url.parse(program.args[0]); 14 | var importer = new Importer([]); 15 | importer.getS3Records(path.host, path.pathname) 16 | .then(function(records) { 17 | importer.records = records; 18 | console.log('Found', records.length, 'records.'); 19 | importer.run(); 20 | }) 21 | .catch(function(error) { 22 | console.log(error); 23 | }); 24 | 25 | } else { 26 | console.log('Enter s3 bucket and prefix in format s3://bucket/prefix'); 27 | process.exit(1); 28 | } 29 | -------------------------------------------------------------------------------- /setup.js: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | 4 | Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at 5 | 6 | http://aws.amazon.com/asl/ 7 | 8 | or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and limitations under the License. 
9 | */ 10 | 11 | /** 12 | * Ask questions of the end user via STDIN and then setup the DynamoDB table 13 | * entry for the configuration when done 14 | */ 15 | var pjson = require('./package.json'); 16 | var readline = require('readline'); 17 | var aws = require('aws-sdk'); 18 | require('./lib/constants'); 19 | var common = require('./lib/common'); 20 | var async = require('async'); 21 | var uuid = require('node-uuid'); 22 | var dynamoDB; 23 | var kmsCrypto = require('./lib/kmsCrypto'); 24 | var setRegion; 25 | 26 | dynamoConfig = { 27 | TableName : configTable, 28 | Item : { 29 | currentBatch : { 30 | S : uuid.v4() 31 | }, 32 | version : { 33 | S : pjson.version 34 | }, 35 | loadRDS : { 36 | L : [ { 37 | M : { 38 | 39 | } 40 | } ] 41 | } 42 | } 43 | }; 44 | 45 | /* configuration of question prompts and config assignment */ 46 | var rl = readline.createInterface({ 47 | input : process.stdin, 48 | output : process.stdout 49 | }); 50 | 51 | var qs = []; 52 | 53 | q_region = function(callback) { 54 | rl.question('Enter the Region for the Configuration > ', function(answer) { 55 | if (common.blank(answer) !== null) { 56 | common.validateArrayContains([ "ap-northeast-1", "ap-southeast-1", "ap-southeast-2", "eu-central-1", "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2" ], 57 | answer.toLowerCase(), rl); 58 | 59 | setRegion = answer.toLowerCase(); 60 | 61 | // configure dynamo db and kms for the correct region 62 | dynamoDB = new aws.DynamoDB({ 63 | apiVersion : '2012-08-10', 64 | region : setRegion 65 | }); 66 | kmsCrypto.setRegion(setRegion); 67 | 68 | callback(null); 69 | } 70 | }); 71 | }; 72 | 73 | q_s3Prefix = function(callback) { 74 | rl.question('Enter the S3 Bucket > ', function(answer) { 75 | common.validateNotNull(answer, 'You Must Provide an S3 Bucket Name', rl); 76 | 77 | // setup prefix to be * if one was not provided 78 | var stripped = answer.replace(new RegExp('s3://', 'g'), ''); 79 | var elements = stripped.split("/"); 80 | var setPrefix 
= undefined; 81 | 82 | if (elements.length === 1) { 83 | // bucket only so use "bucket" alone 84 | setPrefix = elements[0]; 85 | } else { 86 | // right trim "/" 87 | setPrefix = stripped.replace(/\/$/, ''); 88 | } 89 | 90 | dynamoConfig.Item.s3Prefix = { 91 | S : setPrefix 92 | }; 93 | 94 | callback(null); 95 | }); 96 | }; 97 | 98 | q_filenameFilter = function(callback) { 99 | rl.question('Enter a Filename Filter Regex > ', function(answer) { 100 | if (common.blank(answer) !== null) { 101 | dynamoConfig.Item.filenameFilterRegex = { 102 | S : answer 103 | }; 104 | } 105 | callback(null); 106 | }); 107 | }; 108 | 109 | q_rdsHost = function(callback) { 110 | rl.question('Enter the RDS Host > ', function(answer) { 111 | common.validateNotNull(answer, 'You Must Provide an RDS Host', rl); 112 | dynamoConfig.Item.loadRDS.L[0].M.rdsHost = { 113 | S : answer 114 | }; 115 | callback(null); 116 | }); 117 | }; 118 | 119 | q_rdsPort = function(callback) { 120 | rl.question('Enter the RDS Port > ', function(answer) { 121 | dynamoConfig.Item.loadRDS.L[0].M.rdsPort = { 122 | N : '' + common.getIntValue(answer, rl) 123 | }; 124 | callback(null); 125 | }); 126 | }; 127 | 128 | q_rdsDB = function(callback) { 129 | rl.question('Enter the Database Name > ', function(answer) { 130 | if (common.blank(answer) !== null) { 131 | dynamoConfig.Item.loadRDS.L[0].M.rdsDB = { 132 | S : answer 133 | }; 134 | } 135 | callback(null); 136 | }); 137 | }; 138 | 139 | q_userName = function(callback) { 140 | rl.question('Enter the Database Username > ', function(answer) { 141 | common.validateNotNull(answer, 'You Must Provide a Username', rl); 142 | dynamoConfig.Item.loadRDS.L[0].M.connectUser = { 143 | S : answer 144 | }; 145 | callback(null); 146 | }); 147 | }; 148 | 149 | q_userPwd = function(callback) { 150 | rl.question('Enter the Database Password > ', function(answer) { 151 | common.validateNotNull(answer, 'You Must Provide a Password', rl); 152 | 153 | kmsCrypto.encrypt(answer, function(err, 
ciphertext) { 154 | if (err) { 155 | console.log(JSON.stringify(err)); 156 | process.exit(ERROR); 157 | } else { 158 | dynamoConfig.Item.loadRDS.L[0].M.connectPassword = { 159 | S : kmsCrypto.toLambdaStringFormat(ciphertext) 160 | }; 161 | callback(null); 162 | } 163 | }); 164 | }); 165 | }; 166 | 167 | q_schema = function(callback) { 168 | rl.question('Enter the Schema (optional) > ', function(answer) { 169 | if (answer && answer !== null && answer !== "") { 170 | dynamoConfig.Item.loadRDS.L[0].M.targetSchema = { 171 | S : answer 172 | }; 173 | callback(null); 174 | } else { 175 | callback(null); 176 | } 177 | }); 178 | }; 179 | 180 | q_tablePrefix = function(callback) { 181 | rl.question('Enter table prefix (optional but recommended if keys start with numericals) > ', function(answer) { 182 | if (answer && answer !== null && answer !== "") { 183 | dynamoConfig.Item.loadRDS.L[0].M.tablePrefix = { 184 | S : answer 185 | }; 186 | callback(null); 187 | } else { 188 | callback(null); 189 | } 190 | }); 191 | }; 192 | 193 | q_folderDepthLevelForTableName = function(callback) { 194 | rl.question('Enter the folder depth from bucket root to use as table name. Use negative index to select from the input file > ', function(answer) { 195 | dynamoConfig.Item.folderDepthLevelForTableName = { 196 | N : '' + common.getIntValue(answer, rl) 197 | }; 198 | callback(null); 199 | }); 200 | }; 201 | 202 | q_truncateTable = function(callback) { 203 | rl.question('Should the Table be Truncated before Load? 
(Y/N) > ', function(answer) { 204 | dynamoConfig.Item.loadRDS.L[0].M.truncateTarget = { 205 | BOOL : common.getBooleanValue(answer) 206 | }; 207 | callback(null); 208 | }); 209 | }; 210 | 211 | q_csvDelimiter = function(callback) { 212 | //if (dynamoConfig.Item.dataFormat.S === 'CSV') { 213 | rl.question('Enter the CSV Delimiter > ', function(answer) { 214 | common.validateNotNull(answer, 'You Must the Delimiter for CSV Input', rl); 215 | dynamoConfig.Item.csvDelimiter = { 216 | S : answer 217 | }; 218 | callback(null); 219 | }); 220 | //} else { 221 | // callback(null); 222 | //} 223 | }; 224 | 225 | last = function(callback) { 226 | rl.close(); 227 | 228 | setup(null, callback); 229 | }; 230 | 231 | q_useSingleTable = function(callback) { 232 | rl.question('Should the load utilize a single table for loads? (Y/N) > ', function(answer) { 233 | dynamoConfig.Item.loadRDS.L[0].M.useSingleTable = { 234 | BOOL : common.getBooleanValue(answer) 235 | }; 236 | callback(null); 237 | }); 238 | }; 239 | 240 | setup = function(overrideConfig, callback) { 241 | // set which configuration to use 242 | var useConfig = undefined; 243 | if (overrideConfig) { 244 | useConfig = overrideConfig; 245 | } else { 246 | useConfig = dynamoConfig; 247 | } 248 | var configWriter = common.writeConfig(setRegion, dynamoDB, useConfig, callback); 249 | common.createTables(dynamoDB, configWriter); 250 | }; 251 | // export the setup module so that customers can programmatically add new 252 | // configurations 253 | exports.setup = setup; 254 | 255 | qs.push(q_region); 256 | qs.push(q_s3Prefix); 257 | qs.push(q_filenameFilter); 258 | qs.push(q_rdsHost); 259 | qs.push(q_rdsPort); 260 | qs.push(q_rdsDB); 261 | qs.push(q_schema); 262 | qs.push(q_tablePrefix); 263 | qs.push(q_useSingleTable); 264 | qs.push(q_folderDepthLevelForTableName); 265 | qs.push(q_truncateTable); 266 | qs.push(q_userName); 267 | qs.push(q_userPwd); 268 | qs.push(q_csvDelimiter); 269 | 270 | // always have to have the 'last' 
function added to halt the readline channel 271 | // and run the setup 272 | qs.push(last); 273 | 274 | // call the first function in the function list, to invoke the callback 275 | // reference chain 276 | async.waterfall(qs); 277 | --------------------------------------------------------------------------------