├── index.js ├── .gitignore ├── test ├── config.json ├── EscapeTest.coffee ├── MultiQueryTest.coffee ├── MultiExecuteTest.coffee ├── data.csv └── QueryTest.coffee ├── samples ├── execute.coffee ├── pipe.coffee ├── query.coffee ├── style_sugar.js └── style_native.js ├── lib ├── 0.7.1-cdh3u1 │ ├── fb303_types.js │ ├── serde_types.js │ ├── hive_service_types.js │ └── queryplan_types.js ├── 0.7.1-cdh3u2 │ ├── fb303_types.js │ ├── serde_types.js │ └── hive_service_types.js ├── 0.7.1-cdh3u3 │ ├── fb303_types.js │ ├── serde_types.js │ └── hive_service_types.js └── hive.js ├── Makefile ├── package.json ├── LICENSE ├── bin └── generate ├── src ├── 0.7.1-cdh3u1 │ ├── serde.thrift │ ├── queryplan.thrift │ ├── fb303.thrift │ ├── hive_service.thrift │ └── hive_metastore.thrift ├── 0.7.1-cdh3u2 │ ├── serde.thrift │ ├── queryplan.thrift │ ├── fb303.thrift │ ├── hive_service.thrift │ └── hive_metastore.thrift ├── 0.7.1-cdh3u3 │ ├── serde.thrift │ ├── queryplan.thrift │ ├── fb303.thrift │ ├── hive_service.thrift │ └── hive_metastore.thrift └── hive.coffee └── README.md /index.js: -------------------------------------------------------------------------------- 1 | 2 | module.exports = require('./lib/hive'); 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | /npm-debug.log 3 | /node_modules/ 4 | /samples/pipe.out 5 | /bin/cloud9 6 | !.gitignore 7 | -------------------------------------------------------------------------------- /test/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.7.1-cdh3u2", 3 | "server": "127.0.0.1", 4 | "port": 10000, 5 | "timeout": 1000, 6 | "db": "test_database", 7 | "table": "test_table" 8 | } -------------------------------------------------------------------------------- /samples/execute.coffee: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | hive = require 'thrift-hive' 4 | # Client connection 5 | client = hive.createClient 6 | version: '0.7.1-cdh3u2' 7 | server: '127.0.0.1' 8 | port: 10000 9 | timeout: 1000 10 | # Execute 11 | client.execute 'USE default', (err) -> 12 | console.log err.message if err 13 | client.end() 14 | -------------------------------------------------------------------------------- /lib/0.7.1-cdh3u1/fb303_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.fb_status = { 9 | 'DEAD' : 0, 10 | 'STARTING' : 1, 11 | 'ALIVE' : 2, 12 | 'STOPPING' : 3, 13 | 'STOPPED' : 4, 14 | 'WARNING' : 5 15 | }; 16 | -------------------------------------------------------------------------------- /lib/0.7.1-cdh3u2/fb303_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.fb_status = { 9 | 'DEAD' : 0, 10 | 'STARTING' : 1, 11 | 'ALIVE' : 2, 12 | 'STOPPING' : 3, 13 | 'STOPPED' : 4, 14 | 'WARNING' : 5 15 | }; 16 | 
-------------------------------------------------------------------------------- /lib/0.7.1-cdh3u3/fb303_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.fb_status = { 9 | 'DEAD' : 0, 10 | 'STARTING' : 1, 11 | 'ALIVE' : 2, 12 | 'STOPPING' : 3, 13 | 'STOPPED' : 4, 14 | 'WARNING' : 5 15 | }; 16 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | REPORTER = dot 2 | 3 | build: 4 | @./node_modules/.bin/coffee -b -o lib src/*.coffee 5 | 6 | test: build 7 | @NODE_ENV=test ./node_modules/.bin/mocha --compilers coffee:coffee-script \ 8 | --reporter $(REPORTER) 9 | 10 | coverage: build 11 | @jscoverage --no-highlight lib lib-cov 12 | @rm -rf lib-cov/*-* 13 | @cp -rp lib/*-* lib-cov 14 | @HIVE_COV=1 $(MAKE) test REPORTER=html-cov > doc/coverage.html 15 | @rm -rf lib-cov 16 | 17 | .PHONY: test 18 | -------------------------------------------------------------------------------- /samples/pipe.coffee: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env coffee 2 | 3 | fs = require 'fs' 4 | hive = require 'thrift-hive' 5 | # Client connection 6 | client = hive.createClient 7 | version: '0.7.1-cdh3u2' 8 | server: '127.0.0.1' 9 | port: 10000 10 | timeout: 1000 11 | # Execute query 12 | client.query('show tables') 13 | .on 'row', (database) -> 14 | this.emit 'data', 'Found ' + database + '\n' 15 | .on 'error', (err) -> 16 | client.end() 17 | .on 'end', () -> 18 | client.end() 19 | .pipe( fs.createWriteStream "#{__dirname}/pipe.out" ) 20 | -------------------------------------------------------------------------------- /samples/query.coffee: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env coffee 2 | 3 | assert = require 'assert' 4 | hive = require "#{__dirname}/.." 
5 | 6 | client = hive.createClient() 7 | 8 | client.execute 'use test_database', (err) -> 9 | assert.ifError err 10 | query = client.query('select * from test_table limit 10', 10) 11 | .on 'row', (row) -> 12 | query.pause() 13 | setTimeout -> 14 | console.log row 15 | query.resume() 16 | , 100 17 | .on 'error', (err) -> 18 | .on 'end', () -> 19 | console.log err.message if err 20 | client.end() 21 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "thrift-hive", 3 | "description": "Hive client using the Apache Thrift RPC system", 4 | "version": "0.0.7", 5 | "author": "David Worms", 6 | "contributors": [ 7 | { "name": "David Worms", "email": "david@adaltas.com" } 8 | ], 9 | "dependencies": { 10 | "thrift": "latest", 11 | "each": "latest" 12 | }, 13 | "devDependencies": { 14 | "coffee-script": "latest", 15 | "mocha": "latest", 16 | "should": "latest" 17 | }, 18 | "keywords": ["hive", "nosql", "hadoop"], 19 | "main": "index", 20 | "engines": { "node": ">= 0.4.7" } 21 | } -------------------------------------------------------------------------------- /samples/style_sugar.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | var hive = require('thrift-hive'); 4 | // Client connection 5 | var client = hive.createClient({ 6 | version: '0.7.1-cdh3u2', 7 | server: '127.0.0.1', 8 | port: 10000, 9 | timeout: 1000 10 | }); 11 | // Execute query 12 | client.execute('use default', function(err){ 13 | client.query('show tables') 14 | .on('row', function(database){ 15 | console.log(database); 16 | }) 17 | .on('error', function(err){ 18 | console.log(err.message); 19 | client.end(); 20 | }) 21 | .on('end', function(){ 22 | client.end(); 23 | }); 24 | }); 25 | -------------------------------------------------------------------------------- /samples/style_native.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | var assert = require('assert'); 4 | var thrift = require('thrift'); 5 | var transport = require('thrift/lib/thrift/transport'); 6 | var ThriftHive = require('../lib/0.7.1-cdh3u2/ThriftHive'); 7 | // Client connection 8 | var options = {transport: transport.TBufferedTransport, timeout: 1000}; 9 | var connection = thrift.createConnection('127.0.0.1', 10000, options); 10 | var client = thrift.createClient(ThriftHive, connection); 11 | // Execute query 12 | client.execute('use default', function(err){ 13 | client.execute('show tables', function(err){ 14 | assert.ifError(err); 15 | client.fetchAll(function(err, databases){ 16 | if(err){ 17 | console.log(err.message); 18 | }else{ 19 | console.log(databases); 20 | } 21 | connection.end(); 22 | }); 23 | }); 24 | }); -------------------------------------------------------------------------------- /test/EscapeTest.coffee: -------------------------------------------------------------------------------- 1 | 2 | should = require 'should' 3 | config = require './config' 4 | hive = if process.env.HIVE_COV then require '../lib-cov/hive' else require '../lib/hive' 5 | 6 | client = null 7 | before -> 8 | client = hive.createClient config 9 | after -> 10 | client.end() 11 | 12 | describe 'escape', -> 13 | it 'should honor "--" and "/* */"', (next) -> 14 | count_before = 0 15 | count_row = 0 16 | client.multi_query(""" 17 | -- 18 | create db 19 | -- 20 | CREATE DATABASE IF NOT EXISTS #{config.db}; 
21 | /* 22 | create table 23 | -- with some dash 24 | CREATE TABLE IF NOT EXISTS #{config.table} ( 25 | a_bigint BIGINT, 26 | an_int INT, 27 | a_date STRING 28 | ) 29 | ROW FORMAT DELIMITED 30 | FIELDS TERMINATED BY ','; 31 | -- load data 32 | LOAD DATA LOCAL INPATH '#{__dirname}/data.csv' OVERWRITE INTO TABLE #{config.table}; 33 | -- return data 34 | SELECT * FROM #{config.table}; 35 | */ 36 | show databases; 37 | """) 38 | .on 'before', (query) -> 39 | count_before++ 40 | .on 'row', (row) -> 41 | count_row++ 42 | .on 'error', (err) -> 43 | should.not.exist err 44 | .on 'end', (query) -> 45 | count_before.should.eql 2 46 | query.should.eql "show databases" 47 | next() 48 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Software License Agreement (BSD License) 2 | ======================================== 3 | Copyright (c) 2011, SARL Adaltas. 4 | 5 | All rights reserved. 6 | 7 | Redistribution and use of this software in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 8 | 9 | - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 10 | 11 | - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 12 | 13 | - Neither the name of SARL Adaltas nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission of the SARL Adaltas. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
16 | -------------------------------------------------------------------------------- /lib/0.7.1-cdh3u1/serde_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.SERIALIZATION_LIB = 'serialization.lib'; 9 | ttypes.SERIALIZATION_CLASS = 'serialization.class'; 10 | ttypes.SERIALIZATION_FORMAT = 'serialization.format'; 11 | ttypes.SERIALIZATION_DDL = 'serialization.ddl'; 12 | ttypes.SERIALIZATION_NULL_FORMAT = 'serialization.null.format'; 13 | ttypes.SERIALIZATION_LAST_COLUMN_TAKES_REST = 'serialization.last.column.takes.rest'; 14 | ttypes.SERIALIZATION_SORT_ORDER = 'serialization.sort.order'; 15 | ttypes.SERIALIZATION_USE_JSON_OBJECTS = 'serialization.use.json.object'; 16 | ttypes.FIELD_DELIM = 'field.delim'; 17 | ttypes.COLLECTION_DELIM = 'colelction.delim'; 18 | ttypes.LINE_DELIM = 'line.delim'; 19 | ttypes.MAPKEY_DELIM = 'mapkey.delim'; 20 | ttypes.QUOTE_CHAR = 'quote.delim'; 21 | ttypes.ESCAPE_CHAR = 'escape.delim'; 22 | ttypes.VOID_TYPE_NAME = 'void'; 23 | ttypes.BOOLEAN_TYPE_NAME = 'boolean'; 24 | ttypes.TINYINT_TYPE_NAME = 'tinyint'; 25 | ttypes.SMALLINT_TYPE_NAME = 'smallint'; 26 | ttypes.INT_TYPE_NAME = 'int'; 27 | ttypes.BIGINT_TYPE_NAME = 'bigint'; 28 | ttypes.FLOAT_TYPE_NAME = 'float'; 29 | ttypes.DOUBLE_TYPE_NAME = 'double'; 30 | ttypes.STRING_TYPE_NAME = 'string'; 31 | ttypes.DATE_TYPE_NAME = 'date'; 32 | ttypes.DATETIME_TYPE_NAME = 'datetime'; 33 | ttypes.TIMESTAMP_TYPE_NAME = 'timestamp'; 34 | ttypes.LIST_TYPE_NAME = 'array'; 35 | ttypes.MAP_TYPE_NAME = 'map'; 36 | ttypes.STRUCT_TYPE_NAME = 'struct'; 37 | ttypes.UNION_TYPE_NAME = 'uniontype'; 38 | ttypes.LIST_COLUMNS = 'columns'; 39 | ttypes.LIST_COLUMN_TYPES = 'columns.types'; 40 | ttypes.PrimitiveTypes = ['void','boolean','tinyint','smallint','int','bigint','float','double','string','date','datetime','timestamp']; 41 | ttypes.CollectionTypes = ['array','map']; 42 | -------------------------------------------------------------------------------- /lib/0.7.1-cdh3u2/serde_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.SERIALIZATION_LIB = 'serialization.lib'; 9 | ttypes.SERIALIZATION_CLASS = 'serialization.class'; 10 | ttypes.SERIALIZATION_FORMAT = 'serialization.format'; 11 | ttypes.SERIALIZATION_DDL = 'serialization.ddl'; 12 | ttypes.SERIALIZATION_NULL_FORMAT = 'serialization.null.format'; 13 | ttypes.SERIALIZATION_LAST_COLUMN_TAKES_REST = 'serialization.last.column.takes.rest'; 14 | ttypes.SERIALIZATION_SORT_ORDER = 'serialization.sort.order'; 15 | ttypes.SERIALIZATION_USE_JSON_OBJECTS = 'serialization.use.json.object'; 16 | ttypes.FIELD_DELIM = 'field.delim'; 17 | ttypes.COLLECTION_DELIM = 'colelction.delim'; 18 | ttypes.LINE_DELIM = 'line.delim'; 19 | ttypes.MAPKEY_DELIM = 'mapkey.delim'; 20 | ttypes.QUOTE_CHAR = 'quote.delim'; 21 | ttypes.ESCAPE_CHAR = 'escape.delim'; 22 | ttypes.VOID_TYPE_NAME = 'void'; 23 | ttypes.BOOLEAN_TYPE_NAME = 'boolean'; 24 | ttypes.TINYINT_TYPE_NAME = 'tinyint'; 25 | ttypes.SMALLINT_TYPE_NAME = 'smallint'; 26 | ttypes.INT_TYPE_NAME = 'int'; 27 | 
ttypes.BIGINT_TYPE_NAME = 'bigint'; 28 | ttypes.FLOAT_TYPE_NAME = 'float'; 29 | ttypes.DOUBLE_TYPE_NAME = 'double'; 30 | ttypes.STRING_TYPE_NAME = 'string'; 31 | ttypes.DATE_TYPE_NAME = 'date'; 32 | ttypes.DATETIME_TYPE_NAME = 'datetime'; 33 | ttypes.TIMESTAMP_TYPE_NAME = 'timestamp'; 34 | ttypes.LIST_TYPE_NAME = 'array'; 35 | ttypes.MAP_TYPE_NAME = 'map'; 36 | ttypes.STRUCT_TYPE_NAME = 'struct'; 37 | ttypes.UNION_TYPE_NAME = 'uniontype'; 38 | ttypes.LIST_COLUMNS = 'columns'; 39 | ttypes.LIST_COLUMN_TYPES = 'columns.types'; 40 | ttypes.PrimitiveTypes = ['void','boolean','tinyint','smallint','int','bigint','float','double','string','date','datetime','timestamp']; 41 | ttypes.CollectionTypes = ['array','map']; 42 | -------------------------------------------------------------------------------- /lib/0.7.1-cdh3u3/serde_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.SERIALIZATION_LIB = 'serialization.lib'; 9 | ttypes.SERIALIZATION_CLASS = 'serialization.class'; 10 | ttypes.SERIALIZATION_FORMAT = 'serialization.format'; 11 | ttypes.SERIALIZATION_DDL = 'serialization.ddl'; 12 | ttypes.SERIALIZATION_NULL_FORMAT = 'serialization.null.format'; 13 | ttypes.SERIALIZATION_LAST_COLUMN_TAKES_REST = 'serialization.last.column.takes.rest'; 14 | ttypes.SERIALIZATION_SORT_ORDER = 'serialization.sort.order'; 15 | ttypes.SERIALIZATION_USE_JSON_OBJECTS = 'serialization.use.json.object'; 16 | ttypes.FIELD_DELIM = 'field.delim'; 17 | ttypes.COLLECTION_DELIM = 'colelction.delim'; 18 | ttypes.LINE_DELIM = 'line.delim'; 19 | ttypes.MAPKEY_DELIM = 'mapkey.delim'; 20 | ttypes.QUOTE_CHAR = 'quote.delim'; 21 | ttypes.ESCAPE_CHAR = 'escape.delim'; 22 | ttypes.VOID_TYPE_NAME = 'void'; 23 | ttypes.BOOLEAN_TYPE_NAME = 'boolean'; 24 | ttypes.TINYINT_TYPE_NAME = 'tinyint'; 25 | ttypes.SMALLINT_TYPE_NAME = 'smallint'; 26 | ttypes.INT_TYPE_NAME = 'int'; 27 | ttypes.BIGINT_TYPE_NAME = 'bigint'; 28 | ttypes.FLOAT_TYPE_NAME = 'float'; 29 | ttypes.DOUBLE_TYPE_NAME = 'double'; 30 | ttypes.STRING_TYPE_NAME = 'string'; 31 | ttypes.DATE_TYPE_NAME = 'date'; 32 | ttypes.DATETIME_TYPE_NAME = 'datetime'; 33 | ttypes.TIMESTAMP_TYPE_NAME = 'timestamp'; 34 | ttypes.LIST_TYPE_NAME = 'array'; 35 | ttypes.MAP_TYPE_NAME = 'map'; 36 | ttypes.STRUCT_TYPE_NAME = 'struct'; 37 | ttypes.UNION_TYPE_NAME = 'uniontype'; 38 | ttypes.LIST_COLUMNS = 'columns'; 39 | ttypes.LIST_COLUMN_TYPES = 'columns.types'; 40 | ttypes.PrimitiveTypes = ['void','boolean','tinyint','smallint','int','bigint','float','double','string','date','datetime','timestamp']; 41 | ttypes.CollectionTypes = ['array','map']; 42 | -------------------------------------------------------------------------------- /test/MultiQueryTest.coffee: -------------------------------------------------------------------------------- 1 | 2 | should = require 'should' 3 | config = require './config' 4 | hive = if process.env.HIVE_COV then require '../lib-cov/hive' else require '../lib/hive' 5 | 6 | client = null 7 | before -> 8 | client = hive.createClient config 9 | after -> 10 | client.end() 11 | 12 | describe 'Multi # Query', -> 13 | it 'String', (next) -> 14 | count_before = 0 15 | count_row = 0 16 | client.multi_query(""" 17 | -- create db 18 | CREATE DATABASE IF NOT EXISTS #{config.db}; 19 | -- create table 20 | CREATE TABLE IF 
NOT EXISTS #{config.table} ( 21 | a_bigint BIGINT, 22 | an_int INT, 23 | a_date STRING 24 | ) 25 | ROW FORMAT DELIMITED 26 | FIELDS TERMINATED BY ','; 27 | -- load data 28 | LOAD DATA LOCAL INPATH '#{__dirname}/data.csv' OVERWRITE INTO TABLE #{config.table}; 29 | -- return data 30 | SELECT * FROM #{config.table}; 31 | """) 32 | .on 'before', (query) -> 33 | count_before++ 34 | .on 'row', (row) -> 35 | count_row++ 36 | .on 'error', (err) -> 37 | should.not.exist err 38 | .on 'end', (query) -> 39 | count_before.should.eql 4 40 | count_row.should.eql 54 41 | query.should.eql "SELECT * FROM #{config.table}" 42 | next() 43 | it 'Error in execute # No callback', (next) -> 44 | count_before = 0 45 | count_error = 0 46 | client.multi_query(""" 47 | -- Throw err 48 | Whow, that should throw an exception!; 49 | -- create db 50 | CREATE DATABASE IF NOT EXISTS #{config.db}; 51 | """) 52 | .on 'before', (query) -> 53 | count_before++ 54 | .on 'error', (err) -> 55 | count_error++ 56 | .on 'end', (query) -> 57 | false.should.not.be.ok 58 | .on 'both', (err) -> 59 | err.should.be.an.instanceof Error 60 | err.name.should.eql 'HiveServerException' 61 | count_before.should.eql 1 62 | count_error.should.eql 1 63 | next() 64 | -------------------------------------------------------------------------------- /test/MultiExecuteTest.coffee: -------------------------------------------------------------------------------- 1 | 2 | should = require 'should' 3 | config = require './config' 4 | hive = if process.env.HIVE_COV then require '../lib-cov/hive' else require '../lib/hive' 5 | 6 | client = null 7 | before -> 8 | client = hive.createClient config 9 | after -> 10 | client.end() 11 | 12 | describe 'Multi # Execute', -> 13 | it 'String', (next) -> 14 | count_before = 0 15 | count_end = 0 16 | count_both = 0 17 | execute = client.multi_execute """ 18 | -- create db 19 | CREATE DATABASE IF NOT EXISTS #{config.db}; 20 | -- create table 21 | CREATE TABLE IF NOT EXISTS #{config.table} ( 22 | a_bigint BIGINT, 23 | an_int INT, 24 | a_date STRING 25 | ) 26 | ROW FORMAT DELIMITED 27 | FIELDS TERMINATED BY ','; 28 | -- load data 29 | LOAD DATA LOCAL INPATH '#{__dirname}/data.csv' OVERWRITE INTO TABLE #{config.table}; 30 | """, (err) -> 31 | should.not.exist err 32 | count_before.should.eql 3 33 | count_end.should.eql 1 34 | count_both.should.eql 1 35 | next() 36 | execute.on 'before', (query) -> 37 | count_before++ 38 | execute.on 'end', (query) -> 39 | count_end++ 40 | execute.on 'both', (query) -> 41 | count_both++ 42 | it 'Error', (next) -> 43 | count_before = 0 44 | count_error = 0 45 | count_both = 0 46 | execute = client.multi_execute """ 47 | -- Throw err 48 | Whow, that should throw an exception!; 49 | -- create db 50 | CREATE DATABASE IF NOT EXISTS #{config.db}; 51 | """, (err) -> 52 | err.should.be.an.instanceof Error 53 | err.name.should.eql 'HiveServerException' 54 | count_before.should.eql 1 55 | count_error.should.eql 1 56 | count_both.should.eql 1 57 | next() 58 | execute.on 'before', (query) -> 59 | count_before++ 60 | execute.on 'error', (err) -> 61 | err.should.be.an.instanceof Error 62 | err.name.should.eql 'HiveServerException' 63 | count_error++ 64 | execute.on 'both', (query) -> 65 | count_both++ 66 | -------------------------------------------------------------------------------- /test/data.csv: -------------------------------------------------------------------------------- 1 | 100035500005100,171,scelerisque integer 2 | 100035500015100,97,gravida 3 | 100035500025100,1,feugiat vitae orci 4 | 
100035500035100,135,erat parturient 5 | 100035500045100,233,lorem bibendum mi 6 | 100035500055100,48,fringilla 7 | 100035500065100,204,lacus aliquet erat 8 | 100035500075100,288,interdum 9 | 100035500085100,122,nulla ut nunc 10 | 100035500095100,167,ridiculus nec 11 | 110035500005100,242,massa sit 12 | 110035500015100,53,ultrices in 13 | 110035500025100,87,neque non sodales aenean 14 | 110035500035100,375,aliquet 15 | 110035500045100,80,ac mauris 16 | 110035500055100,29,eget eu nunc in 17 | 110035500065100,145,vestibulum justo 18 | 110035500075100,183,tristique hendrerit vitae 19 | 110035500085100,521,ut sagittis magna 20 | 110035500095100,191,enim fermentum 21 | 120035500005100,298,aliquet nunc ultricies laoreet 22 | 120035500015100,374,risus adipiscing 23 | 120035500025100,914,mauris 24 | 120035500035100,131,donec suspendisse faucibus vestibulum 25 | 120035500045100,504,ipsum 26 | 120035500055100,128,varius turpis iaculis 27 | 120035500065100,150,vivamus aliquam tincidunt tellus 28 | 120035500075100,454,suspendisse magnis morbi 29 | 120035500085100,267,tempor eu 30 | 120035500095100,214,dolor enim commodo 31 | 130035500005100,56,sociis blandit elit 32 | 130035500015100,217,consectetur 33 | 130035500025100,94,aliquet 34 | 130035500035100,235,morbi non risus 35 | 130035500045100,111,nunc 36 | 130035500055100,124,arcu condimentum diam turpis 37 | 130035500065100,282,hendrerit 38 | 130035500075100,182,sed ullamcorper a tortor 39 | 130035500085100,169,dolor eu interdum nec 40 | 130035500095100,345,donec 41 | 140035500005100,54,in suspendisse 42 | 140035500015100,64,amet dolor fermentum 43 | 140035500025100,87,scelerisque et 44 | 140035500035100,20,vivamus quis adipiscing aliquet 45 | 140035500045100,286,tortor donec neque ut 46 | 140035500055100,80,varius mi 47 | 140035500065100,301,egestas nam morbi 48 | 140035500075100,270,commodo porta 49 | 140035500085100,523,at ultricies turpis 50 | 140035500095100,476,egestas mauris 51 | 150035500005100,413,vitae 52 | 150035500015100,159,at eleifend nisi faucibus 53 | 150035500025100,59,metus pharetra semper 54 | 150035500035100,161,nunc 55 | -------------------------------------------------------------------------------- /bin/generate: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | base=`dirname "${BASH_SOURCE-$0}"` 4 | base=`cd "$base/..">/dev/null; pwd` 5 | 6 | mkdir -p $base/lib/0.7.1-cdh3u1 7 | thrift -o $base/lib/0.7.1-cdh3u1 --gen js:node -I $base/src/0.7.1-cdh3u1 $base/src/0.7.1-cdh3u1/hive_metastore.thrift 8 | thrift -o $base/lib/0.7.1-cdh3u1 --gen js:node -I $base/src/0.7.1-cdh3u1 $base/src/0.7.1-cdh3u1/hive_service.thrift 9 | thrift -o $base/lib/0.7.1-cdh3u1 --gen js:node -I $base/src/0.7.1-cdh3u1 $base/src/0.7.1-cdh3u1/fb303.thrift 10 | thrift -o $base/lib/0.7.1-cdh3u1 --gen js:node -I $base/src/0.7.1-cdh3u1 $base/src/0.7.1-cdh3u1/serde.thrift 11 | thrift -o $base/lib/0.7.1-cdh3u1 --gen js:node -I $base/src/0.7.1-cdh3u1 $base/src/0.7.1-cdh3u1/queryplan.thrift 12 | cp -rp $base/lib/0.7.1-cdh3u1/gen-nodejs/* $base/lib/0.7.1-cdh3u1/ 13 | rm -rf $base/lib/0.7.1-cdh3u1/gen-nodejs 14 | 15 | mkdir -p $base/lib/0.7.1-cdh3u2 16 | thrift -o $base/lib/0.7.1-cdh3u2 --gen js:node -I $base/src/0.7.1-cdh3u2 $base/src/0.7.1-cdh3u2/hive_metastore.thrift 17 | thrift -o $base/lib/0.7.1-cdh3u2 --gen js:node -I $base/src/0.7.1-cdh3u2 $base/src/0.7.1-cdh3u2/hive_service.thrift 18 | thrift -o $base/lib/0.7.1-cdh3u2 --gen js:node -I $base/src/0.7.1-cdh3u2 $base/src/0.7.1-cdh3u2/fb303.thrift 19 | 
thrift -o $base/lib/0.7.1-cdh3u2 --gen js:node -I $base/src/0.7.1-cdh3u2 $base/src/0.7.1-cdh3u2/serde.thrift 20 | thrift -o $base/lib/0.7.1-cdh3u2 --gen js:node -I $base/src/0.7.1-cdh3u2 $base/src/0.7.1-cdh3u2/queryplan.thrift 21 | cp -rp $base/lib/0.7.1-cdh3u2/gen-nodejs/* $base/lib/0.7.1-cdh3u2/ 22 | rm -rf $base/lib/0.7.1-cdh3u2/gen-nodejs 23 | 24 | mkdir -p $base/lib/0.7.1-cdh3u3 25 | thrift -o $base/lib/0.7.1-cdh3u3 --gen js:node -I $base/src/0.7.1-cdh3u3 $base/src/0.7.1-cdh3u3/hive_metastore.thrift 26 | thrift -o $base/lib/0.7.1-cdh3u3 --gen js:node -I $base/src/0.7.1-cdh3u3 $base/src/0.7.1-cdh3u3/hive_service.thrift 27 | thrift -o $base/lib/0.7.1-cdh3u3 --gen js:node -I $base/src/0.7.1-cdh3u3 $base/src/0.7.1-cdh3u3/fb303.thrift 28 | thrift -o $base/lib/0.7.1-cdh3u3 --gen js:node -I $base/src/0.7.1-cdh3u3 $base/src/0.7.1-cdh3u3/serde.thrift 29 | thrift -o $base/lib/0.7.1-cdh3u3 --gen js:node -I $base/src/0.7.1-cdh3u3 $base/src/0.7.1-cdh3u3/queryplan.thrift 30 | cp -rp $base/lib/0.7.1-cdh3u3/gen-nodejs/* $base/lib/0.7.1-cdh3u3/ 31 | rm -rf $base/lib/0.7.1-cdh3u3/gen-nodejs 32 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u1/serde.thrift: -------------------------------------------------------------------------------- 1 | 2 | namespace java org.apache.hadoop.hive.serde 3 | namespace php org.apache.hadoop.hive.serde 4 | namespace py org_apache_hadoop_hive_serde 5 | namespace cpp Hive 6 | 7 | // name of serialization scheme. 8 | const string SERIALIZATION_LIB = "serialization.lib" 9 | const string SERIALIZATION_CLASS = "serialization.class" 10 | const string SERIALIZATION_FORMAT = "serialization.format" 11 | const string SERIALIZATION_DDL = "serialization.ddl" 12 | const string SERIALIZATION_NULL_FORMAT = "serialization.null.format" 13 | const string SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest" 14 | const string SERIALIZATION_SORT_ORDER = "serialization.sort.order" 15 | const string SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object" 16 | 17 | const string FIELD_DELIM = "field.delim" 18 | const string COLLECTION_DELIM = "colelction.delim" 19 | const string LINE_DELIM = "line.delim" 20 | const string MAPKEY_DELIM = "mapkey.delim" 21 | const string QUOTE_CHAR = "quote.delim" 22 | const string ESCAPE_CHAR = "escape.delim" 23 | 24 | typedef string PrimitiveType 25 | typedef string CollectionType 26 | 27 | const string VOID_TYPE_NAME = "void"; 28 | const string BOOLEAN_TYPE_NAME = "boolean"; 29 | const string TINYINT_TYPE_NAME = "tinyint"; 30 | const string SMALLINT_TYPE_NAME = "smallint"; 31 | const string INT_TYPE_NAME = "int"; 32 | const string BIGINT_TYPE_NAME = "bigint"; 33 | const string FLOAT_TYPE_NAME = "float"; 34 | const string DOUBLE_TYPE_NAME = "double"; 35 | const string STRING_TYPE_NAME = "string"; 36 | const string DATE_TYPE_NAME = "date"; 37 | const string DATETIME_TYPE_NAME = "datetime"; 38 | const string TIMESTAMP_TYPE_NAME = "timestamp"; 39 | 40 | const string LIST_TYPE_NAME = "array"; 41 | const string MAP_TYPE_NAME = "map"; 42 | const string STRUCT_TYPE_NAME = "struct"; 43 | const string UNION_TYPE_NAME = "uniontype"; 44 | 45 | const string LIST_COLUMNS = "columns"; 46 | const string LIST_COLUMN_TYPES = "columns.types"; 47 | 48 | const set PrimitiveTypes = [ VOID_TYPE_NAME BOOLEAN_TYPE_NAME TINYINT_TYPE_NAME SMALLINT_TYPE_NAME INT_TYPE_NAME BIGINT_TYPE_NAME FLOAT_TYPE_NAME DOUBLE_TYPE_NAME STRING_TYPE_NAME DATE_TYPE_NAME DATETIME_TYPE_NAME TIMESTAMP_TYPE_NAME ], 49 | const 
set CollectionTypes = [ LIST_TYPE_NAME MAP_TYPE_NAME ], 50 | 51 | 52 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u2/serde.thrift: -------------------------------------------------------------------------------- 1 | 2 | namespace java org.apache.hadoop.hive.serde 3 | namespace php org.apache.hadoop.hive.serde 4 | namespace py org_apache_hadoop_hive_serde 5 | namespace cpp Hive 6 | 7 | // name of serialization scheme. 8 | const string SERIALIZATION_LIB = "serialization.lib" 9 | const string SERIALIZATION_CLASS = "serialization.class" 10 | const string SERIALIZATION_FORMAT = "serialization.format" 11 | const string SERIALIZATION_DDL = "serialization.ddl" 12 | const string SERIALIZATION_NULL_FORMAT = "serialization.null.format" 13 | const string SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest" 14 | const string SERIALIZATION_SORT_ORDER = "serialization.sort.order" 15 | const string SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object" 16 | 17 | const string FIELD_DELIM = "field.delim" 18 | const string COLLECTION_DELIM = "colelction.delim" 19 | const string LINE_DELIM = "line.delim" 20 | const string MAPKEY_DELIM = "mapkey.delim" 21 | const string QUOTE_CHAR = "quote.delim" 22 | const string ESCAPE_CHAR = "escape.delim" 23 | 24 | typedef string PrimitiveType 25 | typedef string CollectionType 26 | 27 | const string VOID_TYPE_NAME = "void"; 28 | const string BOOLEAN_TYPE_NAME = "boolean"; 29 | const string TINYINT_TYPE_NAME = "tinyint"; 30 | const string SMALLINT_TYPE_NAME = "smallint"; 31 | const string INT_TYPE_NAME = "int"; 32 | const string BIGINT_TYPE_NAME = "bigint"; 33 | const string FLOAT_TYPE_NAME = "float"; 34 | const string DOUBLE_TYPE_NAME = "double"; 35 | const string STRING_TYPE_NAME = "string"; 36 | const string DATE_TYPE_NAME = "date"; 37 | const string DATETIME_TYPE_NAME = "datetime"; 38 | const string TIMESTAMP_TYPE_NAME = "timestamp"; 39 | 40 | const string LIST_TYPE_NAME = "array"; 41 | const string MAP_TYPE_NAME = "map"; 42 | const string STRUCT_TYPE_NAME = "struct"; 43 | const string UNION_TYPE_NAME = "uniontype"; 44 | 45 | const string LIST_COLUMNS = "columns"; 46 | const string LIST_COLUMN_TYPES = "columns.types"; 47 | 48 | const set PrimitiveTypes = [ VOID_TYPE_NAME BOOLEAN_TYPE_NAME TINYINT_TYPE_NAME SMALLINT_TYPE_NAME INT_TYPE_NAME BIGINT_TYPE_NAME FLOAT_TYPE_NAME DOUBLE_TYPE_NAME STRING_TYPE_NAME DATE_TYPE_NAME DATETIME_TYPE_NAME TIMESTAMP_TYPE_NAME ], 49 | const set CollectionTypes = [ LIST_TYPE_NAME MAP_TYPE_NAME ], 50 | 51 | 52 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u3/serde.thrift: -------------------------------------------------------------------------------- 1 | 2 | namespace java org.apache.hadoop.hive.serde 3 | namespace php org.apache.hadoop.hive.serde 4 | namespace py org_apache_hadoop_hive_serde 5 | namespace cpp Hive 6 | 7 | // name of serialization scheme. 
8 | const string SERIALIZATION_LIB = "serialization.lib" 9 | const string SERIALIZATION_CLASS = "serialization.class" 10 | const string SERIALIZATION_FORMAT = "serialization.format" 11 | const string SERIALIZATION_DDL = "serialization.ddl" 12 | const string SERIALIZATION_NULL_FORMAT = "serialization.null.format" 13 | const string SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest" 14 | const string SERIALIZATION_SORT_ORDER = "serialization.sort.order" 15 | const string SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object" 16 | 17 | const string FIELD_DELIM = "field.delim" 18 | const string COLLECTION_DELIM = "colelction.delim" 19 | const string LINE_DELIM = "line.delim" 20 | const string MAPKEY_DELIM = "mapkey.delim" 21 | const string QUOTE_CHAR = "quote.delim" 22 | const string ESCAPE_CHAR = "escape.delim" 23 | 24 | typedef string PrimitiveType 25 | typedef string CollectionType 26 | 27 | const string VOID_TYPE_NAME = "void"; 28 | const string BOOLEAN_TYPE_NAME = "boolean"; 29 | const string TINYINT_TYPE_NAME = "tinyint"; 30 | const string SMALLINT_TYPE_NAME = "smallint"; 31 | const string INT_TYPE_NAME = "int"; 32 | const string BIGINT_TYPE_NAME = "bigint"; 33 | const string FLOAT_TYPE_NAME = "float"; 34 | const string DOUBLE_TYPE_NAME = "double"; 35 | const string STRING_TYPE_NAME = "string"; 36 | const string DATE_TYPE_NAME = "date"; 37 | const string DATETIME_TYPE_NAME = "datetime"; 38 | const string TIMESTAMP_TYPE_NAME = "timestamp"; 39 | 40 | const string LIST_TYPE_NAME = "array"; 41 | const string MAP_TYPE_NAME = "map"; 42 | const string STRUCT_TYPE_NAME = "struct"; 43 | const string UNION_TYPE_NAME = "uniontype"; 44 | 45 | const string LIST_COLUMNS = "columns"; 46 | const string LIST_COLUMN_TYPES = "columns.types"; 47 | 48 | const set PrimitiveTypes = [ VOID_TYPE_NAME BOOLEAN_TYPE_NAME TINYINT_TYPE_NAME SMALLINT_TYPE_NAME INT_TYPE_NAME BIGINT_TYPE_NAME FLOAT_TYPE_NAME DOUBLE_TYPE_NAME STRING_TYPE_NAME DATE_TYPE_NAME DATETIME_TYPE_NAME TIMESTAMP_TYPE_NAME ], 49 | const set CollectionTypes = [ LIST_TYPE_NAME MAP_TYPE_NAME ], 50 | 51 | 52 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u1/queryplan.thrift: -------------------------------------------------------------------------------- 1 | namespace java org.apache.hadoop.hive.ql.plan.api 2 | namespace cpp Apache.Hadoop.Hive 3 | 4 | enum AdjacencyType { CONJUNCTIVE, DISJUNCTIVE } 5 | struct Adjacency { 6 | 1: string node, 7 | 2: list children, 8 | 3: AdjacencyType adjacencyType, 9 | } 10 | 11 | enum NodeType { OPERATOR, STAGE } 12 | struct Graph { 13 | 1: NodeType nodeType, 14 | 2: list roots, 15 | 3: list adjacencyList, 16 | } 17 | 18 | #Represents a operator along with its counters 19 | enum OperatorType { 20 | JOIN, 21 | MAPJOIN, 22 | EXTRACT, 23 | FILTER, 24 | FORWARD, 25 | GROUPBY, 26 | LIMIT, 27 | SCRIPT, 28 | SELECT, 29 | TABLESCAN, 30 | FILESINK, 31 | REDUCESINK, 32 | UNION, 33 | UDTF, 34 | LATERALVIEWJOIN, 35 | LATERALVIEWFORWARD, 36 | HASHTABLESINK, 37 | HASHTABLEDUMMY, 38 | } 39 | 40 | struct Operator { 41 | 1: string operatorId, 42 | 2: OperatorType operatorType, 43 | 3: map operatorAttributes, 44 | 4: map operatorCounters, 45 | 5: bool done, 46 | 6: bool started, 47 | } 48 | 49 | # Represents whether it is a map-reduce job or not. 
In future, different tasks can add their dependencies 50 | # The operator graph shows the operator tree 51 | enum TaskType { MAP, REDUCE, OTHER } 52 | struct Task { 53 | 1: string taskId, 54 | 2: TaskType taskType 55 | 3: map taskAttributes, 56 | 4: map taskCounters, 57 | 5: optional Graph operatorGraph, 58 | 6: optional list operatorList, 59 | 7: bool done, 60 | 8: bool started, 61 | } 62 | 63 | # Represents a Stage - unfortunately, it is represented as Task in ql/exec 64 | enum StageType { 65 | CONDITIONAL, 66 | COPY, 67 | DDL, 68 | MAPRED, 69 | EXPLAIN, 70 | FETCH, 71 | FUNC, 72 | MAPREDLOCAL, 73 | MOVE, 74 | STATS, 75 | } 76 | 77 | struct Stage { 78 | 1: string stageId, 79 | 2: StageType stageType, 80 | 3: map stageAttributes, 81 | 4: map stageCounters, 82 | 5: list taskList, 83 | 6: bool done, 84 | 7: bool started, 85 | } 86 | 87 | # Represents a query - 88 | # The graph maintains the stage dependency.In case of conditional tasks, it is represented as if only 89 | # one of the dependencies need to be executed 90 | struct Query { 91 | 1: string queryId, 92 | 2: string queryType, 93 | 3: map queryAttributes, 94 | 4: map queryCounters, 95 | 5: Graph stageGraph, 96 | 6: list stageList, 97 | 7: bool done, 98 | 8: bool started, 99 | } 100 | 101 | # List of all queries - each query maintains if it is done or started 102 | # This can be used to track all the queries in the session 103 | struct QueryPlan { 104 | 1: list queries, 105 | 2: bool done, 106 | 3: bool started, 107 | } 108 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u2/queryplan.thrift: -------------------------------------------------------------------------------- 1 | namespace java org.apache.hadoop.hive.ql.plan.api 2 | namespace cpp Apache.Hadoop.Hive 3 | 4 | enum AdjacencyType { CONJUNCTIVE, DISJUNCTIVE } 5 | struct Adjacency { 6 | 1: string node, 7 | 2: list children, 8 | 3: AdjacencyType adjacencyType, 9 | } 10 | 11 | enum NodeType { OPERATOR, STAGE } 12 | struct Graph { 13 | 1: NodeType nodeType, 14 | 2: list roots, 15 | 3: list adjacencyList, 16 | } 17 | 18 | #Represents a operator along with its counters 19 | enum OperatorType { 20 | JOIN, 21 | MAPJOIN, 22 | EXTRACT, 23 | FILTER, 24 | FORWARD, 25 | GROUPBY, 26 | LIMIT, 27 | SCRIPT, 28 | SELECT, 29 | TABLESCAN, 30 | FILESINK, 31 | REDUCESINK, 32 | UNION, 33 | UDTF, 34 | LATERALVIEWJOIN, 35 | LATERALVIEWFORWARD, 36 | HASHTABLESINK, 37 | HASHTABLEDUMMY, 38 | } 39 | 40 | struct Operator { 41 | 1: string operatorId, 42 | 2: OperatorType operatorType, 43 | 3: map operatorAttributes, 44 | 4: map operatorCounters, 45 | 5: bool done, 46 | 6: bool started, 47 | } 48 | 49 | # Represents whether it is a map-reduce job or not. 
In future, different tasks can add their dependencies 50 | # The operator graph shows the operator tree 51 | enum TaskType { MAP, REDUCE, OTHER } 52 | struct Task { 53 | 1: string taskId, 54 | 2: TaskType taskType 55 | 3: map taskAttributes, 56 | 4: map taskCounters, 57 | 5: optional Graph operatorGraph, 58 | 6: optional list operatorList, 59 | 7: bool done, 60 | 8: bool started, 61 | } 62 | 63 | # Represents a Stage - unfortunately, it is represented as Task in ql/exec 64 | enum StageType { 65 | CONDITIONAL, 66 | COPY, 67 | DDL, 68 | MAPRED, 69 | EXPLAIN, 70 | FETCH, 71 | FUNC, 72 | MAPREDLOCAL, 73 | MOVE, 74 | STATS, 75 | } 76 | 77 | struct Stage { 78 | 1: string stageId, 79 | 2: StageType stageType, 80 | 3: map stageAttributes, 81 | 4: map stageCounters, 82 | 5: list taskList, 83 | 6: bool done, 84 | 7: bool started, 85 | } 86 | 87 | # Represents a query - 88 | # The graph maintains the stage dependency.In case of conditional tasks, it is represented as if only 89 | # one of the dependencies need to be executed 90 | struct Query { 91 | 1: string queryId, 92 | 2: string queryType, 93 | 3: map queryAttributes, 94 | 4: map queryCounters, 95 | 5: Graph stageGraph, 96 | 6: list stageList, 97 | 7: bool done, 98 | 8: bool started, 99 | } 100 | 101 | # List of all queries - each query maintains if it is done or started 102 | # This can be used to track all the queries in the session 103 | struct QueryPlan { 104 | 1: list queries, 105 | 2: bool done, 106 | 3: bool started, 107 | } 108 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u3/queryplan.thrift: -------------------------------------------------------------------------------- 1 | namespace java org.apache.hadoop.hive.ql.plan.api 2 | namespace cpp Apache.Hadoop.Hive 3 | 4 | enum AdjacencyType { CONJUNCTIVE, DISJUNCTIVE } 5 | struct Adjacency { 6 | 1: string node, 7 | 2: list children, 8 | 3: AdjacencyType adjacencyType, 9 | } 10 | 11 | enum NodeType { OPERATOR, STAGE } 12 | struct Graph { 13 | 1: NodeType nodeType, 14 | 2: list roots, 15 | 3: list adjacencyList, 16 | } 17 | 18 | #Represents a operator along with its counters 19 | enum OperatorType { 20 | JOIN, 21 | MAPJOIN, 22 | EXTRACT, 23 | FILTER, 24 | FORWARD, 25 | GROUPBY, 26 | LIMIT, 27 | SCRIPT, 28 | SELECT, 29 | TABLESCAN, 30 | FILESINK, 31 | REDUCESINK, 32 | UNION, 33 | UDTF, 34 | LATERALVIEWJOIN, 35 | LATERALVIEWFORWARD, 36 | HASHTABLESINK, 37 | HASHTABLEDUMMY, 38 | } 39 | 40 | struct Operator { 41 | 1: string operatorId, 42 | 2: OperatorType operatorType, 43 | 3: map operatorAttributes, 44 | 4: map operatorCounters, 45 | 5: bool done, 46 | 6: bool started, 47 | } 48 | 49 | # Represents whether it is a map-reduce job or not. 
In future, different tasks can add their dependencies 50 | # The operator graph shows the operator tree 51 | enum TaskType { MAP, REDUCE, OTHER } 52 | struct Task { 53 | 1: string taskId, 54 | 2: TaskType taskType 55 | 3: map taskAttributes, 56 | 4: map taskCounters, 57 | 5: optional Graph operatorGraph, 58 | 6: optional list operatorList, 59 | 7: bool done, 60 | 8: bool started, 61 | } 62 | 63 | # Represents a Stage - unfortunately, it is represented as Task in ql/exec 64 | enum StageType { 65 | CONDITIONAL, 66 | COPY, 67 | DDL, 68 | MAPRED, 69 | EXPLAIN, 70 | FETCH, 71 | FUNC, 72 | MAPREDLOCAL, 73 | MOVE, 74 | STATS, 75 | } 76 | 77 | struct Stage { 78 | 1: string stageId, 79 | 2: StageType stageType, 80 | 3: map stageAttributes, 81 | 4: map stageCounters, 82 | 5: list taskList, 83 | 6: bool done, 84 | 7: bool started, 85 | } 86 | 87 | # Represents a query - 88 | # The graph maintains the stage dependency.In case of conditional tasks, it is represented as if only 89 | # one of the dependencies need to be executed 90 | struct Query { 91 | 1: string queryId, 92 | 2: string queryType, 93 | 3: map queryAttributes, 94 | 4: map queryCounters, 95 | 5: Graph stageGraph, 96 | 6: list stageList, 97 | 7: bool done, 98 | 8: bool started, 99 | } 100 | 101 | # List of all queries - each query maintains if it is done or started 102 | # This can be used to track all the queries in the session 103 | struct QueryPlan { 104 | 1: list queries, 105 | 2: bool done, 106 | 3: bool started, 107 | } 108 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u1/fb303.thrift: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, 13 | * software distributed under the License is distributed on an 14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | * KIND, either express or implied. See the License for the 16 | * specific language governing permissions and limitations 17 | * under the License. 18 | */ 19 | 20 | /** 21 | * fb303.thrift 22 | */ 23 | 24 | namespace java com.facebook.fb303 25 | namespace cpp facebook.fb303 26 | namespace perl Facebook.FB303 27 | 28 | /** 29 | * Common status reporting mechanism across all services 30 | */ 31 | enum fb_status { 32 | DEAD = 0, 33 | STARTING = 1, 34 | ALIVE = 2, 35 | STOPPING = 3, 36 | STOPPED = 4, 37 | WARNING = 5, 38 | } 39 | 40 | /** 41 | * Standard base service 42 | */ 43 | service FacebookService { 44 | 45 | /** 46 | * Returns a descriptive name of the service 47 | */ 48 | string getName(), 49 | 50 | /** 51 | * Returns the version of the service 52 | */ 53 | string getVersion(), 54 | 55 | /** 56 | * Gets the status of this service 57 | */ 58 | fb_status getStatus(), 59 | 60 | /** 61 | * User friendly description of status, such as why the service is in 62 | * the dead or warning state, or what is being started or stopped. 
63 | */ 64 | string getStatusDetails(), 65 | 66 | /** 67 | * Gets the counters for this service 68 | */ 69 | map getCounters(), 70 | 71 | /** 72 | * Gets the value of a single counter 73 | */ 74 | i64 getCounter(1: string key), 75 | 76 | /** 77 | * Sets an option 78 | */ 79 | void setOption(1: string key, 2: string value), 80 | 81 | /** 82 | * Gets an option 83 | */ 84 | string getOption(1: string key), 85 | 86 | /** 87 | * Gets all options 88 | */ 89 | map getOptions(), 90 | 91 | /** 92 | * Returns a CPU profile over the given time interval (client and server 93 | * must agree on the profile format). 94 | */ 95 | string getCpuProfile(1: i32 profileDurationInSec), 96 | 97 | /** 98 | * Returns the unix time that the server has been running since 99 | */ 100 | i64 aliveSince(), 101 | 102 | /** 103 | * Tell the server to reload its configuration, reopen log files, etc 104 | */ 105 | oneway void reinitialize(), 106 | 107 | /** 108 | * Suggest a shutdown to the server 109 | */ 110 | oneway void shutdown(), 111 | 112 | } 113 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u2/fb303.thrift: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, 13 | * software distributed under the License is distributed on an 14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | * KIND, either express or implied. See the License for the 16 | * specific language governing permissions and limitations 17 | * under the License. 18 | */ 19 | 20 | /** 21 | * fb303.thrift 22 | */ 23 | 24 | namespace java com.facebook.fb303 25 | namespace cpp facebook.fb303 26 | namespace perl Facebook.FB303 27 | 28 | /** 29 | * Common status reporting mechanism across all services 30 | */ 31 | enum fb_status { 32 | DEAD = 0, 33 | STARTING = 1, 34 | ALIVE = 2, 35 | STOPPING = 3, 36 | STOPPED = 4, 37 | WARNING = 5, 38 | } 39 | 40 | /** 41 | * Standard base service 42 | */ 43 | service FacebookService { 44 | 45 | /** 46 | * Returns a descriptive name of the service 47 | */ 48 | string getName(), 49 | 50 | /** 51 | * Returns the version of the service 52 | */ 53 | string getVersion(), 54 | 55 | /** 56 | * Gets the status of this service 57 | */ 58 | fb_status getStatus(), 59 | 60 | /** 61 | * User friendly description of status, such as why the service is in 62 | * the dead or warning state, or what is being started or stopped. 
63 | */ 64 | string getStatusDetails(), 65 | 66 | /** 67 | * Gets the counters for this service 68 | */ 69 | map getCounters(), 70 | 71 | /** 72 | * Gets the value of a single counter 73 | */ 74 | i64 getCounter(1: string key), 75 | 76 | /** 77 | * Sets an option 78 | */ 79 | void setOption(1: string key, 2: string value), 80 | 81 | /** 82 | * Gets an option 83 | */ 84 | string getOption(1: string key), 85 | 86 | /** 87 | * Gets all options 88 | */ 89 | map getOptions(), 90 | 91 | /** 92 | * Returns a CPU profile over the given time interval (client and server 93 | * must agree on the profile format). 94 | */ 95 | string getCpuProfile(1: i32 profileDurationInSec), 96 | 97 | /** 98 | * Returns the unix time that the server has been running since 99 | */ 100 | i64 aliveSince(), 101 | 102 | /** 103 | * Tell the server to reload its configuration, reopen log files, etc 104 | */ 105 | oneway void reinitialize(), 106 | 107 | /** 108 | * Suggest a shutdown to the server 109 | */ 110 | oneway void shutdown(), 111 | 112 | } 113 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u3/fb303.thrift: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, 13 | * software distributed under the License is distributed on an 14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | * KIND, either express or implied. See the License for the 16 | * specific language governing permissions and limitations 17 | * under the License. 18 | */ 19 | 20 | /** 21 | * fb303.thrift 22 | */ 23 | 24 | namespace java com.facebook.fb303 25 | namespace cpp facebook.fb303 26 | namespace perl Facebook.FB303 27 | 28 | /** 29 | * Common status reporting mechanism across all services 30 | */ 31 | enum fb_status { 32 | DEAD = 0, 33 | STARTING = 1, 34 | ALIVE = 2, 35 | STOPPING = 3, 36 | STOPPED = 4, 37 | WARNING = 5, 38 | } 39 | 40 | /** 41 | * Standard base service 42 | */ 43 | service FacebookService { 44 | 45 | /** 46 | * Returns a descriptive name of the service 47 | */ 48 | string getName(), 49 | 50 | /** 51 | * Returns the version of the service 52 | */ 53 | string getVersion(), 54 | 55 | /** 56 | * Gets the status of this service 57 | */ 58 | fb_status getStatus(), 59 | 60 | /** 61 | * User friendly description of status, such as why the service is in 62 | * the dead or warning state, or what is being started or stopped. 
63 | */ 64 | string getStatusDetails(), 65 | 66 | /** 67 | * Gets the counters for this service 68 | */ 69 | map getCounters(), 70 | 71 | /** 72 | * Gets the value of a single counter 73 | */ 74 | i64 getCounter(1: string key), 75 | 76 | /** 77 | * Sets an option 78 | */ 79 | void setOption(1: string key, 2: string value), 80 | 81 | /** 82 | * Gets an option 83 | */ 84 | string getOption(1: string key), 85 | 86 | /** 87 | * Gets all options 88 | */ 89 | map getOptions(), 90 | 91 | /** 92 | * Returns a CPU profile over the given time interval (client and server 93 | * must agree on the profile format). 94 | */ 95 | string getCpuProfile(1: i32 profileDurationInSec), 96 | 97 | /** 98 | * Returns the unix time that the server has been running since 99 | */ 100 | i64 aliveSince(), 101 | 102 | /** 103 | * Tell the server to reload its configuration, reopen log files, etc 104 | */ 105 | oneway void reinitialize(), 106 | 107 | /** 108 | * Suggest a shutdown to the server 109 | */ 110 | oneway void shutdown(), 111 | 112 | } 113 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u1/hive_service.thrift: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/thrift -java 2 | 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | # 20 | # Thrift Service that the hive service is built on 21 | # 22 | 23 | # 24 | # TODO: include/thrift is shared among different components. It 25 | # should not be under metastore. 26 | 27 | include "fb303.thrift" 28 | include "hive_metastore.thrift" 29 | include "queryplan.thrift" 30 | 31 | namespace java org.apache.hadoop.hive.service 32 | namespace cpp Apache.Hadoop.Hive 33 | 34 | // Enumeration of JobTracker.State 35 | enum JobTrackerState { 36 | INITIALIZING = 1, 37 | RUNNING = 2, 38 | } 39 | 40 | // Map-Reduce cluster status information 41 | struct HiveClusterStatus { 42 | 1: i32 taskTrackers, 43 | 2: i32 mapTasks, 44 | 3: i32 reduceTasks, 45 | 4: i32 maxMapTasks, 46 | 5: i32 maxReduceTasks, 47 | 6: JobTrackerState state, 48 | } 49 | 50 | exception HiveServerException { 51 | 1: string message 52 | 2: i32 errorCode 53 | 3: string SQLState 54 | } 55 | 56 | # Interface for Thrift Hive Server 57 | service ThriftHive extends hive_metastore.ThriftHiveMetastore { 58 | # Execute a query. Takes a HiveQL string 59 | void execute(1:string query) throws(1:HiveServerException ex) 60 | 61 | # Fetch one row. This row is the serialized form 62 | # of the result of the query 63 | string fetchOne() throws(1:HiveServerException ex) 64 | 65 | # Fetch a given number of rows or remaining number of 66 | # rows whichever is smaller. 
67 | list fetchN(1:i32 numRows) throws(1:HiveServerException ex) 68 | 69 | # Fetch all rows of the query result 70 | list fetchAll() throws(1:HiveServerException ex) 71 | 72 | # Get a schema object with fields represented with native Hive types 73 | hive_metastore.Schema getSchema() throws(1:HiveServerException ex) 74 | 75 | # Get a schema object with fields represented with Thrift DDL types 76 | hive_metastore.Schema getThriftSchema() throws(1:HiveServerException ex) 77 | 78 | # Get the status information about the Map-Reduce cluster 79 | HiveClusterStatus getClusterStatus() throws(1:HiveServerException ex) 80 | 81 | # Get the queryplan annotated with counter information 82 | queryplan.QueryPlan getQueryPlan() throws(1:HiveServerException ex) 83 | 84 | # clean up last Hive query (releasing locks etc.) 85 | void clean() 86 | } 87 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u2/hive_service.thrift: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/thrift -java 2 | 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | # 20 | # Thrift Service that the hive service is built on 21 | # 22 | 23 | # 24 | # TODO: include/thrift is shared among different components. It 25 | # should not be under metastore. 26 | 27 | include "fb303.thrift" 28 | include "hive_metastore.thrift" 29 | include "queryplan.thrift" 30 | 31 | namespace java org.apache.hadoop.hive.service 32 | namespace cpp Apache.Hadoop.Hive 33 | 34 | // Enumeration of JobTracker.State 35 | enum JobTrackerState { 36 | INITIALIZING = 1, 37 | RUNNING = 2, 38 | } 39 | 40 | // Map-Reduce cluster status information 41 | struct HiveClusterStatus { 42 | 1: i32 taskTrackers, 43 | 2: i32 mapTasks, 44 | 3: i32 reduceTasks, 45 | 4: i32 maxMapTasks, 46 | 5: i32 maxReduceTasks, 47 | 6: JobTrackerState state, 48 | } 49 | 50 | exception HiveServerException { 51 | 1: string message 52 | 2: i32 errorCode 53 | 3: string SQLState 54 | } 55 | 56 | # Interface for Thrift Hive Server 57 | service ThriftHive extends hive_metastore.ThriftHiveMetastore { 58 | # Execute a query. Takes a HiveQL string 59 | void execute(1:string query) throws(1:HiveServerException ex) 60 | 61 | # Fetch one row. This row is the serialized form 62 | # of the result of the query 63 | string fetchOne() throws(1:HiveServerException ex) 64 | 65 | # Fetch a given number of rows or remaining number of 66 | # rows whichever is smaller. 
67 | list fetchN(1:i32 numRows) throws(1:HiveServerException ex) 68 | 69 | # Fetch all rows of the query result 70 | list fetchAll() throws(1:HiveServerException ex) 71 | 72 | # Get a schema object with fields represented with native Hive types 73 | hive_metastore.Schema getSchema() throws(1:HiveServerException ex) 74 | 75 | # Get a schema object with fields represented with Thrift DDL types 76 | hive_metastore.Schema getThriftSchema() throws(1:HiveServerException ex) 77 | 78 | # Get the status information about the Map-Reduce cluster 79 | HiveClusterStatus getClusterStatus() throws(1:HiveServerException ex) 80 | 81 | # Get the queryplan annotated with counter information 82 | queryplan.QueryPlan getQueryPlan() throws(1:HiveServerException ex) 83 | 84 | # clean up last Hive query (releasing locks etc.) 85 | void clean() 86 | } 87 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u3/hive_service.thrift: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/thrift -java 2 | 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | # 20 | # Thrift Service that the hive service is built on 21 | # 22 | 23 | # 24 | # TODO: include/thrift is shared among different components. It 25 | # should not be under metastore. 26 | 27 | include "fb303.thrift" 28 | include "hive_metastore.thrift" 29 | include "queryplan.thrift" 30 | 31 | namespace java org.apache.hadoop.hive.service 32 | namespace cpp Apache.Hadoop.Hive 33 | 34 | // Enumeration of JobTracker.State 35 | enum JobTrackerState { 36 | INITIALIZING = 1, 37 | RUNNING = 2, 38 | } 39 | 40 | // Map-Reduce cluster status information 41 | struct HiveClusterStatus { 42 | 1: i32 taskTrackers, 43 | 2: i32 mapTasks, 44 | 3: i32 reduceTasks, 45 | 4: i32 maxMapTasks, 46 | 5: i32 maxReduceTasks, 47 | 6: JobTrackerState state, 48 | } 49 | 50 | exception HiveServerException { 51 | 1: string message 52 | 2: i32 errorCode 53 | 3: string SQLState 54 | } 55 | 56 | # Interface for Thrift Hive Server 57 | service ThriftHive extends hive_metastore.ThriftHiveMetastore { 58 | # Execute a query. Takes a HiveQL string 59 | void execute(1:string query) throws(1:HiveServerException ex) 60 | 61 | # Fetch one row. This row is the serialized form 62 | # of the result of the query 63 | string fetchOne() throws(1:HiveServerException ex) 64 | 65 | # Fetch a given number of rows or remaining number of 66 | # rows whichever is smaller. 
67 | list fetchN(1:i32 numRows) throws(1:HiveServerException ex) 68 | 69 | # Fetch all rows of the query result 70 | list fetchAll() throws(1:HiveServerException ex) 71 | 72 | # Get a schema object with fields represented with native Hive types 73 | hive_metastore.Schema getSchema() throws(1:HiveServerException ex) 74 | 75 | # Get a schema object with fields represented with Thrift DDL types 76 | hive_metastore.Schema getThriftSchema() throws(1:HiveServerException ex) 77 | 78 | # Get the status information about the Map-Reduce cluster 79 | HiveClusterStatus getClusterStatus() throws(1:HiveServerException ex) 80 | 81 | # Get the queryplan annotated with counter information 82 | queryplan.QueryPlan getQueryPlan() throws(1:HiveServerException ex) 83 | 84 | # clean up last Hive query (releasing locks etc.) 85 | void clean() 86 | } 87 | -------------------------------------------------------------------------------- /test/QueryTest.coffee: -------------------------------------------------------------------------------- 1 | 2 | should = require 'should' 3 | config = require './config' 4 | hive = if process.env.HIVE_COV then require '../lib-cov/hive' else require '../lib/hive' 5 | 6 | client = null 7 | before -> 8 | client = hive.createClient config 9 | after -> 10 | client.end() 11 | 12 | describe 'Query', -> 13 | it 'Prepare', (next) -> 14 | client.execute "CREATE DATABASE IF NOT EXISTS #{config.db}", (err) -> 15 | should.not.exist err 16 | client.execute "USE #{config.db}", (err) -> 17 | should.not.exist err 18 | client.execute """ 19 | CREATE TABLE IF NOT EXISTS #{config.table} ( 20 | a_bigint BIGINT, 21 | an_int INT, 22 | a_date STRING 23 | ) 24 | ROW FORMAT DELIMITED 25 | FIELDS TERMINATED BY ',' 26 | """, (err) -> 27 | should.not.exist err 28 | client.execute """ 29 | LOAD DATA LOCAL INPATH '#{__dirname}/data.csv' OVERWRITE INTO TABLE #{config.table} 30 | """, (err) -> 31 | should.not.exist err 32 | next() 33 | it 'all', (next) -> 34 | count = 0 35 | call_row_first = call_row_last = false 36 | client.query("SELECT * FROM #{config.table}") 37 | .on 'row', (row, index) -> 38 | index.should.eql count 39 | count++ 40 | row.should.be.an.instanceof Array 41 | row.length.should.eql 3 42 | .on 'row-first', (row, index) -> 43 | count.should.eql 0 44 | call_row_first = true 45 | .on 'row-last', (row, index) -> 46 | count.should.eql 54 47 | call_row_last = true 48 | .on 'error', (err) -> 49 | false.should.not.be.ok 50 | .on 'end', -> 51 | count.should.eql 54 52 | call_row_first.should.be.ok 53 | call_row_last.should.be.ok 54 | next() 55 | it 'n', (next) -> 56 | count = 0 57 | client.query("select * from #{config.table}", 10) 58 | .on 'row', (row, index) -> 59 | index.should.eql count 60 | count++ 61 | .on 'error', (err) -> 62 | false.should.not.be.ok 63 | .on 'end', -> 64 | count.should.eql 54 65 | next() 66 | it 'error', (next) -> 67 | error_called = false 68 | client.query("select * from undefined_table", 10) 69 | .on 'row', (row) -> 70 | false.should.not.be.ok 71 | .on 'error', (err) -> 72 | err.should.be.an.instanceof Error 73 | error_called = true 74 | .on 'end', -> 75 | false.should.not.be.ok 76 | .on 'both', (err) -> 77 | err.should.be.an.instanceof Error 78 | error_called.should.be.ok 79 | next() 80 | it 'pause/resume', (next) -> 81 | count = 0 82 | query = client.query("select * from #{config.table}", 10) 83 | .on 'row', (row, index) -> 84 | index.should.eql count 85 | count++ 86 | query.pause() 87 | setTimeout -> 88 | query.resume() 89 | , 10 90 | .on 'error', (err) -> 91 | 
false.should.not.be.ok 92 | .on 'end', -> 93 | count.should.eql 54 94 | next() 95 | it 'header', (next) -> 96 | # Test where hive.cli.print.header impact Thrift 97 | # answer is no 98 | count = 0 99 | client.execute 'set hive.cli.print.header=true', (err) -> 100 | query = client.query("select * from #{config.table}", 10) 101 | .on 'row', (row, index) -> 102 | count++ 103 | .on 'error', (err) -> 104 | false.should.not.be.ok 105 | .on 'end', -> 106 | count.should.eql 54 107 | next() 108 | -------------------------------------------------------------------------------- /src/hive.coffee: -------------------------------------------------------------------------------- 1 | 2 | Stream = require 'stream' 3 | each = require 'each' 4 | 5 | thrift = require 'thrift' 6 | transport = require 'thrift/lib/thrift/transport' 7 | EventEmitter = require('events').EventEmitter 8 | 9 | split = module.exports.split = (hqls) -> 10 | return hqls if Array.isArray hqls 11 | commented = false # Are we in a commented mode 12 | hqls = hqls.split('\n').filter( (line) -> 13 | line = line.trim() 14 | skip = false # Should we skip the current line 15 | if not commented and line.indexOf('/*') isnt -1 16 | commented = '/*' 17 | skip = true 18 | else if not commented and line is '--' 19 | commented = '--' 20 | skip = true 21 | else if commented is '/*' and line.lastIndexOf('*/') isnt -1 and line.lastIndexOf('*/') is (line.length - 2) 22 | commented = false 23 | skip = true 24 | else if commented is '--' and line is '--' 25 | commented = false 26 | skip = true 27 | skip = true if line.indexOf('--') is 0 28 | not commented and not skip 29 | ).join('\n') 30 | hqls = hqls.split ';' 31 | hqls = hqls.map (query) -> query.trim() 32 | hqls = hqls.filter (query) -> query.indexOf('--') isnt 0 and query isnt '' 33 | 34 | module.exports.createClient = (options = {}) -> 35 | options.version ?= '0.7.1-cdh3u3' 36 | options.server ?= '127.0.0.1' 37 | options.port ?= 10000 38 | options.timeout ?= 1000 39 | options.transport ?= transport.TBufferedTransport 40 | connection = thrift.createConnection options.server, options.port, options 41 | client = thrift.createClient require("./#{options.version}/ThriftHive"), connection 42 | # Returned object 43 | connection: connection 44 | client: client 45 | end: connection.end.bind connection 46 | execute: (query, callback) -> 47 | emitter = new EventEmitter 48 | process.nextTick -> 49 | emitter.emit 'before', query 50 | client.execute query, (err) -> 51 | if err 52 | emitter.readable = false 53 | # emit error only if 54 | # - an error callback or 55 | # - no error callback, no both callback, no user callback 56 | lerror = emitter.listeners('error').length 57 | lboth = emitter.listeners('both').length 58 | emitError = lerror or (not lerror and not lboth and not callback ) 59 | emitter.emit 'error', err if emitError 60 | else 61 | emitter.emit 'end', null, query 62 | emitter.emit 'both', err, query 63 | callback err, callback if callback 64 | emitter 65 | query: (query, size) -> 66 | if arguments.length is 2 and typeof size is 'function' 67 | callback = size 68 | size = null 69 | exec = -> 70 | emitter.emit 'before', query 71 | client.execute query, (err) -> 72 | if err 73 | emitter.readable = false 74 | # emit error only if 75 | # - an error callback or 76 | # - no error callback and no both callback 77 | lerror = emitter.listeners('error').length 78 | lboth = emitter.listeners('both').length 79 | emitError = lerror or (not lerror and not lboth) # and not callback if we add callback support 80 | 
emitter.emit 'error', err if emitError 81 | emitter.emit 'both', err, query 82 | return 83 | fetch() 84 | process.nextTick exec if query 85 | buffer = [] 86 | #emitter = new EventEmitter 87 | count = 0 88 | emitter = new Stream 89 | emitter.size = size 90 | emitter.readable = true 91 | emitter.paused = 0 92 | emitter.query = (q) -> 93 | throw new Error 'Query already defined' if query 94 | query = q 95 | exec() 96 | @ 97 | emitter.pause = -> 98 | @paused = 1 99 | emitter.resume = -> 100 | @was = @paused 101 | @paused = 0 102 | fetch() if @was 103 | handle = (err, rows) => 104 | if err 105 | emitter.readable = false 106 | # emit error only if 107 | # - an error callback or 108 | # - no error callback and no both callback 109 | lerror = emitter.listeners('error').length 110 | lboth = emitter.listeners('both').length 111 | emitError = lerror or (not lerror and not lboth) 112 | emitter.emit 'error', err if emitError 113 | emitter.emit 'both', err, query 114 | return 115 | rows = rows.map (row) -> row.split '\t' 116 | for row in rows 117 | emitter.emit 'row-first', row, 0 if count is 0 118 | emitter.emit 'row', row, count++ 119 | if rows.length is emitter.size 120 | fetch() unless emitter.paused 121 | else 122 | emitter.emit 'row-last', row, count - 1 123 | emitter.readable = false 124 | emitter.emit 'end', query 125 | emitter.emit 'both', null, query 126 | fetch = -> 127 | return if emitter.paused or not emitter.readable 128 | if emitter.size 129 | then client.fetchN emitter.size, handle 130 | else client.fetchAll handle 131 | emitter 132 | multi_execute: (hqls, callback) -> 133 | emitter = new EventEmitter 134 | hqls = split(hqls) 135 | each(hqls) 136 | .on 'item', (next, query) => 137 | exec = @execute query, next 138 | exec.on 'before', -> emitter.emit.call emitter, 'before', arguments... 139 | .on 'both', (err) -> 140 | if err 141 | then emitter.emit.call emitter, 'error', arguments... 142 | else emitter.emit.call emitter, 'end', arguments... 143 | emitter.emit.call emitter, 'both', arguments... 144 | callback err if callback 145 | emitter 146 | multi_query: (hqls, size) -> 147 | hqls = split(hqls) 148 | query = @query() 149 | each(hqls) 150 | .on 'item', (next, hql, i) => 151 | unless hqls.length is i + 1 152 | exec = @execute hql#, next 153 | exec.on 'before', -> query.emit.call query, 'before', arguments... 
154 | exec.on 'error', (err) -> 155 | query.readable = false 156 | # emit error only if 157 | # - an error callback or 158 | # - no error callback and no both callback 159 | lerror = query.listeners('error').length 160 | lboth = query.listeners('both').length 161 | emitError = lerror or (not lerror and not lboth) 162 | query.emit 'error', err if emitError 163 | query.emit 'both', err, query 164 | exec.on 'end', -> next() 165 | else 166 | query.query(hql, size) 167 | query 168 | 169 | -------------------------------------------------------------------------------- /lib/0.7.1-cdh3u1/hive_service_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.JobTrackerState = { 9 | 'INITIALIZING' : 1, 10 | 'RUNNING' : 2 11 | }; 12 | var HiveClusterStatus = module.exports.HiveClusterStatus = function(args) { 13 | this.taskTrackers = null; 14 | this.mapTasks = null; 15 | this.reduceTasks = null; 16 | this.maxMapTasks = null; 17 | this.maxReduceTasks = null; 18 | this.state = null; 19 | if (args) { 20 | if (args.taskTrackers !== undefined) { 21 | this.taskTrackers = args.taskTrackers; 22 | } 23 | if (args.mapTasks !== undefined) { 24 | this.mapTasks = args.mapTasks; 25 | } 26 | if (args.reduceTasks !== undefined) { 27 | this.reduceTasks = args.reduceTasks; 28 | } 29 | if (args.maxMapTasks !== undefined) { 30 | this.maxMapTasks = args.maxMapTasks; 31 | } 32 | if (args.maxReduceTasks !== undefined) { 33 | this.maxReduceTasks = args.maxReduceTasks; 34 | } 35 | if (args.state !== undefined) { 36 | this.state = args.state; 37 | } 38 | } 39 | }; 40 | HiveClusterStatus.prototype = {}; 41 | HiveClusterStatus.prototype.read = function(input) { 42 | input.readStructBegin(); 43 | while (true) 44 | { 45 | var ret = input.readFieldBegin(); 46 | var fname = ret.fname; 47 | var ftype = ret.ftype; 48 | var fid = ret.fid; 49 | if (ftype == Thrift.Type.STOP) { 50 | break; 51 | } 52 | switch (fid) 53 | { 54 | case 1: 55 | if (ftype == Thrift.Type.I32) { 56 | this.taskTrackers = input.readI32(); 57 | } else { 58 | input.skip(ftype); 59 | } 60 | break; 61 | case 2: 62 | if (ftype == Thrift.Type.I32) { 63 | this.mapTasks = input.readI32(); 64 | } else { 65 | input.skip(ftype); 66 | } 67 | break; 68 | case 3: 69 | if (ftype == Thrift.Type.I32) { 70 | this.reduceTasks = input.readI32(); 71 | } else { 72 | input.skip(ftype); 73 | } 74 | break; 75 | case 4: 76 | if (ftype == Thrift.Type.I32) { 77 | this.maxMapTasks = input.readI32(); 78 | } else { 79 | input.skip(ftype); 80 | } 81 | break; 82 | case 5: 83 | if (ftype == Thrift.Type.I32) { 84 | this.maxReduceTasks = input.readI32(); 85 | } else { 86 | input.skip(ftype); 87 | } 88 | break; 89 | case 6: 90 | if (ftype == Thrift.Type.I32) { 91 | this.state = input.readI32(); 92 | } else { 93 | input.skip(ftype); 94 | } 95 | break; 96 | default: 97 | input.skip(ftype); 98 | } 99 | input.readFieldEnd(); 100 | } 101 | input.readStructEnd(); 102 | return; 103 | }; 104 | 105 | HiveClusterStatus.prototype.write = function(output) { 106 | output.writeStructBegin('HiveClusterStatus'); 107 | if (this.taskTrackers) { 108 | output.writeFieldBegin('taskTrackers', Thrift.Type.I32, 1); 109 | output.writeI32(this.taskTrackers); 110 | output.writeFieldEnd(); 111 | } 112 | if (this.mapTasks) { 113 | output.writeFieldBegin('mapTasks', 
Thrift.Type.I32, 2); 114 | output.writeI32(this.mapTasks); 115 | output.writeFieldEnd(); 116 | } 117 | if (this.reduceTasks) { 118 | output.writeFieldBegin('reduceTasks', Thrift.Type.I32, 3); 119 | output.writeI32(this.reduceTasks); 120 | output.writeFieldEnd(); 121 | } 122 | if (this.maxMapTasks) { 123 | output.writeFieldBegin('maxMapTasks', Thrift.Type.I32, 4); 124 | output.writeI32(this.maxMapTasks); 125 | output.writeFieldEnd(); 126 | } 127 | if (this.maxReduceTasks) { 128 | output.writeFieldBegin('maxReduceTasks', Thrift.Type.I32, 5); 129 | output.writeI32(this.maxReduceTasks); 130 | output.writeFieldEnd(); 131 | } 132 | if (this.state) { 133 | output.writeFieldBegin('state', Thrift.Type.I32, 6); 134 | output.writeI32(this.state); 135 | output.writeFieldEnd(); 136 | } 137 | output.writeFieldStop(); 138 | output.writeStructEnd(); 139 | return; 140 | }; 141 | 142 | var HiveServerException = module.exports.HiveServerException = function(args) { 143 | Thrift.TException.call(this, "HiveServerException") 144 | this.name = "HiveServerException" 145 | this.message = null; 146 | this.errorCode = null; 147 | this.SQLState = null; 148 | if (args) { 149 | if (args.message !== undefined) { 150 | this.message = args.message; 151 | } 152 | if (args.errorCode !== undefined) { 153 | this.errorCode = args.errorCode; 154 | } 155 | if (args.SQLState !== undefined) { 156 | this.SQLState = args.SQLState; 157 | } 158 | } 159 | }; 160 | Thrift.inherits(HiveServerException, Thrift.TException); 161 | HiveServerException.prototype.name = 'HiveServerException'; 162 | HiveServerException.prototype.read = function(input) { 163 | input.readStructBegin(); 164 | while (true) 165 | { 166 | var ret = input.readFieldBegin(); 167 | var fname = ret.fname; 168 | var ftype = ret.ftype; 169 | var fid = ret.fid; 170 | if (ftype == Thrift.Type.STOP) { 171 | break; 172 | } 173 | switch (fid) 174 | { 175 | case 1: 176 | if (ftype == Thrift.Type.STRING) { 177 | this.message = input.readString(); 178 | } else { 179 | input.skip(ftype); 180 | } 181 | break; 182 | case 2: 183 | if (ftype == Thrift.Type.I32) { 184 | this.errorCode = input.readI32(); 185 | } else { 186 | input.skip(ftype); 187 | } 188 | break; 189 | case 3: 190 | if (ftype == Thrift.Type.STRING) { 191 | this.SQLState = input.readString(); 192 | } else { 193 | input.skip(ftype); 194 | } 195 | break; 196 | default: 197 | input.skip(ftype); 198 | } 199 | input.readFieldEnd(); 200 | } 201 | input.readStructEnd(); 202 | return; 203 | }; 204 | 205 | HiveServerException.prototype.write = function(output) { 206 | output.writeStructBegin('HiveServerException'); 207 | if (this.message) { 208 | output.writeFieldBegin('message', Thrift.Type.STRING, 1); 209 | output.writeString(this.message); 210 | output.writeFieldEnd(); 211 | } 212 | if (this.errorCode) { 213 | output.writeFieldBegin('errorCode', Thrift.Type.I32, 2); 214 | output.writeI32(this.errorCode); 215 | output.writeFieldEnd(); 216 | } 217 | if (this.SQLState) { 218 | output.writeFieldBegin('SQLState', Thrift.Type.STRING, 3); 219 | output.writeString(this.SQLState); 220 | output.writeFieldEnd(); 221 | } 222 | output.writeFieldStop(); 223 | output.writeStructEnd(); 224 | return; 225 | }; 226 | 227 | -------------------------------------------------------------------------------- /lib/0.7.1-cdh3u2/hive_service_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU 
ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.JobTrackerState = { 9 | 'INITIALIZING' : 1, 10 | 'RUNNING' : 2 11 | }; 12 | var HiveClusterStatus = module.exports.HiveClusterStatus = function(args) { 13 | this.taskTrackers = null; 14 | this.mapTasks = null; 15 | this.reduceTasks = null; 16 | this.maxMapTasks = null; 17 | this.maxReduceTasks = null; 18 | this.state = null; 19 | if (args) { 20 | if (args.taskTrackers !== undefined) { 21 | this.taskTrackers = args.taskTrackers; 22 | } 23 | if (args.mapTasks !== undefined) { 24 | this.mapTasks = args.mapTasks; 25 | } 26 | if (args.reduceTasks !== undefined) { 27 | this.reduceTasks = args.reduceTasks; 28 | } 29 | if (args.maxMapTasks !== undefined) { 30 | this.maxMapTasks = args.maxMapTasks; 31 | } 32 | if (args.maxReduceTasks !== undefined) { 33 | this.maxReduceTasks = args.maxReduceTasks; 34 | } 35 | if (args.state !== undefined) { 36 | this.state = args.state; 37 | } 38 | } 39 | }; 40 | HiveClusterStatus.prototype = {}; 41 | HiveClusterStatus.prototype.read = function(input) { 42 | input.readStructBegin(); 43 | while (true) 44 | { 45 | var ret = input.readFieldBegin(); 46 | var fname = ret.fname; 47 | var ftype = ret.ftype; 48 | var fid = ret.fid; 49 | if (ftype == Thrift.Type.STOP) { 50 | break; 51 | } 52 | switch (fid) 53 | { 54 | case 1: 55 | if (ftype == Thrift.Type.I32) { 56 | this.taskTrackers = input.readI32(); 57 | } else { 58 | input.skip(ftype); 59 | } 60 | break; 61 | case 2: 62 | if (ftype == Thrift.Type.I32) { 63 | this.mapTasks = input.readI32(); 64 | } else { 65 | input.skip(ftype); 66 | } 67 | break; 68 | case 3: 69 | if (ftype == Thrift.Type.I32) { 70 | this.reduceTasks = input.readI32(); 71 | } else { 72 | input.skip(ftype); 73 | } 74 | break; 75 | case 4: 76 | if (ftype == Thrift.Type.I32) { 77 | this.maxMapTasks = input.readI32(); 78 | } else { 79 | input.skip(ftype); 80 | } 81 | break; 82 | case 5: 83 | if (ftype == Thrift.Type.I32) { 84 | this.maxReduceTasks = input.readI32(); 85 | } else { 86 | input.skip(ftype); 87 | } 88 | break; 89 | case 6: 90 | if (ftype == Thrift.Type.I32) { 91 | this.state = input.readI32(); 92 | } else { 93 | input.skip(ftype); 94 | } 95 | break; 96 | default: 97 | input.skip(ftype); 98 | } 99 | input.readFieldEnd(); 100 | } 101 | input.readStructEnd(); 102 | return; 103 | }; 104 | 105 | HiveClusterStatus.prototype.write = function(output) { 106 | output.writeStructBegin('HiveClusterStatus'); 107 | if (this.taskTrackers) { 108 | output.writeFieldBegin('taskTrackers', Thrift.Type.I32, 1); 109 | output.writeI32(this.taskTrackers); 110 | output.writeFieldEnd(); 111 | } 112 | if (this.mapTasks) { 113 | output.writeFieldBegin('mapTasks', Thrift.Type.I32, 2); 114 | output.writeI32(this.mapTasks); 115 | output.writeFieldEnd(); 116 | } 117 | if (this.reduceTasks) { 118 | output.writeFieldBegin('reduceTasks', Thrift.Type.I32, 3); 119 | output.writeI32(this.reduceTasks); 120 | output.writeFieldEnd(); 121 | } 122 | if (this.maxMapTasks) { 123 | output.writeFieldBegin('maxMapTasks', Thrift.Type.I32, 4); 124 | output.writeI32(this.maxMapTasks); 125 | output.writeFieldEnd(); 126 | } 127 | if (this.maxReduceTasks) { 128 | output.writeFieldBegin('maxReduceTasks', Thrift.Type.I32, 5); 129 | output.writeI32(this.maxReduceTasks); 130 | output.writeFieldEnd(); 131 | } 132 | if (this.state) { 133 | output.writeFieldBegin('state', Thrift.Type.I32, 6); 134 | output.writeI32(this.state); 135 | output.writeFieldEnd(); 136 | } 137 | output.writeFieldStop(); 
138 | output.writeStructEnd(); 139 | return; 140 | }; 141 | 142 | var HiveServerException = module.exports.HiveServerException = function(args) { 143 | Thrift.TException.call(this, "HiveServerException") 144 | this.name = "HiveServerException" 145 | this.message = null; 146 | this.errorCode = null; 147 | this.SQLState = null; 148 | if (args) { 149 | if (args.message !== undefined) { 150 | this.message = args.message; 151 | } 152 | if (args.errorCode !== undefined) { 153 | this.errorCode = args.errorCode; 154 | } 155 | if (args.SQLState !== undefined) { 156 | this.SQLState = args.SQLState; 157 | } 158 | } 159 | }; 160 | Thrift.inherits(HiveServerException, Thrift.TException); 161 | HiveServerException.prototype.name = 'HiveServerException'; 162 | HiveServerException.prototype.read = function(input) { 163 | input.readStructBegin(); 164 | while (true) 165 | { 166 | var ret = input.readFieldBegin(); 167 | var fname = ret.fname; 168 | var ftype = ret.ftype; 169 | var fid = ret.fid; 170 | if (ftype == Thrift.Type.STOP) { 171 | break; 172 | } 173 | switch (fid) 174 | { 175 | case 1: 176 | if (ftype == Thrift.Type.STRING) { 177 | this.message = input.readString(); 178 | } else { 179 | input.skip(ftype); 180 | } 181 | break; 182 | case 2: 183 | if (ftype == Thrift.Type.I32) { 184 | this.errorCode = input.readI32(); 185 | } else { 186 | input.skip(ftype); 187 | } 188 | break; 189 | case 3: 190 | if (ftype == Thrift.Type.STRING) { 191 | this.SQLState = input.readString(); 192 | } else { 193 | input.skip(ftype); 194 | } 195 | break; 196 | default: 197 | input.skip(ftype); 198 | } 199 | input.readFieldEnd(); 200 | } 201 | input.readStructEnd(); 202 | return; 203 | }; 204 | 205 | HiveServerException.prototype.write = function(output) { 206 | output.writeStructBegin('HiveServerException'); 207 | if (this.message) { 208 | output.writeFieldBegin('message', Thrift.Type.STRING, 1); 209 | output.writeString(this.message); 210 | output.writeFieldEnd(); 211 | } 212 | if (this.errorCode) { 213 | output.writeFieldBegin('errorCode', Thrift.Type.I32, 2); 214 | output.writeI32(this.errorCode); 215 | output.writeFieldEnd(); 216 | } 217 | if (this.SQLState) { 218 | output.writeFieldBegin('SQLState', Thrift.Type.STRING, 3); 219 | output.writeString(this.SQLState); 220 | output.writeFieldEnd(); 221 | } 222 | output.writeFieldStop(); 223 | output.writeStructEnd(); 224 | return; 225 | }; 226 | 227 | -------------------------------------------------------------------------------- /lib/0.7.1-cdh3u3/hive_service_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.JobTrackerState = { 9 | 'INITIALIZING' : 1, 10 | 'RUNNING' : 2 11 | }; 12 | var HiveClusterStatus = module.exports.HiveClusterStatus = function(args) { 13 | this.taskTrackers = null; 14 | this.mapTasks = null; 15 | this.reduceTasks = null; 16 | this.maxMapTasks = null; 17 | this.maxReduceTasks = null; 18 | this.state = null; 19 | if (args) { 20 | if (args.taskTrackers !== undefined) { 21 | this.taskTrackers = args.taskTrackers; 22 | } 23 | if (args.mapTasks !== undefined) { 24 | this.mapTasks = args.mapTasks; 25 | } 26 | if (args.reduceTasks !== undefined) { 27 | this.reduceTasks = args.reduceTasks; 28 | } 29 | if (args.maxMapTasks !== undefined) { 30 | this.maxMapTasks = args.maxMapTasks; 31 | } 32 
| if (args.maxReduceTasks !== undefined) { 33 | this.maxReduceTasks = args.maxReduceTasks; 34 | } 35 | if (args.state !== undefined) { 36 | this.state = args.state; 37 | } 38 | } 39 | }; 40 | HiveClusterStatus.prototype = {}; 41 | HiveClusterStatus.prototype.read = function(input) { 42 | input.readStructBegin(); 43 | while (true) 44 | { 45 | var ret = input.readFieldBegin(); 46 | var fname = ret.fname; 47 | var ftype = ret.ftype; 48 | var fid = ret.fid; 49 | if (ftype == Thrift.Type.STOP) { 50 | break; 51 | } 52 | switch (fid) 53 | { 54 | case 1: 55 | if (ftype == Thrift.Type.I32) { 56 | this.taskTrackers = input.readI32(); 57 | } else { 58 | input.skip(ftype); 59 | } 60 | break; 61 | case 2: 62 | if (ftype == Thrift.Type.I32) { 63 | this.mapTasks = input.readI32(); 64 | } else { 65 | input.skip(ftype); 66 | } 67 | break; 68 | case 3: 69 | if (ftype == Thrift.Type.I32) { 70 | this.reduceTasks = input.readI32(); 71 | } else { 72 | input.skip(ftype); 73 | } 74 | break; 75 | case 4: 76 | if (ftype == Thrift.Type.I32) { 77 | this.maxMapTasks = input.readI32(); 78 | } else { 79 | input.skip(ftype); 80 | } 81 | break; 82 | case 5: 83 | if (ftype == Thrift.Type.I32) { 84 | this.maxReduceTasks = input.readI32(); 85 | } else { 86 | input.skip(ftype); 87 | } 88 | break; 89 | case 6: 90 | if (ftype == Thrift.Type.I32) { 91 | this.state = input.readI32(); 92 | } else { 93 | input.skip(ftype); 94 | } 95 | break; 96 | default: 97 | input.skip(ftype); 98 | } 99 | input.readFieldEnd(); 100 | } 101 | input.readStructEnd(); 102 | return; 103 | }; 104 | 105 | HiveClusterStatus.prototype.write = function(output) { 106 | output.writeStructBegin('HiveClusterStatus'); 107 | if (this.taskTrackers) { 108 | output.writeFieldBegin('taskTrackers', Thrift.Type.I32, 1); 109 | output.writeI32(this.taskTrackers); 110 | output.writeFieldEnd(); 111 | } 112 | if (this.mapTasks) { 113 | output.writeFieldBegin('mapTasks', Thrift.Type.I32, 2); 114 | output.writeI32(this.mapTasks); 115 | output.writeFieldEnd(); 116 | } 117 | if (this.reduceTasks) { 118 | output.writeFieldBegin('reduceTasks', Thrift.Type.I32, 3); 119 | output.writeI32(this.reduceTasks); 120 | output.writeFieldEnd(); 121 | } 122 | if (this.maxMapTasks) { 123 | output.writeFieldBegin('maxMapTasks', Thrift.Type.I32, 4); 124 | output.writeI32(this.maxMapTasks); 125 | output.writeFieldEnd(); 126 | } 127 | if (this.maxReduceTasks) { 128 | output.writeFieldBegin('maxReduceTasks', Thrift.Type.I32, 5); 129 | output.writeI32(this.maxReduceTasks); 130 | output.writeFieldEnd(); 131 | } 132 | if (this.state) { 133 | output.writeFieldBegin('state', Thrift.Type.I32, 6); 134 | output.writeI32(this.state); 135 | output.writeFieldEnd(); 136 | } 137 | output.writeFieldStop(); 138 | output.writeStructEnd(); 139 | return; 140 | }; 141 | 142 | var HiveServerException = module.exports.HiveServerException = function(args) { 143 | Thrift.TException.call(this, "HiveServerException") 144 | this.name = "HiveServerException" 145 | this.message = null; 146 | this.errorCode = null; 147 | this.SQLState = null; 148 | if (args) { 149 | if (args.message !== undefined) { 150 | this.message = args.message; 151 | } 152 | if (args.errorCode !== undefined) { 153 | this.errorCode = args.errorCode; 154 | } 155 | if (args.SQLState !== undefined) { 156 | this.SQLState = args.SQLState; 157 | } 158 | } 159 | }; 160 | Thrift.inherits(HiveServerException, Thrift.TException); 161 | HiveServerException.prototype.name = 'HiveServerException'; 162 | HiveServerException.prototype.read = function(input) { 163 | 
input.readStructBegin(); 164 | while (true) 165 | { 166 | var ret = input.readFieldBegin(); 167 | var fname = ret.fname; 168 | var ftype = ret.ftype; 169 | var fid = ret.fid; 170 | if (ftype == Thrift.Type.STOP) { 171 | break; 172 | } 173 | switch (fid) 174 | { 175 | case 1: 176 | if (ftype == Thrift.Type.STRING) { 177 | this.message = input.readString(); 178 | } else { 179 | input.skip(ftype); 180 | } 181 | break; 182 | case 2: 183 | if (ftype == Thrift.Type.I32) { 184 | this.errorCode = input.readI32(); 185 | } else { 186 | input.skip(ftype); 187 | } 188 | break; 189 | case 3: 190 | if (ftype == Thrift.Type.STRING) { 191 | this.SQLState = input.readString(); 192 | } else { 193 | input.skip(ftype); 194 | } 195 | break; 196 | default: 197 | input.skip(ftype); 198 | } 199 | input.readFieldEnd(); 200 | } 201 | input.readStructEnd(); 202 | return; 203 | }; 204 | 205 | HiveServerException.prototype.write = function(output) { 206 | output.writeStructBegin('HiveServerException'); 207 | if (this.message) { 208 | output.writeFieldBegin('message', Thrift.Type.STRING, 1); 209 | output.writeString(this.message); 210 | output.writeFieldEnd(); 211 | } 212 | if (this.errorCode) { 213 | output.writeFieldBegin('errorCode', Thrift.Type.I32, 2); 214 | output.writeI32(this.errorCode); 215 | output.writeFieldEnd(); 216 | } 217 | if (this.SQLState) { 218 | output.writeFieldBegin('SQLState', Thrift.Type.STRING, 3); 219 | output.writeString(this.SQLState); 220 | output.writeFieldEnd(); 221 | } 222 | output.writeFieldStop(); 223 | output.writeStructEnd(); 224 | return; 225 | }; 226 | 227 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Thrift Hive - Hive client using the Apache Thrift RPC system 2 | 3 | Hive client with the following main features: 4 | - fetch rows with an optional batch size 5 | - implement the Node Readable Stream API (including `pipe`) 6 | - support for multiple Hive versions 7 | - multiple query support through the `multi_execute` and `multi_query` functions 8 | - advanced comment parsing 9 | 10 | The project exports the [Hive API][1] using the [Apache Thrift RPC system][2]. It 11 | supports multiple versions and a readable stream API. 12 | 13 | ## Installation 14 | 15 | ``` 16 | npm install thrift-hive 17 | ``` 18 | 19 | ## Quick example 20 | 21 | ```javascript 22 | var hive = require('thrift-hive'); 23 | // Client connection 24 | var client = hive.createClient({ 25 | version: '0.7.1-cdh3u2', 26 | server: '127.0.0.1', 27 | port: 10000, 28 | timeout: 1000 29 | }); 30 | // Execute call 31 | client.execute('use default', function(err){ 32 | // Query call 33 | client.query('show tables') 34 | .on('row', function(database){ 35 | console.log(database); 36 | }) 37 | .on('error', function(err){ 38 | console.log(err.message); 39 | client.end(); 40 | }) 41 | .on('end', function(){ 42 | client.end(); 43 | }); 44 | }); 45 | ``` 46 | 47 | ## Hive Client 48 | 49 | We've added a function `hive.createClient` to simplify coding. However, you 50 | are free to use the raw Thrift API. The client takes an `options` object as its 51 | argument and exposes `execute` and `query` methods.
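As an illustration, here is a minimal sketch using the optional batch size accepted by `query` (described below). The table name `my_table` is just a placeholder; everything else uses the same settings as the quick example above.

```javascript
var hive = require('thrift-hive');
// Client connection (same settings as the quick example)
var client = hive.createClient({
  version: '0.7.1-cdh3u2',
  server: '127.0.0.1',
  port: 10000,
  timeout: 1000
});
// Fetch the result rows 100 at a time instead of all at once
client.query('SELECT * FROM my_table', 100)
  .on('row', function(row, index){
    // Each row is an array of column values
    console.log(index, row);
  })
  .on('both', function(err){
    // Fired after an error as well as after the last row
    if (err) console.log(err.message);
    client.end();
  });
```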
52 | 53 | Available options 54 | 55 | - `version` 56 | defaults to '0.7.1-cdh3u2' 57 | - `server` 58 | defaults to '127.0.0.1' 59 | - `port` 60 | defaults to 10000 61 | - `timeout` 62 | defaults to 1000 milliseconds 63 | 64 | Available API 65 | 66 | - `client` 67 | A reference to the thrift client returned by `thrift.createClient` 68 | - `connection` 69 | A reference to the thrift connection returned by `thrift.createConnection` 70 | - `end([callback])` 71 | Close the Thrift connection 72 | - `execute(query, [callback])` 73 | Execute a query and, when done, call the provided callback with an optional 74 | error. 75 | - `query(query, [size])` 76 | Execute a query and return its results row by row, each row being an array 77 | of column values. The `size` argument is optional and indicates the number of rows to 78 | return on each fetch. 79 | 80 | ```coffeescript 81 | hive = require 'thrift-hive' 82 | # Client connection 83 | client = hive.createClient 84 | version: '0.7.1-cdh3u2' 85 | server: '127.0.0.1' 86 | port: 10000 87 | timeout: 1000 88 | # Execute 89 | client.execute 'USE default', (err) -> 90 | console.log err.message if err 91 | client.end() 92 | ``` 93 | 94 | ## Hive Query 95 | 96 | The `client.query` function implements the [EventEmitter API][3]. 97 | 98 | The following events are emitted: 99 | 100 | - `row` 101 | Emitted for each row returned by Hive. Receives two arguments: the row 102 | as an array and the row index. 103 | - `row-first` 104 | Emitted after the first row returned by Hive. Receives two arguments: 105 | the row as an array and the row index (always 0). 106 | - `row-last` 107 | Emitted after the last row returned by Hive. Receives two arguments: 108 | the row as an array and the row index. 109 | - `error` 110 | Emitted when the connection fails or when Hive returns an error. 111 | - `end` 112 | Emitted when there are no more rows to retrieve; not emitted if an error 113 | occurred before. 114 | - `both` 115 | Convenience event combining the `error` and `end` events. Emitted when an 116 | error occurred or when there are no more rows to retrieve. Receives the same 117 | arguments as the `error` or `end` event, depending on the outcome of the 118 | operation. 119 | 120 | The `client.query` function returns a Node [readable stream][4]. It is possible to 121 | pipe the data into a [writable stream][5], but it is your responsibility to emit 122 | the `data` event, usually from inside the `row` event handler. 123 | 124 | The following CoffeeScript code is an example of piping the data returned by a query into a [writable stream][5]. 125 | 126 | ```coffeescript 127 | fs = require 'fs' 128 | hive = require 'thrift-hive' 129 | # Client connection 130 | client = hive.createClient 131 | version: '0.7.1-cdh3u2' 132 | server: '127.0.0.1' 133 | port: 10000 134 | timeout: 1000 135 | # Execute query 136 | client.query('show tables') 137 | .on 'row', (database) -> 138 | this.emit 'data', 'Found ' + database + '\n' 139 | .on 'error', (err) -> 140 | client.end() 141 | .on 'end', () -> 142 | client.end() 143 | .pipe( fs.createWriteStream "#{__dirname}/pipe.out" ) 144 | ``` 145 | 146 | ## Native Thrift API 147 | 148 | Here's the same example as the one in the "Quick example" section, but using the 149 | native Thrift API.
150 | 151 | ```javascript 152 | var assert = require('assert'); 153 | var thrift = require('thrift'); 154 | var transport = require('thrift/lib/thrift/transport'); 155 | var ThriftHive = require('../lib/0.7.1-cdh3u2/ThriftHive'); 156 | // Client connection 157 | var options = {transport: transport.TBufferedTransport, timeout: 1000}; 158 | var connection = thrift.createConnection('127.0.0.1', 10000, options); 159 | var client = thrift.createClient(ThriftHive, connection); 160 | // Execute query 161 | client.execute('use default', function(err){ 162 | client.execute('show tables', function(err){ 163 | assert.ifError(err); 164 | client.fetchAll(function(err, databases){ 165 | if(err){ 166 | console.log(err.message); 167 | }else{ 168 | console.log(databases); 169 | } 170 | connection.end(); 171 | }); 172 | }); 173 | }); 174 | ``` 175 | 176 | ## Multi queries 177 | 178 | For convenience, we've added two functions, `multi_execute` and `multi_query`, which 179 | run multiple queries sequentially over the same client connection. They 180 | are identical except for how the last query is handled: 181 | 182 | - `multi_execute` will end with an `execute` call, thus its API is the same 183 | as that of the `execute` function. 184 | - `multi_query` will end with a `query` call, thus its API is the same 185 | as that of the `query` function. 186 | 187 | They accept the same arguments as their counterparts, but the query may be an 188 | array of queries or a string. If it is a string, it will be split into multiple 189 | queries. Note that the parser is fairly light, splitting on ';' and stripping comments, but it 190 | seems to do the job. 191 | 192 | ## Testing 193 | 194 | Run the samples: 195 | 196 | ```bash 197 | node samples/execute.js 198 | node samples/query.js 199 | node samples/style_native.js 200 | node samples/style_sugar.js 201 | ``` 202 | 203 | Run the tests with `expresso`: 204 | 205 | Hive must be started with Thrift support. By default, the tests will connect to 206 | the Hive Thrift server on host `localhost` and port `10000`. Edit the file 207 | "./test/config.json" if you wish to change the connection settings used across 208 | the tests. A database `test_database` will be created if it does not yet exist 209 | and all the tests will run on it.
210 | 211 | ```bash 212 | npm install -g expresso 213 | expresso -s 214 | ``` 215 | 216 | [1]: http://hive.apache.org "Apache Hive" 217 | [2]: http://thrift.apache.org "Apache Thrift" 218 | [3]: http://nodejs.org/docs/v0.6.2/api/events.html#events.EventEmitter "EventEmitter API" 219 | [4]: http://nodejs.org/docs/v0.6.2/api/streams.html#readable_Stream "Readable Stream API" 220 | [5]: http://nodejs.org/docs/v0.6.2/api/streams.html#writable_Stream "Writable Stream API" 221 | -------------------------------------------------------------------------------- /lib/hive.js: -------------------------------------------------------------------------------- 1 | // Generated by CoffeeScript 1.4.0 2 | var EventEmitter, Stream, each, split, thrift, transport, 3 | __slice = [].slice; 4 | 5 | Stream = require('stream'); 6 | 7 | each = require('each'); 8 | 9 | thrift = require('thrift'); 10 | 11 | transport = require('thrift/lib/thrift/transport'); 12 | 13 | EventEmitter = require('events').EventEmitter; 14 | 15 | split = module.exports.split = function(hqls) { 16 | var commented; 17 | if (Array.isArray(hqls)) { 18 | return hqls; 19 | } 20 | commented = false; 21 | hqls = hqls.split('\n').filter(function(line) { 22 | var skip; 23 | line = line.trim(); 24 | skip = false; 25 | if (!commented && line.indexOf('/*') !== -1) { 26 | commented = '/*'; 27 | skip = true; 28 | } else if (!commented && line === '--') { 29 | commented = '--'; 30 | skip = true; 31 | } else if (commented === '/*' && line.lastIndexOf('*/') !== -1 && line.lastIndexOf('*/') === (line.length - 2)) { 32 | commented = false; 33 | skip = true; 34 | } else if (commented === '--' && line === '--') { 35 | commented = false; 36 | skip = true; 37 | } 38 | if (line.indexOf('--') === 0) { 39 | skip = true; 40 | } 41 | return !commented && !skip; 42 | }).join('\n'); 43 | hqls = hqls.split(';'); 44 | hqls = hqls.map(function(query) { 45 | return query.trim(); 46 | }); 47 | return hqls = hqls.filter(function(query) { 48 | return query.indexOf('--') !== 0 && query !== ''; 49 | }); 50 | }; 51 | 52 | module.exports.createClient = function(options) { 53 | var client, connection, _ref, _ref1, _ref2, _ref3, _ref4; 54 | if (options == null) { 55 | options = {}; 56 | } 57 | if ((_ref = options.version) == null) { 58 | options.version = '0.7.1-cdh3u3'; 59 | } 60 | if ((_ref1 = options.server) == null) { 61 | options.server = '127.0.0.1'; 62 | } 63 | if ((_ref2 = options.port) == null) { 64 | options.port = 10000; 65 | } 66 | if ((_ref3 = options.timeout) == null) { 67 | options.timeout = 1000; 68 | } 69 | if ((_ref4 = options.transport) == null) { 70 | options.transport = transport.TBufferedTransport; 71 | } 72 | connection = thrift.createConnection(options.server, options.port, options); 73 | client = thrift.createClient(require("./" + options.version + "/ThriftHive"), connection); 74 | return { 75 | connection: connection, 76 | client: client, 77 | end: connection.end.bind(connection), 78 | execute: function(query, callback) { 79 | var emitter; 80 | emitter = new EventEmitter; 81 | process.nextTick(function() { 82 | emitter.emit('before', query); 83 | return client.execute(query, function(err) { 84 | var emitError, lboth, lerror; 85 | if (err) { 86 | emitter.readable = false; 87 | lerror = emitter.listeners('error').length; 88 | lboth = emitter.listeners('both').length; 89 | emitError = lerror || (!lerror && !lboth && !callback); 90 | if (emitError) { 91 | emitter.emit('error', err); 92 | } 93 | } else { 94 | emitter.emit('end', null, query); 95 | } 96 | 
emitter.emit('both', err, query); 97 | if (callback) { 98 | return callback(err, callback); 99 | } 100 | }); 101 | }); 102 | return emitter; 103 | }, 104 | query: function(query, size) { 105 | var buffer, callback, count, emitter, exec, fetch, handle, 106 | _this = this; 107 | if (arguments.length === 2 && typeof size === 'function') { 108 | callback = size; 109 | size = null; 110 | } 111 | exec = function() { 112 | emitter.emit('before', query); 113 | return client.execute(query, function(err) { 114 | var emitError, lboth, lerror; 115 | if (err) { 116 | emitter.readable = false; 117 | lerror = emitter.listeners('error').length; 118 | lboth = emitter.listeners('both').length; 119 | emitError = lerror || (!lerror && !lboth); 120 | if (emitError) { 121 | emitter.emit('error', err); 122 | } 123 | emitter.emit('both', err, query); 124 | return; 125 | } 126 | return fetch(); 127 | }); 128 | }; 129 | if (query) { 130 | process.nextTick(exec); 131 | } 132 | buffer = []; 133 | count = 0; 134 | emitter = new Stream; 135 | emitter.size = size; 136 | emitter.readable = true; 137 | emitter.paused = 0; 138 | emitter.query = function(q) { 139 | if (query) { 140 | throw new Error('Query already defined'); 141 | } 142 | query = q; 143 | exec(); 144 | return this; 145 | }; 146 | emitter.pause = function() { 147 | return this.paused = 1; 148 | }; 149 | emitter.resume = function() { 150 | this.was = this.paused; 151 | this.paused = 0; 152 | if (this.was) { 153 | return fetch(); 154 | } 155 | }; 156 | handle = function(err, rows) { 157 | var emitError, lboth, lerror, row, _i, _len; 158 | if (err) { 159 | emitter.readable = false; 160 | lerror = emitter.listeners('error').length; 161 | lboth = emitter.listeners('both').length; 162 | emitError = lerror || (!lerror && !lboth); 163 | if (emitError) { 164 | emitter.emit('error', err); 165 | } 166 | emitter.emit('both', err, query); 167 | return; 168 | } 169 | rows = rows.map(function(row) { 170 | return row.split('\t'); 171 | }); 172 | for (_i = 0, _len = rows.length; _i < _len; _i++) { 173 | row = rows[_i]; 174 | if (count === 0) { 175 | emitter.emit('row-first', row, 0); 176 | } 177 | emitter.emit('row', row, count++); 178 | } 179 | if (rows.length === emitter.size) { 180 | if (!emitter.paused) { 181 | return fetch(); 182 | } 183 | } else { 184 | emitter.emit('row-last', row, count - 1); 185 | emitter.readable = false; 186 | emitter.emit('end', query); 187 | return emitter.emit('both', null, query); 188 | } 189 | }; 190 | fetch = function() { 191 | if (emitter.paused || !emitter.readable) { 192 | return; 193 | } 194 | if (emitter.size) { 195 | return client.fetchN(emitter.size, handle); 196 | } else { 197 | return client.fetchAll(handle); 198 | } 199 | }; 200 | return emitter; 201 | }, 202 | multi_execute: function(hqls, callback) { 203 | var emitter, 204 | _this = this; 205 | emitter = new EventEmitter; 206 | hqls = split(hqls); 207 | each(hqls).on('item', function(next, query) { 208 | var exec; 209 | exec = _this.execute(query, next); 210 | return exec.on('before', function() { 211 | var _ref5; 212 | return (_ref5 = emitter.emit).call.apply(_ref5, [emitter, 'before'].concat(__slice.call(arguments))); 213 | }); 214 | }).on('both', function(err) { 215 | var _ref5, _ref6, _ref7; 216 | if (err) { 217 | (_ref5 = emitter.emit).call.apply(_ref5, [emitter, 'error'].concat(__slice.call(arguments))); 218 | } else { 219 | (_ref6 = emitter.emit).call.apply(_ref6, [emitter, 'end'].concat(__slice.call(arguments))); 220 | } 221 | (_ref7 = emitter.emit).call.apply(_ref7, 
[emitter, 'both'].concat(__slice.call(arguments))); 222 | if (callback) { 223 | return callback(err); 224 | } 225 | }); 226 | return emitter; 227 | }, 228 | multi_query: function(hqls, size) { 229 | var query, 230 | _this = this; 231 | hqls = split(hqls); 232 | query = this.query(); 233 | each(hqls).on('item', function(next, hql, i) { 234 | var exec; 235 | if (hqls.length !== i + 1) { 236 | exec = _this.execute(hql); 237 | exec.on('before', function() { 238 | var _ref5; 239 | return (_ref5 = query.emit).call.apply(_ref5, [query, 'before'].concat(__slice.call(arguments))); 240 | }); 241 | exec.on('error', function(err) { 242 | var emitError, lboth, lerror; 243 | query.readable = false; 244 | lerror = query.listeners('error').length; 245 | lboth = query.listeners('both').length; 246 | emitError = lerror || (!lerror && !lboth); 247 | if (emitError) { 248 | query.emit('error', err); 249 | } 250 | return query.emit('both', err, query); 251 | }); 252 | return exec.on('end', function() { 253 | return next(); 254 | }); 255 | } else { 256 | return query.query(hql, size); 257 | } 258 | }); 259 | return query; 260 | } 261 | }; 262 | }; 263 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u1/hive_metastore.thrift: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/thrift -java 2 | # 3 | # Thrift Service that the MetaStore is built on 4 | # 5 | 6 | include "fb303.thrift" 7 | 8 | namespace java org.apache.hadoop.hive.metastore.api 9 | namespace php metastore 10 | namespace cpp Apache.Hadoop.Hive 11 | 12 | const string DDL_TIME = "transient_lastDdlTime" 13 | 14 | struct Version { 15 | 1: string version, 16 | 2: string comments 17 | } 18 | 19 | struct FieldSchema { 20 | 1: string name, // name of the field 21 | 2: string type, // type of the field. 
primitive types defined above, specify list, map for lists & maps 22 | 3: string comment 23 | } 24 | 25 | struct Type { 26 | 1: string name, // one of the types in PrimitiveTypes or CollectionTypes or User defined types 27 | 2: optional string type1, // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE) 28 | 3: optional string type2, // val type if the name is 'map' (MAP_TYPE) 29 | 4: optional list fields // if the name is one of the user defined types 30 | } 31 | 32 | enum HiveObjectType { 33 | GLOBAL = 1, 34 | DATABASE = 2, 35 | TABLE = 3, 36 | PARTITION = 4, 37 | COLUMN = 5, 38 | } 39 | 40 | enum PrincipalType { 41 | USER = 1, 42 | ROLE = 2, 43 | GROUP = 3, 44 | } 45 | 46 | struct HiveObjectRef{ 47 | 1: HiveObjectType objectType, 48 | 2: string dbName, 49 | 3: string objectName, 50 | 4: list partValues, 51 | 5: string columnName, 52 | } 53 | 54 | struct PrivilegeGrantInfo { 55 | 1: string privilege, 56 | 2: i32 createTime, 57 | 3: string grantor, 58 | 4: PrincipalType grantorType, 59 | 5: bool grantOption, 60 | } 61 | 62 | struct HiveObjectPrivilege { 63 | 1: HiveObjectRef hiveObject, 64 | 2: string principalName, 65 | 3: PrincipalType principalType, 66 | 4: PrivilegeGrantInfo grantInfo, 67 | } 68 | 69 | struct PrivilegeBag { 70 | 1: list privileges, 71 | } 72 | 73 | struct PrincipalPrivilegeSet { 74 | 1: map> userPrivileges, // user name -> privilege grant info 75 | 2: map> groupPrivileges, // group name -> privilege grant info 76 | 3: map> rolePrivileges, //role name -> privilege grant info 77 | } 78 | 79 | struct Role { 80 | 1: string roleName, 81 | 2: i32 createTime, 82 | 3: string ownerName, 83 | } 84 | 85 | // namespace for tables 86 | struct Database { 87 | 1: string name, 88 | 2: string description, 89 | 3: string locationUri, 90 | 4: map parameters, // properties associated with the database 91 | 5: optional PrincipalPrivilegeSet privileges 92 | } 93 | 94 | // This object holds the information needed by SerDes 95 | struct SerDeInfo { 96 | 1: string name, // name of the serde, table name by default 97 | 2: string serializationLib, // usually the class that implements the extractor & loader 98 | 3: map parameters // initialization parameters 99 | } 100 | 101 | // sort order of a column (column name along with asc(1)/desc(0)) 102 | struct Order { 103 | 1: string col, // sort column name 104 | 2: i32 order // asc(1) or desc(0) 105 | } 106 | 107 | // this object holds all the information about physical storage of the data belonging to a table 108 | struct StorageDescriptor { 109 | 1: list cols, // required (refer to types defined above) 110 | 2: string location, // defaults to //tablename 111 | 3: string inputFormat, // SequenceFileInputFormat (binary) or TextInputFormat` or custom format 112 | 4: string outputFormat, // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format 113 | 5: bool compressed, // compressed or not 114 | 6: i32 numBuckets, // this must be specified if there are any dimension columns 115 | 7: SerDeInfo serdeInfo, // serialization and deserialization information 116 | 8: list bucketCols, // reducer grouping columns and clustering columns and bucketing columns` 117 | 9: list sortCols, // sort order of the data in each bucket 118 | 10: map parameters // any user supplied key value hash 119 | } 120 | 121 | // table information 122 | struct Table { 123 | 1: string tableName, // name of the table 124 | 2: string dbName, // database name ('default') 125 | 3: string owner, // owner of this table 126 | 4: i32 
createTime, // creation time of the table 127 | 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on) 128 | 6: i32 retention, // retention time 129 | 7: StorageDescriptor sd, // storage descriptor of the table 130 | 8: list partitionKeys, // partition keys of the table. only primitive types are supported 131 | 9: map parameters, // to store comments or any other user level parameters 132 | 10: string viewOriginalText, // original view text, null for non-view 133 | 11: string viewExpandedText, // expanded view text, null for non-view 134 | 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE 135 | 13: optional PrincipalPrivilegeSet privileges, 136 | } 137 | 138 | struct Partition { 139 | 1: list values // string value is converted to appropriate partition key type 140 | 2: string dbName, 141 | 3: string tableName, 142 | 4: i32 createTime, 143 | 5: i32 lastAccessTime, 144 | 6: StorageDescriptor sd, 145 | 7: map parameters, 146 | 8: optional PrincipalPrivilegeSet privileges 147 | } 148 | 149 | struct Index { 150 | 1: string indexName, // unique with in the whole database namespace 151 | 2: string indexHandlerClass, // reserved 152 | 3: string dbName, 153 | 4: string origTableName, 154 | 5: i32 createTime, 155 | 6: i32 lastAccessTime, 156 | 7: string indexTableName, 157 | 8: StorageDescriptor sd, 158 | 9: map parameters, 159 | 10: bool deferredRebuild 160 | } 161 | 162 | // schema of the table/query results etc. 163 | struct Schema { 164 | // column names, types, comments 165 | 1: list fieldSchemas, // delimiters etc 166 | 2: map properties 167 | } 168 | 169 | exception MetaException { 170 | 1: string message 171 | } 172 | 173 | exception UnknownTableException { 174 | 1: string message 175 | } 176 | 177 | exception UnknownDBException { 178 | 1: string message 179 | } 180 | 181 | exception AlreadyExistsException { 182 | 1: string message 183 | } 184 | 185 | exception InvalidObjectException { 186 | 1: string message 187 | } 188 | 189 | exception NoSuchObjectException { 190 | 1: string message 191 | } 192 | 193 | exception IndexAlreadyExistsException { 194 | 1: string message 195 | } 196 | 197 | exception InvalidOperationException { 198 | 1: string message 199 | } 200 | 201 | exception ConfigValSecurityException { 202 | 1: string message 203 | } 204 | 205 | /** 206 | * This interface is live. 
207 | */ 208 | service ThriftHiveMetastore extends fb303.FacebookService 209 | { 210 | void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) 211 | Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2) 212 | void drop_database(1:string name, 2:bool deleteData) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) 213 | list get_databases(1:string pattern) throws(1:MetaException o1) 214 | list get_all_databases() throws(1:MetaException o1) 215 | void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2) 216 | 217 | // returns the type with given name (make seperate calls for the dependent types if needed) 218 | Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2) 219 | bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) 220 | bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2) 221 | map get_type_all(1:string name) 222 | throws(1:MetaException o2) 223 | 224 | // Gets a list of FieldSchemas describing the columns of a particular table 225 | list get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3), 226 | 227 | // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table 228 | list get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) 229 | 230 | // create a Hive table. Following fields must be set 231 | // tableName 232 | // database (only 'default' for now until Hive QL supports databases) 233 | // owner (not needed, but good to have for tracking purposes) 234 | // sd.cols (list of field schemas) 235 | // sd.inputFormat (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat) 236 | // sd.outputFormat (SequenceFileInputFormat (binary) or TextInputFormat) 237 | // sd.serdeInfo.serializationLib (SerDe class name eg org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe 238 | // * See notes on DDL_TIME 239 | void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) 240 | // drops the table and all the partitions associated with it if the table has partitions 241 | // delete data (including partitions) if deleteData is set to true 242 | void drop_table(1:string dbname, 2:string name, 3:bool deleteData) 243 | throws(1:NoSuchObjectException o1, 2:MetaException o3) 244 | list get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1) 245 | list get_all_tables(1: string db_name) throws (1: MetaException o1) 246 | 247 | Table get_table(1:string dbname, 2:string tbl_name) 248 | throws (1:MetaException o1, 2:NoSuchObjectException o2) 249 | // alter table applies to only future partitions not for existing partitions 250 | // * See notes on DDL_TIME 251 | void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl) 252 | throws (1:InvalidOperationException o1, 2:MetaException o2) 253 | 254 | // the following applies to only tables that have partitions 255 | // * See notes on DDL_TIME 256 | Partition add_partition(1:Partition new_part) 257 | throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 258 | 
Partition append_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) 259 | throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 260 | Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name) 261 | throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 262 | bool drop_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:bool deleteData) 263 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 264 | bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData) 265 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 266 | Partition get_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) 267 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 268 | 269 | Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 270 | 4: string user_name, 5: list group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2) 271 | 272 | Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name) 273 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 274 | 275 | // returns all the partitions for this table in reverse chronological order. 276 | // If max parts is given then it will return only that many. 277 | list get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) 278 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 279 | list get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, 280 | 4: string user_name, 5: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) 281 | 282 | list get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) 283 | throws(1:MetaException o2) 284 | 285 | // get_partition*_ps methods allow filtering by a partial partition specification, 286 | // as needed for dynamic partitions. The values that are not restricted should 287 | // be empty strings. Nulls were considered (instead of "") but caused errors in 288 | // generated Python code. The size of part_vals may be smaller than the 289 | // number of partition columns - the unspecified values are considered the same 290 | // as "". 291 | list get_partitions_ps(1:string db_name 2:string tbl_name 292 | 3:list part_vals, 4:i16 max_parts=-1) 293 | throws(1:MetaException o1) 294 | list get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1, 295 | 5: string user_name, 6: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) 296 | 297 | list get_partition_names_ps(1:string db_name, 298 | 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) 299 | throws(1:MetaException o1) 300 | 301 | // get the partitions matching the given partition filter 302 | list get_partitions_by_filter(1:string db_name 2:string tbl_name 303 | 3:string filter, 4:i16 max_parts=-1) 304 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 305 | 306 | // changes the partition to the new partition object. partition is identified from the part values 307 | // in the new_part 308 | // * See notes on DDL_TIME 309 | void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part) 310 | throws(1:InvalidOperationException o1, 2:MetaException o2) 311 | 312 | // gets the value of the configuration key in the metastore server. returns 313 | // defaultValue if the key does not exist. 
if the configuration key does not 314 | // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is 315 | // thrown. 316 | string get_config_value(1:string name, 2:string defaultValue) 317 | throws(1:ConfigValSecurityException o1) 318 | 319 | // converts a partition name into a partition values array 320 | list partition_name_to_vals(1: string part_name) 321 | throws(1: MetaException o1) 322 | // converts a partition name into a partition specification (a mapping from 323 | // the partition cols to the values) 324 | map partition_name_to_spec(1: string part_name) 325 | throws(1: MetaException o1) 326 | 327 | //index 328 | Index add_index(1:Index new_index, 2: Table index_table) 329 | throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 330 | void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx) 331 | throws (1:InvalidOperationException o1, 2:MetaException o2) 332 | bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData) 333 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 334 | Index get_index_by_name(1:string db_name 2:string tbl_name, 3:string index_name) 335 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 336 | 337 | list get_indexes(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1) 338 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 339 | list get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1) 340 | throws(1:MetaException o2) 341 | 342 | //authorization privileges 343 | 344 | bool create_role(1:Role role) throws(1:MetaException o1) 345 | bool drop_role(1:string role_name) throws(1:MetaException o1) 346 | list get_role_names() throws(1:MetaException o1) 347 | bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type, 348 | 4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1) 349 | bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type) 350 | throws(1:MetaException o1) 351 | list list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1) 352 | 353 | PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name, 354 | 3: list group_names) throws(1:MetaException o1) 355 | list list_privileges(1:string principal_name, 2:PrincipalType principal_type, 356 | 3: HiveObjectRef hiveObject) throws(1:MetaException o1) 357 | 358 | bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) 359 | bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) 360 | 361 | //Authentication (delegation token) interfaces 362 | 363 | // get metastore server delegation token for use from the map/reduce tasks to authenticate 364 | // to metastore server 365 | string get_delegation_token(1:string renewer_kerberos_principal_name) throws (1:MetaException o1) 366 | 367 | // get metastore server delegation token for use from the map/reduce tasks to authenticate 368 | // to metastore server - this method takes an extra token signature string which is just 369 | // an identifier to associate with the token - this will be used by the token selector code 370 | // to pick the right token given the associated identifier. 
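  // Editor's note (illustrative call sequence, principal and signature values assumed):
  // a job client would typically call
  //   get_delegation_token_with_signature("hive/metastore@EXAMPLE.COM", "job-42")
  // ship the returned opaque token string to its tasks, have long-running jobs call
  // renew_delegation_token(token) before the token expires, and finally call
  // cancel_delegation_token(token) once the job completes; see the declarations below.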
371 | string get_delegation_token_with_signature(1:string renewer_kerberos_principal_name, 372 | 2:string token_signature) throws (1:MetaException o1) 373 | 374 | // method to renew delegation token obtained from metastore server 375 | i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1) 376 | 377 | // method to cancel delegation token obtained from metastore server 378 | void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1) 379 | } 380 | 381 | // * Note about the DDL_TIME: When creating or altering a table or a partition, 382 | // if the DDL_TIME is not set, the current time will be used. 383 | 384 | // For storing info about archived partitions in parameters 385 | 386 | // Whether the partition is archived 387 | const string IS_ARCHIVED = "is_archived", 388 | // The original location of the partition, before archiving. After archiving, 389 | // this directory will contain the archive. When the partition 390 | // is dropped, this directory will be deleted 391 | const string ORIGINAL_LOCATION = "original_location", 392 | 393 | // these should be needed only for backward compatibility with filestore 394 | const string META_TABLE_COLUMNS = "columns", 395 | const string META_TABLE_COLUMN_TYPES = "columns.types", 396 | const string BUCKET_FIELD_NAME = "bucket_field_name", 397 | const string BUCKET_COUNT = "bucket_count", 398 | const string FIELD_TO_DIMENSION = "field_to_dimension", 399 | const string META_TABLE_NAME = "name", 400 | const string META_TABLE_DB = "db", 401 | const string META_TABLE_LOCATION = "location", 402 | const string META_TABLE_SERDE = "serde", 403 | const string META_TABLE_PARTITION_COLUMNS = "partition_columns", 404 | const string FILE_INPUT_FORMAT = "file.inputformat", 405 | const string FILE_OUTPUT_FORMAT = "file.outputformat", 406 | const string META_TABLE_STORAGE = "storage_handler", 407 | 408 | 409 | 410 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u2/hive_metastore.thrift: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/thrift -java 2 | # 3 | # Thrift Service that the MetaStore is built on 4 | # 5 | 6 | include "fb303.thrift" 7 | 8 | namespace java org.apache.hadoop.hive.metastore.api 9 | namespace php metastore 10 | namespace cpp Apache.Hadoop.Hive 11 | 12 | const string DDL_TIME = "transient_lastDdlTime" 13 | 14 | struct Version { 15 | 1: string version, 16 | 2: string comments 17 | } 18 | 19 | struct FieldSchema { 20 | 1: string name, // name of the field 21 | 2: string type, // type of the field. 
primitive types defined above, specify list, map for lists & maps 22 | 3: string comment 23 | } 24 | 25 | struct Type { 26 | 1: string name, // one of the types in PrimitiveTypes or CollectionTypes or User defined types 27 | 2: optional string type1, // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE) 28 | 3: optional string type2, // val type if the name is 'map' (MAP_TYPE) 29 | 4: optional list fields // if the name is one of the user defined types 30 | } 31 | 32 | enum HiveObjectType { 33 | GLOBAL = 1, 34 | DATABASE = 2, 35 | TABLE = 3, 36 | PARTITION = 4, 37 | COLUMN = 5, 38 | } 39 | 40 | enum PrincipalType { 41 | USER = 1, 42 | ROLE = 2, 43 | GROUP = 3, 44 | } 45 | 46 | struct HiveObjectRef{ 47 | 1: HiveObjectType objectType, 48 | 2: string dbName, 49 | 3: string objectName, 50 | 4: list partValues, 51 | 5: string columnName, 52 | } 53 | 54 | struct PrivilegeGrantInfo { 55 | 1: string privilege, 56 | 2: i32 createTime, 57 | 3: string grantor, 58 | 4: PrincipalType grantorType, 59 | 5: bool grantOption, 60 | } 61 | 62 | struct HiveObjectPrivilege { 63 | 1: HiveObjectRef hiveObject, 64 | 2: string principalName, 65 | 3: PrincipalType principalType, 66 | 4: PrivilegeGrantInfo grantInfo, 67 | } 68 | 69 | struct PrivilegeBag { 70 | 1: list privileges, 71 | } 72 | 73 | struct PrincipalPrivilegeSet { 74 | 1: map> userPrivileges, // user name -> privilege grant info 75 | 2: map> groupPrivileges, // group name -> privilege grant info 76 | 3: map> rolePrivileges, //role name -> privilege grant info 77 | } 78 | 79 | struct Role { 80 | 1: string roleName, 81 | 2: i32 createTime, 82 | 3: string ownerName, 83 | } 84 | 85 | // namespace for tables 86 | struct Database { 87 | 1: string name, 88 | 2: string description, 89 | 3: string locationUri, 90 | 4: map parameters, // properties associated with the database 91 | 5: optional PrincipalPrivilegeSet privileges 92 | } 93 | 94 | // This object holds the information needed by SerDes 95 | struct SerDeInfo { 96 | 1: string name, // name of the serde, table name by default 97 | 2: string serializationLib, // usually the class that implements the extractor & loader 98 | 3: map parameters // initialization parameters 99 | } 100 | 101 | // sort order of a column (column name along with asc(1)/desc(0)) 102 | struct Order { 103 | 1: string col, // sort column name 104 | 2: i32 order // asc(1) or desc(0) 105 | } 106 | 107 | // this object holds all the information about physical storage of the data belonging to a table 108 | struct StorageDescriptor { 109 | 1: list cols, // required (refer to types defined above) 110 | 2: string location, // defaults to //tablename 111 | 3: string inputFormat, // SequenceFileInputFormat (binary) or TextInputFormat` or custom format 112 | 4: string outputFormat, // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format 113 | 5: bool compressed, // compressed or not 114 | 6: i32 numBuckets, // this must be specified if there are any dimension columns 115 | 7: SerDeInfo serdeInfo, // serialization and deserialization information 116 | 8: list bucketCols, // reducer grouping columns and clustering columns and bucketing columns` 117 | 9: list sortCols, // sort order of the data in each bucket 118 | 10: map parameters // any user supplied key value hash 119 | } 120 | 121 | // table information 122 | struct Table { 123 | 1: string tableName, // name of the table 124 | 2: string dbName, // database name ('default') 125 | 3: string owner, // owner of this table 126 | 4: i32 
createTime, // creation time of the table 127 | 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on) 128 | 6: i32 retention, // retention time 129 | 7: StorageDescriptor sd, // storage descriptor of the table 130 | 8: list partitionKeys, // partition keys of the table. only primitive types are supported 131 | 9: map parameters, // to store comments or any other user level parameters 132 | 10: string viewOriginalText, // original view text, null for non-view 133 | 11: string viewExpandedText, // expanded view text, null for non-view 134 | 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE 135 | 13: optional PrincipalPrivilegeSet privileges, 136 | } 137 | 138 | struct Partition { 139 | 1: list values // string value is converted to appropriate partition key type 140 | 2: string dbName, 141 | 3: string tableName, 142 | 4: i32 createTime, 143 | 5: i32 lastAccessTime, 144 | 6: StorageDescriptor sd, 145 | 7: map parameters, 146 | 8: optional PrincipalPrivilegeSet privileges 147 | } 148 | 149 | struct Index { 150 | 1: string indexName, // unique with in the whole database namespace 151 | 2: string indexHandlerClass, // reserved 152 | 3: string dbName, 153 | 4: string origTableName, 154 | 5: i32 createTime, 155 | 6: i32 lastAccessTime, 156 | 7: string indexTableName, 157 | 8: StorageDescriptor sd, 158 | 9: map parameters, 159 | 10: bool deferredRebuild 160 | } 161 | 162 | // schema of the table/query results etc. 163 | struct Schema { 164 | // column names, types, comments 165 | 1: list fieldSchemas, // delimiters etc 166 | 2: map properties 167 | } 168 | 169 | exception MetaException { 170 | 1: string message 171 | } 172 | 173 | exception UnknownTableException { 174 | 1: string message 175 | } 176 | 177 | exception UnknownDBException { 178 | 1: string message 179 | } 180 | 181 | exception AlreadyExistsException { 182 | 1: string message 183 | } 184 | 185 | exception InvalidObjectException { 186 | 1: string message 187 | } 188 | 189 | exception NoSuchObjectException { 190 | 1: string message 191 | } 192 | 193 | exception IndexAlreadyExistsException { 194 | 1: string message 195 | } 196 | 197 | exception InvalidOperationException { 198 | 1: string message 199 | } 200 | 201 | exception ConfigValSecurityException { 202 | 1: string message 203 | } 204 | 205 | /** 206 | * This interface is live. 
207 | */ 208 | service ThriftHiveMetastore extends fb303.FacebookService 209 | { 210 | void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) 211 | Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2) 212 | void drop_database(1:string name, 2:bool deleteData) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) 213 | list get_databases(1:string pattern) throws(1:MetaException o1) 214 | list get_all_databases() throws(1:MetaException o1) 215 | void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2) 216 | 217 | // returns the type with given name (make seperate calls for the dependent types if needed) 218 | Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2) 219 | bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) 220 | bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2) 221 | map get_type_all(1:string name) 222 | throws(1:MetaException o2) 223 | 224 | // Gets a list of FieldSchemas describing the columns of a particular table 225 | list get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3), 226 | 227 | // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table 228 | list get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) 229 | 230 | // create a Hive table. Following fields must be set 231 | // tableName 232 | // database (only 'default' for now until Hive QL supports databases) 233 | // owner (not needed, but good to have for tracking purposes) 234 | // sd.cols (list of field schemas) 235 | // sd.inputFormat (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat) 236 | // sd.outputFormat (SequenceFileInputFormat (binary) or TextInputFormat) 237 | // sd.serdeInfo.serializationLib (SerDe class name eg org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe 238 | // * See notes on DDL_TIME 239 | void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) 240 | // drops the table and all the partitions associated with it if the table has partitions 241 | // delete data (including partitions) if deleteData is set to true 242 | void drop_table(1:string dbname, 2:string name, 3:bool deleteData) 243 | throws(1:NoSuchObjectException o1, 2:MetaException o3) 244 | list get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1) 245 | list get_all_tables(1: string db_name) throws (1: MetaException o1) 246 | 247 | Table get_table(1:string dbname, 2:string tbl_name) 248 | throws (1:MetaException o1, 2:NoSuchObjectException o2) 249 | // alter table applies to only future partitions not for existing partitions 250 | // * See notes on DDL_TIME 251 | void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl) 252 | throws (1:InvalidOperationException o1, 2:MetaException o2) 253 | 254 | // the following applies to only tables that have partitions 255 | // * See notes on DDL_TIME 256 | Partition add_partition(1:Partition new_part) 257 | throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 258 | 
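  // Editor's note on the get_partition*_ps methods declared further below (illustrative,
  // partition column names assumed): for a table partitioned by (ds string, hr string),
  // part_vals = ["2012-03-01", ""] or simply ["2012-03-01"] selects every hr partition
  // under ds=2012-03-01, while part_vals = ["", "12"] selects hr=12 across all ds values,
  // since unrestricted positions are given as empty strings and trailing positions may be
  // omitted entirely.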
Partition append_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) 259 | throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 260 | Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name) 261 | throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 262 | bool drop_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:bool deleteData) 263 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 264 | bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData) 265 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 266 | Partition get_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) 267 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 268 | 269 | Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 270 | 4: string user_name, 5: list group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2) 271 | 272 | Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name) 273 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 274 | 275 | // returns all the partitions for this table in reverse chronological order. 276 | // If max parts is given then it will return only that many. 277 | list get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) 278 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 279 | list get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, 280 | 4: string user_name, 5: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) 281 | 282 | list get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) 283 | throws(1:MetaException o2) 284 | 285 | // get_partition*_ps methods allow filtering by a partial partition specification, 286 | // as needed for dynamic partitions. The values that are not restricted should 287 | // be empty strings. Nulls were considered (instead of "") but caused errors in 288 | // generated Python code. The size of part_vals may be smaller than the 289 | // number of partition columns - the unspecified values are considered the same 290 | // as "". 291 | list get_partitions_ps(1:string db_name 2:string tbl_name 292 | 3:list part_vals, 4:i16 max_parts=-1) 293 | throws(1:MetaException o1) 294 | list get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1, 295 | 5: string user_name, 6: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) 296 | 297 | list get_partition_names_ps(1:string db_name, 298 | 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) 299 | throws(1:MetaException o1) 300 | 301 | // get the partitions matching the given partition filter 302 | list get_partitions_by_filter(1:string db_name 2:string tbl_name 303 | 3:string filter, 4:i16 max_parts=-1) 304 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 305 | 306 | // changes the partition to the new partition object. partition is identified from the part values 307 | // in the new_part 308 | // * See notes on DDL_TIME 309 | void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part) 310 | throws(1:InvalidOperationException o1, 2:MetaException o2) 311 | 312 | // gets the value of the configuration key in the metastore server. returns 313 | // defaultValue if the key does not exist. 
if the configuration key does not 314 | // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is 315 | // thrown. 316 | string get_config_value(1:string name, 2:string defaultValue) 317 | throws(1:ConfigValSecurityException o1) 318 | 319 | // converts a partition name into a partition values array 320 | list partition_name_to_vals(1: string part_name) 321 | throws(1: MetaException o1) 322 | // converts a partition name into a partition specification (a mapping from 323 | // the partition cols to the values) 324 | map partition_name_to_spec(1: string part_name) 325 | throws(1: MetaException o1) 326 | 327 | //index 328 | Index add_index(1:Index new_index, 2: Table index_table) 329 | throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 330 | void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx) 331 | throws (1:InvalidOperationException o1, 2:MetaException o2) 332 | bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData) 333 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 334 | Index get_index_by_name(1:string db_name 2:string tbl_name, 3:string index_name) 335 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 336 | 337 | list get_indexes(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1) 338 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 339 | list get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1) 340 | throws(1:MetaException o2) 341 | 342 | //authorization privileges 343 | 344 | bool create_role(1:Role role) throws(1:MetaException o1) 345 | bool drop_role(1:string role_name) throws(1:MetaException o1) 346 | list get_role_names() throws(1:MetaException o1) 347 | bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type, 348 | 4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1) 349 | bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type) 350 | throws(1:MetaException o1) 351 | list list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1) 352 | 353 | PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name, 354 | 3: list group_names) throws(1:MetaException o1) 355 | list list_privileges(1:string principal_name, 2:PrincipalType principal_type, 356 | 3: HiveObjectRef hiveObject) throws(1:MetaException o1) 357 | 358 | bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) 359 | bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) 360 | 361 | //Authentication (delegation token) interfaces 362 | 363 | // get metastore server delegation token for use from the map/reduce tasks to authenticate 364 | // to metastore server 365 | string get_delegation_token(1:string renewer_kerberos_principal_name) throws (1:MetaException o1) 366 | 367 | // get metastore server delegation token for use from the map/reduce tasks to authenticate 368 | // to metastore server - this method takes an extra token signature string which is just 369 | // an identifier to associate with the token - this will be used by the token selector code 370 | // to pick the right token given the associated identifier. 
371 | string get_delegation_token_with_signature(1:string renewer_kerberos_principal_name, 372 | 2:string token_signature) throws (1:MetaException o1) 373 | 374 | // method to renew delegation token obtained from metastore server 375 | i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1) 376 | 377 | // method to cancel delegation token obtained from metastore server 378 | void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1) 379 | } 380 | 381 | // * Note about the DDL_TIME: When creating or altering a table or a partition, 382 | // if the DDL_TIME is not set, the current time will be used. 383 | 384 | // For storing info about archived partitions in parameters 385 | 386 | // Whether the partition is archived 387 | const string IS_ARCHIVED = "is_archived", 388 | // The original location of the partition, before archiving. After archiving, 389 | // this directory will contain the archive. When the partition 390 | // is dropped, this directory will be deleted 391 | const string ORIGINAL_LOCATION = "original_location", 392 | 393 | // these should be needed only for backward compatibility with filestore 394 | const string META_TABLE_COLUMNS = "columns", 395 | const string META_TABLE_COLUMN_TYPES = "columns.types", 396 | const string BUCKET_FIELD_NAME = "bucket_field_name", 397 | const string BUCKET_COUNT = "bucket_count", 398 | const string FIELD_TO_DIMENSION = "field_to_dimension", 399 | const string META_TABLE_NAME = "name", 400 | const string META_TABLE_DB = "db", 401 | const string META_TABLE_LOCATION = "location", 402 | const string META_TABLE_SERDE = "serde", 403 | const string META_TABLE_PARTITION_COLUMNS = "partition_columns", 404 | const string FILE_INPUT_FORMAT = "file.inputformat", 405 | const string FILE_OUTPUT_FORMAT = "file.outputformat", 406 | const string META_TABLE_STORAGE = "storage_handler", 407 | 408 | 409 | 410 | -------------------------------------------------------------------------------- /src/0.7.1-cdh3u3/hive_metastore.thrift: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/thrift -java 2 | # 3 | # Thrift Service that the MetaStore is built on 4 | # 5 | 6 | include "fb303.thrift" 7 | 8 | namespace java org.apache.hadoop.hive.metastore.api 9 | namespace php metastore 10 | namespace cpp Apache.Hadoop.Hive 11 | 12 | const string DDL_TIME = "transient_lastDdlTime" 13 | 14 | struct Version { 15 | 1: string version, 16 | 2: string comments 17 | } 18 | 19 | struct FieldSchema { 20 | 1: string name, // name of the field 21 | 2: string type, // type of the field. 
primitive types defined above, specify list, map for lists & maps 22 | 3: string comment 23 | } 24 | 25 | struct Type { 26 | 1: string name, // one of the types in PrimitiveTypes or CollectionTypes or User defined types 27 | 2: optional string type1, // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE) 28 | 3: optional string type2, // val type if the name is 'map' (MAP_TYPE) 29 | 4: optional list fields // if the name is one of the user defined types 30 | } 31 | 32 | enum HiveObjectType { 33 | GLOBAL = 1, 34 | DATABASE = 2, 35 | TABLE = 3, 36 | PARTITION = 4, 37 | COLUMN = 5, 38 | } 39 | 40 | enum PrincipalType { 41 | USER = 1, 42 | ROLE = 2, 43 | GROUP = 3, 44 | } 45 | 46 | struct HiveObjectRef{ 47 | 1: HiveObjectType objectType, 48 | 2: string dbName, 49 | 3: string objectName, 50 | 4: list partValues, 51 | 5: string columnName, 52 | } 53 | 54 | struct PrivilegeGrantInfo { 55 | 1: string privilege, 56 | 2: i32 createTime, 57 | 3: string grantor, 58 | 4: PrincipalType grantorType, 59 | 5: bool grantOption, 60 | } 61 | 62 | struct HiveObjectPrivilege { 63 | 1: HiveObjectRef hiveObject, 64 | 2: string principalName, 65 | 3: PrincipalType principalType, 66 | 4: PrivilegeGrantInfo grantInfo, 67 | } 68 | 69 | struct PrivilegeBag { 70 | 1: list privileges, 71 | } 72 | 73 | struct PrincipalPrivilegeSet { 74 | 1: map> userPrivileges, // user name -> privilege grant info 75 | 2: map> groupPrivileges, // group name -> privilege grant info 76 | 3: map> rolePrivileges, //role name -> privilege grant info 77 | } 78 | 79 | struct Role { 80 | 1: string roleName, 81 | 2: i32 createTime, 82 | 3: string ownerName, 83 | } 84 | 85 | // namespace for tables 86 | struct Database { 87 | 1: string name, 88 | 2: string description, 89 | 3: string locationUri, 90 | 4: map parameters, // properties associated with the database 91 | 5: optional PrincipalPrivilegeSet privileges 92 | } 93 | 94 | // This object holds the information needed by SerDes 95 | struct SerDeInfo { 96 | 1: string name, // name of the serde, table name by default 97 | 2: string serializationLib, // usually the class that implements the extractor & loader 98 | 3: map parameters // initialization parameters 99 | } 100 | 101 | // sort order of a column (column name along with asc(1)/desc(0)) 102 | struct Order { 103 | 1: string col, // sort column name 104 | 2: i32 order // asc(1) or desc(0) 105 | } 106 | 107 | // this object holds all the information about physical storage of the data belonging to a table 108 | struct StorageDescriptor { 109 | 1: list cols, // required (refer to types defined above) 110 | 2: string location, // defaults to //tablename 111 | 3: string inputFormat, // SequenceFileInputFormat (binary) or TextInputFormat` or custom format 112 | 4: string outputFormat, // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format 113 | 5: bool compressed, // compressed or not 114 | 6: i32 numBuckets, // this must be specified if there are any dimension columns 115 | 7: SerDeInfo serdeInfo, // serialization and deserialization information 116 | 8: list bucketCols, // reducer grouping columns and clustering columns and bucketing columns` 117 | 9: list sortCols, // sort order of the data in each bucket 118 | 10: map parameters // any user supplied key value hash 119 | } 120 | 121 | // table information 122 | struct Table { 123 | 1: string tableName, // name of the table 124 | 2: string dbName, // database name ('default') 125 | 3: string owner, // owner of this table 126 | 4: i32 
createTime, // creation time of the table 127 | 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on) 128 | 6: i32 retention, // retention time 129 | 7: StorageDescriptor sd, // storage descriptor of the table 130 | 8: list partitionKeys, // partition keys of the table. only primitive types are supported 131 | 9: map parameters, // to store comments or any other user level parameters 132 | 10: string viewOriginalText, // original view text, null for non-view 133 | 11: string viewExpandedText, // expanded view text, null for non-view 134 | 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE 135 | 13: optional PrincipalPrivilegeSet privileges, 136 | } 137 | 138 | struct Partition { 139 | 1: list values // string value is converted to appropriate partition key type 140 | 2: string dbName, 141 | 3: string tableName, 142 | 4: i32 createTime, 143 | 5: i32 lastAccessTime, 144 | 6: StorageDescriptor sd, 145 | 7: map parameters, 146 | 8: optional PrincipalPrivilegeSet privileges 147 | } 148 | 149 | struct Index { 150 | 1: string indexName, // unique with in the whole database namespace 151 | 2: string indexHandlerClass, // reserved 152 | 3: string dbName, 153 | 4: string origTableName, 154 | 5: i32 createTime, 155 | 6: i32 lastAccessTime, 156 | 7: string indexTableName, 157 | 8: StorageDescriptor sd, 158 | 9: map parameters, 159 | 10: bool deferredRebuild 160 | } 161 | 162 | // schema of the table/query results etc. 163 | struct Schema { 164 | // column names, types, comments 165 | 1: list fieldSchemas, // delimiters etc 166 | 2: map properties 167 | } 168 | 169 | exception MetaException { 170 | 1: string message 171 | } 172 | 173 | exception UnknownTableException { 174 | 1: string message 175 | } 176 | 177 | exception UnknownDBException { 178 | 1: string message 179 | } 180 | 181 | exception AlreadyExistsException { 182 | 1: string message 183 | } 184 | 185 | exception InvalidObjectException { 186 | 1: string message 187 | } 188 | 189 | exception NoSuchObjectException { 190 | 1: string message 191 | } 192 | 193 | exception IndexAlreadyExistsException { 194 | 1: string message 195 | } 196 | 197 | exception InvalidOperationException { 198 | 1: string message 199 | } 200 | 201 | exception ConfigValSecurityException { 202 | 1: string message 203 | } 204 | 205 | /** 206 | * This interface is live. 
207 | */ 208 | service ThriftHiveMetastore extends fb303.FacebookService 209 | { 210 | void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) 211 | Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2) 212 | void drop_database(1:string name, 2:bool deleteData) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) 213 | list get_databases(1:string pattern) throws(1:MetaException o1) 214 | list get_all_databases() throws(1:MetaException o1) 215 | void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2) 216 | 217 | // returns the type with given name (make seperate calls for the dependent types if needed) 218 | Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2) 219 | bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) 220 | bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2) 221 | map get_type_all(1:string name) 222 | throws(1:MetaException o2) 223 | 224 | // Gets a list of FieldSchemas describing the columns of a particular table 225 | list get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3), 226 | 227 | // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table 228 | list get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) 229 | 230 | // create a Hive table. Following fields must be set 231 | // tableName 232 | // database (only 'default' for now until Hive QL supports databases) 233 | // owner (not needed, but good to have for tracking purposes) 234 | // sd.cols (list of field schemas) 235 | // sd.inputFormat (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat) 236 | // sd.outputFormat (SequenceFileInputFormat (binary) or TextInputFormat) 237 | // sd.serdeInfo.serializationLib (SerDe class name eg org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe 238 | // * See notes on DDL_TIME 239 | void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) 240 | // drops the table and all the partitions associated with it if the table has partitions 241 | // delete data (including partitions) if deleteData is set to true 242 | void drop_table(1:string dbname, 2:string name, 3:bool deleteData) 243 | throws(1:NoSuchObjectException o1, 2:MetaException o3) 244 | list get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1) 245 | list get_all_tables(1: string db_name) throws (1: MetaException o1) 246 | 247 | Table get_table(1:string dbname, 2:string tbl_name) 248 | throws (1:MetaException o1, 2:NoSuchObjectException o2) 249 | // alter table applies to only future partitions not for existing partitions 250 | // * See notes on DDL_TIME 251 | void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl) 252 | throws (1:InvalidOperationException o1, 2:MetaException o2) 253 | 254 | // the following applies to only tables that have partitions 255 | // * See notes on DDL_TIME 256 | Partition add_partition(1:Partition new_part) 257 | throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 258 | 
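  // Editor's note on partition_name_to_vals / partition_name_to_spec declared further
  // below (illustrative, partition column names assumed): a partition name such as
  // "ds=2012-03-01/hr=12" converts to the values array ["2012-03-01", "12"] and to the
  // specification map { "ds": "2012-03-01", "hr": "12" }.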
Partition append_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) 259 | throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 260 | Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name) 261 | throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 262 | bool drop_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:bool deleteData) 263 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 264 | bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData) 265 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 266 | Partition get_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) 267 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 268 | 269 | Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 270 | 4: string user_name, 5: list group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2) 271 | 272 | Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name) 273 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 274 | 275 | // returns all the partitions for this table in reverse chronological order. 276 | // If max parts is given then it will return only that many. 277 | list get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) 278 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 279 | list get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, 280 | 4: string user_name, 5: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) 281 | 282 | list get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) 283 | throws(1:MetaException o2) 284 | 285 | // get_partition*_ps methods allow filtering by a partial partition specification, 286 | // as needed for dynamic partitions. The values that are not restricted should 287 | // be empty strings. Nulls were considered (instead of "") but caused errors in 288 | // generated Python code. The size of part_vals may be smaller than the 289 | // number of partition columns - the unspecified values are considered the same 290 | // as "". 291 | list get_partitions_ps(1:string db_name 2:string tbl_name 292 | 3:list part_vals, 4:i16 max_parts=-1) 293 | throws(1:MetaException o1) 294 | list get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1, 295 | 5: string user_name, 6: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) 296 | 297 | list get_partition_names_ps(1:string db_name, 298 | 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) 299 | throws(1:MetaException o1) 300 | 301 | // get the partitions matching the given partition filter 302 | list get_partitions_by_filter(1:string db_name 2:string tbl_name 303 | 3:string filter, 4:i16 max_parts=-1) 304 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 305 | 306 | // changes the partition to the new partition object. partition is identified from the part values 307 | // in the new_part 308 | // * See notes on DDL_TIME 309 | void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part) 310 | throws(1:InvalidOperationException o1, 2:MetaException o2) 311 | 312 | // gets the value of the configuration key in the metastore server. returns 313 | // defaultValue if the key does not exist. 
if the configuration key does not 314 | // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is 315 | // thrown. 316 | string get_config_value(1:string name, 2:string defaultValue) 317 | throws(1:ConfigValSecurityException o1) 318 | 319 | // converts a partition name into a partition values array 320 | list partition_name_to_vals(1: string part_name) 321 | throws(1: MetaException o1) 322 | // converts a partition name into a partition specification (a mapping from 323 | // the partition cols to the values) 324 | map partition_name_to_spec(1: string part_name) 325 | throws(1: MetaException o1) 326 | 327 | //index 328 | Index add_index(1:Index new_index, 2: Table index_table) 329 | throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) 330 | void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx) 331 | throws (1:InvalidOperationException o1, 2:MetaException o2) 332 | bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData) 333 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 334 | Index get_index_by_name(1:string db_name 2:string tbl_name, 3:string index_name) 335 | throws(1:MetaException o1, 2:NoSuchObjectException o2) 336 | 337 | list get_indexes(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1) 338 | throws(1:NoSuchObjectException o1, 2:MetaException o2) 339 | list get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1) 340 | throws(1:MetaException o2) 341 | 342 | //authorization privileges 343 | 344 | bool create_role(1:Role role) throws(1:MetaException o1) 345 | bool drop_role(1:string role_name) throws(1:MetaException o1) 346 | list get_role_names() throws(1:MetaException o1) 347 | bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type, 348 | 4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1) 349 | bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type) 350 | throws(1:MetaException o1) 351 | list list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1) 352 | 353 | PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name, 354 | 3: list group_names) throws(1:MetaException o1) 355 | list list_privileges(1:string principal_name, 2:PrincipalType principal_type, 356 | 3: HiveObjectRef hiveObject) throws(1:MetaException o1) 357 | 358 | bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) 359 | bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) 360 | 361 | //Authentication (delegation token) interfaces 362 | 363 | // get metastore server delegation token for use from the map/reduce tasks to authenticate 364 | // to metastore server 365 | string get_delegation_token(1:string renewer_kerberos_principal_name) throws (1:MetaException o1) 366 | 367 | // get metastore server delegation token for use from the map/reduce tasks to authenticate 368 | // to metastore server - this method takes an extra token signature string which is just 369 | // an identifier to associate with the token - this will be used by the token selector code 370 | // to pick the right token given the associated identifier. 
371 | string get_delegation_token_with_signature(1:string renewer_kerberos_principal_name, 372 | 2:string token_signature) throws (1:MetaException o1) 373 | 374 | // method to renew delegation token obtained from metastore server 375 | i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1) 376 | 377 | // method to cancel delegation token obtained from metastore server 378 | void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1) 379 | } 380 | 381 | // * Note about the DDL_TIME: When creating or altering a table or a partition, 382 | // if the DDL_TIME is not set, the current time will be used. 383 | 384 | // For storing info about archived partitions in parameters 385 | 386 | // Whether the partition is archived 387 | const string IS_ARCHIVED = "is_archived", 388 | // The original location of the partition, before archiving. After archiving, 389 | // this directory will contain the archive. When the partition 390 | // is dropped, this directory will be deleted 391 | const string ORIGINAL_LOCATION = "original_location", 392 | 393 | // these should be needed only for backward compatibility with filestore 394 | const string META_TABLE_COLUMNS = "columns", 395 | const string META_TABLE_COLUMN_TYPES = "columns.types", 396 | const string BUCKET_FIELD_NAME = "bucket_field_name", 397 | const string BUCKET_COUNT = "bucket_count", 398 | const string FIELD_TO_DIMENSION = "field_to_dimension", 399 | const string META_TABLE_NAME = "name", 400 | const string META_TABLE_DB = "db", 401 | const string META_TABLE_LOCATION = "location", 402 | const string META_TABLE_SERDE = "serde", 403 | const string META_TABLE_PARTITION_COLUMNS = "partition_columns", 404 | const string FILE_INPUT_FORMAT = "file.inputformat", 405 | const string FILE_OUTPUT_FORMAT = "file.outputformat", 406 | const string META_TABLE_STORAGE = "storage_handler", 407 | 408 | 409 | 410 | -------------------------------------------------------------------------------- /lib/0.7.1-cdh3u1/queryplan_types.js: -------------------------------------------------------------------------------- 1 | // 2 | // Autogenerated by Thrift Compiler (0.7.0) 3 | // 4 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | // 6 | var Thrift = require('thrift').Thrift; 7 | var ttypes = module.exports = {}; 8 | ttypes.AdjacencyType = { 9 | 'CONJUNCTIVE' : 0, 10 | 'DISJUNCTIVE' : 1 11 | }; 12 | ttypes.NodeType = { 13 | 'OPERATOR' : 0, 14 | 'STAGE' : 1 15 | }; 16 | ttypes.OperatorType = { 17 | 'JOIN' : 0, 18 | 'MAPJOIN' : 1, 19 | 'EXTRACT' : 2, 20 | 'FILTER' : 3, 21 | 'FORWARD' : 4, 22 | 'GROUPBY' : 5, 23 | 'LIMIT' : 6, 24 | 'SCRIPT' : 7, 25 | 'SELECT' : 8, 26 | 'TABLESCAN' : 9, 27 | 'FILESINK' : 10, 28 | 'REDUCESINK' : 11, 29 | 'UNION' : 12, 30 | 'UDTF' : 13, 31 | 'LATERALVIEWJOIN' : 14, 32 | 'LATERALVIEWFORWARD' : 15, 33 | 'HASHTABLESINK' : 16, 34 | 'HASHTABLEDUMMY' : 17 35 | }; 36 | ttypes.TaskType = { 37 | 'MAP' : 0, 38 | 'REDUCE' : 1, 39 | 'OTHER' : 2 40 | }; 41 | ttypes.StageType = { 42 | 'CONDITIONAL' : 0, 43 | 'COPY' : 1, 44 | 'DDL' : 2, 45 | 'MAPRED' : 3, 46 | 'EXPLAIN' : 4, 47 | 'FETCH' : 5, 48 | 'FUNC' : 6, 49 | 'MAPREDLOCAL' : 7, 50 | 'MOVE' : 8, 51 | 'STATS' : 9 52 | }; 53 | var Adjacency = module.exports.Adjacency = function(args) { 54 | this.node = null; 55 | this.children = null; 56 | this.adjacencyType = null; 57 | if (args) { 58 | if (args.node !== undefined) { 59 | this.node = args.node; 60 | } 61 | if (args.children !== undefined) { 62 | this.children = args.children; 63 | } 64 | if 
(args.adjacencyType !== undefined) { 65 | this.adjacencyType = args.adjacencyType; 66 | } 67 | } 68 | }; 69 | Adjacency.prototype = {}; 70 | Adjacency.prototype.read = function(input) { 71 | input.readStructBegin(); 72 | while (true) 73 | { 74 | var ret = input.readFieldBegin(); 75 | var fname = ret.fname; 76 | var ftype = ret.ftype; 77 | var fid = ret.fid; 78 | if (ftype == Thrift.Type.STOP) { 79 | break; 80 | } 81 | switch (fid) 82 | { 83 | case 1: 84 | if (ftype == Thrift.Type.STRING) { 85 | this.node = input.readString(); 86 | } else { 87 | input.skip(ftype); 88 | } 89 | break; 90 | case 2: 91 | if (ftype == Thrift.Type.LIST) { 92 | var _size0 = 0; 93 | var _rtmp34; 94 | this.children = []; 95 | var _etype3 = 0; 96 | _rtmp34 = input.readListBegin(); 97 | _etype3 = _rtmp34.etype; 98 | _size0 = _rtmp34.size; 99 | for (var _i5 = 0; _i5 < _size0; ++_i5) 100 | { 101 | var elem6 = null; 102 | elem6 = input.readString(); 103 | this.children.push(elem6); 104 | } 105 | input.readListEnd(); 106 | } else { 107 | input.skip(ftype); 108 | } 109 | break; 110 | case 3: 111 | if (ftype == Thrift.Type.I32) { 112 | this.adjacencyType = input.readI32(); 113 | } else { 114 | input.skip(ftype); 115 | } 116 | break; 117 | default: 118 | input.skip(ftype); 119 | } 120 | input.readFieldEnd(); 121 | } 122 | input.readStructEnd(); 123 | return; 124 | }; 125 | 126 | Adjacency.prototype.write = function(output) { 127 | output.writeStructBegin('Adjacency'); 128 | if (this.node) { 129 | output.writeFieldBegin('node', Thrift.Type.STRING, 1); 130 | output.writeString(this.node); 131 | output.writeFieldEnd(); 132 | } 133 | if (this.children) { 134 | output.writeFieldBegin('children', Thrift.Type.LIST, 2); 135 | output.writeListBegin(Thrift.Type.STRING, this.children.length); 136 | for (var iter7 in this.children) 137 | { 138 | if (this.children.hasOwnProperty(iter7)) 139 | { 140 | iter7 = this.children[iter7]; 141 | output.writeString(iter7); 142 | } 143 | } 144 | output.writeListEnd(); 145 | output.writeFieldEnd(); 146 | } 147 | if (this.adjacencyType) { 148 | output.writeFieldBegin('adjacencyType', Thrift.Type.I32, 3); 149 | output.writeI32(this.adjacencyType); 150 | output.writeFieldEnd(); 151 | } 152 | output.writeFieldStop(); 153 | output.writeStructEnd(); 154 | return; 155 | }; 156 | 157 | var Graph = module.exports.Graph = function(args) { 158 | this.nodeType = null; 159 | this.roots = null; 160 | this.adjacencyList = null; 161 | if (args) { 162 | if (args.nodeType !== undefined) { 163 | this.nodeType = args.nodeType; 164 | } 165 | if (args.roots !== undefined) { 166 | this.roots = args.roots; 167 | } 168 | if (args.adjacencyList !== undefined) { 169 | this.adjacencyList = args.adjacencyList; 170 | } 171 | } 172 | }; 173 | Graph.prototype = {}; 174 | Graph.prototype.read = function(input) { 175 | input.readStructBegin(); 176 | while (true) 177 | { 178 | var ret = input.readFieldBegin(); 179 | var fname = ret.fname; 180 | var ftype = ret.ftype; 181 | var fid = ret.fid; 182 | if (ftype == Thrift.Type.STOP) { 183 | break; 184 | } 185 | switch (fid) 186 | { 187 | case 1: 188 | if (ftype == Thrift.Type.I32) { 189 | this.nodeType = input.readI32(); 190 | } else { 191 | input.skip(ftype); 192 | } 193 | break; 194 | case 2: 195 | if (ftype == Thrift.Type.LIST) { 196 | var _size8 = 0; 197 | var _rtmp312; 198 | this.roots = []; 199 | var _etype11 = 0; 200 | _rtmp312 = input.readListBegin(); 201 | _etype11 = _rtmp312.etype; 202 | _size8 = _rtmp312.size; 203 | for (var _i13 = 0; _i13 < _size8; ++_i13) 204 | { 205 | var 
elem14 = null; 206 | elem14 = input.readString(); 207 | this.roots.push(elem14); 208 | } 209 | input.readListEnd(); 210 | } else { 211 | input.skip(ftype); 212 | } 213 | break; 214 | case 3: 215 | if (ftype == Thrift.Type.LIST) { 216 | var _size15 = 0; 217 | var _rtmp319; 218 | this.adjacencyList = []; 219 | var _etype18 = 0; 220 | _rtmp319 = input.readListBegin(); 221 | _etype18 = _rtmp319.etype; 222 | _size15 = _rtmp319.size; 223 | for (var _i20 = 0; _i20 < _size15; ++_i20) 224 | { 225 | var elem21 = null; 226 | elem21 = new ttypes.Adjacency(); 227 | elem21.read(input); 228 | this.adjacencyList.push(elem21); 229 | } 230 | input.readListEnd(); 231 | } else { 232 | input.skip(ftype); 233 | } 234 | break; 235 | default: 236 | input.skip(ftype); 237 | } 238 | input.readFieldEnd(); 239 | } 240 | input.readStructEnd(); 241 | return; 242 | }; 243 | 244 | Graph.prototype.write = function(output) { 245 | output.writeStructBegin('Graph'); 246 | if (this.nodeType) { 247 | output.writeFieldBegin('nodeType', Thrift.Type.I32, 1); 248 | output.writeI32(this.nodeType); 249 | output.writeFieldEnd(); 250 | } 251 | if (this.roots) { 252 | output.writeFieldBegin('roots', Thrift.Type.LIST, 2); 253 | output.writeListBegin(Thrift.Type.STRING, this.roots.length); 254 | for (var iter22 in this.roots) 255 | { 256 | if (this.roots.hasOwnProperty(iter22)) 257 | { 258 | iter22 = this.roots[iter22]; 259 | output.writeString(iter22); 260 | } 261 | } 262 | output.writeListEnd(); 263 | output.writeFieldEnd(); 264 | } 265 | if (this.adjacencyList) { 266 | output.writeFieldBegin('adjacencyList', Thrift.Type.LIST, 3); 267 | output.writeListBegin(Thrift.Type.STRUCT, this.adjacencyList.length); 268 | for (var iter23 in this.adjacencyList) 269 | { 270 | if (this.adjacencyList.hasOwnProperty(iter23)) 271 | { 272 | iter23 = this.adjacencyList[iter23]; 273 | iter23.write(output); 274 | } 275 | } 276 | output.writeListEnd(); 277 | output.writeFieldEnd(); 278 | } 279 | output.writeFieldStop(); 280 | output.writeStructEnd(); 281 | return; 282 | }; 283 | 284 | var Operator = module.exports.Operator = function(args) { 285 | this.operatorId = null; 286 | this.operatorType = null; 287 | this.operatorAttributes = null; 288 | this.operatorCounters = null; 289 | this.done = null; 290 | this.started = null; 291 | if (args) { 292 | if (args.operatorId !== undefined) { 293 | this.operatorId = args.operatorId; 294 | } 295 | if (args.operatorType !== undefined) { 296 | this.operatorType = args.operatorType; 297 | } 298 | if (args.operatorAttributes !== undefined) { 299 | this.operatorAttributes = args.operatorAttributes; 300 | } 301 | if (args.operatorCounters !== undefined) { 302 | this.operatorCounters = args.operatorCounters; 303 | } 304 | if (args.done !== undefined) { 305 | this.done = args.done; 306 | } 307 | if (args.started !== undefined) { 308 | this.started = args.started; 309 | } 310 | } 311 | }; 312 | Operator.prototype = {}; 313 | Operator.prototype.read = function(input) { 314 | input.readStructBegin(); 315 | while (true) 316 | { 317 | var ret = input.readFieldBegin(); 318 | var fname = ret.fname; 319 | var ftype = ret.ftype; 320 | var fid = ret.fid; 321 | if (ftype == Thrift.Type.STOP) { 322 | break; 323 | } 324 | switch (fid) 325 | { 326 | case 1: 327 | if (ftype == Thrift.Type.STRING) { 328 | this.operatorId = input.readString(); 329 | } else { 330 | input.skip(ftype); 331 | } 332 | break; 333 | case 2: 334 | if (ftype == Thrift.Type.I32) { 335 | this.operatorType = input.readI32(); 336 | } else { 337 | input.skip(ftype); 338 
| } 339 | break; 340 | case 3: 341 | if (ftype == Thrift.Type.MAP) { 342 | var _size24 = 0; 343 | var _rtmp328; 344 | this.operatorAttributes = {}; 345 | var _ktype25 = 0; 346 | var _vtype26 = 0; 347 | _rtmp328 = input.readMapBegin(); 348 | _ktype25 = _rtmp328.ktype; 349 | _vtype26 = _rtmp328.vtype; 350 | _size24 = _rtmp328.size; 351 | for (var _i29 = 0; _i29 < _size24; ++_i29) 352 | { 353 | var key30 = null; 354 | var val31 = null; 355 | key30 = input.readString(); 356 | val31 = input.readString(); 357 | this.operatorAttributes[key30] = val31; 358 | } 359 | input.readMapEnd(); 360 | } else { 361 | input.skip(ftype); 362 | } 363 | break; 364 | case 4: 365 | if (ftype == Thrift.Type.MAP) { 366 | var _size32 = 0; 367 | var _rtmp336; 368 | this.operatorCounters = {}; 369 | var _ktype33 = 0; 370 | var _vtype34 = 0; 371 | _rtmp336 = input.readMapBegin(); 372 | _ktype33 = _rtmp336.ktype; 373 | _vtype34 = _rtmp336.vtype; 374 | _size32 = _rtmp336.size; 375 | for (var _i37 = 0; _i37 < _size32; ++_i37) 376 | { 377 | var key38 = null; 378 | var val39 = null; 379 | key38 = input.readString(); 380 | val39 = input.readI64(); 381 | this.operatorCounters[key38] = val39; 382 | } 383 | input.readMapEnd(); 384 | } else { 385 | input.skip(ftype); 386 | } 387 | break; 388 | case 5: 389 | if (ftype == Thrift.Type.BOOL) { 390 | this.done = input.readBool(); 391 | } else { 392 | input.skip(ftype); 393 | } 394 | break; 395 | case 6: 396 | if (ftype == Thrift.Type.BOOL) { 397 | this.started = input.readBool(); 398 | } else { 399 | input.skip(ftype); 400 | } 401 | break; 402 | default: 403 | input.skip(ftype); 404 | } 405 | input.readFieldEnd(); 406 | } 407 | input.readStructEnd(); 408 | return; 409 | }; 410 | 411 | Operator.prototype.write = function(output) { 412 | output.writeStructBegin('Operator'); 413 | if (this.operatorId) { 414 | output.writeFieldBegin('operatorId', Thrift.Type.STRING, 1); 415 | output.writeString(this.operatorId); 416 | output.writeFieldEnd(); 417 | } 418 | if (this.operatorType) { 419 | output.writeFieldBegin('operatorType', Thrift.Type.I32, 2); 420 | output.writeI32(this.operatorType); 421 | output.writeFieldEnd(); 422 | } 423 | if (this.operatorAttributes) { 424 | output.writeFieldBegin('operatorAttributes', Thrift.Type.MAP, 3); 425 | output.writeMapBegin(Thrift.Type.STRING, Thrift.Type.STRING, Thrift.objectLength(this.operatorAttributes)); 426 | for (var kiter40 in this.operatorAttributes) 427 | { 428 | if (this.operatorAttributes.hasOwnProperty(kiter40)) 429 | { 430 | var viter41 = this.operatorAttributes[kiter40]; 431 | output.writeString(kiter40); 432 | output.writeString(viter41); 433 | } 434 | } 435 | output.writeMapEnd(); 436 | output.writeFieldEnd(); 437 | } 438 | if (this.operatorCounters) { 439 | output.writeFieldBegin('operatorCounters', Thrift.Type.MAP, 4); 440 | output.writeMapBegin(Thrift.Type.STRING, Thrift.Type.I64, Thrift.objectLength(this.operatorCounters)); 441 | for (var kiter42 in this.operatorCounters) 442 | { 443 | if (this.operatorCounters.hasOwnProperty(kiter42)) 444 | { 445 | var viter43 = this.operatorCounters[kiter42]; 446 | output.writeString(kiter42); 447 | output.writeI64(viter43); 448 | } 449 | } 450 | output.writeMapEnd(); 451 | output.writeFieldEnd(); 452 | } 453 | if (this.done) { 454 | output.writeFieldBegin('done', Thrift.Type.BOOL, 5); 455 | output.writeBool(this.done); 456 | output.writeFieldEnd(); 457 | } 458 | if (this.started) { 459 | output.writeFieldBegin('started', Thrift.Type.BOOL, 6); 460 | output.writeBool(this.started); 461 | 
output.writeFieldEnd(); 462 | } 463 | output.writeFieldStop(); 464 | output.writeStructEnd(); 465 | return; 466 | }; 467 | 468 | var Task = module.exports.Task = function(args) { 469 | this.taskId = null; 470 | this.taskType = null; 471 | this.taskAttributes = null; 472 | this.taskCounters = null; 473 | this.operatorGraph = null; 474 | this.operatorList = null; 475 | this.done = null; 476 | this.started = null; 477 | if (args) { 478 | if (args.taskId !== undefined) { 479 | this.taskId = args.taskId; 480 | } 481 | if (args.taskType !== undefined) { 482 | this.taskType = args.taskType; 483 | } 484 | if (args.taskAttributes !== undefined) { 485 | this.taskAttributes = args.taskAttributes; 486 | } 487 | if (args.taskCounters !== undefined) { 488 | this.taskCounters = args.taskCounters; 489 | } 490 | if (args.operatorGraph !== undefined) { 491 | this.operatorGraph = args.operatorGraph; 492 | } 493 | if (args.operatorList !== undefined) { 494 | this.operatorList = args.operatorList; 495 | } 496 | if (args.done !== undefined) { 497 | this.done = args.done; 498 | } 499 | if (args.started !== undefined) { 500 | this.started = args.started; 501 | } 502 | } 503 | }; 504 | Task.prototype = {}; 505 | Task.prototype.read = function(input) { 506 | input.readStructBegin(); 507 | while (true) 508 | { 509 | var ret = input.readFieldBegin(); 510 | var fname = ret.fname; 511 | var ftype = ret.ftype; 512 | var fid = ret.fid; 513 | if (ftype == Thrift.Type.STOP) { 514 | break; 515 | } 516 | switch (fid) 517 | { 518 | case 1: 519 | if (ftype == Thrift.Type.STRING) { 520 | this.taskId = input.readString(); 521 | } else { 522 | input.skip(ftype); 523 | } 524 | break; 525 | case 2: 526 | if (ftype == Thrift.Type.I32) { 527 | this.taskType = input.readI32(); 528 | } else { 529 | input.skip(ftype); 530 | } 531 | break; 532 | case 3: 533 | if (ftype == Thrift.Type.MAP) { 534 | var _size44 = 0; 535 | var _rtmp348; 536 | this.taskAttributes = {}; 537 | var _ktype45 = 0; 538 | var _vtype46 = 0; 539 | _rtmp348 = input.readMapBegin(); 540 | _ktype45 = _rtmp348.ktype; 541 | _vtype46 = _rtmp348.vtype; 542 | _size44 = _rtmp348.size; 543 | for (var _i49 = 0; _i49 < _size44; ++_i49) 544 | { 545 | var key50 = null; 546 | var val51 = null; 547 | key50 = input.readString(); 548 | val51 = input.readString(); 549 | this.taskAttributes[key50] = val51; 550 | } 551 | input.readMapEnd(); 552 | } else { 553 | input.skip(ftype); 554 | } 555 | break; 556 | case 4: 557 | if (ftype == Thrift.Type.MAP) { 558 | var _size52 = 0; 559 | var _rtmp356; 560 | this.taskCounters = {}; 561 | var _ktype53 = 0; 562 | var _vtype54 = 0; 563 | _rtmp356 = input.readMapBegin(); 564 | _ktype53 = _rtmp356.ktype; 565 | _vtype54 = _rtmp356.vtype; 566 | _size52 = _rtmp356.size; 567 | for (var _i57 = 0; _i57 < _size52; ++_i57) 568 | { 569 | var key58 = null; 570 | var val59 = null; 571 | key58 = input.readString(); 572 | val59 = input.readI64(); 573 | this.taskCounters[key58] = val59; 574 | } 575 | input.readMapEnd(); 576 | } else { 577 | input.skip(ftype); 578 | } 579 | break; 580 | case 5: 581 | if (ftype == Thrift.Type.STRUCT) { 582 | this.operatorGraph = new ttypes.Graph(); 583 | this.operatorGraph.read(input); 584 | } else { 585 | input.skip(ftype); 586 | } 587 | break; 588 | case 6: 589 | if (ftype == Thrift.Type.LIST) { 590 | var _size60 = 0; 591 | var _rtmp364; 592 | this.operatorList = []; 593 | var _etype63 = 0; 594 | _rtmp364 = input.readListBegin(); 595 | _etype63 = _rtmp364.etype; 596 | _size60 = _rtmp364.size; 597 | for (var _i65 = 0; _i65 < _size60; 
++_i65) 598 | { 599 | var elem66 = null; 600 | elem66 = new ttypes.Operator(); 601 | elem66.read(input); 602 | this.operatorList.push(elem66); 603 | } 604 | input.readListEnd(); 605 | } else { 606 | input.skip(ftype); 607 | } 608 | break; 609 | case 7: 610 | if (ftype == Thrift.Type.BOOL) { 611 | this.done = input.readBool(); 612 | } else { 613 | input.skip(ftype); 614 | } 615 | break; 616 | case 8: 617 | if (ftype == Thrift.Type.BOOL) { 618 | this.started = input.readBool(); 619 | } else { 620 | input.skip(ftype); 621 | } 622 | break; 623 | default: 624 | input.skip(ftype); 625 | } 626 | input.readFieldEnd(); 627 | } 628 | input.readStructEnd(); 629 | return; 630 | }; 631 | 632 | Task.prototype.write = function(output) { 633 | output.writeStructBegin('Task'); 634 | if (this.taskId) { 635 | output.writeFieldBegin('taskId', Thrift.Type.STRING, 1); 636 | output.writeString(this.taskId); 637 | output.writeFieldEnd(); 638 | } 639 | if (this.taskType) { 640 | output.writeFieldBegin('taskType', Thrift.Type.I32, 2); 641 | output.writeI32(this.taskType); 642 | output.writeFieldEnd(); 643 | } 644 | if (this.taskAttributes) { 645 | output.writeFieldBegin('taskAttributes', Thrift.Type.MAP, 3); 646 | output.writeMapBegin(Thrift.Type.STRING, Thrift.Type.STRING, Thrift.objectLength(this.taskAttributes)); 647 | for (var kiter67 in this.taskAttributes) 648 | { 649 | if (this.taskAttributes.hasOwnProperty(kiter67)) 650 | { 651 | var viter68 = this.taskAttributes[kiter67]; 652 | output.writeString(kiter67); 653 | output.writeString(viter68); 654 | } 655 | } 656 | output.writeMapEnd(); 657 | output.writeFieldEnd(); 658 | } 659 | if (this.taskCounters) { 660 | output.writeFieldBegin('taskCounters', Thrift.Type.MAP, 4); 661 | output.writeMapBegin(Thrift.Type.STRING, Thrift.Type.I64, Thrift.objectLength(this.taskCounters)); 662 | for (var kiter69 in this.taskCounters) 663 | { 664 | if (this.taskCounters.hasOwnProperty(kiter69)) 665 | { 666 | var viter70 = this.taskCounters[kiter69]; 667 | output.writeString(kiter69); 668 | output.writeI64(viter70); 669 | } 670 | } 671 | output.writeMapEnd(); 672 | output.writeFieldEnd(); 673 | } 674 | if (this.operatorGraph) { 675 | output.writeFieldBegin('operatorGraph', Thrift.Type.STRUCT, 5); 676 | this.operatorGraph.write(output); 677 | output.writeFieldEnd(); 678 | } 679 | if (this.operatorList) { 680 | output.writeFieldBegin('operatorList', Thrift.Type.LIST, 6); 681 | output.writeListBegin(Thrift.Type.STRUCT, this.operatorList.length); 682 | for (var iter71 in this.operatorList) 683 | { 684 | if (this.operatorList.hasOwnProperty(iter71)) 685 | { 686 | iter71 = this.operatorList[iter71]; 687 | iter71.write(output); 688 | } 689 | } 690 | output.writeListEnd(); 691 | output.writeFieldEnd(); 692 | } 693 | if (this.done) { 694 | output.writeFieldBegin('done', Thrift.Type.BOOL, 7); 695 | output.writeBool(this.done); 696 | output.writeFieldEnd(); 697 | } 698 | if (this.started) { 699 | output.writeFieldBegin('started', Thrift.Type.BOOL, 8); 700 | output.writeBool(this.started); 701 | output.writeFieldEnd(); 702 | } 703 | output.writeFieldStop(); 704 | output.writeStructEnd(); 705 | return; 706 | }; 707 | 708 | var Stage = module.exports.Stage = function(args) { 709 | this.stageId = null; 710 | this.stageType = null; 711 | this.stageAttributes = null; 712 | this.stageCounters = null; 713 | this.taskList = null; 714 | this.done = null; 715 | this.started = null; 716 | if (args) { 717 | if (args.stageId !== undefined) { 718 | this.stageId = args.stageId; 719 | } 720 | if 
(args.stageType !== undefined) { 721 | this.stageType = args.stageType; 722 | } 723 | if (args.stageAttributes !== undefined) { 724 | this.stageAttributes = args.stageAttributes; 725 | } 726 | if (args.stageCounters !== undefined) { 727 | this.stageCounters = args.stageCounters; 728 | } 729 | if (args.taskList !== undefined) { 730 | this.taskList = args.taskList; 731 | } 732 | if (args.done !== undefined) { 733 | this.done = args.done; 734 | } 735 | if (args.started !== undefined) { 736 | this.started = args.started; 737 | } 738 | } 739 | }; 740 | Stage.prototype = {}; 741 | Stage.prototype.read = function(input) { 742 | input.readStructBegin(); 743 | while (true) 744 | { 745 | var ret = input.readFieldBegin(); 746 | var fname = ret.fname; 747 | var ftype = ret.ftype; 748 | var fid = ret.fid; 749 | if (ftype == Thrift.Type.STOP) { 750 | break; 751 | } 752 | switch (fid) 753 | { 754 | case 1: 755 | if (ftype == Thrift.Type.STRING) { 756 | this.stageId = input.readString(); 757 | } else { 758 | input.skip(ftype); 759 | } 760 | break; 761 | case 2: 762 | if (ftype == Thrift.Type.I32) { 763 | this.stageType = input.readI32(); 764 | } else { 765 | input.skip(ftype); 766 | } 767 | break; 768 | case 3: 769 | if (ftype == Thrift.Type.MAP) { 770 | var _size72 = 0; 771 | var _rtmp376; 772 | this.stageAttributes = {}; 773 | var _ktype73 = 0; 774 | var _vtype74 = 0; 775 | _rtmp376 = input.readMapBegin(); 776 | _ktype73 = _rtmp376.ktype; 777 | _vtype74 = _rtmp376.vtype; 778 | _size72 = _rtmp376.size; 779 | for (var _i77 = 0; _i77 < _size72; ++_i77) 780 | { 781 | var key78 = null; 782 | var val79 = null; 783 | key78 = input.readString(); 784 | val79 = input.readString(); 785 | this.stageAttributes[key78] = val79; 786 | } 787 | input.readMapEnd(); 788 | } else { 789 | input.skip(ftype); 790 | } 791 | break; 792 | case 4: 793 | if (ftype == Thrift.Type.MAP) { 794 | var _size80 = 0; 795 | var _rtmp384; 796 | this.stageCounters = {}; 797 | var _ktype81 = 0; 798 | var _vtype82 = 0; 799 | _rtmp384 = input.readMapBegin(); 800 | _ktype81 = _rtmp384.ktype; 801 | _vtype82 = _rtmp384.vtype; 802 | _size80 = _rtmp384.size; 803 | for (var _i85 = 0; _i85 < _size80; ++_i85) 804 | { 805 | var key86 = null; 806 | var val87 = null; 807 | key86 = input.readString(); 808 | val87 = input.readI64(); 809 | this.stageCounters[key86] = val87; 810 | } 811 | input.readMapEnd(); 812 | } else { 813 | input.skip(ftype); 814 | } 815 | break; 816 | case 5: 817 | if (ftype == Thrift.Type.LIST) { 818 | var _size88 = 0; 819 | var _rtmp392; 820 | this.taskList = []; 821 | var _etype91 = 0; 822 | _rtmp392 = input.readListBegin(); 823 | _etype91 = _rtmp392.etype; 824 | _size88 = _rtmp392.size; 825 | for (var _i93 = 0; _i93 < _size88; ++_i93) 826 | { 827 | var elem94 = null; 828 | elem94 = new ttypes.Task(); 829 | elem94.read(input); 830 | this.taskList.push(elem94); 831 | } 832 | input.readListEnd(); 833 | } else { 834 | input.skip(ftype); 835 | } 836 | break; 837 | case 6: 838 | if (ftype == Thrift.Type.BOOL) { 839 | this.done = input.readBool(); 840 | } else { 841 | input.skip(ftype); 842 | } 843 | break; 844 | case 7: 845 | if (ftype == Thrift.Type.BOOL) { 846 | this.started = input.readBool(); 847 | } else { 848 | input.skip(ftype); 849 | } 850 | break; 851 | default: 852 | input.skip(ftype); 853 | } 854 | input.readFieldEnd(); 855 | } 856 | input.readStructEnd(); 857 | return; 858 | }; 859 | 860 | Stage.prototype.write = function(output) { 861 | output.writeStructBegin('Stage'); 862 | if (this.stageId) { 863 | 
output.writeFieldBegin('stageId', Thrift.Type.STRING, 1); 864 | output.writeString(this.stageId); 865 | output.writeFieldEnd(); 866 | } 867 | if (this.stageType) { 868 | output.writeFieldBegin('stageType', Thrift.Type.I32, 2); 869 | output.writeI32(this.stageType); 870 | output.writeFieldEnd(); 871 | } 872 | if (this.stageAttributes) { 873 | output.writeFieldBegin('stageAttributes', Thrift.Type.MAP, 3); 874 | output.writeMapBegin(Thrift.Type.STRING, Thrift.Type.STRING, Thrift.objectLength(this.stageAttributes)); 875 | for (var kiter95 in this.stageAttributes) 876 | { 877 | if (this.stageAttributes.hasOwnProperty(kiter95)) 878 | { 879 | var viter96 = this.stageAttributes[kiter95]; 880 | output.writeString(kiter95); 881 | output.writeString(viter96); 882 | } 883 | } 884 | output.writeMapEnd(); 885 | output.writeFieldEnd(); 886 | } 887 | if (this.stageCounters) { 888 | output.writeFieldBegin('stageCounters', Thrift.Type.MAP, 4); 889 | output.writeMapBegin(Thrift.Type.STRING, Thrift.Type.I64, Thrift.objectLength(this.stageCounters)); 890 | for (var kiter97 in this.stageCounters) 891 | { 892 | if (this.stageCounters.hasOwnProperty(kiter97)) 893 | { 894 | var viter98 = this.stageCounters[kiter97]; 895 | output.writeString(kiter97); 896 | output.writeI64(viter98); 897 | } 898 | } 899 | output.writeMapEnd(); 900 | output.writeFieldEnd(); 901 | } 902 | if (this.taskList) { 903 | output.writeFieldBegin('taskList', Thrift.Type.LIST, 5); 904 | output.writeListBegin(Thrift.Type.STRUCT, this.taskList.length); 905 | for (var iter99 in this.taskList) 906 | { 907 | if (this.taskList.hasOwnProperty(iter99)) 908 | { 909 | iter99 = this.taskList[iter99]; 910 | iter99.write(output); 911 | } 912 | } 913 | output.writeListEnd(); 914 | output.writeFieldEnd(); 915 | } 916 | if (this.done) { 917 | output.writeFieldBegin('done', Thrift.Type.BOOL, 6); 918 | output.writeBool(this.done); 919 | output.writeFieldEnd(); 920 | } 921 | if (this.started) { 922 | output.writeFieldBegin('started', Thrift.Type.BOOL, 7); 923 | output.writeBool(this.started); 924 | output.writeFieldEnd(); 925 | } 926 | output.writeFieldStop(); 927 | output.writeStructEnd(); 928 | return; 929 | }; 930 | 931 | var Query = module.exports.Query = function(args) { 932 | this.queryId = null; 933 | this.queryType = null; 934 | this.queryAttributes = null; 935 | this.queryCounters = null; 936 | this.stageGraph = null; 937 | this.stageList = null; 938 | this.done = null; 939 | this.started = null; 940 | if (args) { 941 | if (args.queryId !== undefined) { 942 | this.queryId = args.queryId; 943 | } 944 | if (args.queryType !== undefined) { 945 | this.queryType = args.queryType; 946 | } 947 | if (args.queryAttributes !== undefined) { 948 | this.queryAttributes = args.queryAttributes; 949 | } 950 | if (args.queryCounters !== undefined) { 951 | this.queryCounters = args.queryCounters; 952 | } 953 | if (args.stageGraph !== undefined) { 954 | this.stageGraph = args.stageGraph; 955 | } 956 | if (args.stageList !== undefined) { 957 | this.stageList = args.stageList; 958 | } 959 | if (args.done !== undefined) { 960 | this.done = args.done; 961 | } 962 | if (args.started !== undefined) { 963 | this.started = args.started; 964 | } 965 | } 966 | }; 967 | Query.prototype = {}; 968 | Query.prototype.read = function(input) { 969 | input.readStructBegin(); 970 | while (true) 971 | { 972 | var ret = input.readFieldBegin(); 973 | var fname = ret.fname; 974 | var ftype = ret.ftype; 975 | var fid = ret.fid; 976 | if (ftype == Thrift.Type.STOP) { 977 | break; 978 | } 979 | 
switch (fid) 980 | { 981 | case 1: 982 | if (ftype == Thrift.Type.STRING) { 983 | this.queryId = input.readString(); 984 | } else { 985 | input.skip(ftype); 986 | } 987 | break; 988 | case 2: 989 | if (ftype == Thrift.Type.STRING) { 990 | this.queryType = input.readString(); 991 | } else { 992 | input.skip(ftype); 993 | } 994 | break; 995 | case 3: 996 | if (ftype == Thrift.Type.MAP) { 997 | var _size100 = 0; 998 | var _rtmp3104; 999 | this.queryAttributes = {}; 1000 | var _ktype101 = 0; 1001 | var _vtype102 = 0; 1002 | _rtmp3104 = input.readMapBegin(); 1003 | _ktype101 = _rtmp3104.ktype; 1004 | _vtype102 = _rtmp3104.vtype; 1005 | _size100 = _rtmp3104.size; 1006 | for (var _i105 = 0; _i105 < _size100; ++_i105) 1007 | { 1008 | var key106 = null; 1009 | var val107 = null; 1010 | key106 = input.readString(); 1011 | val107 = input.readString(); 1012 | this.queryAttributes[key106] = val107; 1013 | } 1014 | input.readMapEnd(); 1015 | } else { 1016 | input.skip(ftype); 1017 | } 1018 | break; 1019 | case 4: 1020 | if (ftype == Thrift.Type.MAP) { 1021 | var _size108 = 0; 1022 | var _rtmp3112; 1023 | this.queryCounters = {}; 1024 | var _ktype109 = 0; 1025 | var _vtype110 = 0; 1026 | _rtmp3112 = input.readMapBegin(); 1027 | _ktype109 = _rtmp3112.ktype; 1028 | _vtype110 = _rtmp3112.vtype; 1029 | _size108 = _rtmp3112.size; 1030 | for (var _i113 = 0; _i113 < _size108; ++_i113) 1031 | { 1032 | var key114 = null; 1033 | var val115 = null; 1034 | key114 = input.readString(); 1035 | val115 = input.readI64(); 1036 | this.queryCounters[key114] = val115; 1037 | } 1038 | input.readMapEnd(); 1039 | } else { 1040 | input.skip(ftype); 1041 | } 1042 | break; 1043 | case 5: 1044 | if (ftype == Thrift.Type.STRUCT) { 1045 | this.stageGraph = new ttypes.Graph(); 1046 | this.stageGraph.read(input); 1047 | } else { 1048 | input.skip(ftype); 1049 | } 1050 | break; 1051 | case 6: 1052 | if (ftype == Thrift.Type.LIST) { 1053 | var _size116 = 0; 1054 | var _rtmp3120; 1055 | this.stageList = []; 1056 | var _etype119 = 0; 1057 | _rtmp3120 = input.readListBegin(); 1058 | _etype119 = _rtmp3120.etype; 1059 | _size116 = _rtmp3120.size; 1060 | for (var _i121 = 0; _i121 < _size116; ++_i121) 1061 | { 1062 | var elem122 = null; 1063 | elem122 = new ttypes.Stage(); 1064 | elem122.read(input); 1065 | this.stageList.push(elem122); 1066 | } 1067 | input.readListEnd(); 1068 | } else { 1069 | input.skip(ftype); 1070 | } 1071 | break; 1072 | case 7: 1073 | if (ftype == Thrift.Type.BOOL) { 1074 | this.done = input.readBool(); 1075 | } else { 1076 | input.skip(ftype); 1077 | } 1078 | break; 1079 | case 8: 1080 | if (ftype == Thrift.Type.BOOL) { 1081 | this.started = input.readBool(); 1082 | } else { 1083 | input.skip(ftype); 1084 | } 1085 | break; 1086 | default: 1087 | input.skip(ftype); 1088 | } 1089 | input.readFieldEnd(); 1090 | } 1091 | input.readStructEnd(); 1092 | return; 1093 | }; 1094 | 1095 | Query.prototype.write = function(output) { 1096 | output.writeStructBegin('Query'); 1097 | if (this.queryId) { 1098 | output.writeFieldBegin('queryId', Thrift.Type.STRING, 1); 1099 | output.writeString(this.queryId); 1100 | output.writeFieldEnd(); 1101 | } 1102 | if (this.queryType) { 1103 | output.writeFieldBegin('queryType', Thrift.Type.STRING, 2); 1104 | output.writeString(this.queryType); 1105 | output.writeFieldEnd(); 1106 | } 1107 | if (this.queryAttributes) { 1108 | output.writeFieldBegin('queryAttributes', Thrift.Type.MAP, 3); 1109 | output.writeMapBegin(Thrift.Type.STRING, Thrift.Type.STRING, Thrift.objectLength(this.queryAttributes)); 
1110 | for (var kiter123 in this.queryAttributes) 1111 | { 1112 | if (this.queryAttributes.hasOwnProperty(kiter123)) 1113 | { 1114 | var viter124 = this.queryAttributes[kiter123]; 1115 | output.writeString(kiter123); 1116 | output.writeString(viter124); 1117 | } 1118 | } 1119 | output.writeMapEnd(); 1120 | output.writeFieldEnd(); 1121 | } 1122 | if (this.queryCounters) { 1123 | output.writeFieldBegin('queryCounters', Thrift.Type.MAP, 4); 1124 | output.writeMapBegin(Thrift.Type.STRING, Thrift.Type.I64, Thrift.objectLength(this.queryCounters)); 1125 | for (var kiter125 in this.queryCounters) 1126 | { 1127 | if (this.queryCounters.hasOwnProperty(kiter125)) 1128 | { 1129 | var viter126 = this.queryCounters[kiter125]; 1130 | output.writeString(kiter125); 1131 | output.writeI64(viter126); 1132 | } 1133 | } 1134 | output.writeMapEnd(); 1135 | output.writeFieldEnd(); 1136 | } 1137 | if (this.stageGraph) { 1138 | output.writeFieldBegin('stageGraph', Thrift.Type.STRUCT, 5); 1139 | this.stageGraph.write(output); 1140 | output.writeFieldEnd(); 1141 | } 1142 | if (this.stageList) { 1143 | output.writeFieldBegin('stageList', Thrift.Type.LIST, 6); 1144 | output.writeListBegin(Thrift.Type.STRUCT, this.stageList.length); 1145 | for (var iter127 in this.stageList) 1146 | { 1147 | if (this.stageList.hasOwnProperty(iter127)) 1148 | { 1149 | iter127 = this.stageList[iter127]; 1150 | iter127.write(output); 1151 | } 1152 | } 1153 | output.writeListEnd(); 1154 | output.writeFieldEnd(); 1155 | } 1156 | if (this.done) { 1157 | output.writeFieldBegin('done', Thrift.Type.BOOL, 7); 1158 | output.writeBool(this.done); 1159 | output.writeFieldEnd(); 1160 | } 1161 | if (this.started) { 1162 | output.writeFieldBegin('started', Thrift.Type.BOOL, 8); 1163 | output.writeBool(this.started); 1164 | output.writeFieldEnd(); 1165 | } 1166 | output.writeFieldStop(); 1167 | output.writeStructEnd(); 1168 | return; 1169 | }; 1170 | 1171 | var QueryPlan = module.exports.QueryPlan = function(args) { 1172 | this.queries = null; 1173 | this.done = null; 1174 | this.started = null; 1175 | if (args) { 1176 | if (args.queries !== undefined) { 1177 | this.queries = args.queries; 1178 | } 1179 | if (args.done !== undefined) { 1180 | this.done = args.done; 1181 | } 1182 | if (args.started !== undefined) { 1183 | this.started = args.started; 1184 | } 1185 | } 1186 | }; 1187 | QueryPlan.prototype = {}; 1188 | QueryPlan.prototype.read = function(input) { 1189 | input.readStructBegin(); 1190 | while (true) 1191 | { 1192 | var ret = input.readFieldBegin(); 1193 | var fname = ret.fname; 1194 | var ftype = ret.ftype; 1195 | var fid = ret.fid; 1196 | if (ftype == Thrift.Type.STOP) { 1197 | break; 1198 | } 1199 | switch (fid) 1200 | { 1201 | case 1: 1202 | if (ftype == Thrift.Type.LIST) { 1203 | var _size128 = 0; 1204 | var _rtmp3132; 1205 | this.queries = []; 1206 | var _etype131 = 0; 1207 | _rtmp3132 = input.readListBegin(); 1208 | _etype131 = _rtmp3132.etype; 1209 | _size128 = _rtmp3132.size; 1210 | for (var _i133 = 0; _i133 < _size128; ++_i133) 1211 | { 1212 | var elem134 = null; 1213 | elem134 = new ttypes.Query(); 1214 | elem134.read(input); 1215 | this.queries.push(elem134); 1216 | } 1217 | input.readListEnd(); 1218 | } else { 1219 | input.skip(ftype); 1220 | } 1221 | break; 1222 | case 2: 1223 | if (ftype == Thrift.Type.BOOL) { 1224 | this.done = input.readBool(); 1225 | } else { 1226 | input.skip(ftype); 1227 | } 1228 | break; 1229 | case 3: 1230 | if (ftype == Thrift.Type.BOOL) { 1231 | this.started = input.readBool(); 1232 | } else { 1233 | 
input.skip(ftype); 1234 | } 1235 | break; 1236 | default: 1237 | input.skip(ftype); 1238 | } 1239 | input.readFieldEnd(); 1240 | } 1241 | input.readStructEnd(); 1242 | return; 1243 | }; 1244 | 1245 | QueryPlan.prototype.write = function(output) { 1246 | output.writeStructBegin('QueryPlan'); 1247 | if (this.queries) { 1248 | output.writeFieldBegin('queries', Thrift.Type.LIST, 1); 1249 | output.writeListBegin(Thrift.Type.STRUCT, this.queries.length); 1250 | for (var iter135 in this.queries) 1251 | { 1252 | if (this.queries.hasOwnProperty(iter135)) 1253 | { 1254 | iter135 = this.queries[iter135]; 1255 | iter135.write(output); 1256 | } 1257 | } 1258 | output.writeListEnd(); 1259 | output.writeFieldEnd(); 1260 | } 1261 | if (this.done) { 1262 | output.writeFieldBegin('done', Thrift.Type.BOOL, 2); 1263 | output.writeBool(this.done); 1264 | output.writeFieldEnd(); 1265 | } 1266 | if (this.started) { 1267 | output.writeFieldBegin('started', Thrift.Type.BOOL, 3); 1268 | output.writeBool(this.started); 1269 | output.writeFieldEnd(); 1270 | } 1271 | output.writeFieldStop(); 1272 | output.writeStructEnd(); 1273 | return; 1274 | }; 1275 | 1276 | --------------------------------------------------------------------------------
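Note: the structs above (Graph, Operator, Task, Stage, Query, QueryPlan) are plain constructor functions emitted by the Thrift compiler and are normally filled in by the Hive service, not built by hand. The following is a minimal, hypothetical sketch of constructing them directly; the require path is an assumption based on this repository's layout, and the field names simply mirror the struct definitions shown above.

    // Sketch only, not part of the generated file.
    var ttypes = require('./lib/0.7.1-cdh3u1/queryplan_types');

    // Fields omitted from the args object stay null and are skipped by write().
    var query = new ttypes.Query({
      queryId: 'query_1',
      queryType: 'QUERY',
      queryAttributes: { queryString: 'SHOW TABLES' },
      done: false,
      started: true
    });

    var plan = new ttypes.QueryPlan({
      queries: [query],
      done: false,
      started: true
    });

    // plan.write(protocol) and plan.read(protocol) expect a Thrift protocol
    // instance (for example a binary protocol from the 'thrift' package);
    // in normal use these structs arrive already populated from the server.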